code stringlengths 75 104k | code_sememe stringlengths 47 309k | token_type stringlengths 215 214k | code_dependency stringlengths 75 155k |
|---|---|---|---|
def _is_empty(self):
"""
True if this cell contains only a single empty ``<w:p>`` element.
"""
block_items = list(self.iter_block_items())
if len(block_items) > 1:
return False
p = block_items[0] # cell must include at least one <w:p> element
if len(p.r_lst) == 0:
return True
return False | def function[_is_empty, parameter[self]]:
constant[
True if this cell contains only a single empty ``<w:p>`` element.
]
variable[block_items] assign[=] call[name[list], parameter[call[name[self].iter_block_items, parameter[]]]]
if compare[call[name[len], parameter[name[block_items]]] greater[>] constant[1]] begin[:]
return[constant[False]]
variable[p] assign[=] call[name[block_items]][constant[0]]
if compare[call[name[len], parameter[name[p].r_lst]] equal[==] constant[0]] begin[:]
return[constant[True]]
return[constant[False]] | keyword[def] identifier[_is_empty] ( identifier[self] ):
literal[string]
identifier[block_items] = identifier[list] ( identifier[self] . identifier[iter_block_items] ())
keyword[if] identifier[len] ( identifier[block_items] )> literal[int] :
keyword[return] keyword[False]
identifier[p] = identifier[block_items] [ literal[int] ]
keyword[if] identifier[len] ( identifier[p] . identifier[r_lst] )== literal[int] :
keyword[return] keyword[True]
keyword[return] keyword[False] | def _is_empty(self):
"""
True if this cell contains only a single empty ``<w:p>`` element.
"""
block_items = list(self.iter_block_items())
if len(block_items) > 1:
return False # depends on [control=['if'], data=[]]
p = block_items[0] # cell must include at least one <w:p> element
if len(p.r_lst) == 0:
return True # depends on [control=['if'], data=[]]
return False |
def queryMore(self, queryLocator):
    """
    Retrieves the next batch of objects from a query.
    """
    self._setHeaders('queryMore')
    soap_service = self._sforce.service
    return soap_service.queryMore(queryLocator)
constant[
Retrieves the next batch of objects from a query.
]
call[name[self]._setHeaders, parameter[constant[queryMore]]]
return[call[name[self]._sforce.service.queryMore, parameter[name[queryLocator]]]] | keyword[def] identifier[queryMore] ( identifier[self] , identifier[queryLocator] ):
literal[string]
identifier[self] . identifier[_setHeaders] ( literal[string] )
keyword[return] identifier[self] . identifier[_sforce] . identifier[service] . identifier[queryMore] ( identifier[queryLocator] ) | def queryMore(self, queryLocator):
"""
Retrieves the next batch of objects from a query.
"""
self._setHeaders('queryMore')
return self._sforce.service.queryMore(queryLocator) |
def get_finders():
    """
    Yield instantiated media-fixture finders.

    The finder dotted paths come from
    ``settings.MEDIA_FIXTURES_FILES_FINDERS`` when that setting exists,
    e.g.::

        MEDIA_FIXTURES_FILES_FINDERS = (
            'django_media_fixtures.finders.FileSystemFinder',
            'django_media_fixtures.finders.AppDirectoriesFinder',  # default
        )

    otherwise only the app-directories finder is used.
    """
    default_finders = ('django_media_fixtures.finders.AppDirectoriesFinder',)
    finder_paths = getattr(settings, 'MEDIA_FIXTURES_FILES_FINDERS',
                           default_finders)
    for finder_path in finder_paths:
        yield get_finder(finder_path)
constant[
Set the media fixtures finders on settings.py.
Example:
MEDIA_FIXTURES_FILES_FINDERS = (
'django_media_fixtures.finders.FileSystemFinder',
'django_media_fixtures.finders.AppDirectoriesFinder', # default
)
]
if call[name[hasattr], parameter[name[settings], constant[MEDIA_FIXTURES_FILES_FINDERS]]] begin[:]
variable[finders] assign[=] name[settings].MEDIA_FIXTURES_FILES_FINDERS
for taget[name[finder_path]] in starred[name[finders]] begin[:]
<ast.Yield object at 0x7da1aff1f790> | keyword[def] identifier[get_finders] ():
literal[string]
keyword[if] identifier[hasattr] ( identifier[settings] , literal[string] ):
identifier[finders] = identifier[settings] . identifier[MEDIA_FIXTURES_FILES_FINDERS]
keyword[else] :
identifier[finders] =(
literal[string] ,
)
keyword[for] identifier[finder_path] keyword[in] identifier[finders] :
keyword[yield] identifier[get_finder] ( identifier[finder_path] ) | def get_finders():
"""
Set the media fixtures finders on settings.py.
Example:
MEDIA_FIXTURES_FILES_FINDERS = (
'django_media_fixtures.finders.FileSystemFinder',
'django_media_fixtures.finders.AppDirectoriesFinder', # default
)
"""
if hasattr(settings, 'MEDIA_FIXTURES_FILES_FINDERS'):
finders = settings.MEDIA_FIXTURES_FILES_FINDERS # depends on [control=['if'], data=[]]
else:
finders = ('django_media_fixtures.finders.AppDirectoriesFinder',)
for finder_path in finders:
yield get_finder(finder_path) # depends on [control=['for'], data=['finder_path']] |
def _get_index_of_monomial(self, element, enablesubstitution=True,
                           daggered=False):
    """Returns the index of a monomial.

    Decomposes ``element`` into a list of ``(index, coefficient)`` pairs
    referring to entries of ``self.monomial_index``.

    :param element: the (possibly scaled) monomial or sum of monomials
                    to look up.
    :param enablesubstitution: when True, apply ``self.substitutions``
                               to the element before the lookup.
    :param daggered: internal flag -- True when this call is already the
                     retry with the adjoint, so a second failure raises
                     instead of recursing forever.
    :raises RuntimeError: when a monomial is found neither directly nor
                          via its adjoint.
    """
    result = []
    # strip any scalar prefactor; coeff1 scales every pair returned
    processed_element, coeff1 = separate_scalar_factor(element)
    if processed_element in self.moment_substitutions:
        # the whole element is substituted; rescale the substitute's pairs
        r = self._get_index_of_monomial(self.moment_substitutions[processed_element], enablesubstitution)
        return [(k, coeff*coeff1) for k, coeff in r]
    if enablesubstitution:
        processed_element = \
            apply_substitutions(processed_element, self.substitutions,
                                self.pure_substitution_rules)
    # Given the monomial, we need its mapping L_y(w) to push it into
    # a corresponding constraint matrix
    if is_number_type(processed_element):
        # a pure scalar maps to the constant entry (index 0)
        return [(0, coeff1)]
    elif processed_element.is_Add:
        # handle each summand of a sum independently
        monomials = processed_element.args
    else:
        monomials = [processed_element]
    for monomial in monomials:
        # each summand may carry its own scalar factor
        monomial, coeff2 = separate_scalar_factor(monomial)
        coeff = coeff1*coeff2
        if is_number_type(monomial):
            result.append((0, coeff))
            continue
        k = -1
        if monomial != 0:
            # canonicalize the sign so the lookup key is always positive
            if monomial.as_coeff_Mul()[0] < 0:
                monomial = -monomial
                coeff = -1.0 * coeff
        try:
            # NOTE(review): this branch records index k == -1 for
            # moment-substituted monomials -- presumably a sentinel the
            # caller understands; confirm before changing
            new_element = self.moment_substitutions[monomial]
            r = self._get_index_of_monomial(self.moment_substitutions[new_element], enablesubstitution)
            result += [(k, coeff*coeff3) for k, coeff3 in r]
        except KeyError:
            try:
                # direct hit in the monomial index
                k = self.monomial_index[monomial]
                result.append((k, coeff))
            except KeyError:
                if not daggered:
                    # retry once with the adjoint before giving up
                    dag_result = self._get_index_of_monomial(monomial.adjoint(),
                                                             daggered=True)
                    result += [(k, coeff0*coeff) for k, coeff0 in dag_result]
                else:
                    raise RuntimeError("The requested monomial " +
                                       str(monomial) + " could not be found.")
return result | def function[_get_index_of_monomial, parameter[self, element, enablesubstitution, daggered]]:
constant[Returns the index of a monomial.
]
variable[result] assign[=] list[[]]
<ast.Tuple object at 0x7da1b0fadb70> assign[=] call[name[separate_scalar_factor], parameter[name[element]]]
if compare[name[processed_element] in name[self].moment_substitutions] begin[:]
variable[r] assign[=] call[name[self]._get_index_of_monomial, parameter[call[name[self].moment_substitutions][name[processed_element]], name[enablesubstitution]]]
return[<ast.ListComp object at 0x7da1b0fae410>]
if name[enablesubstitution] begin[:]
variable[processed_element] assign[=] call[name[apply_substitutions], parameter[name[processed_element], name[self].substitutions, name[self].pure_substitution_rules]]
if call[name[is_number_type], parameter[name[processed_element]]] begin[:]
return[list[[<ast.Tuple object at 0x7da1b0fad6c0>]]]
for taget[name[monomial]] in starred[name[monomials]] begin[:]
<ast.Tuple object at 0x7da1b0fad5a0> assign[=] call[name[separate_scalar_factor], parameter[name[monomial]]]
variable[coeff] assign[=] binary_operation[name[coeff1] * name[coeff2]]
if call[name[is_number_type], parameter[name[monomial]]] begin[:]
call[name[result].append, parameter[tuple[[<ast.Constant object at 0x7da1b0fadde0>, <ast.Name object at 0x7da1b0f13940>]]]]
continue
variable[k] assign[=] <ast.UnaryOp object at 0x7da1b0f11f90>
if compare[name[monomial] not_equal[!=] constant[0]] begin[:]
if compare[call[call[name[monomial].as_coeff_Mul, parameter[]]][constant[0]] less[<] constant[0]] begin[:]
variable[monomial] assign[=] <ast.UnaryOp object at 0x7da1b0f118d0>
variable[coeff] assign[=] binary_operation[<ast.UnaryOp object at 0x7da1b0f10dc0> * name[coeff]]
<ast.Try object at 0x7da1b0f18d30>
return[name[result]] | keyword[def] identifier[_get_index_of_monomial] ( identifier[self] , identifier[element] , identifier[enablesubstitution] = keyword[True] ,
identifier[daggered] = keyword[False] ):
literal[string]
identifier[result] =[]
identifier[processed_element] , identifier[coeff1] = identifier[separate_scalar_factor] ( identifier[element] )
keyword[if] identifier[processed_element] keyword[in] identifier[self] . identifier[moment_substitutions] :
identifier[r] = identifier[self] . identifier[_get_index_of_monomial] ( identifier[self] . identifier[moment_substitutions] [ identifier[processed_element] ], identifier[enablesubstitution] )
keyword[return] [( identifier[k] , identifier[coeff] * identifier[coeff1] ) keyword[for] identifier[k] , identifier[coeff] keyword[in] identifier[r] ]
keyword[if] identifier[enablesubstitution] :
identifier[processed_element] = identifier[apply_substitutions] ( identifier[processed_element] , identifier[self] . identifier[substitutions] ,
identifier[self] . identifier[pure_substitution_rules] )
keyword[if] identifier[is_number_type] ( identifier[processed_element] ):
keyword[return] [( literal[int] , identifier[coeff1] )]
keyword[elif] identifier[processed_element] . identifier[is_Add] :
identifier[monomials] = identifier[processed_element] . identifier[args]
keyword[else] :
identifier[monomials] =[ identifier[processed_element] ]
keyword[for] identifier[monomial] keyword[in] identifier[monomials] :
identifier[monomial] , identifier[coeff2] = identifier[separate_scalar_factor] ( identifier[monomial] )
identifier[coeff] = identifier[coeff1] * identifier[coeff2]
keyword[if] identifier[is_number_type] ( identifier[monomial] ):
identifier[result] . identifier[append] (( literal[int] , identifier[coeff] ))
keyword[continue]
identifier[k] =- literal[int]
keyword[if] identifier[monomial] != literal[int] :
keyword[if] identifier[monomial] . identifier[as_coeff_Mul] ()[ literal[int] ]< literal[int] :
identifier[monomial] =- identifier[monomial]
identifier[coeff] =- literal[int] * identifier[coeff]
keyword[try] :
identifier[new_element] = identifier[self] . identifier[moment_substitutions] [ identifier[monomial] ]
identifier[r] = identifier[self] . identifier[_get_index_of_monomial] ( identifier[self] . identifier[moment_substitutions] [ identifier[new_element] ], identifier[enablesubstitution] )
identifier[result] +=[( identifier[k] , identifier[coeff] * identifier[coeff3] ) keyword[for] identifier[k] , identifier[coeff3] keyword[in] identifier[r] ]
keyword[except] identifier[KeyError] :
keyword[try] :
identifier[k] = identifier[self] . identifier[monomial_index] [ identifier[monomial] ]
identifier[result] . identifier[append] (( identifier[k] , identifier[coeff] ))
keyword[except] identifier[KeyError] :
keyword[if] keyword[not] identifier[daggered] :
identifier[dag_result] = identifier[self] . identifier[_get_index_of_monomial] ( identifier[monomial] . identifier[adjoint] (),
identifier[daggered] = keyword[True] )
identifier[result] +=[( identifier[k] , identifier[coeff0] * identifier[coeff] ) keyword[for] identifier[k] , identifier[coeff0] keyword[in] identifier[dag_result] ]
keyword[else] :
keyword[raise] identifier[RuntimeError] ( literal[string] +
identifier[str] ( identifier[monomial] )+ literal[string] )
keyword[return] identifier[result] | def _get_index_of_monomial(self, element, enablesubstitution=True, daggered=False):
"""Returns the index of a monomial.
"""
result = []
(processed_element, coeff1) = separate_scalar_factor(element)
if processed_element in self.moment_substitutions:
r = self._get_index_of_monomial(self.moment_substitutions[processed_element], enablesubstitution)
return [(k, coeff * coeff1) for (k, coeff) in r] # depends on [control=['if'], data=['processed_element']]
if enablesubstitution:
processed_element = apply_substitutions(processed_element, self.substitutions, self.pure_substitution_rules) # depends on [control=['if'], data=[]]
# Given the monomial, we need its mapping L_y(w) to push it into
# a corresponding constraint matrix
if is_number_type(processed_element):
return [(0, coeff1)] # depends on [control=['if'], data=[]]
elif processed_element.is_Add:
monomials = processed_element.args # depends on [control=['if'], data=[]]
else:
monomials = [processed_element]
for monomial in monomials:
(monomial, coeff2) = separate_scalar_factor(monomial)
coeff = coeff1 * coeff2
if is_number_type(monomial):
result.append((0, coeff))
continue # depends on [control=['if'], data=[]]
k = -1
if monomial != 0:
if monomial.as_coeff_Mul()[0] < 0:
monomial = -monomial
coeff = -1.0 * coeff # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['monomial']]
try:
new_element = self.moment_substitutions[monomial]
r = self._get_index_of_monomial(self.moment_substitutions[new_element], enablesubstitution)
result += [(k, coeff * coeff3) for (k, coeff3) in r] # depends on [control=['try'], data=[]]
except KeyError:
try:
k = self.monomial_index[monomial]
result.append((k, coeff)) # depends on [control=['try'], data=[]]
except KeyError:
if not daggered:
dag_result = self._get_index_of_monomial(monomial.adjoint(), daggered=True)
result += [(k, coeff0 * coeff) for (k, coeff0) in dag_result] # depends on [control=['if'], data=[]]
else:
raise RuntimeError('The requested monomial ' + str(monomial) + ' could not be found.') # depends on [control=['except'], data=[]] # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['monomial']]
return result |
def ovsdb_server_method(self, **kwargs):
    """Auto Generated Code

    Builds a ``<config>/<ovsdb-server>`` payload carrying the ``name``
    and ``method`` keyword values and hands it to the callback.
    """
    name_value = kwargs.pop('name')
    method_value = kwargs.pop('method')
    callback = kwargs.pop('callback', self._callback)
    config = ET.Element("config")
    ovsdb_server = ET.SubElement(config, "ovsdb-server", xmlns="urn:brocade.com:mgmt:brocade-tunnels")
    ET.SubElement(ovsdb_server, "name").text = name_value
    ET.SubElement(ovsdb_server, "method").text = method_value
    return callback(config)
constant[Auto Generated Code
]
variable[config] assign[=] call[name[ET].Element, parameter[constant[config]]]
variable[ovsdb_server] assign[=] call[name[ET].SubElement, parameter[name[config], constant[ovsdb-server]]]
variable[name_key] assign[=] call[name[ET].SubElement, parameter[name[ovsdb_server], constant[name]]]
name[name_key].text assign[=] call[name[kwargs].pop, parameter[constant[name]]]
variable[method] assign[=] call[name[ET].SubElement, parameter[name[ovsdb_server], constant[method]]]
name[method].text assign[=] call[name[kwargs].pop, parameter[constant[method]]]
variable[callback] assign[=] call[name[kwargs].pop, parameter[constant[callback], name[self]._callback]]
return[call[name[callback], parameter[name[config]]]] | keyword[def] identifier[ovsdb_server_method] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[config] = identifier[ET] . identifier[Element] ( literal[string] )
identifier[ovsdb_server] = identifier[ET] . identifier[SubElement] ( identifier[config] , literal[string] , identifier[xmlns] = literal[string] )
identifier[name_key] = identifier[ET] . identifier[SubElement] ( identifier[ovsdb_server] , literal[string] )
identifier[name_key] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] )
identifier[method] = identifier[ET] . identifier[SubElement] ( identifier[ovsdb_server] , literal[string] )
identifier[method] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] )
identifier[callback] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[self] . identifier[_callback] )
keyword[return] identifier[callback] ( identifier[config] ) | def ovsdb_server_method(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element('config')
ovsdb_server = ET.SubElement(config, 'ovsdb-server', xmlns='urn:brocade.com:mgmt:brocade-tunnels')
name_key = ET.SubElement(ovsdb_server, 'name')
name_key.text = kwargs.pop('name')
method = ET.SubElement(ovsdb_server, 'method')
method.text = kwargs.pop('method')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def configure(cls, name, registry_host: str="0.0.0.0", registry_port: int=4500,
              pubsub_host: str="0.0.0.0", pubsub_port: int=6379):
    """ A convenience method for providing registry and pubsub(redis) endpoints
    :param name: Used for process name
    :param registry_host: IP Address for vyked-registry; default = 0.0.0.0
    :param registry_port: Port for vyked-registry; default = 4500
    :param pubsub_host: IP Address for pubsub component, usually redis; default = 0.0.0.0
    :param pubsub_port: Port for pubsub component; default= 6379
    :return: None
    """
    endpoint_settings = {
        'name': name,
        'registry_host': registry_host,
        'registry_port': registry_port,
        'pubsub_host': pubsub_host,
        'pubsub_port': pubsub_port,
    }
    # record every endpoint setting on the Host class
    for attribute, value in endpoint_settings.items():
        setattr(Host, attribute, value)
constant[ A convenience method for providing registry and pubsub(redis) endpoints
:param name: Used for process name
:param registry_host: IP Address for vyked-registry; default = 0.0.0.0
:param registry_port: Port for vyked-registry; default = 4500
:param pubsub_host: IP Address for pubsub component, usually redis; default = 0.0.0.0
:param pubsub_port: Port for pubsub component; default= 6379
:return: None
]
name[Host].name assign[=] name[name]
name[Host].registry_host assign[=] name[registry_host]
name[Host].registry_port assign[=] name[registry_port]
name[Host].pubsub_host assign[=] name[pubsub_host]
name[Host].pubsub_port assign[=] name[pubsub_port] | keyword[def] identifier[configure] ( identifier[cls] , identifier[name] , identifier[registry_host] : identifier[str] = literal[string] , identifier[registry_port] : identifier[int] = literal[int] ,
identifier[pubsub_host] : identifier[str] = literal[string] , identifier[pubsub_port] : identifier[int] = literal[int] ):
literal[string]
identifier[Host] . identifier[name] = identifier[name]
identifier[Host] . identifier[registry_host] = identifier[registry_host]
identifier[Host] . identifier[registry_port] = identifier[registry_port]
identifier[Host] . identifier[pubsub_host] = identifier[pubsub_host]
identifier[Host] . identifier[pubsub_port] = identifier[pubsub_port] | def configure(cls, name, registry_host: str='0.0.0.0', registry_port: int=4500, pubsub_host: str='0.0.0.0', pubsub_port: int=6379):
""" A convenience method for providing registry and pubsub(redis) endpoints
:param name: Used for process name
:param registry_host: IP Address for vyked-registry; default = 0.0.0.0
:param registry_port: Port for vyked-registry; default = 4500
:param pubsub_host: IP Address for pubsub component, usually redis; default = 0.0.0.0
:param pubsub_port: Port for pubsub component; default= 6379
:return: None
"""
Host.name = name
Host.registry_host = registry_host
Host.registry_port = registry_port
Host.pubsub_host = pubsub_host
Host.pubsub_port = pubsub_port |
def encode(self):
    """Encode this matrix in binary suitable for including in a PDF"""
    coefficients = (self.a, self.b, self.c, self.d, self.e, self.f)
    # six fixed-precision numbers separated by single spaces
    text = ' '.join(format(value, '.6f') for value in coefficients)
    return text.encode()
constant[Encode this matrix in binary suitable for including in a PDF]
return[call[call[constant[{:.6f} {:.6f} {:.6f} {:.6f} {:.6f} {:.6f}].format, parameter[name[self].a, name[self].b, name[self].c, name[self].d, name[self].e, name[self].f]].encode, parameter[]]] | keyword[def] identifier[encode] ( identifier[self] ):
literal[string]
keyword[return] literal[string] . identifier[format] (
identifier[self] . identifier[a] , identifier[self] . identifier[b] , identifier[self] . identifier[c] , identifier[self] . identifier[d] , identifier[self] . identifier[e] , identifier[self] . identifier[f]
). identifier[encode] () | def encode(self):
"""Encode this matrix in binary suitable for including in a PDF"""
return '{:.6f} {:.6f} {:.6f} {:.6f} {:.6f} {:.6f}'.format(self.a, self.b, self.c, self.d, self.e, self.f).encode() |
def reset(self):
    """
    Clear all accumulated TRN and relay-cell state back to empty.
    """
    (self.trnOverlaps, self.activeTRNSegments,
     self.activeTRNCellIndices) = [], [], []
    (self.relayOverlaps, self.activeRelaySegments,
     self.burstReadyCellIndices) = [], [], []
    # fresh all-zero burst-ready grid, one entry per relay cell
    self.burstReadyCells = np.zeros((self.relayWidth, self.relayHeight))
self.burstReadyCells = np.zeros((self.relayWidth, self.relayHeight)) | def function[reset, parameter[self]]:
constant[
Set everything back to zero
]
name[self].trnOverlaps assign[=] list[[]]
name[self].activeTRNSegments assign[=] list[[]]
name[self].activeTRNCellIndices assign[=] list[[]]
name[self].relayOverlaps assign[=] list[[]]
name[self].activeRelaySegments assign[=] list[[]]
name[self].burstReadyCellIndices assign[=] list[[]]
name[self].burstReadyCells assign[=] call[name[np].zeros, parameter[tuple[[<ast.Attribute object at 0x7da1b0900700>, <ast.Attribute object at 0x7da1b0900a90>]]]] | keyword[def] identifier[reset] ( identifier[self] ):
literal[string]
identifier[self] . identifier[trnOverlaps] =[]
identifier[self] . identifier[activeTRNSegments] =[]
identifier[self] . identifier[activeTRNCellIndices] =[]
identifier[self] . identifier[relayOverlaps] =[]
identifier[self] . identifier[activeRelaySegments] =[]
identifier[self] . identifier[burstReadyCellIndices] =[]
identifier[self] . identifier[burstReadyCells] = identifier[np] . identifier[zeros] (( identifier[self] . identifier[relayWidth] , identifier[self] . identifier[relayHeight] )) | def reset(self):
"""
Set everything back to zero
"""
self.trnOverlaps = []
self.activeTRNSegments = []
self.activeTRNCellIndices = []
self.relayOverlaps = []
self.activeRelaySegments = []
self.burstReadyCellIndices = []
self.burstReadyCells = np.zeros((self.relayWidth, self.relayHeight)) |
def get_contact_formatted_email(self, contact):
    """Returns a string with the formatted email for the given contact
    """
    name_and_address = (contact.Title(), contact.getEmailAddress())
    return self.get_formatted_email(name_and_address)
constant[Returns a string with the formatted email for the given contact
]
variable[contact_name] assign[=] call[name[contact].Title, parameter[]]
variable[contact_email] assign[=] call[name[contact].getEmailAddress, parameter[]]
return[call[name[self].get_formatted_email, parameter[tuple[[<ast.Name object at 0x7da18f00d750>, <ast.Name object at 0x7da18f00e3b0>]]]]] | keyword[def] identifier[get_contact_formatted_email] ( identifier[self] , identifier[contact] ):
literal[string]
identifier[contact_name] = identifier[contact] . identifier[Title] ()
identifier[contact_email] = identifier[contact] . identifier[getEmailAddress] ()
keyword[return] identifier[self] . identifier[get_formatted_email] (( identifier[contact_name] , identifier[contact_email] )) | def get_contact_formatted_email(self, contact):
"""Returns a string with the formatted email for the given contact
"""
contact_name = contact.Title()
contact_email = contact.getEmailAddress()
return self.get_formatted_email((contact_name, contact_email)) |
def _add_work_spec_args(self, parser):
    '''Add ``--work-spec`` to an :mod:`argparse` `parser`.'''
    parser.add_argument(
        '-w', '--work-spec',
        dest='work_spec_path',
        metavar='FILE',
        type=existing_path,
        required=True,
        help='path to a YAML or JSON file',
    )
constant[Add ``--work-spec`` to an :mod:`argparse` `parser`.]
call[name[parser].add_argument, parameter[constant[-w], constant[--work-spec]]] | keyword[def] identifier[_add_work_spec_args] ( identifier[self] , identifier[parser] ):
literal[string]
identifier[parser] . identifier[add_argument] ( literal[string] , literal[string] , identifier[dest] = literal[string] ,
identifier[metavar] = literal[string] , identifier[type] = identifier[existing_path] ,
identifier[required] = keyword[True] ,
identifier[help] = literal[string] ) | def _add_work_spec_args(self, parser):
"""Add ``--work-spec`` to an :mod:`argparse` `parser`."""
parser.add_argument('-w', '--work-spec', dest='work_spec_path', metavar='FILE', type=existing_path, required=True, help='path to a YAML or JSON file') |
def chain_callback(self, iocb):
    """Callback when this iocb completes.

    Forwards completion (or a decode failure) to the chained iocb,
    breaks the chain references, and triggers the chained iocb so its
    own callbacks run.
    """
    if _debug: IOChainMixIn._debug("chain_callback %r", iocb)
    # if we're not chained, there's no notification to do
    if not self.ioChain:
        return
    # refer to the chained iocb
    iocb = self.ioChain
    try:
        if _debug: IOChainMixIn._debug(" - decoding")
        # let the derived class transform the data
        self.decode()
        if _debug: IOChainMixIn._debug(" - decode complete")
    except Exception as err:
        # was a bare 'except:'; catching Exception lets KeyboardInterrupt
        # and SystemExit propagate, and binding the exception directly
        # replaces the dated sys.exc_info()[1] lookup
        if _debug: IOChainMixIn._exception(" - decoding exception: %r", err)
        iocb.ioState = ABORTED
        iocb.ioError = err
    # break the references
    self.ioChain = None
    iocb.ioController = None
    # notify the client
    iocb.trigger()
constant[Callback when this iocb completes.]
if name[_debug] begin[:]
call[name[IOChainMixIn]._debug, parameter[constant[chain_callback %r], name[iocb]]]
if <ast.UnaryOp object at 0x7da1b088c490> begin[:]
return[None]
variable[iocb] assign[=] name[self].ioChain
<ast.Try object at 0x7da1b088d390>
name[self].ioChain assign[=] constant[None]
name[iocb].ioController assign[=] constant[None]
call[name[iocb].trigger, parameter[]] | keyword[def] identifier[chain_callback] ( identifier[self] , identifier[iocb] ):
literal[string]
keyword[if] identifier[_debug] : identifier[IOChainMixIn] . identifier[_debug] ( literal[string] , identifier[iocb] )
keyword[if] keyword[not] identifier[self] . identifier[ioChain] :
keyword[return]
identifier[iocb] = identifier[self] . identifier[ioChain]
keyword[try] :
keyword[if] identifier[_debug] : identifier[IOChainMixIn] . identifier[_debug] ( literal[string] )
identifier[self] . identifier[decode] ()
keyword[if] identifier[_debug] : identifier[IOChainMixIn] . identifier[_debug] ( literal[string] )
keyword[except] :
identifier[err] = identifier[sys] . identifier[exc_info] ()[ literal[int] ]
keyword[if] identifier[_debug] : identifier[IOChainMixIn] . identifier[_exception] ( literal[string] , identifier[err] )
identifier[iocb] . identifier[ioState] = identifier[ABORTED]
identifier[iocb] . identifier[ioError] = identifier[err]
identifier[self] . identifier[ioChain] = keyword[None]
identifier[iocb] . identifier[ioController] = keyword[None]
identifier[iocb] . identifier[trigger] () | def chain_callback(self, iocb):
"""Callback when this iocb completes."""
if _debug:
IOChainMixIn._debug('chain_callback %r', iocb) # depends on [control=['if'], data=[]]
# if we're not chained, there's no notification to do
if not self.ioChain:
return # depends on [control=['if'], data=[]]
# refer to the chained iocb
iocb = self.ioChain
try:
if _debug:
IOChainMixIn._debug(' - decoding') # depends on [control=['if'], data=[]]
# let the derived class transform the data
self.decode()
if _debug:
IOChainMixIn._debug(' - decode complete') # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except:
# extract the error and abort
err = sys.exc_info()[1]
if _debug:
IOChainMixIn._exception(' - decoding exception: %r', err) # depends on [control=['if'], data=[]]
iocb.ioState = ABORTED
iocb.ioError = err # depends on [control=['except'], data=[]]
# break the references
self.ioChain = None
iocb.ioController = None
# notify the client
iocb.trigger() |
def normalize(p):
    """Normalizes a point/vector.

    Args:
        p ([float, float]): x and y coordinates

    Returns:
        [float, float]: unit-length vector in the direction of ``p``,
        or ``[0.0, 0.0]`` when ``p`` is the zero vector. (The original
        docstring said ``float``, which was wrong.)
    """
    # math.hypot is more robust than sqrt(x**2 + y**2): it avoids
    # intermediate overflow/underflow for very large or small components
    length = math.hypot(p[0], p[1])
    if length == 0:
        return [0.0, 0.0]
    return [p[0] / length, p[1] / length]
constant[Normalizes a point/vector
Args:
p ([float, float]): x and y coordinates
Returns:
float
]
variable[l] assign[=] call[name[math].sqrt, parameter[binary_operation[binary_operation[call[name[p]][constant[0]] ** constant[2]] + binary_operation[call[name[p]][constant[1]] ** constant[2]]]]]
return[<ast.IfExp object at 0x7da1b26ae500>] | keyword[def] identifier[normalize] ( identifier[p] ):
literal[string]
identifier[l] = identifier[math] . identifier[sqrt] ( identifier[p] [ literal[int] ]** literal[int] + identifier[p] [ literal[int] ]** literal[int] )
keyword[return] [ literal[int] , literal[int] ] keyword[if] identifier[l] == literal[int] keyword[else] [ identifier[p] [ literal[int] ]/ identifier[l] , identifier[p] [ literal[int] ]/ identifier[l] ] | def normalize(p):
"""Normalizes a point/vector
Args:
p ([float, float]): x and y coordinates
Returns:
float
"""
l = math.sqrt(p[0] ** 2 + p[1] ** 2)
return [0.0, 0.0] if l == 0 else [p[0] / l, p[1] / l] |
def from_module(module_name):
    """
    Load a configuration module and return a Config
    """
    module = importlib.import_module(module_name)
    # only ALL-UPPERCASE names count as configuration values
    uppercase_settings = {
        key: getattr(module, key)
        for key in dir(module)
        if key.isupper()
    }
    return Config(uppercase_settings)
constant[
Load a configuration module and return a Config
]
variable[d] assign[=] call[name[importlib].import_module, parameter[name[module_name]]]
variable[config] assign[=] dictionary[[], []]
for taget[name[key]] in starred[call[name[dir], parameter[name[d]]]] begin[:]
if call[name[key].isupper, parameter[]] begin[:]
call[name[config]][name[key]] assign[=] call[name[getattr], parameter[name[d], name[key]]]
return[call[name[Config], parameter[name[config]]]] | keyword[def] identifier[from_module] ( identifier[module_name] ):
literal[string]
identifier[d] = identifier[importlib] . identifier[import_module] ( identifier[module_name] )
identifier[config] ={}
keyword[for] identifier[key] keyword[in] identifier[dir] ( identifier[d] ):
keyword[if] identifier[key] . identifier[isupper] ():
identifier[config] [ identifier[key] ]= identifier[getattr] ( identifier[d] , identifier[key] )
keyword[return] identifier[Config] ( identifier[config] ) | def from_module(module_name):
"""
Load a configuration module and return a Config
"""
d = importlib.import_module(module_name)
config = {}
for key in dir(d):
if key.isupper():
config[key] = getattr(d, key) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['key']]
return Config(config) |
def _dict(self, with_name=True):
"""Returns the identity as a dict.
values that are empty are removed
"""
d = dict([(k, getattr(self, k)) for k, _, _ in self.name_parts])
if with_name:
d['name'] = self.name
try:
d['vname'] = self.vname
except ValueError:
pass
return self.clear_dict(d) | def function[_dict, parameter[self, with_name]]:
constant[Returns the identity as a dict.
values that are empty are removed
]
variable[d] assign[=] call[name[dict], parameter[<ast.ListComp object at 0x7da2041db8e0>]]
if name[with_name] begin[:]
call[name[d]][constant[name]] assign[=] name[self].name
<ast.Try object at 0x7da2041da020>
return[call[name[self].clear_dict, parameter[name[d]]]] | keyword[def] identifier[_dict] ( identifier[self] , identifier[with_name] = keyword[True] ):
literal[string]
identifier[d] = identifier[dict] ([( identifier[k] , identifier[getattr] ( identifier[self] , identifier[k] )) keyword[for] identifier[k] , identifier[_] , identifier[_] keyword[in] identifier[self] . identifier[name_parts] ])
keyword[if] identifier[with_name] :
identifier[d] [ literal[string] ]= identifier[self] . identifier[name]
keyword[try] :
identifier[d] [ literal[string] ]= identifier[self] . identifier[vname]
keyword[except] identifier[ValueError] :
keyword[pass]
keyword[return] identifier[self] . identifier[clear_dict] ( identifier[d] ) | def _dict(self, with_name=True):
"""Returns the identity as a dict.
values that are empty are removed
"""
d = dict([(k, getattr(self, k)) for (k, _, _) in self.name_parts])
if with_name:
d['name'] = self.name
try:
d['vname'] = self.vname # depends on [control=['try'], data=[]]
except ValueError:
pass # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
return self.clear_dict(d) |
def iterate_presentation_files(path=None, excludes=None, includes=None):
    """Yield repository presentation file paths relative to *path*,
    not including themes. Explicitly included patterns take priority."""
    if includes is None:
        includes = []
    if excludes is None:
        excludes = []
    # '$.' can never match, so an empty pattern list matches nothing.
    include_re = re.compile('|'.join(map(fnmatch.translate, includes)) or r'$.')
    exclude_re = re.compile('|'.join(map(fnmatch.translate, excludes)) or r'$.')

    def keep(parent, entry):
        """Return True when *entry* under *parent* should be visited/yielded."""
        candidate = os.path.join(parent, entry)
        # Explicit includes win over everything else.
        if include_re.match(candidate):
            return True
        # Drop special files (matched by name) and excluded paths.
        return not (specials_re.match(entry) or exclude_re.match(candidate))

    for parent, subdirs, filenames in os.walk(path):
        # Prune directories in place so os.walk skips excluded subtrees.
        subdirs[:] = [d for d in subdirs if keep(parent, d)]
        for fname in filenames:
            if keep(parent, fname):
                yield os.path.relpath(os.path.join(parent, fname), path)
constant[Iterates the repository presentation files relative to 'path',
not including themes. Note that 'includes' take priority.]
if compare[name[includes] is constant[None]] begin[:]
variable[includes] assign[=] list[[]]
if compare[name[excludes] is constant[None]] begin[:]
variable[excludes] assign[=] list[[]]
variable[includes_pattern] assign[=] <ast.BoolOp object at 0x7da2041db850>
variable[excludes_pattern] assign[=] <ast.BoolOp object at 0x7da2041da440>
variable[includes_re] assign[=] call[name[re].compile, parameter[name[includes_pattern]]]
variable[excludes_re] assign[=] call[name[re].compile, parameter[name[excludes_pattern]]]
def function[included, parameter[root, name]]:
constant[Returns True if the specified file is a presentation file.]
variable[full_path] assign[=] call[name[os].path.join, parameter[name[root], name[name]]]
if call[name[includes_re].match, parameter[name[full_path]]] begin[:]
return[constant[True]]
return[<ast.BoolOp object at 0x7da20c993550>]
for taget[tuple[[<ast.Name object at 0x7da20c990b50>, <ast.Name object at 0x7da20c992c80>, <ast.Name object at 0x7da20c991390>]]] in starred[call[name[os].walk, parameter[name[path]]]] begin[:]
call[name[dirs]][<ast.Slice object at 0x7da20c991ba0>] assign[=] <ast.ListComp object at 0x7da20c990af0>
variable[files] assign[=] <ast.ListComp object at 0x7da20c990f40>
for taget[name[f]] in starred[name[files]] begin[:]
<ast.Yield object at 0x7da20c9907c0> | keyword[def] identifier[iterate_presentation_files] ( identifier[path] = keyword[None] , identifier[excludes] = keyword[None] , identifier[includes] = keyword[None] ):
literal[string]
keyword[if] identifier[includes] keyword[is] keyword[None] :
identifier[includes] =[]
keyword[if] identifier[excludes] keyword[is] keyword[None] :
identifier[excludes] =[]
identifier[includes_pattern] = literal[string] . identifier[join] ([ identifier[fnmatch] . identifier[translate] ( identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[includes] ]) keyword[or] literal[string]
identifier[excludes_pattern] = literal[string] . identifier[join] ([ identifier[fnmatch] . identifier[translate] ( identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[excludes] ]) keyword[or] literal[string]
identifier[includes_re] = identifier[re] . identifier[compile] ( identifier[includes_pattern] )
identifier[excludes_re] = identifier[re] . identifier[compile] ( identifier[excludes_pattern] )
keyword[def] identifier[included] ( identifier[root] , identifier[name] ):
literal[string]
identifier[full_path] = identifier[os] . identifier[path] . identifier[join] ( identifier[root] , identifier[name] )
keyword[if] identifier[includes_re] . identifier[match] ( identifier[full_path] ):
keyword[return] keyword[True]
keyword[return] ( keyword[not] identifier[specials_re] . identifier[match] ( identifier[name] )
keyword[and] keyword[not] identifier[excludes_re] . identifier[match] ( identifier[full_path] ))
keyword[for] identifier[root] , identifier[dirs] , identifier[files] keyword[in] identifier[os] . identifier[walk] ( identifier[path] ):
identifier[dirs] [:]=[ identifier[d] keyword[for] identifier[d] keyword[in] identifier[dirs] keyword[if] identifier[included] ( identifier[root] , identifier[d] )]
identifier[files] =[ identifier[f] keyword[for] identifier[f] keyword[in] identifier[files] keyword[if] identifier[included] ( identifier[root] , identifier[f] )]
keyword[for] identifier[f] keyword[in] identifier[files] :
keyword[yield] identifier[os] . identifier[path] . identifier[relpath] ( identifier[os] . identifier[path] . identifier[join] ( identifier[root] , identifier[f] ), identifier[path] ) | def iterate_presentation_files(path=None, excludes=None, includes=None):
"""Iterates the repository presentation files relative to 'path',
not including themes. Note that 'includes' take priority."""
# Defaults
if includes is None:
includes = [] # depends on [control=['if'], data=['includes']]
if excludes is None:
excludes = [] # depends on [control=['if'], data=['excludes']]
# Transform glob patterns to regular expressions
includes_pattern = '|'.join([fnmatch.translate(x) for x in includes]) or '$.'
excludes_pattern = '|'.join([fnmatch.translate(x) for x in excludes]) or '$.'
includes_re = re.compile(includes_pattern)
excludes_re = re.compile(excludes_pattern)
def included(root, name):
"""Returns True if the specified file is a presentation file."""
full_path = os.path.join(root, name)
# Explicitly included files takes priority
if includes_re.match(full_path):
return True # depends on [control=['if'], data=[]]
# Ignore special and excluded files
return not specials_re.match(name) and (not excludes_re.match(full_path))
# Get a filtered list of paths to be built
for (root, dirs, files) in os.walk(path):
dirs[:] = [d for d in dirs if included(root, d)]
files = [f for f in files if included(root, f)]
for f in files:
yield os.path.relpath(os.path.join(root, f), path) # depends on [control=['for'], data=['f']] # depends on [control=['for'], data=[]] |
def _get_vcap_services(vcap_services=None):
"""Retrieves the VCAP Services information from the `ConfigParams.VCAP_SERVICES` field in the config object. If
`vcap_services` is not specified, it takes the information from VCAP_SERVICES environment variable.
Args:
vcap_services (str): Try to parse as a JSON string, otherwise, try open it as a file.
vcap_services (dict): Return the dict as is.
Returns:
dict: A dict representation of the VCAP Services information.
Raises:
ValueError:
* if `vcap_services` nor VCAP_SERVICES environment variable are specified.
* cannot parse `vcap_services` as a JSON string nor as a filename.
"""
vcap_services = vcap_services or os.environ.get('VCAP_SERVICES')
if not vcap_services:
raise ValueError(
"VCAP_SERVICES information must be supplied as a parameter or as environment variable 'VCAP_SERVICES'")
# If it was passed to config as a dict, simply return it
if isinstance(vcap_services, dict):
return vcap_services
try:
# Otherwise, if it's a string, try to load it as json
vcap_services = json.loads(vcap_services)
except json.JSONDecodeError:
# If that doesn't work, attempt to open it as a file path to the json config.
try:
with open(vcap_services) as vcap_json_data:
vcap_services = json.load(vcap_json_data)
except:
raise ValueError("VCAP_SERVICES information is not JSON or a file containing JSON:", vcap_services)
return vcap_services | def function[_get_vcap_services, parameter[vcap_services]]:
constant[Retrieves the VCAP Services information from the `ConfigParams.VCAP_SERVICES` field in the config object. If
`vcap_services` is not specified, it takes the information from VCAP_SERVICES environment variable.
Args:
vcap_services (str): Try to parse as a JSON string, otherwise, try open it as a file.
vcap_services (dict): Return the dict as is.
Returns:
dict: A dict representation of the VCAP Services information.
Raises:
ValueError:
* if `vcap_services` nor VCAP_SERVICES environment variable are specified.
* cannot parse `vcap_services` as a JSON string nor as a filename.
]
variable[vcap_services] assign[=] <ast.BoolOp object at 0x7da20eb2bdf0>
if <ast.UnaryOp object at 0x7da20eb294b0> begin[:]
<ast.Raise object at 0x7da20eb2b250>
if call[name[isinstance], parameter[name[vcap_services], name[dict]]] begin[:]
return[name[vcap_services]]
<ast.Try object at 0x7da207f031c0>
return[name[vcap_services]] | keyword[def] identifier[_get_vcap_services] ( identifier[vcap_services] = keyword[None] ):
literal[string]
identifier[vcap_services] = identifier[vcap_services] keyword[or] identifier[os] . identifier[environ] . identifier[get] ( literal[string] )
keyword[if] keyword[not] identifier[vcap_services] :
keyword[raise] identifier[ValueError] (
literal[string] )
keyword[if] identifier[isinstance] ( identifier[vcap_services] , identifier[dict] ):
keyword[return] identifier[vcap_services]
keyword[try] :
identifier[vcap_services] = identifier[json] . identifier[loads] ( identifier[vcap_services] )
keyword[except] identifier[json] . identifier[JSONDecodeError] :
keyword[try] :
keyword[with] identifier[open] ( identifier[vcap_services] ) keyword[as] identifier[vcap_json_data] :
identifier[vcap_services] = identifier[json] . identifier[load] ( identifier[vcap_json_data] )
keyword[except] :
keyword[raise] identifier[ValueError] ( literal[string] , identifier[vcap_services] )
keyword[return] identifier[vcap_services] | def _get_vcap_services(vcap_services=None):
"""Retrieves the VCAP Services information from the `ConfigParams.VCAP_SERVICES` field in the config object. If
`vcap_services` is not specified, it takes the information from VCAP_SERVICES environment variable.
Args:
vcap_services (str): Try to parse as a JSON string, otherwise, try open it as a file.
vcap_services (dict): Return the dict as is.
Returns:
dict: A dict representation of the VCAP Services information.
Raises:
ValueError:
* if `vcap_services` nor VCAP_SERVICES environment variable are specified.
* cannot parse `vcap_services` as a JSON string nor as a filename.
"""
vcap_services = vcap_services or os.environ.get('VCAP_SERVICES')
if not vcap_services:
raise ValueError("VCAP_SERVICES information must be supplied as a parameter or as environment variable 'VCAP_SERVICES'") # depends on [control=['if'], data=[]]
# If it was passed to config as a dict, simply return it
if isinstance(vcap_services, dict):
return vcap_services # depends on [control=['if'], data=[]]
try:
# Otherwise, if it's a string, try to load it as json
vcap_services = json.loads(vcap_services) # depends on [control=['try'], data=[]]
except json.JSONDecodeError:
# If that doesn't work, attempt to open it as a file path to the json config.
try:
with open(vcap_services) as vcap_json_data:
vcap_services = json.load(vcap_json_data) # depends on [control=['with'], data=['vcap_json_data']] # depends on [control=['try'], data=[]]
except:
raise ValueError('VCAP_SERVICES information is not JSON or a file containing JSON:', vcap_services) # depends on [control=['except'], data=[]] # depends on [control=['except'], data=[]]
return vcap_services |
def set_value(self, index, col, value, takeable=False):
    """
    Put single value at passed column and index.

    .. deprecated:: 0.21.0
        Use .at[] or .iat[] accessors instead.

    Parameters
    ----------
    index : row label
    col : column label
    value : scalar
    takeable : interpret the index/col as indexers, default False

    Returns
    -------
    DataFrame
        If label pair is contained, will be reference to calling DataFrame,
        otherwise a new object.
    """
    message = ("set_value is deprecated and will be removed in a future release."
               " Please use .at[] or .iat[] accessors instead")
    warnings.warn(message, FutureWarning, stacklevel=2)
    return self._set_value(index, col, value, takeable=takeable)
constant[
Put single value at passed column and index.
.. deprecated:: 0.21.0
Use .at[] or .iat[] accessors instead.
Parameters
----------
index : row label
col : column label
value : scalar
takeable : interpret the index/col as indexers, default False
Returns
-------
DataFrame
If label pair is contained, will be reference to calling DataFrame,
otherwise a new object.
]
call[name[warnings].warn, parameter[constant[set_value is deprecated and will be removed in a future release. Please use .at[] or .iat[] accessors instead], name[FutureWarning]]]
return[call[name[self]._set_value, parameter[name[index], name[col], name[value]]]] | keyword[def] identifier[set_value] ( identifier[self] , identifier[index] , identifier[col] , identifier[value] , identifier[takeable] = keyword[False] ):
literal[string]
identifier[warnings] . identifier[warn] ( literal[string]
literal[string]
literal[string] , identifier[FutureWarning] ,
identifier[stacklevel] = literal[int] )
keyword[return] identifier[self] . identifier[_set_value] ( identifier[index] , identifier[col] , identifier[value] , identifier[takeable] = identifier[takeable] ) | def set_value(self, index, col, value, takeable=False):
"""
Put single value at passed column and index.
.. deprecated:: 0.21.0
Use .at[] or .iat[] accessors instead.
Parameters
----------
index : row label
col : column label
value : scalar
takeable : interpret the index/col as indexers, default False
Returns
-------
DataFrame
If label pair is contained, will be reference to calling DataFrame,
otherwise a new object.
"""
warnings.warn('set_value is deprecated and will be removed in a future release. Please use .at[] or .iat[] accessors instead', FutureWarning, stacklevel=2)
return self._set_value(index, col, value, takeable=takeable) |
def stop(self, timeout=15):
    """Stop the subprocess.

    Keyword Arguments
        **timeout**
            Time in seconds to wait for a process and its
            children to exit.
    """
    pid = self.pid
    if not pid:
        return
    try:
        kill_process_nicely(pid, timeout=timeout)
    except psutil.NoSuchProcess:
        # Process already exited on its own; nothing left to stop.
        pass
constant[Stop the subprocess.
Keyword Arguments
**timeout**
Time in seconds to wait for a process and its
children to exit.
]
variable[pp] assign[=] name[self].pid
if name[pp] begin[:]
<ast.Try object at 0x7da20c6c57b0> | keyword[def] identifier[stop] ( identifier[self] , identifier[timeout] = literal[int] ):
literal[string]
identifier[pp] = identifier[self] . identifier[pid]
keyword[if] identifier[pp] :
keyword[try] :
identifier[kill_process_nicely] ( identifier[pp] , identifier[timeout] = identifier[timeout] )
keyword[except] identifier[psutil] . identifier[NoSuchProcess] :
keyword[pass] | def stop(self, timeout=15):
"""Stop the subprocess.
Keyword Arguments
**timeout**
Time in seconds to wait for a process and its
children to exit.
"""
pp = self.pid
if pp:
try:
kill_process_nicely(pp, timeout=timeout) # depends on [control=['try'], data=[]]
except psutil.NoSuchProcess:
pass # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]] |
async def add_participant(self, display_name: str = None, username: str = None, email: str = None, seed: int = 0, misc: str = None, **params):
    """ add a participant to the tournament

    |methcoro|

    Args:
        display_name: The name displayed in the bracket/schedule - not required if email or challonge_username is provided. Must be unique per tournament.
        username: Provide this if the participant has a Challonge account. He or she will be invited to the tournament.
        email: Providing this will first search for a matching Challonge account; if found it behaves like `username`, otherwise the user is invited via email to create one.
        seed: The participant's new seed, between 1 and the current number of participants (including the new record). Overwriting an existing seed bumps other participants.
        misc: Max: 255 characters. Multi-purpose field that is only visible via the API and handy for site integration (e.g. key to your users table)
        params: optional params (see http://api.challonge.com/v1/documents/participants/create)

    Returns:
        Participant: newly created participant

    Raises:
        APIException
    """
    # Exactly one of display_name / username must be given (XOR).
    assert_or_raise((display_name is None) ^ (username is None),
                    ValueError,
                    'One of display_name or username must not be None')
    params['name'] = display_name or ''
    params['challonge_username'] = username or ''
    if email is not None:
        params['email'] = email
    if seed != 0:
        params['seed'] = seed
    if misc is not None:
        params['misc'] = misc
    res = await self.connection('POST',
                                'tournaments/{}/participants'.format(self._id),
                                'participant',
                                **params)
    participant = self._create_participant(res)
    self._add_participant(participant)
    return participant
literal[string]
identifier[assert_or_raise] (( identifier[display_name] keyword[is] keyword[None] )^( identifier[username] keyword[is] keyword[None] ),
identifier[ValueError] ,
literal[string] )
identifier[params] . identifier[update] ({
literal[string] : identifier[display_name] keyword[or] literal[string] ,
literal[string] : identifier[username] keyword[or] literal[string] ,
})
keyword[if] identifier[email] keyword[is] keyword[not] keyword[None] :
identifier[params] . identifier[update] ({ literal[string] : identifier[email] })
keyword[if] identifier[seed] != literal[int] :
identifier[params] . identifier[update] ({ literal[string] : identifier[seed] })
keyword[if] identifier[misc] keyword[is] keyword[not] keyword[None] :
identifier[params] . identifier[update] ({ literal[string] : identifier[misc] })
identifier[res] = keyword[await] identifier[self] . identifier[connection] ( literal[string] ,
literal[string] . identifier[format] ( identifier[self] . identifier[_id] ),
literal[string] ,
** identifier[params] )
identifier[new_p] = identifier[self] . identifier[_create_participant] ( identifier[res] )
identifier[self] . identifier[_add_participant] ( identifier[new_p] )
keyword[return] identifier[new_p] | async def add_participant(self, display_name: str=None, username: str=None, email: str=None, seed: int=0, misc: str=None, **params):
""" add a participant to the tournament
|methcoro|
Args:
display_name: The name displayed in the bracket/schedule - not required if email or challonge_username is provided. Must be unique per tournament.
username: Provide this if the participant has a Challonge account. He or she will be invited to the tournament.
email: Providing this will first search for a matching Challonge account. If one is found, this will have the same effect as the "challonge_username" attribute. If one is not found, the "new-user-email" attribute will be set, and the user will be invited via email to create an account.
seed: The participant's new seed. Must be between 1 and the current number of participants (including the new record). Overwriting an existing seed will automatically bump other participants as you would expect.
misc: Max: 255 characters. Multi-purpose field that is only visible via the API and handy for site integration (e.g. key to your users table)
params: optional params (see http://api.challonge.com/v1/documents/participants/create)
Returns:
Participant: newly created participant
Raises:
APIException
"""
assert_or_raise((display_name is None) ^ (username is None), ValueError, 'One of display_name or username must not be None')
params.update({'name': display_name or '', 'challonge_username': username or ''})
if email is not None:
params.update({'email': email}) # depends on [control=['if'], data=['email']]
if seed != 0:
params.update({'seed': seed}) # depends on [control=['if'], data=['seed']]
if misc is not None:
params.update({'misc': misc}) # depends on [control=['if'], data=['misc']]
res = await self.connection('POST', 'tournaments/{}/participants'.format(self._id), 'participant', **params)
new_p = self._create_participant(res)
self._add_participant(new_p)
return new_p |
def spearmanr(x, y):
    """
    Michiel de Hoon's library (available in BioPython or standalone as
    PyCluster) returns Spearman rsb which does include a tie correction.

    >>> x = [5.05, 6.75, 3.21, 2.66]
    >>> y = [1.65, 26.5, -5.93, 7.96]
    >>> z = [1.65, 2.64, 2.64, 6.95]
    >>> round(spearmanr(x, y), 4)
    0.4
    >>> round(spearmanr(x, z), 4)
    -0.6325
    """
    from scipy import stats
    # An empty sample has no meaningful rank correlation; report zero.
    if not (x and y):
        return 0
    return stats.spearmanr(x, y)[0]
constant[
Michiel de Hoon's library (available in BioPython or standalone as
PyCluster) returns Spearman rsb which does include a tie correction.
>>> x = [5.05, 6.75, 3.21, 2.66]
>>> y = [1.65, 26.5, -5.93, 7.96]
>>> z = [1.65, 2.64, 2.64, 6.95]
>>> round(spearmanr(x, y), 4)
0.4
>>> round(spearmanr(x, z), 4)
-0.6325
]
from relative_module[scipy] import module[stats]
if <ast.BoolOp object at 0x7da20c76da50> begin[:]
return[constant[0]]
<ast.Tuple object at 0x7da20c76f6a0> assign[=] call[name[stats].spearmanr, parameter[name[x], name[y]]]
return[name[corr]] | keyword[def] identifier[spearmanr] ( identifier[x] , identifier[y] ):
literal[string]
keyword[from] identifier[scipy] keyword[import] identifier[stats]
keyword[if] keyword[not] identifier[x] keyword[or] keyword[not] identifier[y] :
keyword[return] literal[int]
identifier[corr] , identifier[pvalue] = identifier[stats] . identifier[spearmanr] ( identifier[x] , identifier[y] )
keyword[return] identifier[corr] | def spearmanr(x, y):
"""
Michiel de Hoon's library (available in BioPython or standalone as
PyCluster) returns Spearman rsb which does include a tie correction.
>>> x = [5.05, 6.75, 3.21, 2.66]
>>> y = [1.65, 26.5, -5.93, 7.96]
>>> z = [1.65, 2.64, 2.64, 6.95]
>>> round(spearmanr(x, y), 4)
0.4
>>> round(spearmanr(x, z), 4)
-0.6325
"""
from scipy import stats
if not x or not y:
return 0 # depends on [control=['if'], data=[]]
(corr, pvalue) = stats.spearmanr(x, y)
return corr |
def createSegments(self, cells):
    """
    Create a segment on each of the specified cells.

    @param cells (numpy array)
    """
    # NOTE: iterates with itervalues(), so this block is Python 2 code.
    segments = None
    for conns in self.connectionsBySource.itervalues():
        batch = conns.createSegments(cells)
        if segments is None:
            segments = batch
            continue
        # Sanity-check that every source allocated the same segment numbers.
        np.testing.assert_equal(segments, batch)
    return segments
return segments | def function[createSegments, parameter[self, cells]]:
constant[
Create a segment on each of the specified cells.
@param cells (numpy array)
]
variable[segments] assign[=] constant[None]
for taget[name[connections]] in starred[call[name[self].connectionsBySource.itervalues, parameter[]]] begin[:]
variable[created] assign[=] call[name[connections].createSegments, parameter[name[cells]]]
if compare[name[segments] is constant[None]] begin[:]
variable[segments] assign[=] name[created]
return[name[segments]] | keyword[def] identifier[createSegments] ( identifier[self] , identifier[cells] ):
literal[string]
identifier[segments] = keyword[None]
keyword[for] identifier[connections] keyword[in] identifier[self] . identifier[connectionsBySource] . identifier[itervalues] ():
identifier[created] = identifier[connections] . identifier[createSegments] ( identifier[cells] )
keyword[if] identifier[segments] keyword[is] keyword[None] :
identifier[segments] = identifier[created]
keyword[else] :
identifier[np] . identifier[testing] . identifier[assert_equal] ( identifier[segments] , identifier[created] )
keyword[return] identifier[segments] | def createSegments(self, cells):
"""
Create a segment on each of the specified cells.
@param cells (numpy array)
"""
segments = None
for connections in self.connectionsBySource.itervalues():
created = connections.createSegments(cells)
if segments is None:
segments = created # depends on [control=['if'], data=['segments']]
else:
# Sanity-check that the segment numbers are the same.
np.testing.assert_equal(segments, created) # depends on [control=['for'], data=['connections']]
return segments |
def deep_map(f, root):
    """Recursively map *f* over a nested structure of ``dict``s and ``list``s.

    *root* itself is passed through *f* first, then every value contained in
    the result, and so on. Tuples are treated like lists (the output uses
    lists), which suits encoding to JSON-compatible data.
    """
    mapped = f(root)
    if isinstance(mapped, dict):
        return {key: deep_map(f, value) for key, value in mapped.items()}
    if isinstance(mapped, (list, tuple)):
        return [deep_map(f, item) for item in mapped]
    return mapped
return result | def function[deep_map, parameter[f, root]]:
constant[Sibling to |inverse_deep_map|. As :py:func:`map` maps over an iterable,
|deep_map| maps over a structure of nested ``dict``s and ``list``s. Every
object is passed through `f` recursively. That is, first `root` is mapped,
next any object contained in its result, and so on.
No distinction is made between tuples and lists. This function was
created with encoding to JSON compatible data in mind.
.. |deep_map| replace:: :py:func:`deep_map`]
variable[result] assign[=] call[name[f], parameter[name[root]]]
if call[name[isinstance], parameter[name[result], name[dict]]] begin[:]
return[<ast.DictComp object at 0x7da18f09dd50>]
if <ast.BoolOp object at 0x7da18f09cd00> begin[:]
return[<ast.ListComp object at 0x7da18f09fb50>]
return[name[result]] | keyword[def] identifier[deep_map] ( identifier[f] , identifier[root] ):
literal[string]
identifier[result] = identifier[f] ( identifier[root] )
keyword[if] identifier[isinstance] ( identifier[result] , identifier[dict] ):
keyword[return] { identifier[k] : identifier[deep_map] ( identifier[f] , identifier[v] ) keyword[for] identifier[k] , identifier[v] keyword[in] identifier[result] . identifier[items] ()}
keyword[if] identifier[isinstance] ( identifier[result] , identifier[list] ) keyword[or] identifier[isinstance] ( identifier[result] , identifier[tuple] ):
keyword[return] [ identifier[deep_map] ( identifier[f] , identifier[v] ) keyword[for] identifier[v] keyword[in] identifier[result] ]
keyword[return] identifier[result] | def deep_map(f, root):
"""Sibling to |inverse_deep_map|. As :py:func:`map` maps over an iterable,
|deep_map| maps over a structure of nested ``dict``s and ``list``s. Every
object is passed through `f` recursively. That is, first `root` is mapped,
next any object contained in its result, and so on.
No distinction is made between tuples and lists. This function was
created with encoding to JSON compatible data in mind.
.. |deep_map| replace:: :py:func:`deep_map`"""
result = f(root)
if isinstance(result, dict):
return {k: deep_map(f, v) for (k, v) in result.items()} # depends on [control=['if'], data=[]]
if isinstance(result, list) or isinstance(result, tuple):
return [deep_map(f, v) for v in result] # depends on [control=['if'], data=[]]
return result |
def build_dir():
    '''
    Build the directory tree used for templates.
    '''
    for tag in ('add', 'edit', 'view', 'list', 'infolist'):
        target = os.path.join(CRUD_PATH, tag)
        # Only create directories that do not exist yet.
        if not os.path.exists(target):
            os.makedirs(target)
os.makedirs(wpath) | def function[build_dir, parameter[]]:
constant[
Build the directory used for templates.
]
variable[tag_arr] assign[=] list[[<ast.Constant object at 0x7da204564ca0>, <ast.Constant object at 0x7da2045669b0>, <ast.Constant object at 0x7da1b0464040>, <ast.Constant object at 0x7da1b04663b0>, <ast.Constant object at 0x7da1b0464910>]]
variable[path_arr] assign[=] <ast.ListComp object at 0x7da1b0467820>
for taget[name[wpath]] in starred[name[path_arr]] begin[:]
if call[name[os].path.exists, parameter[name[wpath]]] begin[:]
continue
call[name[os].makedirs, parameter[name[wpath]]] | keyword[def] identifier[build_dir] ():
literal[string]
identifier[tag_arr] =[ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ]
identifier[path_arr] =[ identifier[os] . identifier[path] . identifier[join] ( identifier[CRUD_PATH] , identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[tag_arr] ]
keyword[for] identifier[wpath] keyword[in] identifier[path_arr] :
keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[wpath] ):
keyword[continue]
identifier[os] . identifier[makedirs] ( identifier[wpath] ) | def build_dir():
"""
Build the directory used for templates.
"""
tag_arr = ['add', 'edit', 'view', 'list', 'infolist']
path_arr = [os.path.join(CRUD_PATH, x) for x in tag_arr]
for wpath in path_arr:
if os.path.exists(wpath):
continue # depends on [control=['if'], data=[]]
os.makedirs(wpath) # depends on [control=['for'], data=['wpath']] |
def close_db(self, exception):
    """Added as a `~flask.Flask.teardown_request` to applications to
    commit the transaction and disconnect ZODB if it was used during
    the request."""
    if not self.is_connected:
        return
    # Abort on request failure or a doomed transaction, commit otherwise.
    if exception is not None or transaction.isDoomed():
        transaction.abort()
    else:
        transaction.commit()
    self.connection.close()
self.connection.close() | def function[close_db, parameter[self, exception]]:
constant[Added as a `~flask.Flask.teardown_request` to applications to
commit the transaction and disconnect ZODB if it was used during
the request.]
if name[self].is_connected begin[:]
if <ast.BoolOp object at 0x7da1b268dfc0> begin[:]
call[name[transaction].commit, parameter[]]
call[name[self].connection.close, parameter[]] | keyword[def] identifier[close_db] ( identifier[self] , identifier[exception] ):
literal[string]
keyword[if] identifier[self] . identifier[is_connected] :
keyword[if] identifier[exception] keyword[is] keyword[None] keyword[and] keyword[not] identifier[transaction] . identifier[isDoomed] ():
identifier[transaction] . identifier[commit] ()
keyword[else] :
identifier[transaction] . identifier[abort] ()
identifier[self] . identifier[connection] . identifier[close] () | def close_db(self, exception):
"""Added as a `~flask.Flask.teardown_request` to applications to
commit the transaction and disconnect ZODB if it was used during
the request."""
if self.is_connected:
if exception is None and (not transaction.isDoomed()):
transaction.commit() # depends on [control=['if'], data=[]]
else:
transaction.abort()
self.connection.close() # depends on [control=['if'], data=[]] |
def parse_url_path(self, url_path: str) -> str:
    """Converts a static URL path into a filesystem path.

    ``url_path`` is the path component of the URL with
    ``static_url_prefix`` removed. The return value should be
    filesystem path relative to ``static_path``.

    This is the inverse of `make_static_url`.
    """
    sep = os.path.sep
    # On POSIX the URL path already is a filesystem path.
    return url_path if sep == "/" else url_path.replace("/", sep)
return url_path | def function[parse_url_path, parameter[self, url_path]]:
constant[Converts a static URL path into a filesystem path.
``url_path`` is the path component of the URL with
``static_url_prefix`` removed. The return value should be
filesystem path relative to ``static_path``.
This is the inverse of `make_static_url`.
]
if compare[name[os].path.sep not_equal[!=] constant[/]] begin[:]
variable[url_path] assign[=] call[name[url_path].replace, parameter[constant[/], name[os].path.sep]]
return[name[url_path]] | keyword[def] identifier[parse_url_path] ( identifier[self] , identifier[url_path] : identifier[str] )-> identifier[str] :
literal[string]
keyword[if] identifier[os] . identifier[path] . identifier[sep] != literal[string] :
identifier[url_path] = identifier[url_path] . identifier[replace] ( literal[string] , identifier[os] . identifier[path] . identifier[sep] )
keyword[return] identifier[url_path] | def parse_url_path(self, url_path: str) -> str:
"""Converts a static URL path into a filesystem path.
``url_path`` is the path component of the URL with
``static_url_prefix`` removed. The return value should be
filesystem path relative to ``static_path``.
This is the inverse of `make_static_url`.
"""
if os.path.sep != '/':
url_path = url_path.replace('/', os.path.sep) # depends on [control=['if'], data=[]]
return url_path |
def writeToken(self):
        """
        Store details of the current connection in the named file.
        This can be used by :meth:`readToken` to re-authenticate at a later time.
        """
        # Expiry times are persisted as integer Unix timestamps.
        skype_expiry = str(int(time.mktime(self.tokenExpiry["skype"].timetuple())))
        reg_expiry = str(int(time.mktime(self.tokenExpiry["reg"].timetuple())))
        lines = [self.userId, self.tokens["skype"], skype_expiry,
                 self.tokens["reg"], reg_expiry, self.msgsHost]
        # Create/open the token file with owner-only permissions (0600).
        with os.fdopen(os.open(self.tokenFile, os.O_WRONLY | os.O_CREAT, 0o600), "w") as f:
            # Opening via os.open does not truncate, so do it explicitly.
            f.truncate()
            f.write("".join(line + "\n" for line in lines))
constant[
Store details of the current connection in the named file.
This can be used by :meth:`readToken` to re-authenticate at a later time.
]
with call[name[os].fdopen, parameter[call[name[os].open, parameter[name[self].tokenFile, binary_operation[name[os].O_WRONLY <ast.BitOr object at 0x7da2590d6aa0> name[os].O_CREAT], constant[384]]], constant[w]]] begin[:]
call[name[f].truncate, parameter[]]
call[name[f].write, parameter[binary_operation[name[self].userId + constant[
]]]]
call[name[f].write, parameter[binary_operation[call[name[self].tokens][constant[skype]] + constant[
]]]]
call[name[f].write, parameter[binary_operation[call[name[str], parameter[call[name[int], parameter[call[name[time].mktime, parameter[call[call[name[self].tokenExpiry][constant[skype]].timetuple, parameter[]]]]]]]] + constant[
]]]]
call[name[f].write, parameter[binary_operation[call[name[self].tokens][constant[reg]] + constant[
]]]]
call[name[f].write, parameter[binary_operation[call[name[str], parameter[call[name[int], parameter[call[name[time].mktime, parameter[call[call[name[self].tokenExpiry][constant[reg]].timetuple, parameter[]]]]]]]] + constant[
]]]]
call[name[f].write, parameter[binary_operation[name[self].msgsHost + constant[
]]]] | keyword[def] identifier[writeToken] ( identifier[self] ):
literal[string]
keyword[with] identifier[os] . identifier[fdopen] ( identifier[os] . identifier[open] ( identifier[self] . identifier[tokenFile] , identifier[os] . identifier[O_WRONLY] | identifier[os] . identifier[O_CREAT] , literal[int] ), literal[string] ) keyword[as] identifier[f] :
identifier[f] . identifier[truncate] ()
identifier[f] . identifier[write] ( identifier[self] . identifier[userId] + literal[string] )
identifier[f] . identifier[write] ( identifier[self] . identifier[tokens] [ literal[string] ]+ literal[string] )
identifier[f] . identifier[write] ( identifier[str] ( identifier[int] ( identifier[time] . identifier[mktime] ( identifier[self] . identifier[tokenExpiry] [ literal[string] ]. identifier[timetuple] ())))+ literal[string] )
identifier[f] . identifier[write] ( identifier[self] . identifier[tokens] [ literal[string] ]+ literal[string] )
identifier[f] . identifier[write] ( identifier[str] ( identifier[int] ( identifier[time] . identifier[mktime] ( identifier[self] . identifier[tokenExpiry] [ literal[string] ]. identifier[timetuple] ())))+ literal[string] )
identifier[f] . identifier[write] ( identifier[self] . identifier[msgsHost] + literal[string] ) | def writeToken(self):
"""
Store details of the current connection in the named file.
This can be used by :meth:`readToken` to re-authenticate at a later time.
"""
# Write token file privately.
with os.fdopen(os.open(self.tokenFile, os.O_WRONLY | os.O_CREAT, 384), 'w') as f:
# When opening files via os, truncation must be done manually.
f.truncate()
f.write(self.userId + '\n')
f.write(self.tokens['skype'] + '\n')
f.write(str(int(time.mktime(self.tokenExpiry['skype'].timetuple()))) + '\n')
f.write(self.tokens['reg'] + '\n')
f.write(str(int(time.mktime(self.tokenExpiry['reg'].timetuple()))) + '\n')
f.write(self.msgsHost + '\n') # depends on [control=['with'], data=['f']] |
def _Backward3_T_Ps(P, s):
    """Backward equation for region 3, T=f(P,s)

    Parameters
    ----------
    P : float
        Pressure, [MPa]
    s : float
        Specific entropy, [kJ/kgK]

    Returns
    -------
    T : float
        Temperature, [K]
    """
    # Critical entropy that splits region 3 into subregions 3a and 3b, [kJ/kgK].
    s_crit = 4.41202148223476
    if s <= s_crit:
        return _Backward3a_T_Ps(P, s)
    return _Backward3b_T_Ps(P, s)
return T | def function[_Backward3_T_Ps, parameter[P, s]]:
constant[Backward equation for region 3, T=f(P,s)
Parameters
----------
P : float
Pressure, [MPa]
s : float
Specific entropy, [kJ/kgK]
Returns
-------
T : float
Temperature, [K]
]
variable[sc] assign[=] constant[4.41202148223476]
if compare[name[s] less_or_equal[<=] name[sc]] begin[:]
variable[T] assign[=] call[name[_Backward3a_T_Ps], parameter[name[P], name[s]]]
return[name[T]] | keyword[def] identifier[_Backward3_T_Ps] ( identifier[P] , identifier[s] ):
literal[string]
identifier[sc] = literal[int]
keyword[if] identifier[s] <= identifier[sc] :
identifier[T] = identifier[_Backward3a_T_Ps] ( identifier[P] , identifier[s] )
keyword[else] :
identifier[T] = identifier[_Backward3b_T_Ps] ( identifier[P] , identifier[s] )
keyword[return] identifier[T] | def _Backward3_T_Ps(P, s):
"""Backward equation for region 3, T=f(P,s)
Parameters
----------
P : float
Pressure, [MPa]
s : float
Specific entropy, [kJ/kgK]
Returns
-------
T : float
Temperature, [K]
"""
sc = 4.41202148223476
if s <= sc:
T = _Backward3a_T_Ps(P, s) # depends on [control=['if'], data=['s']]
else:
T = _Backward3b_T_Ps(P, s)
return T |
def from_payload(self, payload):
        """Populate this frame's fields from a raw binary payload."""
        # Byte 1 carries the node id; byte 0 carries the status code.
        self.node_id = payload[1]
        self.status = SetNodeNameConfirmationStatus(payload[0])
name[self].node_id assign[=] call[name[payload]][constant[1]] | keyword[def] identifier[from_payload] ( identifier[self] , identifier[payload] ):
literal[string]
identifier[self] . identifier[status] = identifier[SetNodeNameConfirmationStatus] ( identifier[payload] [ literal[int] ])
identifier[self] . identifier[node_id] = identifier[payload] [ literal[int] ] | def from_payload(self, payload):
"""Init frame from binary data."""
self.status = SetNodeNameConfirmationStatus(payload[0])
self.node_id = payload[1] |
def download_manylinux_wheels(self, abi, packages, directory):
        # type: (str, List[str], str) -> None
        """Download manylinux wheel files for each of the given packages.

        pip bails out as soon as any one dependency fails, so each package
        is fed to pip individually; we only care about the wheels that end
        up in ``directory``, not pip's return code. Requested wheels are
        restricted to the lambda-compatible manylinux1_x86_64 platform and
        the cpython implementation; ``abi`` depends on the python version
        and is validated later.
        """
        # These options are identical for every package, so build them once.
        common = ['--only-binary=:all:', '--no-deps', '--platform',
                  'manylinux1_x86_64', '--implementation', 'cp',
                  '--abi', abi, '--dest', directory]
        for package in packages:
            self._execute('download', common + [package])
constant[Download wheel files for manylinux for all the given packages.]
for taget[name[package]] in starred[name[packages]] begin[:]
variable[arguments] assign[=] list[[<ast.Constant object at 0x7da1b1fc8970>, <ast.Constant object at 0x7da1b1fcaa70>, <ast.Constant object at 0x7da1b1fc97e0>, <ast.Constant object at 0x7da1b1fcace0>, <ast.Constant object at 0x7da1b1fc9090>, <ast.Constant object at 0x7da1b1fc9900>, <ast.Constant object at 0x7da1b1fca260>, <ast.Name object at 0x7da1b1fc9ed0>, <ast.Constant object at 0x7da1b1fcae60>, <ast.Name object at 0x7da1b1fcb160>, <ast.Name object at 0x7da1b1fca7d0>]]
call[name[self]._execute, parameter[constant[download], name[arguments]]] | keyword[def] identifier[download_manylinux_wheels] ( identifier[self] , identifier[abi] , identifier[packages] , identifier[directory] ):
literal[string]
keyword[for] identifier[package] keyword[in] identifier[packages] :
identifier[arguments] =[ literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] , literal[string] ,
literal[string] , identifier[abi] , literal[string] , identifier[directory] , identifier[package] ]
identifier[self] . identifier[_execute] ( literal[string] , identifier[arguments] ) | def download_manylinux_wheels(self, abi, packages, directory):
# type: (str, List[str], str) -> None
'Download wheel files for manylinux for all the given packages.'
# If any one of these dependencies fails pip will bail out. Since we
# are only interested in all the ones we can download, we need to feed
# each package to pip individually. The return code of pip doesn't
# matter here since we will inspect the working directory to see which
# wheels were downloaded. We are only interested in wheel files
# compatible with lambda, which means manylinux1_x86_64 platform and
# cpython implementation. The compatible abi depends on the python
# version and is checked later.
for package in packages:
arguments = ['--only-binary=:all:', '--no-deps', '--platform', 'manylinux1_x86_64', '--implementation', 'cp', '--abi', abi, '--dest', directory, package]
self._execute('download', arguments) # depends on [control=['for'], data=['package']] |
async def deleteStickerFromSet(self, sticker):
        """
        See: https://core.telegram.org/bots/api#deletestickerfromset
        """
        # ``locals()`` must be captured before any new local is bound so the
        # payload contains exactly ``self`` and ``sticker``.
        return await self._api_request('deleteStickerFromSet', _rectify(_strip(locals())))
literal[string]
identifier[p] = identifier[_strip] ( identifier[locals] ())
keyword[return] keyword[await] identifier[self] . identifier[_api_request] ( literal[string] , identifier[_rectify] ( identifier[p] )) | async def deleteStickerFromSet(self, sticker):
"""
See: https://core.telegram.org/bots/api#deletestickerfromset
"""
p = _strip(locals())
return await self._api_request('deleteStickerFromSet', _rectify(p)) |
def derive(self):
        '''Compute the formal derivative of the polynomial: sum(i*coeff[i] x^(i-1))'''
        # Coefficients are stored highest-degree first, so the coefficient at
        # index idx has exponent (degree - idx); after differentiation that
        # exponent becomes its multiplier. The constant term (last entry) drops.
        degree = len(self) - 1
        return Polynomial([(degree - idx) * self[idx] for idx in _range(0, degree)])
constant[Compute the formal derivative of the polynomial: sum(i*coeff[i] x^(i-1))]
variable[L] assign[=] binary_operation[call[name[len], parameter[name[self]]] - constant[1]]
return[call[name[Polynomial], parameter[<ast.ListComp object at 0x7da18f00fca0>]]] | keyword[def] identifier[derive] ( identifier[self] ):
literal[string]
identifier[L] = identifier[len] ( identifier[self] )- literal[int]
keyword[return] identifier[Polynomial] ([( identifier[L] - identifier[i] )* identifier[self] [ identifier[i] ] keyword[for] identifier[i] keyword[in] identifier[_range] ( literal[int] , identifier[len] ( identifier[self] )- literal[int] )]) | def derive(self):
"""Compute the formal derivative of the polynomial: sum(i*coeff[i] x^(i-1))"""
#res = [0] * (len(self)-1) # pre-allocate the list, it will be one item shorter because the constant coefficient (x^0) will be removed
#for i in _range(2, len(self)+1): # start at 2 to skip the first coeff which is useless since it's a constant (x^0) so we +1, and because we work in reverse (lower coefficients are on the right) so +1 again
#res[-(i-1)] = (i-1) * self[-i] # self[-i] == coeff[i] and i-1 is the x exponent (eg: x^1, x^2, x^3, etc.)
#return Polynomial(res)
# One liner way to do it (also a bit faster too)
#return Polynomial( [(i-1) * self[-i] for i in _range(2, len(self)+1)][::-1] )
# Another faster version
L = len(self) - 1
return Polynomial([(L - i) * self[i] for i in _range(0, len(self) - 1)]) |
def iteritems(cls):
    """
    Inspect the attributes of *cls* and yield ``(name, value)`` pairs for
    every attribute whose value is an instance of *cls*, mirroring
    ``dict.iteritems``.
    """
    for key in dir(cls):
        val = getattr(cls, key)
        # Bug fix: the arguments were reversed (``isinstance(cls, val)``),
        # which asked whether the class itself was an instance of each
        # attribute value — raising TypeError whenever ``val`` was not a
        # type — instead of testing the value against the class.
        if isinstance(val, cls):
            yield (key, val)
for taget[name[key]] in starred[call[name[dir], parameter[name[cls]]]] begin[:]
variable[val] assign[=] call[name[getattr], parameter[name[cls], name[key]]]
if call[name[isinstance], parameter[name[cls], name[val]]] begin[:]
<ast.Yield object at 0x7da1b18a09a0> | keyword[def] identifier[iteritems] ( identifier[cls] ):
literal[string]
keyword[for] identifier[key] keyword[in] identifier[dir] ( identifier[cls] ):
identifier[val] = identifier[getattr] ( identifier[cls] , identifier[key] )
keyword[if] identifier[isinstance] ( identifier[cls] , identifier[val] ):
keyword[yield] ( identifier[key] , identifier[val] ) | def iteritems(cls):
"""
Inspects attributes of the class for instances of the class
and returns as key,value pairs mirroring dict#iteritems
"""
for key in dir(cls):
val = getattr(cls, key)
if isinstance(cls, val):
yield (key, val) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['key']] |
def document(self, name, file_name, owner=None, **kwargs):
        """
        Create the Document TI object.

        Args:
            name: The name of the Document.
            file_name: The file name associated with the Document.
            owner: The owner the Document belongs to. Defaults to None.
            **kwargs: Additional keyword arguments forwarded to ``Document``.

        Return:
            Document: The newly constructed Document TI object.
        """
        return Document(self.tcex, name, file_name, owner=owner, **kwargs)
constant[
Create the Document TI object.
Args:
owner:
name:
file_name:
**kwargs:
Return:
]
return[call[name[Document], parameter[name[self].tcex, name[name], name[file_name]]]] | keyword[def] identifier[document] ( identifier[self] , identifier[name] , identifier[file_name] , identifier[owner] = keyword[None] ,** identifier[kwargs] ):
literal[string]
keyword[return] identifier[Document] ( identifier[self] . identifier[tcex] , identifier[name] , identifier[file_name] , identifier[owner] = identifier[owner] ,** identifier[kwargs] ) | def document(self, name, file_name, owner=None, **kwargs):
"""
Create the Document TI object.
Args:
owner:
name:
file_name:
**kwargs:
Return:
"""
return Document(self.tcex, name, file_name, owner=owner, **kwargs) |
def tag(self, tag):
        """Return the release identified by *tag*, decoded from JSON.
        """
        # str(self) yields the base releases URL for this repository.
        response = self.http.get('%s/tags/%s' % (self, tag), auth=self.auth)
        response.raise_for_status()
        return response.json()
constant[Get a release by tag
]
variable[url] assign[=] binary_operation[constant[%s/tags/%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b26ae290>, <ast.Name object at 0x7da1b26ad240>]]]
variable[response] assign[=] call[name[self].http.get, parameter[name[url]]]
call[name[response].raise_for_status, parameter[]]
return[call[name[response].json, parameter[]]] | keyword[def] identifier[tag] ( identifier[self] , identifier[tag] ):
literal[string]
identifier[url] = literal[string] %( identifier[self] , identifier[tag] )
identifier[response] = identifier[self] . identifier[http] . identifier[get] ( identifier[url] , identifier[auth] = identifier[self] . identifier[auth] )
identifier[response] . identifier[raise_for_status] ()
keyword[return] identifier[response] . identifier[json] () | def tag(self, tag):
"""Get a release by tag
"""
url = '%s/tags/%s' % (self, tag)
response = self.http.get(url, auth=self.auth)
response.raise_for_status()
return response.json() |
def __update_keywords(uid, inkeywords):
    '''
    Update with keywords.
    '''
    # Set the keywords column for the post identified by uid.
    TabPost.update(keywords=inkeywords).where(TabPost.uid == uid).execute()
constant[
Update with keywords.
]
variable[entry] assign[=] call[call[name[TabPost].update, parameter[]].where, parameter[compare[name[TabPost].uid equal[==] name[uid]]]]
call[name[entry].execute, parameter[]] | keyword[def] identifier[__update_keywords] ( identifier[uid] , identifier[inkeywords] ):
literal[string]
identifier[entry] = identifier[TabPost] . identifier[update] ( identifier[keywords] = identifier[inkeywords] ). identifier[where] ( identifier[TabPost] . identifier[uid] == identifier[uid] )
identifier[entry] . identifier[execute] () | def __update_keywords(uid, inkeywords):
"""
Update with keywords.
"""
entry = TabPost.update(keywords=inkeywords).where(TabPost.uid == uid)
entry.execute() |
def add_dict_to_hash(a_hash, a_dict):
    """Adds `a_dict` to `a_hash`

    Args:
      a_hash (`Hash`): the secure hash, e.g created by hashlib.md5
      a_dict (dict[string, [string]]): the dictionary to add to the hash
    """
    if a_dict is None:
        return
    for key, value in a_dict.items():
        # Each pair is fed as NUL + key + NUL + value, UTF-8 encoded.
        a_hash.update(b'\x00%b\x00%b' % (key.encode('utf-8'), value.encode('utf-8')))
a_hash.update(b'\x00' + k.encode('utf-8') + b'\x00' + v.encode('utf-8')) | def function[add_dict_to_hash, parameter[a_hash, a_dict]]:
constant[Adds `a_dict` to `a_hash`
Args:
a_hash (`Hash`): the secure hash, e.g created by hashlib.md5
a_dict (dict[string, [string]]): the dictionary to add to the hash
]
if compare[name[a_dict] is constant[None]] begin[:]
return[None]
for taget[tuple[[<ast.Name object at 0x7da1b04304c0>, <ast.Name object at 0x7da1b0431330>]]] in starred[call[name[a_dict].items, parameter[]]] begin[:]
call[name[a_hash].update, parameter[binary_operation[binary_operation[binary_operation[constant[b'\x00'] + call[name[k].encode, parameter[constant[utf-8]]]] + constant[b'\x00']] + call[name[v].encode, parameter[constant[utf-8]]]]]] | keyword[def] identifier[add_dict_to_hash] ( identifier[a_hash] , identifier[a_dict] ):
literal[string]
keyword[if] identifier[a_dict] keyword[is] keyword[None] :
keyword[return]
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[a_dict] . identifier[items] ():
identifier[a_hash] . identifier[update] ( literal[string] + identifier[k] . identifier[encode] ( literal[string] )+ literal[string] + identifier[v] . identifier[encode] ( literal[string] )) | def add_dict_to_hash(a_hash, a_dict):
"""Adds `a_dict` to `a_hash`
Args:
a_hash (`Hash`): the secure hash, e.g created by hashlib.md5
a_dict (dict[string, [string]]): the dictionary to add to the hash
"""
if a_dict is None:
return # depends on [control=['if'], data=[]]
for (k, v) in a_dict.items():
a_hash.update(b'\x00' + k.encode('utf-8') + b'\x00' + v.encode('utf-8')) # depends on [control=['for'], data=[]] |
def get_pastml_marginal_prob_file(method, model, column):
    """
    Get the filename where the PastML marginal probabilities of node states are saved (will be None for non-marginal methods).
    This file is inside the work_dir that can be specified for the pastml_pipeline method.

    :param method: str, the ancestral state prediction method used by PASTML.
    :param model: str, the state evolution model used by PASTML.
    :param column: str, the column for which ancestral states are reconstructed with PASTML.
    :return: str, filename or None if the method is not marginal.
    """
    # Only marginal methods produce a probability table.
    if not is_marginal(method):
        return None
    state_column, _ = get_column_method(column, method)
    return PASTML_MARGINAL_PROBS_TAB.format(state=state_column, model=model)
constant[
Get the filename where the PastML marginal probabilities of node states are saved (will be None for non-marginal methods).
This file is inside the work_dir that can be specified for the pastml_pipeline method.
:param method: str, the ancestral state prediction method used by PASTML.
:param model: str, the state evolution model used by PASTML.
:param column: str, the column for which ancestral states are reconstructed with PASTML.
:return: str, filename or None if the method is not marginal.
]
if <ast.UnaryOp object at 0x7da18fe92500> begin[:]
return[constant[None]]
<ast.Tuple object at 0x7da18fe93550> assign[=] call[name[get_column_method], parameter[name[column], name[method]]]
return[call[name[PASTML_MARGINAL_PROBS_TAB].format, parameter[]]] | keyword[def] identifier[get_pastml_marginal_prob_file] ( identifier[method] , identifier[model] , identifier[column] ):
literal[string]
keyword[if] keyword[not] identifier[is_marginal] ( identifier[method] ):
keyword[return] keyword[None]
identifier[column] , identifier[method] = identifier[get_column_method] ( identifier[column] , identifier[method] )
keyword[return] identifier[PASTML_MARGINAL_PROBS_TAB] . identifier[format] ( identifier[state] = identifier[column] , identifier[model] = identifier[model] ) | def get_pastml_marginal_prob_file(method, model, column):
"""
Get the filename where the PastML marginal probabilities of node states are saved (will be None for non-marginal methods).
This file is inside the work_dir that can be specified for the pastml_pipeline method.
:param method: str, the ancestral state prediction method used by PASTML.
:param model: str, the state evolution model used by PASTML.
:param column: str, the column for which ancestral states are reconstructed with PASTML.
:return: str, filename or None if the method is not marginal.
"""
if not is_marginal(method):
return None # depends on [control=['if'], data=[]]
(column, method) = get_column_method(column, method)
return PASTML_MARGINAL_PROBS_TAB.format(state=column, model=model) |
def DataIsInteger(self):
    """Determines, based on the data type, if the data is an integer.

    The data types considered integers are: REG_DWORD (REG_DWORD_LITTLE_ENDIAN),
    REG_DWORD_BIG_ENDIAN and REG_QWORD.

    Returns:
      bool: True if the data is an integer, False otherwise.
    """
    integer_types = (definitions.REG_DWORD,
                     definitions.REG_DWORD_BIG_ENDIAN,
                     definitions.REG_QWORD)
    return self.data_type in integer_types
constant[Determines, based on the data type, if the data is an integer.
The data types considered strings are: REG_DWORD (REG_DWORD_LITTLE_ENDIAN),
REG_DWORD_BIG_ENDIAN and REG_QWORD.
Returns:
bool: True if the data is an integer, False otherwise.
]
return[compare[name[self].data_type in tuple[[<ast.Attribute object at 0x7da20c6ab940>, <ast.Attribute object at 0x7da20c6aa2f0>, <ast.Attribute object at 0x7da20c6aa560>]]]] | keyword[def] identifier[DataIsInteger] ( identifier[self] ):
literal[string]
keyword[return] identifier[self] . identifier[data_type] keyword[in] (
identifier[definitions] . identifier[REG_DWORD] , identifier[definitions] . identifier[REG_DWORD_BIG_ENDIAN] ,
identifier[definitions] . identifier[REG_QWORD] ) | def DataIsInteger(self):
"""Determines, based on the data type, if the data is an integer.
The data types considered strings are: REG_DWORD (REG_DWORD_LITTLE_ENDIAN),
REG_DWORD_BIG_ENDIAN and REG_QWORD.
Returns:
bool: True if the data is an integer, False otherwise.
"""
return self.data_type in (definitions.REG_DWORD, definitions.REG_DWORD_BIG_ENDIAN, definitions.REG_QWORD) |
def wavefunction(self, quil_program: Program,
                     memory_map: Any = None) -> Wavefunction:
        """
        Simulate a Quil program and return the wavefunction.

        .. note:: If your program contains measurements or noisy gates, this method may not do what
            you want. If the execution of ``quil_program`` is **non-deterministic** then the
            final wavefunction only represents a stochastically generated sample and the
            wavefunctions returned by *different* ``wavefunction`` calls *will generally be
            different*.

        :param quil_program: A Quil program.
        :param memory_map: An assignment of classical registers to values, representing an initial
                           state for the QAM's classical memory.

                           This is expected to be of type Dict[str, List[Union[int, float]]],
                           where the keys are memory region names and the values are arrays of
                           initialization data.

                           For now, we also support input of type Dict[MemoryReference, Any],
                           but this is deprecated and will be removed in a future release.
        :return: A Wavefunction object representing the state of the QVM.
        """
        program = quil_program
        # Bake the classical-memory initial values into the program, if given.
        if memory_map is not None:
            program = self.augment_program_with_memory_values(program, memory_map)
        return self.connection._wavefunction(quil_program=program,
                                             random_seed=self.random_seed)
constant[
Simulate a Quil program and return the wavefunction.
.. note:: If your program contains measurements or noisy gates, this method may not do what
you want. If the execution of ``quil_program`` is **non-deterministic** then the
final wavefunction only represents a stochastically generated sample and the
wavefunctions returned by *different* ``wavefunction`` calls *will generally be
different*.
:param quil_program: A Quil program.
:param memory_map: An assignment of classical registers to values, representing an initial
state for the QAM's classical memory.
This is expected to be of type Dict[str, List[Union[int, float]]],
where the keys are memory region names and the values are arrays of
initialization data.
For now, we also support input of type Dict[MemoryReference, Any],
but this is deprecated and will be removed in a future release.
:return: A Wavefunction object representing the state of the QVM.
]
if compare[name[memory_map] is_not constant[None]] begin[:]
variable[quil_program] assign[=] call[name[self].augment_program_with_memory_values, parameter[name[quil_program], name[memory_map]]]
return[call[name[self].connection._wavefunction, parameter[]]] | keyword[def] identifier[wavefunction] ( identifier[self] , identifier[quil_program] : identifier[Program] ,
identifier[memory_map] : identifier[Any] = keyword[None] )-> identifier[Wavefunction] :
literal[string]
keyword[if] identifier[memory_map] keyword[is] keyword[not] keyword[None] :
identifier[quil_program] = identifier[self] . identifier[augment_program_with_memory_values] ( identifier[quil_program] , identifier[memory_map] )
keyword[return] identifier[self] . identifier[connection] . identifier[_wavefunction] ( identifier[quil_program] = identifier[quil_program] ,
identifier[random_seed] = identifier[self] . identifier[random_seed] ) | def wavefunction(self, quil_program: Program, memory_map: Any=None) -> Wavefunction:
"""
Simulate a Quil program and return the wavefunction.
.. note:: If your program contains measurements or noisy gates, this method may not do what
you want. If the execution of ``quil_program`` is **non-deterministic** then the
final wavefunction only represents a stochastically generated sample and the
wavefunctions returned by *different* ``wavefunction`` calls *will generally be
different*.
:param quil_program: A Quil program.
:param memory_map: An assignment of classical registers to values, representing an initial
state for the QAM's classical memory.
This is expected to be of type Dict[str, List[Union[int, float]]],
where the keys are memory region names and the values are arrays of
initialization data.
For now, we also support input of type Dict[MemoryReference, Any],
but this is deprecated and will be removed in a future release.
:return: A Wavefunction object representing the state of the QVM.
"""
if memory_map is not None:
quil_program = self.augment_program_with_memory_values(quil_program, memory_map) # depends on [control=['if'], data=['memory_map']]
return self.connection._wavefunction(quil_program=quil_program, random_seed=self.random_seed) |
def _get_backend(args):
"""Extract the backend class from the command line arguments."""
if args.backend == 'gatttool':
backend = GatttoolBackend
elif args.backend == 'bluepy':
backend = BluepyBackend
elif args.backend == 'pygatt':
backend = PygattBackend
else:
raise Exception('unknown backend: {}'.format(args.backend))
return backend | def function[_get_backend, parameter[args]]:
constant[Extract the backend class from the command line arguments.]
if compare[name[args].backend equal[==] constant[gatttool]] begin[:]
variable[backend] assign[=] name[GatttoolBackend]
return[name[backend]] | keyword[def] identifier[_get_backend] ( identifier[args] ):
literal[string]
keyword[if] identifier[args] . identifier[backend] == literal[string] :
identifier[backend] = identifier[GatttoolBackend]
keyword[elif] identifier[args] . identifier[backend] == literal[string] :
identifier[backend] = identifier[BluepyBackend]
keyword[elif] identifier[args] . identifier[backend] == literal[string] :
identifier[backend] = identifier[PygattBackend]
keyword[else] :
keyword[raise] identifier[Exception] ( literal[string] . identifier[format] ( identifier[args] . identifier[backend] ))
keyword[return] identifier[backend] | def _get_backend(args):
"""Extract the backend class from the command line arguments."""
if args.backend == 'gatttool':
backend = GatttoolBackend # depends on [control=['if'], data=[]]
elif args.backend == 'bluepy':
backend = BluepyBackend # depends on [control=['if'], data=[]]
elif args.backend == 'pygatt':
backend = PygattBackend # depends on [control=['if'], data=[]]
else:
raise Exception('unknown backend: {}'.format(args.backend))
return backend |
def optimization_updates(self, params, gradients):
        """
        Return updates from optimization.
        """
        # optimize_updates also reports any new parameters that the optimizer
        # introduced; register them with the network so they can be trained.
        updates, new_free_params = optimize_updates(params, gradients, self.config)
        self.network.free_parameters.extend(new_free_params)
        logging.info("Added %d free parameters for optimization" % len(new_free_params))
        return updates
return updates | def function[optimization_updates, parameter[self, params, gradients]]:
constant[
Return updates from optimization.
]
<ast.Tuple object at 0x7da1b0531c90> assign[=] call[name[optimize_updates], parameter[name[params], name[gradients], name[self].config]]
call[name[self].network.free_parameters.extend, parameter[name[free_parameters]]]
call[name[logging].info, parameter[binary_operation[constant[Added %d free parameters for optimization] <ast.Mod object at 0x7da2590d6920> call[name[len], parameter[name[free_parameters]]]]]]
return[name[updates]] | keyword[def] identifier[optimization_updates] ( identifier[self] , identifier[params] , identifier[gradients] ):
literal[string]
identifier[updates] , identifier[free_parameters] = identifier[optimize_updates] ( identifier[params] , identifier[gradients] , identifier[self] . identifier[config] )
identifier[self] . identifier[network] . identifier[free_parameters] . identifier[extend] ( identifier[free_parameters] )
identifier[logging] . identifier[info] ( literal[string] % identifier[len] ( identifier[free_parameters] ))
keyword[return] identifier[updates] | def optimization_updates(self, params, gradients):
"""
Return updates from optimization.
"""
(updates, free_parameters) = optimize_updates(params, gradients, self.config)
self.network.free_parameters.extend(free_parameters)
logging.info('Added %d free parameters for optimization' % len(free_parameters))
return updates |
def eval_function(value):
    """Evaluate a parsed timestamp function.

    *value* is a sequence whose first element is the function name and
    whose remaining elements are its arguments. Raises ``SyntaxError``
    for names that are not recognized.
    """
    name = value[0]
    args = value[1:]
    if name == "NOW":
        return datetime.utcnow().replace(tzinfo=tzutc())
    if name in ("TIMESTAMP", "TS"):
        # Local-timezone timestamp parsed from the first argument.
        return parse(unwrap(args[0])).replace(tzinfo=tzlocal())
    if name in ("UTCTIMESTAMP", "UTCTS"):
        # Same, but interpreted as UTC.
        return parse(unwrap(args[0])).replace(tzinfo=tzutc())
    if name == "MS":
        # Seconds-to-milliseconds conversion of the resolved argument.
        return 1000 * resolve(args[0])
    raise SyntaxError("Unrecognized function %r" % name)
constant[ Evaluate a timestamp function ]
<ast.Tuple object at 0x7da1b0e2e5c0> assign[=] tuple[[<ast.Subscript object at 0x7da1b0e2d0c0>, <ast.Subscript object at 0x7da1b0e2c1f0>]]
if compare[name[name] equal[==] constant[NOW]] begin[:]
return[call[call[name[datetime].utcnow, parameter[]].replace, parameter[]]] | keyword[def] identifier[eval_function] ( identifier[value] ):
literal[string]
identifier[name] , identifier[args] = identifier[value] [ literal[int] ], identifier[value] [ literal[int] :]
keyword[if] identifier[name] == literal[string] :
keyword[return] identifier[datetime] . identifier[utcnow] (). identifier[replace] ( identifier[tzinfo] = identifier[tzutc] ())
keyword[elif] identifier[name] keyword[in] [ literal[string] , literal[string] ]:
keyword[return] identifier[parse] ( identifier[unwrap] ( identifier[args] [ literal[int] ])). identifier[replace] ( identifier[tzinfo] = identifier[tzlocal] ())
keyword[elif] identifier[name] keyword[in] [ literal[string] , literal[string] ]:
keyword[return] identifier[parse] ( identifier[unwrap] ( identifier[args] [ literal[int] ])). identifier[replace] ( identifier[tzinfo] = identifier[tzutc] ())
keyword[elif] identifier[name] == literal[string] :
keyword[return] literal[int] * identifier[resolve] ( identifier[args] [ literal[int] ])
keyword[else] :
keyword[raise] identifier[SyntaxError] ( literal[string] % identifier[name] ) | def eval_function(value):
""" Evaluate a timestamp function """
(name, args) = (value[0], value[1:])
if name == 'NOW':
return datetime.utcnow().replace(tzinfo=tzutc()) # depends on [control=['if'], data=[]]
elif name in ['TIMESTAMP', 'TS']:
return parse(unwrap(args[0])).replace(tzinfo=tzlocal()) # depends on [control=['if'], data=[]]
elif name in ['UTCTIMESTAMP', 'UTCTS']:
return parse(unwrap(args[0])).replace(tzinfo=tzutc()) # depends on [control=['if'], data=[]]
elif name == 'MS':
return 1000 * resolve(args[0]) # depends on [control=['if'], data=[]]
else:
raise SyntaxError('Unrecognized function %r' % name) |
def get_template(self, R):
    """Compute a template on latent factors.

    Parameters
    ----------
    R : 2D array, in format [n_voxel, n_dim]
        The scanner coordinate matrix of one subject's fMRI data.

    Returns
    -------
    template_prior : 1D array
        The template prior.
    template_centers_cov : 2D array, in shape [n_dim, n_dim]
        The template on centers' covariance.
    template_widths_var : float
        The template on widths' variance.
    """
    init_centers, init_widths = self.init_centers_widths(R)
    prior_size = self.K * (self.n_dim + 2 + self.cov_vec_size)
    template_prior = np.zeros(prior_size)
    # The covariance/variance hyper-templates are constants derived
    # from the coordinate matrix; K^(-2/3) scales with factor count.
    template_centers_cov = np.cov(R.T) * math.pow(self.K, -2 / 3.0)
    template_widths_var = self._get_max_sigma(R)
    # Replicate the constant hyperparameters once per factor.
    tiled_centers_cov = np.tile(from_sym_2_tri(template_centers_cov), self.K)
    tiled_widths_var = np.tile(template_widths_var, self.K)
    # Write each section of the flat prior vector in place.
    self.set_centers(template_prior, init_centers)
    self.set_widths(template_prior, init_widths)
    self.set_centers_mean_cov(template_prior, tiled_centers_cov)
    self.set_widths_mean_var(template_prior, tiled_widths_var)
    return template_prior, template_centers_cov, template_widths_var
constant[Compute a template on latent factors
Parameters
----------
R : 2D array, in format [n_voxel, n_dim]
The scanner coordinate matrix of one subject's fMRI data
Returns
-------
template_prior : 1D array
The template prior.
template_centers_cov: 2D array, in shape [n_dim, n_dim]
The template on centers' covariance.
template_widths_var: float
The template on widths' variance
]
<ast.Tuple object at 0x7da204566b30> assign[=] call[name[self].init_centers_widths, parameter[name[R]]]
variable[template_prior] assign[=] call[name[np].zeros, parameter[binary_operation[name[self].K * binary_operation[binary_operation[name[self].n_dim + constant[2]] + name[self].cov_vec_size]]]]
variable[template_centers_cov] assign[=] binary_operation[call[name[np].cov, parameter[name[R].T]] * call[name[math].pow, parameter[name[self].K, binary_operation[<ast.UnaryOp object at 0x7da204566fe0> / constant[3.0]]]]]
variable[template_widths_var] assign[=] call[name[self]._get_max_sigma, parameter[name[R]]]
variable[centers_cov_all] assign[=] call[name[np].tile, parameter[call[name[from_sym_2_tri], parameter[name[template_centers_cov]]], name[self].K]]
variable[widths_var_all] assign[=] call[name[np].tile, parameter[name[template_widths_var], name[self].K]]
call[name[self].set_centers, parameter[name[template_prior], name[centers]]]
call[name[self].set_widths, parameter[name[template_prior], name[widths]]]
call[name[self].set_centers_mean_cov, parameter[name[template_prior], name[centers_cov_all]]]
call[name[self].set_widths_mean_var, parameter[name[template_prior], name[widths_var_all]]]
return[tuple[[<ast.Name object at 0x7da204564b20>, <ast.Name object at 0x7da204567850>, <ast.Name object at 0x7da2045658d0>]]] | keyword[def] identifier[get_template] ( identifier[self] , identifier[R] ):
literal[string]
identifier[centers] , identifier[widths] = identifier[self] . identifier[init_centers_widths] ( identifier[R] )
identifier[template_prior] = identifier[np] . identifier[zeros] ( identifier[self] . identifier[K] *( identifier[self] . identifier[n_dim] + literal[int] + identifier[self] . identifier[cov_vec_size] ))
identifier[template_centers_cov] = identifier[np] . identifier[cov] ( identifier[R] . identifier[T] )* identifier[math] . identifier[pow] ( identifier[self] . identifier[K] ,- literal[int] / literal[int] )
identifier[template_widths_var] = identifier[self] . identifier[_get_max_sigma] ( identifier[R] )
identifier[centers_cov_all] = identifier[np] . identifier[tile] ( identifier[from_sym_2_tri] ( identifier[template_centers_cov] ), identifier[self] . identifier[K] )
identifier[widths_var_all] = identifier[np] . identifier[tile] ( identifier[template_widths_var] , identifier[self] . identifier[K] )
identifier[self] . identifier[set_centers] ( identifier[template_prior] , identifier[centers] )
identifier[self] . identifier[set_widths] ( identifier[template_prior] , identifier[widths] )
identifier[self] . identifier[set_centers_mean_cov] ( identifier[template_prior] , identifier[centers_cov_all] )
identifier[self] . identifier[set_widths_mean_var] ( identifier[template_prior] , identifier[widths_var_all] )
keyword[return] identifier[template_prior] , identifier[template_centers_cov] , identifier[template_widths_var] | def get_template(self, R):
"""Compute a template on latent factors
Parameters
----------
R : 2D array, in format [n_voxel, n_dim]
The scanner coordinate matrix of one subject's fMRI data
Returns
-------
template_prior : 1D array
The template prior.
template_centers_cov: 2D array, in shape [n_dim, n_dim]
The template on centers' covariance.
template_widths_var: float
The template on widths' variance
"""
(centers, widths) = self.init_centers_widths(R)
template_prior = np.zeros(self.K * (self.n_dim + 2 + self.cov_vec_size))
# template centers cov and widths var are const
template_centers_cov = np.cov(R.T) * math.pow(self.K, -2 / 3.0)
template_widths_var = self._get_max_sigma(R)
centers_cov_all = np.tile(from_sym_2_tri(template_centers_cov), self.K)
widths_var_all = np.tile(template_widths_var, self.K)
# initial mean of centers' mean
self.set_centers(template_prior, centers)
self.set_widths(template_prior, widths)
self.set_centers_mean_cov(template_prior, centers_cov_all)
self.set_widths_mean_var(template_prior, widths_var_all)
return (template_prior, template_centers_cov, template_widths_var) |
def hget(self, key):
    """Read data from Redis for the provided key.

    Byte responses are decoded to UTF-8 strings.

    Args:
        key (string): The key to read in Redis.

    Returns:
        (any): The response data from Redis, decoded to ``str`` when the
            client returned ``bytes``; ``None`` when the key is absent.
    """
    data = self.r.hget(self.hash, key)
    # Decode the value we already fetched instead of issuing a second,
    # redundant HGET round trip (which could also observe a different
    # value if the hash is written concurrently).
    if data is not None and not isinstance(data, str):
        data = str(data, 'utf-8')
    return data
constant[Read data from Redis for the provided key.
Args:
key (string): The key to read in Redis.
Returns:
(any): The response data from Redis.
]
variable[data] assign[=] call[name[self].r.hget, parameter[name[self].hash, name[key]]]
if <ast.BoolOp object at 0x7da18fe91ab0> begin[:]
variable[data] assign[=] call[name[str], parameter[call[name[self].r.hget, parameter[name[self].hash, name[key]]], constant[utf-8]]]
return[name[data]] | keyword[def] identifier[hget] ( identifier[self] , identifier[key] ):
literal[string]
identifier[data] = identifier[self] . identifier[r] . identifier[hget] ( identifier[self] . identifier[hash] , identifier[key] )
keyword[if] identifier[data] keyword[is] keyword[not] keyword[None] keyword[and] keyword[not] identifier[isinstance] ( identifier[data] , identifier[str] ):
identifier[data] = identifier[str] ( identifier[self] . identifier[r] . identifier[hget] ( identifier[self] . identifier[hash] , identifier[key] ), literal[string] )
keyword[return] identifier[data] | def hget(self, key):
"""Read data from Redis for the provided key.
Args:
key (string): The key to read in Redis.
Returns:
(any): The response data from Redis.
"""
data = self.r.hget(self.hash, key)
if data is not None and (not isinstance(data, str)):
data = str(self.r.hget(self.hash, key), 'utf-8') # depends on [control=['if'], data=[]]
return data |
def _select_ontology(self, line):
    """try to select an ontology NP: the actual load from FS is in <_load_ontology> """
    try:
        # Keep the try minimal: only the int() conversion may signal
        # "not a numeric index". Previously the whole numeric branch was
        # inside the try, so a ValueError raised by _load_ontology would
        # be silently rerouted into the substring-matching fallback.
        var = int(line)  # it's a string
    except ValueError:
        # Non-numeric input: treat it as a substring filter over names.
        matches = [each for each in self.all_ontologies if line in each]
        choice = self._selectFromList(matches, line, "ontology")
        if choice:
            self._load_ontology(choice)
    else:
        # Numeric input: load by 1-based position, ignoring out-of-range.
        if 1 <= var <= len(self.all_ontologies):
            self._load_ontology(self.all_ontologies[var - 1])
constant[try to select an ontology NP: the actual load from FS is in <_load_ontology> ]
<ast.Try object at 0x7da1b1115450> | keyword[def] identifier[_select_ontology] ( identifier[self] , identifier[line] ):
literal[string]
keyword[try] :
identifier[var] = identifier[int] ( identifier[line] )
keyword[if] identifier[var] keyword[in] identifier[range] ( literal[int] , identifier[len] ( identifier[self] . identifier[all_ontologies] )+ literal[int] ):
identifier[self] . identifier[_load_ontology] ( identifier[self] . identifier[all_ontologies] [ identifier[var] - literal[int] ])
keyword[except] identifier[ValueError] :
identifier[out] =[]
keyword[for] identifier[each] keyword[in] identifier[self] . identifier[all_ontologies] :
keyword[if] identifier[line] keyword[in] identifier[each] :
identifier[out] +=[ identifier[each] ]
identifier[choice] = identifier[self] . identifier[_selectFromList] ( identifier[out] , identifier[line] , literal[string] )
keyword[if] identifier[choice] :
identifier[self] . identifier[_load_ontology] ( identifier[choice] ) | def _select_ontology(self, line):
"""try to select an ontology NP: the actual load from FS is in <_load_ontology> """
try:
var = int(line) # it's a string
if var in range(1, len(self.all_ontologies) + 1):
self._load_ontology(self.all_ontologies[var - 1]) # depends on [control=['if'], data=['var']] # depends on [control=['try'], data=[]]
except ValueError:
out = []
for each in self.all_ontologies:
if line in each:
out += [each] # depends on [control=['if'], data=['each']] # depends on [control=['for'], data=['each']]
choice = self._selectFromList(out, line, 'ontology')
if choice:
self._load_ontology(choice) # depends on [control=['if'], data=[]] # depends on [control=['except'], data=[]] |
def admin_app_list(request):
    """
    Adopted from ``django.contrib.admin.sites.AdminSite.index``.
    Returns a list of lists of models grouped and ordered according to
    ``yacms.conf.ADMIN_MENU_ORDER``. Called from the
    ``admin_dropdown_menu`` template tag as well as the ``app_list``
    dashboard widget.
    """
    app_dict = {}
    # Model or view --> (group index, group title, item index, item title).
    menu_order = {}
    # Flatten ADMIN_MENU_ORDER into a lookup keyed by model label or
    # view/url-pattern name; items may be bare names or (title, name) pairs.
    for (group_index, group) in enumerate(settings.ADMIN_MENU_ORDER):
        group_title, items = group
        for (item_index, item) in enumerate(items):
            if isinstance(item, (tuple, list)):
                item_title, item = item
            else:
                item_title = None
            menu_order[item] = (group_index, group_title,
                                item_index, item_title)
    # Add all registered models, using group and title from menu order.
    for (model, model_admin) in admin.site._registry.items():
        opts = model._meta
        # Legacy in_menu() hook is still honoured if present, but
        # triggers the deprecation warning below.
        in_menu = not hasattr(model_admin, "in_menu") or model_admin.in_menu()
        if hasattr(model_admin, "in_menu"):
            import warnings
            warnings.warn(
                'ModelAdmin.in_menu() has been replaced with '
                'ModelAdmin.has_module_permission(request). See '
                'https://docs.djangoproject.com/en/stable/ref/contrib/admin/'
                '#django.contrib.admin.ModelAdmin.has_module_permission.',
                DeprecationWarning)
        in_menu = in_menu and model_admin.has_module_permission(request)
        # Only include models the current user is allowed to see.
        if in_menu and request.user.has_module_perms(opts.app_label):
            # admin_url_name doubles as a "has any permission" flag below.
            admin_url_name = ""
            if model_admin.has_change_permission(request):
                admin_url_name = "changelist"
                change_url = admin_url(model, admin_url_name)
            else:
                change_url = None
            if model_admin.has_add_permission(request):
                admin_url_name = "add"
                add_url = admin_url(model, admin_url_name)
            else:
                add_url = None
            # Skip models the user can neither change nor add.
            if admin_url_name:
                model_label = "%s.%s" % (opts.app_label, opts.object_name)
                try:
                    app_index, app_title, model_index, model_title = \
                        menu_order[model_label]
                except KeyError:
                    # Model not in ADMIN_MENU_ORDER: derive the group
                    # title from the app config and leave it unindexed.
                    app_index = None
                    try:
                        app_title = opts.app_config.verbose_name.title()
                    except AttributeError:
                        # Third party admin classes doing weird things.
                        # See GH #1628
                        app_title = ""
                    model_index = None
                    model_title = None
                else:
                    # Consume the entry so only non-model items (views/
                    # url names) remain in menu_order afterwards.
                    del menu_order[model_label]
                if not model_title:
                    # Fall back to the model's verbose plural name.
                    model_title = capfirst(model._meta.verbose_name_plural)
                if app_title not in app_dict:
                    app_dict[app_title] = {
                        "index": app_index,
                        "name": app_title,
                        "models": [],
                    }
                app_dict[app_title]["models"].append({
                    "index": model_index,
                    "perms": model_admin.get_model_perms(request),
                    "name": model_title,
                    "object_name": opts.object_name,
                    "admin_url": change_url,
                    "add_url": add_url
                })
    # Menu may also contain view or url pattern names given as (title, name).
    for (item_url, item) in menu_order.items():
        app_index, app_title, item_index, item_title = item
        try:
            item_url = reverse(item_url)
        except NoReverseMatch:
            # Silently drop entries that don't resolve to a URL.
            continue
        if app_title not in app_dict:
            app_dict[app_title] = {
                "index": app_index,
                "name": app_title,
                "models": [],
            }
        app_dict[app_title]["models"].append({
            "index": item_index,
            "perms": {"custom": True},
            "name": item_title,
            "admin_url": item_url,
        })
    app_list = list(app_dict.values())
    # Sort by explicit menu index first (unindexed entries sink to the
    # end via the 999 sentinel), then alphabetically by name.
    sort = lambda x: (x["index"] if x["index"] is not None else 999, x["name"])
    for app in app_list:
        app["models"].sort(key=sort)
    app_list.sort(key=sort)
    return app_list
constant[
Adopted from ``django.contrib.admin.sites.AdminSite.index``.
Returns a list of lists of models grouped and ordered according to
``yacms.conf.ADMIN_MENU_ORDER``. Called from the
``admin_dropdown_menu`` template tag as well as the ``app_list``
dashboard widget.
]
variable[app_dict] assign[=] dictionary[[], []]
variable[menu_order] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da18bc72e90>, <ast.Name object at 0x7da18bc71510>]]] in starred[call[name[enumerate], parameter[name[settings].ADMIN_MENU_ORDER]]] begin[:]
<ast.Tuple object at 0x7da18bc72b00> assign[=] name[group]
for taget[tuple[[<ast.Name object at 0x7da18bc70dc0>, <ast.Name object at 0x7da18bc70130>]]] in starred[call[name[enumerate], parameter[name[items]]]] begin[:]
if call[name[isinstance], parameter[name[item], tuple[[<ast.Name object at 0x7da18bc71330>, <ast.Name object at 0x7da18bc70ca0>]]]] begin[:]
<ast.Tuple object at 0x7da18bc73b20> assign[=] name[item]
call[name[menu_order]][name[item]] assign[=] tuple[[<ast.Name object at 0x7da18bc728c0>, <ast.Name object at 0x7da18bc73be0>, <ast.Name object at 0x7da18bc73c70>, <ast.Name object at 0x7da18bc716c0>]]
for taget[tuple[[<ast.Name object at 0x7da18bc73850>, <ast.Name object at 0x7da18bc73280>]]] in starred[call[name[admin].site._registry.items, parameter[]]] begin[:]
variable[opts] assign[=] name[model]._meta
variable[in_menu] assign[=] <ast.BoolOp object at 0x7da18bc704f0>
if call[name[hasattr], parameter[name[model_admin], constant[in_menu]]] begin[:]
import module[warnings]
call[name[warnings].warn, parameter[constant[ModelAdmin.in_menu() has been replaced with ModelAdmin.has_module_permission(request). See https://docs.djangoproject.com/en/stable/ref/contrib/admin/#django.contrib.admin.ModelAdmin.has_module_permission.], name[DeprecationWarning]]]
variable[in_menu] assign[=] <ast.BoolOp object at 0x7da18bc72aa0>
if <ast.BoolOp object at 0x7da18bc713c0> begin[:]
variable[admin_url_name] assign[=] constant[]
if call[name[model_admin].has_change_permission, parameter[name[request]]] begin[:]
variable[admin_url_name] assign[=] constant[changelist]
variable[change_url] assign[=] call[name[admin_url], parameter[name[model], name[admin_url_name]]]
if call[name[model_admin].has_add_permission, parameter[name[request]]] begin[:]
variable[admin_url_name] assign[=] constant[add]
variable[add_url] assign[=] call[name[admin_url], parameter[name[model], name[admin_url_name]]]
if name[admin_url_name] begin[:]
variable[model_label] assign[=] binary_operation[constant[%s.%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da18bc71ae0>, <ast.Attribute object at 0x7da18bc70e20>]]]
<ast.Try object at 0x7da18bc714e0>
if <ast.UnaryOp object at 0x7da18bc72f80> begin[:]
variable[model_title] assign[=] call[name[capfirst], parameter[name[model]._meta.verbose_name_plural]]
if compare[name[app_title] <ast.NotIn object at 0x7da2590d7190> name[app_dict]] begin[:]
call[name[app_dict]][name[app_title]] assign[=] dictionary[[<ast.Constant object at 0x7da18bc72890>, <ast.Constant object at 0x7da18bc705e0>, <ast.Constant object at 0x7da18bc700a0>], [<ast.Name object at 0x7da18bc70040>, <ast.Name object at 0x7da18bc70c40>, <ast.List object at 0x7da18bc73400>]]
call[call[call[name[app_dict]][name[app_title]]][constant[models]].append, parameter[dictionary[[<ast.Constant object at 0x7da18bc71390>, <ast.Constant object at 0x7da18bc71e40>, <ast.Constant object at 0x7da18bc73370>, <ast.Constant object at 0x7da18bc70c70>, <ast.Constant object at 0x7da18bc72b90>, <ast.Constant object at 0x7da18bc72650>], [<ast.Name object at 0x7da18bc72590>, <ast.Call object at 0x7da18bc710f0>, <ast.Name object at 0x7da18bc72c80>, <ast.Attribute object at 0x7da18bc73460>, <ast.Name object at 0x7da18bc70f40>, <ast.Name object at 0x7da18bc73a90>]]]]
for taget[tuple[[<ast.Name object at 0x7da18bc715d0>, <ast.Name object at 0x7da18bc707c0>]]] in starred[call[name[menu_order].items, parameter[]]] begin[:]
<ast.Tuple object at 0x7da18bc70190> assign[=] name[item]
<ast.Try object at 0x7da18bc71b10>
if compare[name[app_title] <ast.NotIn object at 0x7da2590d7190> name[app_dict]] begin[:]
call[name[app_dict]][name[app_title]] assign[=] dictionary[[<ast.Constant object at 0x7da18bc72110>, <ast.Constant object at 0x7da18bc70400>, <ast.Constant object at 0x7da18bc72800>], [<ast.Name object at 0x7da18bc721d0>, <ast.Name object at 0x7da18bc73f10>, <ast.List object at 0x7da18bc72dd0>]]
call[call[call[name[app_dict]][name[app_title]]][constant[models]].append, parameter[dictionary[[<ast.Constant object at 0x7da18bc70d30>, <ast.Constant object at 0x7da18bc71e70>, <ast.Constant object at 0x7da18bc71630>, <ast.Constant object at 0x7da18bc736d0>], [<ast.Name object at 0x7da18bc718d0>, <ast.Dict object at 0x7da18c4ccb50>, <ast.Name object at 0x7da18c4cdff0>, <ast.Name object at 0x7da18c4cf970>]]]]
variable[app_list] assign[=] call[name[list], parameter[call[name[app_dict].values, parameter[]]]]
variable[sort] assign[=] <ast.Lambda object at 0x7da18c4cde40>
for taget[name[app]] in starred[name[app_list]] begin[:]
call[call[name[app]][constant[models]].sort, parameter[]]
call[name[app_list].sort, parameter[]]
return[name[app_list]] | keyword[def] identifier[admin_app_list] ( identifier[request] ):
literal[string]
identifier[app_dict] ={}
identifier[menu_order] ={}
keyword[for] ( identifier[group_index] , identifier[group] ) keyword[in] identifier[enumerate] ( identifier[settings] . identifier[ADMIN_MENU_ORDER] ):
identifier[group_title] , identifier[items] = identifier[group]
keyword[for] ( identifier[item_index] , identifier[item] ) keyword[in] identifier[enumerate] ( identifier[items] ):
keyword[if] identifier[isinstance] ( identifier[item] ,( identifier[tuple] , identifier[list] )):
identifier[item_title] , identifier[item] = identifier[item]
keyword[else] :
identifier[item_title] = keyword[None]
identifier[menu_order] [ identifier[item] ]=( identifier[group_index] , identifier[group_title] ,
identifier[item_index] , identifier[item_title] )
keyword[for] ( identifier[model] , identifier[model_admin] ) keyword[in] identifier[admin] . identifier[site] . identifier[_registry] . identifier[items] ():
identifier[opts] = identifier[model] . identifier[_meta]
identifier[in_menu] = keyword[not] identifier[hasattr] ( identifier[model_admin] , literal[string] ) keyword[or] identifier[model_admin] . identifier[in_menu] ()
keyword[if] identifier[hasattr] ( identifier[model_admin] , literal[string] ):
keyword[import] identifier[warnings]
identifier[warnings] . identifier[warn] (
literal[string]
literal[string]
literal[string]
literal[string] ,
identifier[DeprecationWarning] )
identifier[in_menu] = identifier[in_menu] keyword[and] identifier[model_admin] . identifier[has_module_permission] ( identifier[request] )
keyword[if] identifier[in_menu] keyword[and] identifier[request] . identifier[user] . identifier[has_module_perms] ( identifier[opts] . identifier[app_label] ):
identifier[admin_url_name] = literal[string]
keyword[if] identifier[model_admin] . identifier[has_change_permission] ( identifier[request] ):
identifier[admin_url_name] = literal[string]
identifier[change_url] = identifier[admin_url] ( identifier[model] , identifier[admin_url_name] )
keyword[else] :
identifier[change_url] = keyword[None]
keyword[if] identifier[model_admin] . identifier[has_add_permission] ( identifier[request] ):
identifier[admin_url_name] = literal[string]
identifier[add_url] = identifier[admin_url] ( identifier[model] , identifier[admin_url_name] )
keyword[else] :
identifier[add_url] = keyword[None]
keyword[if] identifier[admin_url_name] :
identifier[model_label] = literal[string] %( identifier[opts] . identifier[app_label] , identifier[opts] . identifier[object_name] )
keyword[try] :
identifier[app_index] , identifier[app_title] , identifier[model_index] , identifier[model_title] = identifier[menu_order] [ identifier[model_label] ]
keyword[except] identifier[KeyError] :
identifier[app_index] = keyword[None]
keyword[try] :
identifier[app_title] = identifier[opts] . identifier[app_config] . identifier[verbose_name] . identifier[title] ()
keyword[except] identifier[AttributeError] :
identifier[app_title] = literal[string]
identifier[model_index] = keyword[None]
identifier[model_title] = keyword[None]
keyword[else] :
keyword[del] identifier[menu_order] [ identifier[model_label] ]
keyword[if] keyword[not] identifier[model_title] :
identifier[model_title] = identifier[capfirst] ( identifier[model] . identifier[_meta] . identifier[verbose_name_plural] )
keyword[if] identifier[app_title] keyword[not] keyword[in] identifier[app_dict] :
identifier[app_dict] [ identifier[app_title] ]={
literal[string] : identifier[app_index] ,
literal[string] : identifier[app_title] ,
literal[string] :[],
}
identifier[app_dict] [ identifier[app_title] ][ literal[string] ]. identifier[append] ({
literal[string] : identifier[model_index] ,
literal[string] : identifier[model_admin] . identifier[get_model_perms] ( identifier[request] ),
literal[string] : identifier[model_title] ,
literal[string] : identifier[opts] . identifier[object_name] ,
literal[string] : identifier[change_url] ,
literal[string] : identifier[add_url]
})
keyword[for] ( identifier[item_url] , identifier[item] ) keyword[in] identifier[menu_order] . identifier[items] ():
identifier[app_index] , identifier[app_title] , identifier[item_index] , identifier[item_title] = identifier[item]
keyword[try] :
identifier[item_url] = identifier[reverse] ( identifier[item_url] )
keyword[except] identifier[NoReverseMatch] :
keyword[continue]
keyword[if] identifier[app_title] keyword[not] keyword[in] identifier[app_dict] :
identifier[app_dict] [ identifier[app_title] ]={
literal[string] : identifier[app_index] ,
literal[string] : identifier[app_title] ,
literal[string] :[],
}
identifier[app_dict] [ identifier[app_title] ][ literal[string] ]. identifier[append] ({
literal[string] : identifier[item_index] ,
literal[string] :{ literal[string] : keyword[True] },
literal[string] : identifier[item_title] ,
literal[string] : identifier[item_url] ,
})
identifier[app_list] = identifier[list] ( identifier[app_dict] . identifier[values] ())
identifier[sort] = keyword[lambda] identifier[x] :( identifier[x] [ literal[string] ] keyword[if] identifier[x] [ literal[string] ] keyword[is] keyword[not] keyword[None] keyword[else] literal[int] , identifier[x] [ literal[string] ])
keyword[for] identifier[app] keyword[in] identifier[app_list] :
identifier[app] [ literal[string] ]. identifier[sort] ( identifier[key] = identifier[sort] )
identifier[app_list] . identifier[sort] ( identifier[key] = identifier[sort] )
keyword[return] identifier[app_list] | def admin_app_list(request):
"""
Adopted from ``django.contrib.admin.sites.AdminSite.index``.
Returns a list of lists of models grouped and ordered according to
``yacms.conf.ADMIN_MENU_ORDER``. Called from the
``admin_dropdown_menu`` template tag as well as the ``app_list``
dashboard widget.
"""
app_dict = {}
# Model or view --> (group index, group title, item index, item title).
menu_order = {}
for (group_index, group) in enumerate(settings.ADMIN_MENU_ORDER):
(group_title, items) = group
for (item_index, item) in enumerate(items):
if isinstance(item, (tuple, list)):
(item_title, item) = item # depends on [control=['if'], data=[]]
else:
item_title = None
menu_order[item] = (group_index, group_title, item_index, item_title) # depends on [control=['for'], data=[]] # depends on [control=['for'], data=[]]
# Add all registered models, using group and title from menu order.
for (model, model_admin) in admin.site._registry.items():
opts = model._meta
in_menu = not hasattr(model_admin, 'in_menu') or model_admin.in_menu()
if hasattr(model_admin, 'in_menu'):
import warnings
warnings.warn('ModelAdmin.in_menu() has been replaced with ModelAdmin.has_module_permission(request). See https://docs.djangoproject.com/en/stable/ref/contrib/admin/#django.contrib.admin.ModelAdmin.has_module_permission.', DeprecationWarning) # depends on [control=['if'], data=[]]
in_menu = in_menu and model_admin.has_module_permission(request)
if in_menu and request.user.has_module_perms(opts.app_label):
admin_url_name = ''
if model_admin.has_change_permission(request):
admin_url_name = 'changelist'
change_url = admin_url(model, admin_url_name) # depends on [control=['if'], data=[]]
else:
change_url = None
if model_admin.has_add_permission(request):
admin_url_name = 'add'
add_url = admin_url(model, admin_url_name) # depends on [control=['if'], data=[]]
else:
add_url = None
if admin_url_name:
model_label = '%s.%s' % (opts.app_label, opts.object_name)
try:
(app_index, app_title, model_index, model_title) = menu_order[model_label] # depends on [control=['try'], data=[]]
except KeyError:
app_index = None
try:
app_title = opts.app_config.verbose_name.title() # depends on [control=['try'], data=[]]
except AttributeError:
# Third party admin classes doing weird things.
# See GH #1628
app_title = '' # depends on [control=['except'], data=[]]
model_index = None
model_title = None # depends on [control=['except'], data=[]]
else:
del menu_order[model_label]
if not model_title:
model_title = capfirst(model._meta.verbose_name_plural) # depends on [control=['if'], data=[]]
if app_title not in app_dict:
app_dict[app_title] = {'index': app_index, 'name': app_title, 'models': []} # depends on [control=['if'], data=['app_title', 'app_dict']]
app_dict[app_title]['models'].append({'index': model_index, 'perms': model_admin.get_model_perms(request), 'name': model_title, 'object_name': opts.object_name, 'admin_url': change_url, 'add_url': add_url}) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
# Menu may also contain view or url pattern names given as (title, name).
for (item_url, item) in menu_order.items():
(app_index, app_title, item_index, item_title) = item
try:
item_url = reverse(item_url) # depends on [control=['try'], data=[]]
except NoReverseMatch:
continue # depends on [control=['except'], data=[]]
if app_title not in app_dict:
app_dict[app_title] = {'index': app_index, 'name': app_title, 'models': []} # depends on [control=['if'], data=['app_title', 'app_dict']]
app_dict[app_title]['models'].append({'index': item_index, 'perms': {'custom': True}, 'name': item_title, 'admin_url': item_url}) # depends on [control=['for'], data=[]]
app_list = list(app_dict.values())
sort = lambda x: (x['index'] if x['index'] is not None else 999, x['name'])
for app in app_list:
app['models'].sort(key=sort) # depends on [control=['for'], data=['app']]
app_list.sort(key=sort)
return app_list |
def nmf_kfold(data, k, n_runs=10, **nmf_params):
    """
    Runs K-fold ensemble topic modeling (Belford et al. 2017).

    Args:
        data: 2D array; columns are split into folds, so presumably
            shaped (n_features, n_samples) -- TODO confirm orientation.
        k (int): number of topics/components.
        n_runs (int): number of folds (and base NMF runs).
        **nmf_params: currently unused; reserved for forwarding to NMF.

    Returns:
        tuple: (W, H) -- the ensemble basis matrix and the final
        coefficient matrix from the warm-started NMF run.
    """
    # TODO
    nmf = NMF(k)
    W_list = []
    kf = KFold(n_splits=n_runs, shuffle=True)
    # TODO: randomly divide data into n_runs folds
    # Fit a base NMF on each training fold (column subset) and collect
    # the per-fold basis matrices. The test fold is not used here.
    for train_index, _ in kf.split(data.T):
        W_list.append(nmf.fit_transform(data[:, train_index]))
    # "Meta-NMF" step: stack the per-fold bases and factor them again
    # to obtain a single ensemble basis.
    W_stacked = np.hstack(W_list)
    nmf_w = nmf.fit_transform(W_stacked)
    # Project the data onto the ensemble basis to build an initial H,
    # then run a final NMF warm-started from (W, H).
    H_new = data.T.dot(nmf_w).T
    nmf2 = NMF(k, init='custom')
    nmf_w = nmf2.fit_transform(data, W=nmf_w, H=H_new)
    H_new = nmf2.components_
    return nmf_w, H_new
constant[
Runs K-fold ensemble topic modeling (Belford et al. 2017)
]
variable[nmf] assign[=] call[name[NMF], parameter[name[k]]]
variable[W_list] assign[=] list[[]]
variable[kf] assign[=] call[name[KFold], parameter[]]
for taget[tuple[[<ast.Name object at 0x7da2044c2e00>, <ast.Name object at 0x7da2044c14b0>]]] in starred[call[name[kf].split, parameter[name[data].T]]] begin[:]
variable[W] assign[=] call[name[nmf].fit_transform, parameter[call[name[data]][tuple[[<ast.Slice object at 0x7da2044c2c80>, <ast.Name object at 0x7da2044c3130>]]]]]
call[name[W_list].append, parameter[name[W]]]
variable[W_stacked] assign[=] call[name[np].hstack, parameter[name[W_list]]]
variable[nmf_w] assign[=] call[name[nmf].fit_transform, parameter[name[W_stacked]]]
variable[nmf_h] assign[=] name[nmf].components_
variable[H_new] assign[=] call[name[data].T.dot, parameter[name[nmf_w]]].T
variable[nmf2] assign[=] call[name[NMF], parameter[name[k]]]
variable[nmf_w] assign[=] call[name[nmf2].fit_transform, parameter[name[data]]]
variable[H_new] assign[=] name[nmf2].components_
return[tuple[[<ast.Name object at 0x7da2044c3bb0>, <ast.Name object at 0x7da2044c0ca0>]]] | keyword[def] identifier[nmf_kfold] ( identifier[data] , identifier[k] , identifier[n_runs] = literal[int] ,** identifier[nmf_params] ):
literal[string]
identifier[nmf] = identifier[NMF] ( identifier[k] )
identifier[W_list] =[]
identifier[kf] = identifier[KFold] ( identifier[n_splits] = identifier[n_runs] , identifier[shuffle] = keyword[True] )
keyword[for] identifier[train_index] , identifier[test_index] keyword[in] identifier[kf] . identifier[split] ( identifier[data] . identifier[T] ):
identifier[W] = identifier[nmf] . identifier[fit_transform] ( identifier[data] [:, identifier[train_index] ])
identifier[W_list] . identifier[append] ( identifier[W] )
identifier[W_stacked] = identifier[np] . identifier[hstack] ( identifier[W_list] )
identifier[nmf_w] = identifier[nmf] . identifier[fit_transform] ( identifier[W_stacked] )
identifier[nmf_h] = identifier[nmf] . identifier[components_]
identifier[H_new] = identifier[data] . identifier[T] . identifier[dot] ( identifier[nmf_w] ). identifier[T]
identifier[nmf2] = identifier[NMF] ( identifier[k] , identifier[init] = literal[string] )
identifier[nmf_w] = identifier[nmf2] . identifier[fit_transform] ( identifier[data] , identifier[W] = identifier[nmf_w] , identifier[H] = identifier[H_new] )
identifier[H_new] = identifier[nmf2] . identifier[components_]
keyword[return] identifier[nmf_w] , identifier[H_new] | def nmf_kfold(data, k, n_runs=10, **nmf_params):
"""
Runs K-fold ensemble topic modeling (Belford et al. 2017)
"""
# TODO
nmf = NMF(k)
W_list = []
kf = KFold(n_splits=n_runs, shuffle=True)
# TODO: randomly divide data into n_runs folds
for (train_index, test_index) in kf.split(data.T):
W = nmf.fit_transform(data[:, train_index])
W_list.append(W) # depends on [control=['for'], data=[]]
W_stacked = np.hstack(W_list)
nmf_w = nmf.fit_transform(W_stacked)
nmf_h = nmf.components_
H_new = data.T.dot(nmf_w).T
nmf2 = NMF(k, init='custom')
nmf_w = nmf2.fit_transform(data, W=nmf_w, H=H_new)
H_new = nmf2.components_
#W_new = W_new/W_new.sum(0)
return (nmf_w, H_new) |
def merge_config(configs):
    """Recursively deep-merge the configs into one another (highest priority comes first)."""
    merged = {}
    for cfg_name in configs:
        merged = dictmerge(merged, configs[cfg_name], cfg_name)
    return merged
constant[ recursively deep-merge the configs into one another (highest priority comes first) ]
variable[new_config] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da1b1506890>, <ast.Name object at 0x7da1b1504b20>]]] in starred[call[name[configs].items, parameter[]]] begin[:]
variable[new_config] assign[=] call[name[dictmerge], parameter[name[new_config], name[config], name[name]]]
return[name[new_config]] | keyword[def] identifier[merge_config] ( identifier[configs] ):
literal[string]
identifier[new_config] ={}
keyword[for] identifier[name] , identifier[config] keyword[in] identifier[configs] . identifier[items] ():
identifier[new_config] = identifier[dictmerge] ( identifier[new_config] , identifier[config] , identifier[name] )
keyword[return] identifier[new_config] | def merge_config(configs):
""" recursively deep-merge the configs into one another (highest priority comes first) """
new_config = {}
for (name, config) in configs.items():
new_config = dictmerge(new_config, config, name) # depends on [control=['for'], data=[]]
return new_config |
def get_textnode_subtrees(html_tree,
                          xpath_to_text=TEXT_FINDER_XPATH):
    """A modification of get_sentence_xpath_tuples: some code was
    refactored-out, variable names are slightly different.

    Note: despite what the original docstring claimed, this function does
    NOT wrap the lxml tree construction -- it consumes an already-parsed
    lxml tree/element, so callers must parse the file/URL themselves.

    :param html_tree: parsed lxml ElementTree (or root element) to search.
    :param xpath_to_text: xpath selecting elements that contain text nodes.
    :return: list of TextNodeSubTree objects, one per matched element.
    :raises Exception: if no element satisfies ``xpath_to_text``.
    """
    try:
        # ElementTree objects expose getroot(); bare elements raise
        # AttributeError and fall back to getroottree() directly.
        xpath_finder = html_tree.getroot().getroottree().getpath
    except AttributeError:
        xpath_finder = html_tree.getroottree().getpath
    nodes_with_text = html_tree.xpath(xpath_to_text)
    # Within the TextNodeSubTree construction, the ABSL is calculated
    # refer to eatiht_trees.py
    parentpaths_textnodes = [TextNodeSubTree(n, xpath_finder(n),
                                             n.xpath('.//text()'))
                             for n in nodes_with_text]
    # `len(x) is 0` relied on CPython's small-int caching; test emptiness
    # by truthiness instead.
    if not parentpaths_textnodes:
        raise Exception("No text nodes satisfied the xpath:\n\n" +
                        xpath_to_text + "\n\nThis can be due to user's" +
                        " custom xpath, min_str_length value, or both")
    return parentpaths_textnodes
constant[A modification of get_sentence_xpath_tuples: some code was
refactored-out, variable names are slightly different. This function
does wrap the ltml.tree construction, so a file path, file-like
structure, or URL is required.
]
<ast.Try object at 0x7da204622aa0>
variable[nodes_with_text] assign[=] call[name[html_tree].xpath, parameter[name[xpath_to_text]]]
variable[parentpaths_textnodes] assign[=] <ast.ListComp object at 0x7da204622650>
if compare[call[name[len], parameter[name[parentpaths_textnodes]]] is constant[0]] begin[:]
<ast.Raise object at 0x7da2046226b0>
return[name[parentpaths_textnodes]] | keyword[def] identifier[get_textnode_subtrees] ( identifier[html_tree] ,
identifier[xpath_to_text] = identifier[TEXT_FINDER_XPATH] ):
literal[string]
keyword[try] :
identifier[xpath_finder] = identifier[html_tree] . identifier[getroot] (). identifier[getroottree] (). identifier[getpath]
keyword[except] ( identifier[AttributeError] ):
identifier[xpath_finder] = identifier[html_tree] . identifier[getroottree] (). identifier[getpath]
identifier[nodes_with_text] = identifier[html_tree] . identifier[xpath] ( identifier[xpath_to_text] )
identifier[parentpaths_textnodes] =[ identifier[TextNodeSubTree] ( identifier[n] , identifier[xpath_finder] ( identifier[n] ),
identifier[n] . identifier[xpath] ( literal[string] ))
keyword[for] identifier[n] keyword[in] identifier[nodes_with_text] ]
keyword[if] identifier[len] ( identifier[parentpaths_textnodes] ) keyword[is] literal[int] :
keyword[raise] identifier[Exception] ( literal[string] +
identifier[xpath_to_text] + literal[string] +
literal[string] )
keyword[return] identifier[parentpaths_textnodes] | def get_textnode_subtrees(html_tree, xpath_to_text=TEXT_FINDER_XPATH):
"""A modification of get_sentence_xpath_tuples: some code was
refactored-out, variable names are slightly different. This function
does wrap the ltml.tree construction, so a file path, file-like
structure, or URL is required.
"""
try:
xpath_finder = html_tree.getroot().getroottree().getpath # depends on [control=['try'], data=[]]
except AttributeError:
xpath_finder = html_tree.getroottree().getpath # depends on [control=['except'], data=[]]
nodes_with_text = html_tree.xpath(xpath_to_text) # Within the TextNodeSubTree construction, the ABSL is calculated
# refer to eatiht_trees.py
parentpaths_textnodes = [TextNodeSubTree(n, xpath_finder(n), n.xpath('.//text()')) for n in nodes_with_text]
if len(parentpaths_textnodes) is 0:
raise Exception('No text nodes satisfied the xpath:\n\n' + xpath_to_text + "\n\nThis can be due to user's" + ' custom xpath, min_str_length value, or both') # depends on [control=['if'], data=[]]
return parentpaths_textnodes |
def end_of_directory(self, succeeded=True, update_listing=False,
                     cache_to_disc=True):
    '''Wrapper for xbmcplugin.endOfDirectory. Records state in
    self._end_of_directory.

    Typically it is not necessary to call this method directly, as
    calling :meth:`~xbmcswift2.Plugin.finish` will call this method.

    :raises AssertionError: if endOfDirectory was already called for
        this handle.
    '''
    self._update_listing = update_listing
    # Guard against double invocation. Raise explicitly instead of a
    # trailing `assert False` so the check survives `python -O`, where
    # assert statements are stripped and the old code silently returned None.
    if self._end_of_directory:
        raise AssertionError('Already called endOfDirectory.')
    self._end_of_directory = True
    # Finalize the directory items
    return xbmcplugin.endOfDirectory(self.handle, succeeded,
                                     update_listing, cache_to_disc)
constant[Wrapper for xbmcplugin.endOfDirectory. Records state in
self._end_of_directory.
Typically it is not necessary to call this method directly, as
calling :meth:`~xbmcswift2.Plugin.finish` will call this method.
]
name[self]._update_listing assign[=] name[update_listing]
if <ast.UnaryOp object at 0x7da18dc04400> begin[:]
name[self]._end_of_directory assign[=] constant[True]
return[call[name[xbmcplugin].endOfDirectory, parameter[name[self].handle, name[succeeded], name[update_listing], name[cache_to_disc]]]]
assert[constant[False]] | keyword[def] identifier[end_of_directory] ( identifier[self] , identifier[succeeded] = keyword[True] , identifier[update_listing] = keyword[False] ,
identifier[cache_to_disc] = keyword[True] ):
literal[string]
identifier[self] . identifier[_update_listing] = identifier[update_listing]
keyword[if] keyword[not] identifier[self] . identifier[_end_of_directory] :
identifier[self] . identifier[_end_of_directory] = keyword[True]
keyword[return] identifier[xbmcplugin] . identifier[endOfDirectory] ( identifier[self] . identifier[handle] , identifier[succeeded] ,
identifier[update_listing] , identifier[cache_to_disc] )
keyword[assert] keyword[False] , literal[string] | def end_of_directory(self, succeeded=True, update_listing=False, cache_to_disc=True):
"""Wrapper for xbmcplugin.endOfDirectory. Records state in
self._end_of_directory.
Typically it is not necessary to call this method directly, as
calling :meth:`~xbmcswift2.Plugin.finish` will call this method.
"""
self._update_listing = update_listing
if not self._end_of_directory:
self._end_of_directory = True
# Finalize the directory items
return xbmcplugin.endOfDirectory(self.handle, succeeded, update_listing, cache_to_disc) # depends on [control=['if'], data=[]]
assert False, 'Already called endOfDirectory.' |
def get_my_data(self, session=None):
    """
    Returns a list of data descriptions for data which has been purchased by the signed in user.
    :param requests.session session: Requests session object
    :rtype: dict
    """
    # NOTE: locals() is captured before any other local variable exists, so
    # the request parameters are exactly this method's arguments. Do not
    # rename parameters or reorder statements: the payload keys are derived
    # from these local names (clean_locals presumably strips self/session --
    # verify against its implementation).
    params = clean_locals(locals())
    method = 'GetMyData'
    (response, elapsed_time) = self.request(method, params, session)
    # elapsed_time is discarded; only the API payload is returned.
    return response
constant[
Returns a list of data descriptions for data which has been purchased by the signed in user.
:param requests.session session: Requests session object
:rtype: dict
]
variable[params] assign[=] call[name[clean_locals], parameter[call[name[locals], parameter[]]]]
variable[method] assign[=] constant[GetMyData]
<ast.Tuple object at 0x7da1b17fa020> assign[=] call[name[self].request, parameter[name[method], name[params], name[session]]]
return[name[response]] | keyword[def] identifier[get_my_data] ( identifier[self] , identifier[session] = keyword[None] ):
literal[string]
identifier[params] = identifier[clean_locals] ( identifier[locals] ())
identifier[method] = literal[string]
( identifier[response] , identifier[elapsed_time] )= identifier[self] . identifier[request] ( identifier[method] , identifier[params] , identifier[session] )
keyword[return] identifier[response] | def get_my_data(self, session=None):
"""
Returns a list of data descriptions for data which has been purchased by the signed in user.
:param requests.session session: Requests session object
:rtype: dict
"""
params = clean_locals(locals())
method = 'GetMyData'
(response, elapsed_time) = self.request(method, params, session)
return response |
def _jitter(self, durations, event, jitter_level, seed=None):
    """Determine extent to jitter tied event times. Automatically called by fit if tied event times are detected
    """
    np.random.seed(seed)
    if jitter_level <= 0:
        raise ValueError("The jitter level is less than zero, please select a jitter value greater than 0")
    # Only observed (event) times are jittered; censored times are left as-is.
    observed_times = durations[event != 0].copy()
    num_events = observed_times.shape[0]
    # Draw one uniform offset in [-jitter_level, jitter_level) per event time.
    offsets = np.random.uniform(low=-1, high=1, size=num_events) * jitter_level
    observed_times = observed_times + offsets
    # Re-align the shifted event times with the full duration series,
    # filling censored positions back in from the original values.
    jittered = observed_times.align(durations)[0].fillna(durations)
    # Recurse until no tied event times remain after jittering.
    if not self._check_for_duplicates(durations=jittered, events=event):
        return jittered
    return self._jitter(durations=jittered, event=event, jitter_level=jitter_level, seed=seed)
return durations_jitter | def function[_jitter, parameter[self, durations, event, jitter_level, seed]]:
constant[Determine extent to jitter tied event times. Automatically called by fit if tied event times are detected
]
call[name[np].random.seed, parameter[name[seed]]]
if compare[name[jitter_level] less_or_equal[<=] constant[0]] begin[:]
<ast.Raise object at 0x7da20cabf6a0>
variable[event_times] assign[=] call[call[name[durations]][compare[name[event] not_equal[!=] constant[0]]].copy, parameter[]]
variable[n] assign[=] call[name[event_times].shape][constant[0]]
variable[shift] assign[=] binary_operation[call[name[np].random.uniform, parameter[]] * name[jitter_level]]
<ast.AugAssign object at 0x7da20cabfd00>
variable[durations_jitter] assign[=] call[call[call[name[event_times].align, parameter[name[durations]]]][constant[0]].fillna, parameter[name[durations]]]
if call[name[self]._check_for_duplicates, parameter[]] begin[:]
return[call[name[self]._jitter, parameter[]]]
return[name[durations_jitter]] | keyword[def] identifier[_jitter] ( identifier[self] , identifier[durations] , identifier[event] , identifier[jitter_level] , identifier[seed] = keyword[None] ):
literal[string]
identifier[np] . identifier[random] . identifier[seed] ( identifier[seed] )
keyword[if] identifier[jitter_level] <= literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[event_times] = identifier[durations] [ identifier[event] != literal[int] ]. identifier[copy] ()
identifier[n] = identifier[event_times] . identifier[shape] [ literal[int] ]
identifier[shift] = identifier[np] . identifier[random] . identifier[uniform] ( identifier[low] =- literal[int] , identifier[high] = literal[int] , identifier[size] = identifier[n] )* identifier[jitter_level]
identifier[event_times] += identifier[shift]
identifier[durations_jitter] = identifier[event_times] . identifier[align] ( identifier[durations] )[ literal[int] ]. identifier[fillna] ( identifier[durations] )
keyword[if] identifier[self] . identifier[_check_for_duplicates] ( identifier[durations] = identifier[durations_jitter] , identifier[events] = identifier[event] ):
keyword[return] identifier[self] . identifier[_jitter] ( identifier[durations] = identifier[durations_jitter] , identifier[event] = identifier[event] , identifier[jitter_level] = identifier[jitter_level] , identifier[seed] = identifier[seed] )
keyword[return] identifier[durations_jitter] | def _jitter(self, durations, event, jitter_level, seed=None):
"""Determine extent to jitter tied event times. Automatically called by fit if tied event times are detected
"""
np.random.seed(seed)
if jitter_level <= 0:
raise ValueError('The jitter level is less than zero, please select a jitter value greater than 0') # depends on [control=['if'], data=[]]
event_times = durations[event != 0].copy()
n = event_times.shape[0]
# Determining extent to jitter event times up or down
shift = np.random.uniform(low=-1, high=1, size=n) * jitter_level
event_times += shift
durations_jitter = event_times.align(durations)[0].fillna(durations)
# Recursive call if event times are still tied after jitter
if self._check_for_duplicates(durations=durations_jitter, events=event):
return self._jitter(durations=durations_jitter, event=event, jitter_level=jitter_level, seed=seed) # depends on [control=['if'], data=[]]
return durations_jitter |
def fingerprint_correlation(T, obs1, obs2=None, tau=1, k=None, ncv=None):
    r"""Dynamical fingerprint for equilibrium correlation experiment.
    Parameters
    ----------
    T : (M, M) ndarray or scipy.sparse matrix
        Transition matrix
    obs1 : (M,) ndarray
        Observable, represented as vector on state space
    obs2 : (M,) ndarray (optional)
        Second observable, for cross-correlations
    k : int (optional)
        Number of time-scales and amplitudes to compute
    tau : int (optional)
        Lag time of given transition matrix, for correct time-scales
    ncv : int (optional)
        The number of Lanczos vectors generated, `ncv` must be greater than k;
        it is recommended that ncv > 2*k
    Returns
    -------
    timescales : (N,) ndarray
        Time-scales of the transition matrix
    amplitudes : (N,) ndarray
        Amplitudes for the correlation experiment
    See also
    --------
    correlation, fingerprint_relaxation
    References
    ----------
    .. [1] Noe, F, S Doose, I Daidone, M Loellmann, M Sauer, J D
        Chodera and J Smith. 2010. Dynamical fingerprints for probing
        individual relaxation processes in biomolecular dynamics with
        simulations and kinetic experiments. PNAS 108 (12): 4822-4827.
    Notes
    -----
    Fingerprints are a combination of time-scale and amplitude spectrum for
    a equilibrium correlation or a non-equilibrium relaxation experiment.
    **Auto-correlation**
    The auto-correlation of an observable :math:`a(x)` for a system in
    equilibrium is
    .. math:: \mathbb{E}_{\mu}[a(x,0)a(x,t)]=\sum_x \mu(x) a(x, 0) a(x, t)
    :math:`a(x,0)=a(x)` is the observable at time :math:`t=0`. It can
    be propagated forward in time using the t-step transition matrix
    :math:`p^{t}(x, y)`.
    The propagated observable at time :math:`t` is :math:`a(x,
    t)=\sum_y p^t(x, y)a(y, 0)`.
    Using the eigenvlaues and eigenvectors of the transition matrix the autocorrelation
    can be written as
    .. math:: \mathbb{E}_{\mu}[a(x,0)a(x,t)]=\sum_i \lambda_i^t \langle a, r_i\rangle_{\mu} \langle l_i, a \rangle.
    The fingerprint amplitudes :math:`\gamma_i` are given by
    .. math:: \gamma_i=\langle a, r_i\rangle_{\mu} \langle l_i, a \rangle.
    And the fingerprint time scales :math:`t_i` are given by
    .. math:: t_i=-\frac{\tau}{\log \lvert \lambda_i \rvert}.
    **Cross-correlation**
    The cross-correlation of two observables :math:`a(x)`, :math:`b(x)` is similarly given
    .. math:: \mathbb{E}_{\mu}[a(x,0)b(x,t)]=\sum_x \mu(x) a(x, 0) b(x, t)
    The fingerprint amplitudes :math:`\gamma_i` are similarly given in terms of the eigenvectors
    .. math:: \gamma_i=\langle a, r_i\rangle_{\mu} \langle l_i, b \rangle.
    Examples
    --------
    >>> import numpy as np
    >>> from msmtools.analysis import fingerprint_correlation
    >>> T = np.array([[0.9, 0.1, 0.0], [0.5, 0.0, 0.5], [0.0, 0.1, 0.9]])
    >>> a = np.array([1.0, 0.0, 0.0])
    >>> ts, amp = fingerprint_correlation(T, a)
    >>> ts
    array([        inf,  9.49122158,  0.43429448])
    >>> amp
    array([ 0.20661157,  0.22727273,  0.02066116])
    """
    # check if square matrix and remember size
    T = _types.ensure_ndarray_or_sparse(T, ndim=2, uniform=True, kind='numeric')
    n = T.shape[0]
    # will not do fingerprint analysis for nonreversible matrices
    if not is_reversible(T):
        raise ValueError('Fingerprint calculation is not supported for nonreversible transition matrices. ')
    obs1 = _types.ensure_ndarray(obs1, ndim=1, size=n, kind='numeric')
    # BUG FIX: the second sanitization must validate obs2 (which may be
    # None), not re-process obs1 -- otherwise obs2 was forwarded unchecked.
    obs2 = _types.ensure_ndarray_or_None(obs2, ndim=1, size=n, kind='numeric')
    # go: the sparse implementation accepts ncv for its Lanczos solver; the
    # dense implementation computes the full spectrum and takes no ncv.
    if _issparse(T):
        return sparse.fingerprints.fingerprint_correlation(T, obs1, obs2=obs2, tau=tau, k=k, ncv=ncv)
    else:
        return dense.fingerprints.fingerprint_correlation(T, obs1, obs2, tau=tau, k=k)
constant[Dynamical fingerprint for equilibrium correlation experiment.
Parameters
----------
T : (M, M) ndarray or scipy.sparse matrix
Transition matrix
obs1 : (M,) ndarray
Observable, represented as vector on state space
obs2 : (M,) ndarray (optional)
Second observable, for cross-correlations
k : int (optional)
Number of time-scales and amplitudes to compute
tau : int (optional)
Lag time of given transition matrix, for correct time-scales
ncv : int (optional)
The number of Lanczos vectors generated, `ncv` must be greater than k;
it is recommended that ncv > 2*k
Returns
-------
timescales : (N,) ndarray
Time-scales of the transition matrix
amplitudes : (N,) ndarray
Amplitudes for the correlation experiment
See also
--------
correlation, fingerprint_relaxation
References
----------
.. [1] Noe, F, S Doose, I Daidone, M Loellmann, M Sauer, J D
Chodera and J Smith. 2010. Dynamical fingerprints for probing
individual relaxation processes in biomolecular dynamics with
simulations and kinetic experiments. PNAS 108 (12): 4822-4827.
Notes
-----
Fingerprints are a combination of time-scale and amplitude spectrum for
a equilibrium correlation or a non-equilibrium relaxation experiment.
**Auto-correlation**
The auto-correlation of an observable :math:`a(x)` for a system in
equilibrium is
.. math:: \mathbb{E}_{\mu}[a(x,0)a(x,t)]=\sum_x \mu(x) a(x, 0) a(x, t)
:math:`a(x,0)=a(x)` is the observable at time :math:`t=0`. It can
be propagated forward in time using the t-step transition matrix
:math:`p^{t}(x, y)`.
The propagated observable at time :math:`t` is :math:`a(x,
t)=\sum_y p^t(x, y)a(y, 0)`.
Using the eigenvlaues and eigenvectors of the transition matrix the autocorrelation
can be written as
.. math:: \mathbb{E}_{\mu}[a(x,0)a(x,t)]=\sum_i \lambda_i^t \langle a, r_i\rangle_{\mu} \langle l_i, a \rangle.
The fingerprint amplitudes :math:`\gamma_i` are given by
.. math:: \gamma_i=\langle a, r_i\rangle_{\mu} \langle l_i, a \rangle.
And the fingerprint time scales :math:`t_i` are given by
.. math:: t_i=-\frac{\tau}{\log \lvert \lambda_i \rvert}.
**Cross-correlation**
The cross-correlation of two observables :math:`a(x)`, :math:`b(x)` is similarly given
.. math:: \mathbb{E}_{\mu}[a(x,0)b(x,t)]=\sum_x \mu(x) a(x, 0) b(x, t)
The fingerprint amplitudes :math:`\gamma_i` are similarly given in terms of the eigenvectors
.. math:: \gamma_i=\langle a, r_i\rangle_{\mu} \langle l_i, b \rangle.
Examples
--------
>>> import numpy as np
>>> from msmtools.analysis import fingerprint_correlation
>>> T = np.array([[0.9, 0.1, 0.0], [0.5, 0.0, 0.5], [0.0, 0.1, 0.9]])
>>> a = np.array([1.0, 0.0, 0.0])
>>> ts, amp = fingerprint_correlation(T, a)
>>> ts
array([ inf, 9.49122158, 0.43429448])
>>> amp
array([ 0.20661157, 0.22727273, 0.02066116])
]
variable[T] assign[=] call[name[_types].ensure_ndarray_or_sparse, parameter[name[T]]]
variable[n] assign[=] call[name[T].shape][constant[0]]
if <ast.UnaryOp object at 0x7da1b26a07c0> begin[:]
<ast.Raise object at 0x7da1b26a3940>
variable[obs1] assign[=] call[name[_types].ensure_ndarray, parameter[name[obs1]]]
variable[obs1] assign[=] call[name[_types].ensure_ndarray_or_None, parameter[name[obs1]]]
if call[name[_issparse], parameter[name[T]]] begin[:]
return[call[name[sparse].fingerprints.fingerprint_correlation, parameter[name[T], name[obs1]]]] | keyword[def] identifier[fingerprint_correlation] ( identifier[T] , identifier[obs1] , identifier[obs2] = keyword[None] , identifier[tau] = literal[int] , identifier[k] = keyword[None] , identifier[ncv] = keyword[None] ):
literal[string]
identifier[T] = identifier[_types] . identifier[ensure_ndarray_or_sparse] ( identifier[T] , identifier[ndim] = literal[int] , identifier[uniform] = keyword[True] , identifier[kind] = literal[string] )
identifier[n] = identifier[T] . identifier[shape] [ literal[int] ]
keyword[if] keyword[not] identifier[is_reversible] ( identifier[T] ):
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[obs1] = identifier[_types] . identifier[ensure_ndarray] ( identifier[obs1] , identifier[ndim] = literal[int] , identifier[size] = identifier[n] , identifier[kind] = literal[string] )
identifier[obs1] = identifier[_types] . identifier[ensure_ndarray_or_None] ( identifier[obs1] , identifier[ndim] = literal[int] , identifier[size] = identifier[n] , identifier[kind] = literal[string] )
keyword[if] identifier[_issparse] ( identifier[T] ):
keyword[return] identifier[sparse] . identifier[fingerprints] . identifier[fingerprint_correlation] ( identifier[T] , identifier[obs1] , identifier[obs2] = identifier[obs2] , identifier[tau] = identifier[tau] , identifier[k] = identifier[k] , identifier[ncv] = identifier[ncv] )
keyword[else] :
keyword[return] identifier[dense] . identifier[fingerprints] . identifier[fingerprint_correlation] ( identifier[T] , identifier[obs1] , identifier[obs2] , identifier[tau] = identifier[tau] , identifier[k] = identifier[k] ) | def fingerprint_correlation(T, obs1, obs2=None, tau=1, k=None, ncv=None):
"""Dynamical fingerprint for equilibrium correlation experiment.
Parameters
----------
T : (M, M) ndarray or scipy.sparse matrix
Transition matrix
obs1 : (M,) ndarray
Observable, represented as vector on state space
obs2 : (M,) ndarray (optional)
Second observable, for cross-correlations
k : int (optional)
Number of time-scales and amplitudes to compute
tau : int (optional)
Lag time of given transition matrix, for correct time-scales
ncv : int (optional)
The number of Lanczos vectors generated, `ncv` must be greater than k;
it is recommended that ncv > 2*k
Returns
-------
timescales : (N,) ndarray
Time-scales of the transition matrix
amplitudes : (N,) ndarray
Amplitudes for the correlation experiment
See also
--------
correlation, fingerprint_relaxation
References
----------
.. [1] Noe, F, S Doose, I Daidone, M Loellmann, M Sauer, J D
Chodera and J Smith. 2010. Dynamical fingerprints for probing
individual relaxation processes in biomolecular dynamics with
simulations and kinetic experiments. PNAS 108 (12): 4822-4827.
Notes
-----
Fingerprints are a combination of time-scale and amplitude spectrum for
a equilibrium correlation or a non-equilibrium relaxation experiment.
**Auto-correlation**
The auto-correlation of an observable :math:`a(x)` for a system in
equilibrium is
.. math:: \\mathbb{E}_{\\mu}[a(x,0)a(x,t)]=\\sum_x \\mu(x) a(x, 0) a(x, t)
:math:`a(x,0)=a(x)` is the observable at time :math:`t=0`. It can
be propagated forward in time using the t-step transition matrix
:math:`p^{t}(x, y)`.
The propagated observable at time :math:`t` is :math:`a(x,
t)=\\sum_y p^t(x, y)a(y, 0)`.
Using the eigenvlaues and eigenvectors of the transition matrix the autocorrelation
can be written as
.. math:: \\mathbb{E}_{\\mu}[a(x,0)a(x,t)]=\\sum_i \\lambda_i^t \\langle a, r_i\\rangle_{\\mu} \\langle l_i, a \\rangle.
The fingerprint amplitudes :math:`\\gamma_i` are given by
.. math:: \\gamma_i=\\langle a, r_i\\rangle_{\\mu} \\langle l_i, a \\rangle.
And the fingerprint time scales :math:`t_i` are given by
.. math:: t_i=-\\frac{\\tau}{\\log \\lvert \\lambda_i \\rvert}.
**Cross-correlation**
The cross-correlation of two observables :math:`a(x)`, :math:`b(x)` is similarly given
.. math:: \\mathbb{E}_{\\mu}[a(x,0)b(x,t)]=\\sum_x \\mu(x) a(x, 0) b(x, t)
The fingerprint amplitudes :math:`\\gamma_i` are similarly given in terms of the eigenvectors
.. math:: \\gamma_i=\\langle a, r_i\\rangle_{\\mu} \\langle l_i, b \\rangle.
Examples
--------
>>> import numpy as np
>>> from msmtools.analysis import fingerprint_correlation
>>> T = np.array([[0.9, 0.1, 0.0], [0.5, 0.0, 0.5], [0.0, 0.1, 0.9]])
>>> a = np.array([1.0, 0.0, 0.0])
>>> ts, amp = fingerprint_correlation(T, a)
>>> ts
array([ inf, 9.49122158, 0.43429448])
>>> amp
array([ 0.20661157, 0.22727273, 0.02066116])
"""
# check if square matrix and remember size
T = _types.ensure_ndarray_or_sparse(T, ndim=2, uniform=True, kind='numeric')
n = T.shape[0]
# will not do fingerprint analysis for nonreversible matrices
if not is_reversible(T):
raise ValueError('Fingerprint calculation is not supported for nonreversible transition matrices. ') # depends on [control=['if'], data=[]]
obs1 = _types.ensure_ndarray(obs1, ndim=1, size=n, kind='numeric')
obs1 = _types.ensure_ndarray_or_None(obs1, ndim=1, size=n, kind='numeric')
# go
if _issparse(T):
return sparse.fingerprints.fingerprint_correlation(T, obs1, obs2=obs2, tau=tau, k=k, ncv=ncv) # depends on [control=['if'], data=[]]
else:
return dense.fingerprints.fingerprint_correlation(T, obs1, obs2, tau=tau, k=k) |
def pop(self, key, default=UndefinedKey):
    """Remove specified key and return the corresponding value.

    If key is not found, default is returned if given, otherwise
    ConfigMissingException is raised. The key is split with parse_key and
    only the last path element is deleted from its parent tree, so the
    rest of the chain is left intact.

    :param key: key to use (dot separated). E.g., a.b.c
    :type key: basestring
    :param default: default value if key not found
    :type default: object
    :return: value in the tree located at key
    """
    # Preserve short-circuit order: the membership test only runs when a
    # default was actually supplied.
    if default != UndefinedKey and key not in self:
        return default
    value = self.get(key, UndefinedKey)
    path = ConfigTree.parse_key(key)
    parent_path = self.KEY_SEP.join(path[:-1])
    leaf = path[-1]
    # Delete the leaf from its parent tree, or from self for a top-level key.
    target = self.get(parent_path) if parent_path else self
    del target[leaf]
    return value
constant[Remove specified key and return the corresponding value.
If key is not found, default is returned if given, otherwise ConfigMissingException is raised
This method assumes the user wants to remove the last value in the chain so it parses via parse_key
and pops the last value out of the dict.
:param key: key to use (dot separated). E.g., a.b.c
:type key: basestring
:param default: default value if key not found
:type default: object
:param default: default value if key not found
:return: value in the tree located at key
]
if <ast.BoolOp object at 0x7da1b08a3340> begin[:]
return[name[default]]
variable[value] assign[=] call[name[self].get, parameter[name[key], name[UndefinedKey]]]
variable[lst] assign[=] call[name[ConfigTree].parse_key, parameter[name[key]]]
variable[parent] assign[=] call[name[self].KEY_SEP.join, parameter[call[name[lst]][<ast.Slice object at 0x7da1b08a0970>]]]
variable[child] assign[=] call[name[lst]][<ast.UnaryOp object at 0x7da1b08a3bb0>]
if name[parent] begin[:]
call[call[name[self].get, parameter[name[parent]]].__delitem__, parameter[name[child]]]
return[name[value]] | keyword[def] identifier[pop] ( identifier[self] , identifier[key] , identifier[default] = identifier[UndefinedKey] ):
literal[string]
keyword[if] identifier[default] != identifier[UndefinedKey] keyword[and] identifier[key] keyword[not] keyword[in] identifier[self] :
keyword[return] identifier[default]
identifier[value] = identifier[self] . identifier[get] ( identifier[key] , identifier[UndefinedKey] )
identifier[lst] = identifier[ConfigTree] . identifier[parse_key] ( identifier[key] )
identifier[parent] = identifier[self] . identifier[KEY_SEP] . identifier[join] ( identifier[lst] [ literal[int] :- literal[int] ])
identifier[child] = identifier[lst] [- literal[int] ]
keyword[if] identifier[parent] :
identifier[self] . identifier[get] ( identifier[parent] ). identifier[__delitem__] ( identifier[child] )
keyword[else] :
identifier[self] . identifier[__delitem__] ( identifier[child] )
keyword[return] identifier[value] | def pop(self, key, default=UndefinedKey):
"""Remove specified key and return the corresponding value.
If key is not found, default is returned if given, otherwise ConfigMissingException is raised
This method assumes the user wants to remove the last value in the chain so it parses via parse_key
and pops the last value out of the dict.
:param key: key to use (dot separated). E.g., a.b.c
:type key: basestring
:param default: default value if key not found
:type default: object
:param default: default value if key not found
:return: value in the tree located at key
"""
if default != UndefinedKey and key not in self:
return default # depends on [control=['if'], data=[]]
value = self.get(key, UndefinedKey)
lst = ConfigTree.parse_key(key)
parent = self.KEY_SEP.join(lst[0:-1])
child = lst[-1]
if parent:
self.get(parent).__delitem__(child) # depends on [control=['if'], data=[]]
else:
self.__delitem__(child)
return value |
def validate_ports_string(ports):
    """Validate a comma-separated ports/port-ranges string.

    Accepts entries like ``"80"``, ``"80,443"`` or ``"1000-2000,8080"``.
    A string is valid when every entry is a number or an ascending range
    and no port exceeds 65535.

    :param ports: string to validate, e.g. ``"22,80,1024-2048"``
    :return: True when the string is syntactically and numerically valid
    """
    # Syntax check first: digits, optional "-digits", comma-separated.
    syntax = re.compile(r'^\d+(-\d+)?(,\d+(-\d+)?)*$')
    if not syntax.match(ports):
        return False
    # Numeric check: each parsed range must be ascending and within 0-65535.
    for rng in PortsRangeHelper._get_string_port_ranges(ports):
        out_of_order = rng.start > rng.end
        too_big = rng.start > 65535 or rng.end > 65535
        if out_of_order or too_big:
            return False
    return True
constant[ Validate that provided string has proper port numbers:
1. port number < 65535
2. range start < range end
]
variable[pattern] assign[=] call[name[re].compile, parameter[constant[^\d+(-\d+)?(,\d+(-\d+)?)*$]]]
if compare[call[name[pattern].match, parameter[name[ports]]] is constant[None]] begin[:]
return[constant[False]]
variable[ranges] assign[=] call[name[PortsRangeHelper]._get_string_port_ranges, parameter[name[ports]]]
for taget[name[r]] in starred[name[ranges]] begin[:]
if <ast.BoolOp object at 0x7da1b1f24b20> begin[:]
return[constant[False]]
return[constant[True]] | keyword[def] identifier[validate_ports_string] ( identifier[ports] ):
literal[string]
identifier[pattern] = identifier[re] . identifier[compile] ( literal[string] )
keyword[if] identifier[pattern] . identifier[match] ( identifier[ports] ) keyword[is] keyword[None] :
keyword[return] keyword[False]
identifier[ranges] = identifier[PortsRangeHelper] . identifier[_get_string_port_ranges] ( identifier[ports] )
keyword[for] identifier[r] keyword[in] identifier[ranges] :
keyword[if] identifier[r] . identifier[start] > identifier[r] . identifier[end] keyword[or] identifier[r] . identifier[start] > literal[int] keyword[or] identifier[r] . identifier[end] > literal[int] :
keyword[return] keyword[False]
keyword[return] keyword[True] | def validate_ports_string(ports):
""" Validate that provided string has proper port numbers:
1. port number < 65535
2. range start < range end
"""
pattern = re.compile('^\\d+(-\\d+)?(,\\d+(-\\d+)?)*$')
if pattern.match(ports) is None:
return False # depends on [control=['if'], data=[]]
ranges = PortsRangeHelper._get_string_port_ranges(ports)
for r in ranges:
if r.start > r.end or r.start > 65535 or r.end > 65535:
return False # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['r']]
return True |
def _set_dst_port_any(self, v, load=False):
  """
  Setter method for dst_port_any, mapped from YANG variable /overlay/access_list/type/vxlan/extended/ext_seq/dst_port_any (empty)
  If this variable is read-only (config: false) in the
  source YANG file, then _set_dst_port_any is considered as a private
  method. Backends looking to populate this variable should
  do so via calling thisObj._set_dst_port_any() directly.
  Note: the ``load`` parameter is accepted for API symmetry with other
  generated setters but is not used in this body.
  """
  # If the value arrives already boxed in a YANG dynamic type, unwrap it to
  # its underlying native value so it can be re-validated below.
  if hasattr(v, "_utype"):
    v = v._utype(v)
  try:
    # Re-box the value; YANGDynClass raises TypeError/ValueError when *v* is
    # not compatible with the YANG 'empty' leaf type (modelled via YANGBool).
    t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="dst-port-any", rest_name="dst-port-any", parent=self, choice=(u'choice-dst-port', u'case-dst-port-any'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'dst-port-any', u'display-when': u'(../src-ip-host) or (../src-ip) or (../src-ip-any)', u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-vxlan-visibility', defining_module='brocade-vxlan-visibility', yang_type='empty', is_config=True)
  except (TypeError, ValueError):
    # Surface a structured error describing the expected generated type.
    raise ValueError({
      'error-string': """dst_port_any must be of a type compatible with empty""",
      'defined-type': "empty",
      'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="dst-port-any", rest_name="dst-port-any", parent=self, choice=(u'choice-dst-port', u'case-dst-port-any'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'dst-port-any', u'display-when': u'(../src-ip-host) or (../src-ip) or (../src-ip-any)', u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-vxlan-visibility', defining_module='brocade-vxlan-visibility', yang_type='empty', is_config=True)""",
    })
  # Store the validated, boxed value on the private attribute.
  self.__dst_port_any = t
  # Give subclasses/backends a post-set hook if they define one.
  if hasattr(self, '_set'):
    self._set()
constant[
Setter method for dst_port_any, mapped from YANG variable /overlay/access_list/type/vxlan/extended/ext_seq/dst_port_any (empty)
If this variable is read-only (config: false) in the
source YANG file, then _set_dst_port_any is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_dst_port_any() directly.
]
if call[name[hasattr], parameter[name[v], constant[_utype]]] begin[:]
variable[v] assign[=] call[name[v]._utype, parameter[name[v]]]
<ast.Try object at 0x7da20c6c55d0>
name[self].__dst_port_any assign[=] name[t]
if call[name[hasattr], parameter[name[self], constant[_set]]] begin[:]
call[name[self]._set, parameter[]] | keyword[def] identifier[_set_dst_port_any] ( identifier[self] , identifier[v] , identifier[load] = keyword[False] ):
literal[string]
keyword[if] identifier[hasattr] ( identifier[v] , literal[string] ):
identifier[v] = identifier[v] . identifier[_utype] ( identifier[v] )
keyword[try] :
identifier[t] = identifier[YANGDynClass] ( identifier[v] , identifier[base] = identifier[YANGBool] , identifier[is_leaf] = keyword[True] , identifier[yang_name] = literal[string] , identifier[rest_name] = literal[string] , identifier[parent] = identifier[self] , identifier[choice] =( literal[string] , literal[string] ), identifier[path_helper] = identifier[self] . identifier[_path_helper] , identifier[extmethods] = identifier[self] . identifier[_extmethods] , identifier[register_paths] = keyword[True] , identifier[extensions] ={ literal[string] :{ literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : keyword[None] }}, identifier[namespace] = literal[string] , identifier[defining_module] = literal[string] , identifier[yang_type] = literal[string] , identifier[is_config] = keyword[True] )
keyword[except] ( identifier[TypeError] , identifier[ValueError] ):
keyword[raise] identifier[ValueError] ({
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
})
identifier[self] . identifier[__dst_port_any] = identifier[t]
keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ):
identifier[self] . identifier[_set] () | def _set_dst_port_any(self, v, load=False):
"""
Setter method for dst_port_any, mapped from YANG variable /overlay/access_list/type/vxlan/extended/ext_seq/dst_port_any (empty)
If this variable is read-only (config: false) in the
source YANG file, then _set_dst_port_any is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_dst_port_any() directly.
"""
if hasattr(v, '_utype'):
v = v._utype(v) # depends on [control=['if'], data=[]]
try:
t = YANGDynClass(v, base=YANGBool, is_leaf=True, yang_name='dst-port-any', rest_name='dst-port-any', parent=self, choice=(u'choice-dst-port', u'case-dst-port-any'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'dst-port-any', u'display-when': u'(../src-ip-host) or (../src-ip) or (../src-ip-any)', u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-vxlan-visibility', defining_module='brocade-vxlan-visibility', yang_type='empty', is_config=True) # depends on [control=['try'], data=[]]
except (TypeError, ValueError):
raise ValueError({'error-string': 'dst_port_any must be of a type compatible with empty', 'defined-type': 'empty', 'generated-type': 'YANGDynClass(base=YANGBool, is_leaf=True, yang_name="dst-port-any", rest_name="dst-port-any", parent=self, choice=(u\'choice-dst-port\', u\'case-dst-port-any\'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u\'tailf-common\': {u\'info\': u\'dst-port-any\', u\'display-when\': u\'(../src-ip-host) or (../src-ip) or (../src-ip-any)\', u\'cli-incomplete-command\': None}}, namespace=\'urn:brocade.com:mgmt:brocade-vxlan-visibility\', defining_module=\'brocade-vxlan-visibility\', yang_type=\'empty\', is_config=True)'}) # depends on [control=['except'], data=[]]
self.__dst_port_any = t
if hasattr(self, '_set'):
self._set() # depends on [control=['if'], data=[]] |
def write_virtual_meta(self):
    """Persist virtual columns, variables and their metadata to disk.

    Writes ``virtual_meta.yaml`` under :func:`DataFrame.get_private_dir` so
    that virtual columns/variables (plus their ucd, unit and description
    entries) survive between sessions; :func:`DataFrame.update_virtual_meta`
    reads it back on open. Called after virtual columns or variables change.
    Note: opening a DataFrame twice may result in corruption of this file.
    """
    meta_path = os.path.join(self.get_private_dir(create=True), "virtual_meta.yaml")
    # Only keep metadata entries that belong to virtual columns or variables.
    virtual_names = set(self.virtual_columns) | set(self.variables)

    def _pick(mapping, transform=lambda value: value):
        # Filter *mapping* down to virtual names, optionally transforming values.
        return {key: transform(value)
                for key, value in mapping.items()
                if key in virtual_names}

    meta_info = dict(
        virtual_columns=self.virtual_columns,
        variables=self.variables,
        ucds=_pick(self.ucds),
        units=_pick(self.units, str),  # units serialized as strings
        descriptions=_pick(self.descriptions),
    )
    vaex.utils.write_json_or_yaml(meta_path, meta_info)
constant[Writes virtual columns, variables and their ucd,description and units.
The default implementation is to write this to a file called virtual_meta.yaml in the directory defined by
:func:`DataFrame.get_private_dir`. Other implementation may store this in the DataFrame file itself.
This method is called after virtual columns or variables are added. Upon opening a file, :func:`DataFrame.update_virtual_meta`
is called, so that the information is not lost between sessions.
Note: opening a DataFrame twice may result in corruption of this file.
]
variable[path] assign[=] call[name[os].path.join, parameter[call[name[self].get_private_dir, parameter[]], constant[virtual_meta.yaml]]]
variable[virtual_names] assign[=] binary_operation[call[name[list], parameter[call[name[self].virtual_columns.keys, parameter[]]]] + call[name[list], parameter[call[name[self].variables.keys, parameter[]]]]]
variable[units] assign[=] <ast.DictComp object at 0x7da18bc71300>
variable[ucds] assign[=] <ast.DictComp object at 0x7da18bc70dc0>
variable[descriptions] assign[=] <ast.DictComp object at 0x7da18bc70310>
variable[meta_info] assign[=] call[name[dict], parameter[]]
call[name[vaex].utils.write_json_or_yaml, parameter[name[path], name[meta_info]]] | keyword[def] identifier[write_virtual_meta] ( identifier[self] ):
literal[string]
identifier[path] = identifier[os] . identifier[path] . identifier[join] ( identifier[self] . identifier[get_private_dir] ( identifier[create] = keyword[True] ), literal[string] )
identifier[virtual_names] = identifier[list] ( identifier[self] . identifier[virtual_columns] . identifier[keys] ())+ identifier[list] ( identifier[self] . identifier[variables] . identifier[keys] ())
identifier[units] ={ identifier[key] : identifier[str] ( identifier[value] ) keyword[for] identifier[key] , identifier[value] keyword[in] identifier[self] . identifier[units] . identifier[items] () keyword[if] identifier[key] keyword[in] identifier[virtual_names] }
identifier[ucds] ={ identifier[key] : identifier[value] keyword[for] identifier[key] , identifier[value] keyword[in] identifier[self] . identifier[ucds] . identifier[items] () keyword[if] identifier[key] keyword[in] identifier[virtual_names] }
identifier[descriptions] ={ identifier[key] : identifier[value] keyword[for] identifier[key] , identifier[value] keyword[in] identifier[self] . identifier[descriptions] . identifier[items] () keyword[if] identifier[key] keyword[in] identifier[virtual_names] }
identifier[meta_info] = identifier[dict] ( identifier[virtual_columns] = identifier[self] . identifier[virtual_columns] ,
identifier[variables] = identifier[self] . identifier[variables] ,
identifier[ucds] = identifier[ucds] , identifier[units] = identifier[units] , identifier[descriptions] = identifier[descriptions] )
identifier[vaex] . identifier[utils] . identifier[write_json_or_yaml] ( identifier[path] , identifier[meta_info] ) | def write_virtual_meta(self):
"""Writes virtual columns, variables and their ucd,description and units.
The default implementation is to write this to a file called virtual_meta.yaml in the directory defined by
:func:`DataFrame.get_private_dir`. Other implementation may store this in the DataFrame file itself.
This method is called after virtual columns or variables are added. Upon opening a file, :func:`DataFrame.update_virtual_meta`
is called, so that the information is not lost between sessions.
Note: opening a DataFrame twice may result in corruption of this file.
"""
path = os.path.join(self.get_private_dir(create=True), 'virtual_meta.yaml')
virtual_names = list(self.virtual_columns.keys()) + list(self.variables.keys())
units = {key: str(value) for (key, value) in self.units.items() if key in virtual_names}
ucds = {key: value for (key, value) in self.ucds.items() if key in virtual_names}
descriptions = {key: value for (key, value) in self.descriptions.items() if key in virtual_names}
meta_info = dict(virtual_columns=self.virtual_columns, variables=self.variables, ucds=ucds, units=units, descriptions=descriptions)
vaex.utils.write_json_or_yaml(path, meta_info) |
def is_equal(self, other):
    """ Two DictCells are equal when they share ALL Keys, and all of their
    is_equal() methods return True.  This ensures substructure equality.
    Returns False (instead of raising) when *other* is not a DictCell or
    when the two cells contain a different number of items.
    """
    if not isinstance(other, DictCell):
        return False
    # Sentinel used to detect length mismatch.  The original code relied on
    # izip_longest's default fillvalue of None, which made unequal-length
    # cells raise TypeError on ``None[0]`` instead of returning False.
    _missing = object()
    for (this, that) in itertools.izip_longest(self, other, fillvalue=_missing):
        if this is _missing or that is _missing:
            # One cell has more items than the other -> not equal.
            return False
        if this[0] != that[0]:
            # compare key names
            return False
        if not this[1].is_equal(that[1]):
            # compare cells recursively
            return False
    return True
constant[ Two DictCells are equal when they share ALL Keys, and all of their
is_equal() methods return True. This ensures substructure equality.
]
if <ast.UnaryOp object at 0x7da20c76e680> begin[:]
return[constant[False]]
for taget[tuple[[<ast.Name object at 0x7da1b23463e0>, <ast.Name object at 0x7da1b2345d80>]]] in starred[call[name[itertools].izip_longest, parameter[name[self], name[other]]]] begin[:]
if compare[call[name[this]][constant[0]] not_equal[!=] call[name[that]][constant[0]]] begin[:]
return[constant[False]]
if <ast.UnaryOp object at 0x7da1b2345f00> begin[:]
return[constant[False]]
return[constant[True]] | keyword[def] identifier[is_equal] ( identifier[self] , identifier[other] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[other] , identifier[DictCell] ):
keyword[return] keyword[False]
keyword[for] ( identifier[this] , identifier[that] ) keyword[in] identifier[itertools] . identifier[izip_longest] ( identifier[self] , identifier[other] ):
keyword[if] identifier[this] [ literal[int] ]!= identifier[that] [ literal[int] ]:
keyword[return] keyword[False]
keyword[if] keyword[not] identifier[this] [ literal[int] ]. identifier[is_equal] ( identifier[that] [ literal[int] ]):
keyword[return] keyword[False]
keyword[return] keyword[True] | def is_equal(self, other):
""" Two DictCells are equal when they share ALL Keys, and all of their
is_equal() methods return True. This ensures substructure equality.
"""
if not isinstance(other, DictCell):
return False # depends on [control=['if'], data=[]]
for (this, that) in itertools.izip_longest(self, other):
if this[0] != that[0]:
# compare key names
return False # depends on [control=['if'], data=[]]
if not this[1].is_equal(that[1]):
# compare cells
return False # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
return True |
def on_ok(self, sender):
    """
    Callback invoked when a task reaches status `S_OK`.
    Deletes a task's WFQ output file once every child of that task has
    also reached `S_OK`, then delegates to the parent implementation.
    """
    if self.remove_wfkq:
        for wfkq_task in self.wfkq_tasks:
            # Only consider tasks that have themselves completed.
            if wfkq_task.status == wfkq_task.S_OK:
                dependents = self.wfkq_task_children[wfkq_task]
                all_done = all(child.status == child.S_OK for child in dependents)
                if all_done:
                    wfq_path = wfkq_task.outdir.has_abiext("WFQ")
                    if wfq_path:
                        self.history.info("Removing WFQ: %s" % wfq_path)
                        os.remove(wfq_path)
    return super().on_ok(sender)
constant[
This callback is called when one task reaches status `S_OK`.
It removes the WFKQ file if all its children have reached `S_OK`.
]
if name[self].remove_wfkq begin[:]
for taget[name[task]] in starred[name[self].wfkq_tasks] begin[:]
if compare[name[task].status not_equal[!=] name[task].S_OK] begin[:]
continue
variable[children] assign[=] call[name[self].wfkq_task_children][name[task]]
if call[name[all], parameter[<ast.GeneratorExp object at 0x7da18dc04970>]] begin[:]
variable[path] assign[=] call[name[task].outdir.has_abiext, parameter[constant[WFQ]]]
if name[path] begin[:]
call[name[self].history.info, parameter[binary_operation[constant[Removing WFQ: %s] <ast.Mod object at 0x7da2590d6920> name[path]]]]
call[name[os].remove, parameter[name[path]]]
return[call[call[name[super], parameter[]].on_ok, parameter[name[sender]]]] | keyword[def] identifier[on_ok] ( identifier[self] , identifier[sender] ):
literal[string]
keyword[if] identifier[self] . identifier[remove_wfkq] :
keyword[for] identifier[task] keyword[in] identifier[self] . identifier[wfkq_tasks] :
keyword[if] identifier[task] . identifier[status] != identifier[task] . identifier[S_OK] : keyword[continue]
identifier[children] = identifier[self] . identifier[wfkq_task_children] [ identifier[task] ]
keyword[if] identifier[all] ( identifier[child] . identifier[status] == identifier[child] . identifier[S_OK] keyword[for] identifier[child] keyword[in] identifier[children] ):
identifier[path] = identifier[task] . identifier[outdir] . identifier[has_abiext] ( literal[string] )
keyword[if] identifier[path] :
identifier[self] . identifier[history] . identifier[info] ( literal[string] % identifier[path] )
identifier[os] . identifier[remove] ( identifier[path] )
keyword[return] identifier[super] (). identifier[on_ok] ( identifier[sender] ) | def on_ok(self, sender):
"""
This callback is called when one task reaches status `S_OK`.
It removes the WFKQ file if all its children have reached `S_OK`.
"""
if self.remove_wfkq:
for task in self.wfkq_tasks:
if task.status != task.S_OK:
continue # depends on [control=['if'], data=[]]
children = self.wfkq_task_children[task]
if all((child.status == child.S_OK for child in children)):
path = task.outdir.has_abiext('WFQ')
if path:
self.history.info('Removing WFQ: %s' % path)
os.remove(path) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['task']] # depends on [control=['if'], data=[]]
return super().on_ok(sender) |
def getImageDescriptor(self, im, xy=None):
    """ getImageDescriptor(im, xy=None)
    Build the GIF image-descriptor block for one frame.
    Declares a local color table per image (packed field ``\\x87``: local
    color table flag set, no interlace, unsorted, 256-entry table), so each
    frame may use its own palette of up to 256 colors.
    *xy* is the (left, top) placement of the frame; defaults to (0, 0).
    Written by Ant1 on 2010-08-22; subrectangle support by Alex Robinson,
    January 2011.
    """
    if xy is None:
        # Full image placed at the upper-left corner.
        xy = (0, 0)
    # 0x2C is the GIF image-separator byte.
    descriptor = b'\x2C'
    # Position (left, top) followed by size (width, height), each as
    # a little-endian 16-bit value via intToBin.
    descriptor += intToBin(xy[0]) + intToBin(xy[1])
    descriptor += intToBin(im.size[0]) + intToBin(im.size[1])
    # Packed field: local color table flag1, interlace0, sorted table0,
    # reserved00, lct size111=7=2^(7+1)=256.
    descriptor += b'\x87'
    # LZW minimum code size is emitted later, at the start of the image data.
    return descriptor
constant[ getImageDescriptor(im, xy=None)
Used for the local color table properties per image.
Otherwise global color table applies to all frames irrespective of
whether additional colors comes in play that require a redefined
palette. Still a maximum of 256 color per frame, obviously.
Written by Ant1 on 2010-08-22
Modified by Alex Robinson in Janurary 2011 to implement subrectangles.
]
if compare[name[xy] is constant[None]] begin[:]
variable[xy] assign[=] tuple[[<ast.Constant object at 0x7da18f00f9d0>, <ast.Constant object at 0x7da18f00ece0>]]
variable[bb] assign[=] constant[b',']
<ast.AugAssign object at 0x7da18f00d240>
<ast.AugAssign object at 0x7da18f00c190>
<ast.AugAssign object at 0x7da18f00f4f0>
<ast.AugAssign object at 0x7da18f00da50>
<ast.AugAssign object at 0x7da1b15eabc0>
return[name[bb]] | keyword[def] identifier[getImageDescriptor] ( identifier[self] , identifier[im] , identifier[xy] = keyword[None] ):
literal[string]
keyword[if] identifier[xy] keyword[is] keyword[None] :
identifier[xy] =( literal[int] , literal[int] )
identifier[bb] = literal[string]
identifier[bb] += identifier[intToBin] ( identifier[xy] [ literal[int] ])
identifier[bb] += identifier[intToBin] ( identifier[xy] [ literal[int] ])
identifier[bb] += identifier[intToBin] ( identifier[im] . identifier[size] [ literal[int] ])
identifier[bb] += identifier[intToBin] ( identifier[im] . identifier[size] [ literal[int] ])
identifier[bb] += literal[string]
keyword[return] identifier[bb] | def getImageDescriptor(self, im, xy=None):
""" getImageDescriptor(im, xy=None)
Used for the local color table properties per image.
Otherwise global color table applies to all frames irrespective of
whether additional colors comes in play that require a redefined
palette. Still a maximum of 256 color per frame, obviously.
Written by Ant1 on 2010-08-22
Modified by Alex Robinson in Janurary 2011 to implement subrectangles.
"""
# Default use full image and place at upper left
if xy is None:
xy = (0, 0) # depends on [control=['if'], data=['xy']]
# Image separator,
bb = b','
# Image position and size
bb += intToBin(xy[0]) # Left position
bb += intToBin(xy[1]) # Top position
bb += intToBin(im.size[0]) # image width
bb += intToBin(im.size[1]) # image height
# packed field: local color table flag1, interlace0, sorted table0,
# reserved00, lct size111=7=2^(7+1)=256.
bb += b'\x87'
# LZW minimum size code now comes later, begining of [image data] blocks
return bb |
def xml_for(cls, parts):
    """
    Build the ``[Content_Types].xml`` payload for an OPC package.
    Maps each part in *parts* to its content type, plus the standard
    defaults for ``rels`` and ``xml`` extensions, and returns the
    serialized XML.
    """
    content_types = cls()
    # Standard OPC default mappings present in every package.
    content_types._defaults['rels'] = CT.OPC_RELATIONSHIPS
    content_types._defaults['xml'] = CT.XML
    for part in parts:
        content_types._add_content_type(part.partname, part.content_type)
    return content_types._xml()
constant[
Return content types XML mapping each part in *parts* to the
appropriate content type and suitable for storage as
``[Content_Types].xml`` in an OPC package.
]
variable[cti] assign[=] call[name[cls], parameter[]]
call[name[cti]._defaults][constant[rels]] assign[=] name[CT].OPC_RELATIONSHIPS
call[name[cti]._defaults][constant[xml]] assign[=] name[CT].XML
for taget[name[part]] in starred[name[parts]] begin[:]
call[name[cti]._add_content_type, parameter[name[part].partname, name[part].content_type]]
return[call[name[cti]._xml, parameter[]]] | keyword[def] identifier[xml_for] ( identifier[cls] , identifier[parts] ):
literal[string]
identifier[cti] = identifier[cls] ()
identifier[cti] . identifier[_defaults] [ literal[string] ]= identifier[CT] . identifier[OPC_RELATIONSHIPS]
identifier[cti] . identifier[_defaults] [ literal[string] ]= identifier[CT] . identifier[XML]
keyword[for] identifier[part] keyword[in] identifier[parts] :
identifier[cti] . identifier[_add_content_type] ( identifier[part] . identifier[partname] , identifier[part] . identifier[content_type] )
keyword[return] identifier[cti] . identifier[_xml] () | def xml_for(cls, parts):
"""
Return content types XML mapping each part in *parts* to the
appropriate content type and suitable for storage as
``[Content_Types].xml`` in an OPC package.
"""
cti = cls()
cti._defaults['rels'] = CT.OPC_RELATIONSHIPS
cti._defaults['xml'] = CT.XML
for part in parts:
cti._add_content_type(part.partname, part.content_type) # depends on [control=['for'], data=['part']]
return cti._xml() |
def query_cat_random(catid, **kwargs):
    '''
    Get random lists of certain category.
    ``limit`` keyword controls how many records are returned (default 8).
    '''
    limit = kwargs.get('limit', 8)
    # Empty category id: sample across all posts.
    if catid == '':
        return TabPost.select().order_by(peewee.fn.Random()).limit(limit)
    # Otherwise restrict to valid posts tagged with this category.
    joined = TabPost.select().join(
        TabPost2Tag,
        on=(TabPost.uid == TabPost2Tag.post_id)
    )
    condition = (TabPost.valid == 1) & (TabPost2Tag.tag_id == catid)
    return joined.where(condition).order_by(peewee.fn.Random()).limit(limit)
constant[
Get random lists of certain category.
]
variable[num] assign[=] call[name[kwargs].get, parameter[constant[limit], constant[8]]]
if compare[name[catid] equal[==] constant[]] begin[:]
variable[rand_recs] assign[=] call[call[call[name[TabPost].select, parameter[]].order_by, parameter[call[name[peewee].fn.Random, parameter[]]]].limit, parameter[name[num]]]
return[name[rand_recs]] | keyword[def] identifier[query_cat_random] ( identifier[catid] ,** identifier[kwargs] ):
literal[string]
identifier[num] = identifier[kwargs] . identifier[get] ( literal[string] , literal[int] )
keyword[if] identifier[catid] == literal[string] :
identifier[rand_recs] = identifier[TabPost] . identifier[select] (). identifier[order_by] ( identifier[peewee] . identifier[fn] . identifier[Random] ()). identifier[limit] ( identifier[num] )
keyword[else] :
identifier[rand_recs] = identifier[TabPost] . identifier[select] (). identifier[join] (
identifier[TabPost2Tag] ,
identifier[on] =( identifier[TabPost] . identifier[uid] == identifier[TabPost2Tag] . identifier[post_id] )
). identifier[where] (
( identifier[TabPost] . identifier[valid] == literal[int] )&( identifier[TabPost2Tag] . identifier[tag_id] == identifier[catid] )
). identifier[order_by] (
identifier[peewee] . identifier[fn] . identifier[Random] ()
). identifier[limit] ( identifier[num] )
keyword[return] identifier[rand_recs] | def query_cat_random(catid, **kwargs):
"""
Get random lists of certain category.
"""
num = kwargs.get('limit', 8)
if catid == '':
rand_recs = TabPost.select().order_by(peewee.fn.Random()).limit(num) # depends on [control=['if'], data=[]]
else:
rand_recs = TabPost.select().join(TabPost2Tag, on=TabPost.uid == TabPost2Tag.post_id).where((TabPost.valid == 1) & (TabPost2Tag.tag_id == catid)).order_by(peewee.fn.Random()).limit(num)
return rand_recs |
def metrics(self):
    """
    :return: Description of metrics extracted by this class, keyed by
        metric name with the resolved ``Metrics`` attribute as value.
    """
    described = {}
    for name, config in six.iteritems(self._metrics):
        # Resolve the configured type name against the Metrics registry.
        described[name] = getattr(Metrics, config['type'])
    return described
constant[
:return: Description of metrics extracted by this class
]
return[call[name[dict], parameter[<ast.GeneratorExp object at 0x7da18f09c310>]]] | keyword[def] identifier[metrics] ( identifier[self] ):
literal[string]
keyword[return] identifier[dict] (
( identifier[name] , identifier[getattr] ( identifier[Metrics] , identifier[config] [ literal[string] ]))
keyword[for] identifier[name] , identifier[config] keyword[in] identifier[six] . identifier[iteritems] ( identifier[self] . identifier[_metrics] )
) | def metrics(self):
"""
:return: Description of metrics extracted by this class
"""
return dict(((name, getattr(Metrics, config['type'])) for (name, config) in six.iteritems(self._metrics))) |
def _get_existing_logical_drives(raid_adapter):
"""Collect existing logical drives on the server.
:param raid_adapter: raid adapter info
:returns: existing_logical_drives: get logical drive on server
"""
existing_logical_drives = []
logical_drives = raid_adapter['Server']['HWConfigurationIrmc'][
'Adapters']['RAIDAdapter'][0].get('LogicalDrives')
if logical_drives is not None:
for drive in logical_drives['LogicalDrive']:
existing_logical_drives.append(drive['@Number'])
return existing_logical_drives | def function[_get_existing_logical_drives, parameter[raid_adapter]]:
constant[Collect existing logical drives on the server.
:param raid_adapter: raid adapter info
:returns: existing_logical_drives: get logical drive on server
]
variable[existing_logical_drives] assign[=] list[[]]
variable[logical_drives] assign[=] call[call[call[call[call[call[name[raid_adapter]][constant[Server]]][constant[HWConfigurationIrmc]]][constant[Adapters]]][constant[RAIDAdapter]]][constant[0]].get, parameter[constant[LogicalDrives]]]
if compare[name[logical_drives] is_not constant[None]] begin[:]
for taget[name[drive]] in starred[call[name[logical_drives]][constant[LogicalDrive]]] begin[:]
call[name[existing_logical_drives].append, parameter[call[name[drive]][constant[@Number]]]]
return[name[existing_logical_drives]] | keyword[def] identifier[_get_existing_logical_drives] ( identifier[raid_adapter] ):
literal[string]
identifier[existing_logical_drives] =[]
identifier[logical_drives] = identifier[raid_adapter] [ literal[string] ][ literal[string] ][
literal[string] ][ literal[string] ][ literal[int] ]. identifier[get] ( literal[string] )
keyword[if] identifier[logical_drives] keyword[is] keyword[not] keyword[None] :
keyword[for] identifier[drive] keyword[in] identifier[logical_drives] [ literal[string] ]:
identifier[existing_logical_drives] . identifier[append] ( identifier[drive] [ literal[string] ])
keyword[return] identifier[existing_logical_drives] | def _get_existing_logical_drives(raid_adapter):
"""Collect existing logical drives on the server.
:param raid_adapter: raid adapter info
:returns: existing_logical_drives: get logical drive on server
"""
existing_logical_drives = []
logical_drives = raid_adapter['Server']['HWConfigurationIrmc']['Adapters']['RAIDAdapter'][0].get('LogicalDrives')
if logical_drives is not None:
for drive in logical_drives['LogicalDrive']:
existing_logical_drives.append(drive['@Number']) # depends on [control=['for'], data=['drive']] # depends on [control=['if'], data=['logical_drives']]
return existing_logical_drives |
def locales(self, query=None):
    """Fetch all Locales of the Environment (up to the set limit; tune via `query`).

    # TODO: fix url
    API Reference: https://www.contentful.com/developers/docs/references/content-delivery-api/#/reference/assets/assets-collection/get-all-assets-of-a-space

    :param query: (optional) Dict with API options.
    :return: List of :class:`Locale <contentful.locale.Locale>` objects.
    :rtype: List of contentful.locale.Locale

    Usage:
        >>> locales = client.locales()
        [<Locale[English (United States)] code='en-US' default=True fallback_code=None optional=False>]
    """
    params = {} if query is None else query
    return self._get(self.environment_url('/locales'), params)
constant[Fetches all Locales from the Environment (up to the set limit, can be modified in `query`).
# TODO: fix url
API Reference: https://www.contentful.com/developers/docs/references/content-delivery-api/#/reference/assets/assets-collection/get-all-assets-of-a-space
:param query: (optional) Dict with API options.
:return: List of :class:`Locale <contentful.locale.Locale>` objects.
:rtype: List of contentful.locale.Locale
Usage:
>>> locales = client.locales()
[<Locale[English (United States)] code='en-US' default=True fallback_code=None optional=False>]
]
if compare[name[query] is constant[None]] begin[:]
variable[query] assign[=] dictionary[[], []]
return[call[name[self]._get, parameter[call[name[self].environment_url, parameter[constant[/locales]]], name[query]]]] | keyword[def] identifier[locales] ( identifier[self] , identifier[query] = keyword[None] ):
literal[string]
keyword[if] identifier[query] keyword[is] keyword[None] :
identifier[query] ={}
keyword[return] identifier[self] . identifier[_get] (
identifier[self] . identifier[environment_url] ( literal[string] ),
identifier[query]
) | def locales(self, query=None):
"""Fetches all Locales from the Environment (up to the set limit, can be modified in `query`).
# TODO: fix url
API Reference: https://www.contentful.com/developers/docs/references/content-delivery-api/#/reference/assets/assets-collection/get-all-assets-of-a-space
:param query: (optional) Dict with API options.
:return: List of :class:`Locale <contentful.locale.Locale>` objects.
:rtype: List of contentful.locale.Locale
Usage:
>>> locales = client.locales()
[<Locale[English (United States)] code='en-US' default=True fallback_code=None optional=False>]
"""
if query is None:
query = {} # depends on [control=['if'], data=['query']]
return self._get(self.environment_url('/locales'), query) |
def register_responses_handlers(self):
    """Wire each server response type to the method that handles it.

    Every handler registered here must accept a single ``payload``
    argument.
    """
    self.handlers.update({
        "SymbolInfo": self.handle_symbol_info,
        "IndexerReadyEvent": self.handle_indexer_ready,
        "AnalyzerReadyEvent": self.handle_analyzer_ready,
        "NewScalaNotesEvent": self.buffer_typechecks,
        "NewJavaNotesEvent": self.buffer_typechecks_and_display,
        "BasicTypeInfo": self.show_type,
        "ArrowTypeInfo": self.show_type,
        "FullTypeCheckCompleteEvent": self.handle_typecheck_complete,
        "StringResponse": self.handle_string_response,
        "CompletionInfoList": self.handle_completion_info_list,
        "TypeInspectInfo": self.handle_type_inspect,
        "SymbolSearchResults": self.handle_symbol_search,
        "SourcePositions": self.handle_source_positions,
        "DebugOutputEvent": self.handle_debug_output,
        "DebugBreakEvent": self.handle_debug_break,
        "DebugBacktrace": self.handle_debug_backtrace,
        "DebugVmError": self.handle_debug_vm_error,
        "RefactorDiffEffect": self.apply_refactor,
        "ImportSuggestions": self.handle_import_suggestions,
        "PackageInfo": self.handle_package_info,
        "FalseResponse": self.handle_false_response,
    })
constant[Register handlers for responses from the server.
A handler must accept only one parameter: `payload`.
]
call[name[self].handlers][constant[SymbolInfo]] assign[=] name[self].handle_symbol_info
call[name[self].handlers][constant[IndexerReadyEvent]] assign[=] name[self].handle_indexer_ready
call[name[self].handlers][constant[AnalyzerReadyEvent]] assign[=] name[self].handle_analyzer_ready
call[name[self].handlers][constant[NewScalaNotesEvent]] assign[=] name[self].buffer_typechecks
call[name[self].handlers][constant[NewJavaNotesEvent]] assign[=] name[self].buffer_typechecks_and_display
call[name[self].handlers][constant[BasicTypeInfo]] assign[=] name[self].show_type
call[name[self].handlers][constant[ArrowTypeInfo]] assign[=] name[self].show_type
call[name[self].handlers][constant[FullTypeCheckCompleteEvent]] assign[=] name[self].handle_typecheck_complete
call[name[self].handlers][constant[StringResponse]] assign[=] name[self].handle_string_response
call[name[self].handlers][constant[CompletionInfoList]] assign[=] name[self].handle_completion_info_list
call[name[self].handlers][constant[TypeInspectInfo]] assign[=] name[self].handle_type_inspect
call[name[self].handlers][constant[SymbolSearchResults]] assign[=] name[self].handle_symbol_search
call[name[self].handlers][constant[SourcePositions]] assign[=] name[self].handle_source_positions
call[name[self].handlers][constant[DebugOutputEvent]] assign[=] name[self].handle_debug_output
call[name[self].handlers][constant[DebugBreakEvent]] assign[=] name[self].handle_debug_break
call[name[self].handlers][constant[DebugBacktrace]] assign[=] name[self].handle_debug_backtrace
call[name[self].handlers][constant[DebugVmError]] assign[=] name[self].handle_debug_vm_error
call[name[self].handlers][constant[RefactorDiffEffect]] assign[=] name[self].apply_refactor
call[name[self].handlers][constant[ImportSuggestions]] assign[=] name[self].handle_import_suggestions
call[name[self].handlers][constant[PackageInfo]] assign[=] name[self].handle_package_info
call[name[self].handlers][constant[FalseResponse]] assign[=] name[self].handle_false_response | keyword[def] identifier[register_responses_handlers] ( identifier[self] ):
literal[string]
identifier[self] . identifier[handlers] [ literal[string] ]= identifier[self] . identifier[handle_symbol_info]
identifier[self] . identifier[handlers] [ literal[string] ]= identifier[self] . identifier[handle_indexer_ready]
identifier[self] . identifier[handlers] [ literal[string] ]= identifier[self] . identifier[handle_analyzer_ready]
identifier[self] . identifier[handlers] [ literal[string] ]= identifier[self] . identifier[buffer_typechecks]
identifier[self] . identifier[handlers] [ literal[string] ]= identifier[self] . identifier[buffer_typechecks_and_display]
identifier[self] . identifier[handlers] [ literal[string] ]= identifier[self] . identifier[show_type]
identifier[self] . identifier[handlers] [ literal[string] ]= identifier[self] . identifier[show_type]
identifier[self] . identifier[handlers] [ literal[string] ]= identifier[self] . identifier[handle_typecheck_complete]
identifier[self] . identifier[handlers] [ literal[string] ]= identifier[self] . identifier[handle_string_response]
identifier[self] . identifier[handlers] [ literal[string] ]= identifier[self] . identifier[handle_completion_info_list]
identifier[self] . identifier[handlers] [ literal[string] ]= identifier[self] . identifier[handle_type_inspect]
identifier[self] . identifier[handlers] [ literal[string] ]= identifier[self] . identifier[handle_symbol_search]
identifier[self] . identifier[handlers] [ literal[string] ]= identifier[self] . identifier[handle_source_positions]
identifier[self] . identifier[handlers] [ literal[string] ]= identifier[self] . identifier[handle_debug_output]
identifier[self] . identifier[handlers] [ literal[string] ]= identifier[self] . identifier[handle_debug_break]
identifier[self] . identifier[handlers] [ literal[string] ]= identifier[self] . identifier[handle_debug_backtrace]
identifier[self] . identifier[handlers] [ literal[string] ]= identifier[self] . identifier[handle_debug_vm_error]
identifier[self] . identifier[handlers] [ literal[string] ]= identifier[self] . identifier[apply_refactor]
identifier[self] . identifier[handlers] [ literal[string] ]= identifier[self] . identifier[handle_import_suggestions]
identifier[self] . identifier[handlers] [ literal[string] ]= identifier[self] . identifier[handle_package_info]
identifier[self] . identifier[handlers] [ literal[string] ]= identifier[self] . identifier[handle_false_response] | def register_responses_handlers(self):
"""Register handlers for responses from the server.
A handler must accept only one parameter: `payload`.
"""
self.handlers['SymbolInfo'] = self.handle_symbol_info
self.handlers['IndexerReadyEvent'] = self.handle_indexer_ready
self.handlers['AnalyzerReadyEvent'] = self.handle_analyzer_ready
self.handlers['NewScalaNotesEvent'] = self.buffer_typechecks
self.handlers['NewJavaNotesEvent'] = self.buffer_typechecks_and_display
self.handlers['BasicTypeInfo'] = self.show_type
self.handlers['ArrowTypeInfo'] = self.show_type
self.handlers['FullTypeCheckCompleteEvent'] = self.handle_typecheck_complete
self.handlers['StringResponse'] = self.handle_string_response
self.handlers['CompletionInfoList'] = self.handle_completion_info_list
self.handlers['TypeInspectInfo'] = self.handle_type_inspect
self.handlers['SymbolSearchResults'] = self.handle_symbol_search
self.handlers['SourcePositions'] = self.handle_source_positions
self.handlers['DebugOutputEvent'] = self.handle_debug_output
self.handlers['DebugBreakEvent'] = self.handle_debug_break
self.handlers['DebugBacktrace'] = self.handle_debug_backtrace
self.handlers['DebugVmError'] = self.handle_debug_vm_error
self.handlers['RefactorDiffEffect'] = self.apply_refactor
self.handlers['ImportSuggestions'] = self.handle_import_suggestions
self.handlers['PackageInfo'] = self.handle_package_info
self.handlers['FalseResponse'] = self.handle_false_response |
def solve(self, angles0, target):
    """Run the configured optimizer from *angles0* and return the joint angles."""
    initial_angles = np.array(angles0)
    return self.optimizer.optimize(initial_angles, target)
constant[Calculate joint angles and returns it.]
return[call[name[self].optimizer.optimize, parameter[call[name[np].array, parameter[name[angles0]]], name[target]]]] | keyword[def] identifier[solve] ( identifier[self] , identifier[angles0] , identifier[target] ):
literal[string]
keyword[return] identifier[self] . identifier[optimizer] . identifier[optimize] ( identifier[np] . identifier[array] ( identifier[angles0] ), identifier[target] ) | def solve(self, angles0, target):
"""Calculate joint angles and returns it."""
return self.optimizer.optimize(np.array(angles0), target) |
def clean_honeypot(self):
    """Reject the submission if the honeypot field was filled in."""
    honeypot = self.cleaned_data["honeypot"]
    if not honeypot:
        return honeypot
    # A non-empty honeypot means a bot filled the hidden field.
    raise forms.ValidationError(self.fields["honeypot"].label)
constant[Check that nothing's been entered into the honeypot.]
variable[value] assign[=] call[name[self].cleaned_data][constant[honeypot]]
if name[value] begin[:]
<ast.Raise object at 0x7da1b26afd60>
return[name[value]] | keyword[def] identifier[clean_honeypot] ( identifier[self] ):
literal[string]
identifier[value] = identifier[self] . identifier[cleaned_data] [ literal[string] ]
keyword[if] identifier[value] :
keyword[raise] identifier[forms] . identifier[ValidationError] ( identifier[self] . identifier[fields] [ literal[string] ]. identifier[label] )
keyword[return] identifier[value] | def clean_honeypot(self):
"""Check that nothing's been entered into the honeypot."""
value = self.cleaned_data['honeypot']
if value:
raise forms.ValidationError(self.fields['honeypot'].label) # depends on [control=['if'], data=[]]
return value |
def is_url_allowed(url):
    """Return ``True`` unless ``url`` matches a blacklisted asset pattern.

    :rtype: bool
    """
    blocked_patterns = [
        r'\.ttf', r'\.woff', r'fonts\.googleapis\.com', r'\.png', r'\.jpe?g', r'\.gif',
        r'\.svg'
    ]
    return not any(re.search(pattern, url) for pattern in blocked_patterns)
constant[ Return ``True`` if ``url`` is not in ``blacklist``.
:rtype: bool
]
variable[blacklist] assign[=] list[[<ast.Constant object at 0x7da1b137b520>, <ast.Constant object at 0x7da1b13783a0>, <ast.Constant object at 0x7da1b137b6a0>, <ast.Constant object at 0x7da1b137a7a0>, <ast.Constant object at 0x7da1b13aace0>, <ast.Constant object at 0x7da1b13aae60>, <ast.Constant object at 0x7da1b13ab040>]]
for taget[name[ft]] in starred[name[blacklist]] begin[:]
if call[name[re].search, parameter[name[ft], name[url]]] begin[:]
return[constant[False]]
return[constant[True]] | keyword[def] identifier[is_url_allowed] ( identifier[url] ):
literal[string]
identifier[blacklist] =[
literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ,
literal[string]
]
keyword[for] identifier[ft] keyword[in] identifier[blacklist] :
keyword[if] identifier[re] . identifier[search] ( identifier[ft] , identifier[url] ):
keyword[return] keyword[False]
keyword[return] keyword[True] | def is_url_allowed(url):
""" Return ``True`` if ``url`` is not in ``blacklist``.
:rtype: bool
"""
blacklist = ['\\.ttf', '\\.woff', 'fonts\\.googleapis\\.com', '\\.png', '\\.jpe?g', '\\.gif', '\\.svg']
for ft in blacklist:
if re.search(ft, url):
return False # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['ft']]
return True |
def get_full_command(self):
    """
    Split the message text into command and args.

    :return: tuple of (command, args) when the message is a command,
        otherwise ``None``.
    """
    if not self.is_command():
        # Not a command: make the previously-implicit None return explicit.
        return None
    command, _, args = self.text.partition(' ')
    return command, args
constant[
Split command and args
:return: tuple of (command, args)
]
if call[name[self].is_command, parameter[]] begin[:]
<ast.Tuple object at 0x7da1b17ba170> assign[=] call[name[self].text.partition, parameter[constant[ ]]]
return[tuple[[<ast.Name object at 0x7da1b17b8640>, <ast.Name object at 0x7da1b17b8dc0>]]] | keyword[def] identifier[get_full_command] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[is_command] ():
identifier[command] , identifier[_] , identifier[args] = identifier[self] . identifier[text] . identifier[partition] ( literal[string] )
keyword[return] identifier[command] , identifier[args] | def get_full_command(self):
"""
Split command and args
:return: tuple of (command, args)
"""
if self.is_command():
(command, _, args) = self.text.partition(' ')
return (command, args) # depends on [control=['if'], data=[]] |
def get_noise_correction(self, shape, chunks=None):
    """Get the noise correction array.

    Reads the noise look-up tables from the annotation XML tree
    (``self.root``) and interpolates them up to the full-size ``shape``.

    :param shape: target shape passed to the interpolator.
    :param chunks: optional chunking forwarded to ``interpolate_xml_array``.
    :return: the interpolated noise correction array.
    """
    # Legacy annotation layout: a single "noiseVector" LUT covers everything.
    data_items = self.root.findall(".//noiseVector")
    data, low_res_coords = self.read_xml_array(data_items, 'noiseLut')
    if not data_items:
        # No "noiseVector" elements found: fall back to the split layout where
        # range and azimuth noise are stored separately and combined by
        # multiplication. NOTE(review): presumably this corresponds to the
        # newer Sentinel-1 annotation format — confirm against product spec.
        data_items = self.root.findall(".//noiseRangeVector")
        data, low_res_coords = self.read_xml_array(data_items, 'noiseRangeLut')
        range_noise = self.interpolate_xml_array(data, low_res_coords, shape, chunks=chunks)
        data_items = self.root.findall(".//noiseAzimuthVector")
        data, low_res_coords = self.read_azimuth_noise_array(data_items)
        azimuth_noise = self.interpolate_xml_array(data, low_res_coords, shape, chunks=chunks)
        # Total noise is the product of the two per-axis contributions.
        noise = range_noise * azimuth_noise
    else:
        noise = self.interpolate_xml_array(data, low_res_coords, shape, chunks=chunks)
    return noise
constant[Get the noise correction array.]
variable[data_items] assign[=] call[name[self].root.findall, parameter[constant[.//noiseVector]]]
<ast.Tuple object at 0x7da1b22f8910> assign[=] call[name[self].read_xml_array, parameter[name[data_items], constant[noiseLut]]]
if <ast.UnaryOp object at 0x7da1b22fa170> begin[:]
variable[data_items] assign[=] call[name[self].root.findall, parameter[constant[.//noiseRangeVector]]]
<ast.Tuple object at 0x7da1b22f8d30> assign[=] call[name[self].read_xml_array, parameter[name[data_items], constant[noiseRangeLut]]]
variable[range_noise] assign[=] call[name[self].interpolate_xml_array, parameter[name[data], name[low_res_coords], name[shape]]]
variable[data_items] assign[=] call[name[self].root.findall, parameter[constant[.//noiseAzimuthVector]]]
<ast.Tuple object at 0x7da1b22fa4a0> assign[=] call[name[self].read_azimuth_noise_array, parameter[name[data_items]]]
variable[azimuth_noise] assign[=] call[name[self].interpolate_xml_array, parameter[name[data], name[low_res_coords], name[shape]]]
variable[noise] assign[=] binary_operation[name[range_noise] * name[azimuth_noise]]
return[name[noise]] | keyword[def] identifier[get_noise_correction] ( identifier[self] , identifier[shape] , identifier[chunks] = keyword[None] ):
literal[string]
identifier[data_items] = identifier[self] . identifier[root] . identifier[findall] ( literal[string] )
identifier[data] , identifier[low_res_coords] = identifier[self] . identifier[read_xml_array] ( identifier[data_items] , literal[string] )
keyword[if] keyword[not] identifier[data_items] :
identifier[data_items] = identifier[self] . identifier[root] . identifier[findall] ( literal[string] )
identifier[data] , identifier[low_res_coords] = identifier[self] . identifier[read_xml_array] ( identifier[data_items] , literal[string] )
identifier[range_noise] = identifier[self] . identifier[interpolate_xml_array] ( identifier[data] , identifier[low_res_coords] , identifier[shape] , identifier[chunks] = identifier[chunks] )
identifier[data_items] = identifier[self] . identifier[root] . identifier[findall] ( literal[string] )
identifier[data] , identifier[low_res_coords] = identifier[self] . identifier[read_azimuth_noise_array] ( identifier[data_items] )
identifier[azimuth_noise] = identifier[self] . identifier[interpolate_xml_array] ( identifier[data] , identifier[low_res_coords] , identifier[shape] , identifier[chunks] = identifier[chunks] )
identifier[noise] = identifier[range_noise] * identifier[azimuth_noise]
keyword[else] :
identifier[noise] = identifier[self] . identifier[interpolate_xml_array] ( identifier[data] , identifier[low_res_coords] , identifier[shape] , identifier[chunks] = identifier[chunks] )
keyword[return] identifier[noise] | def get_noise_correction(self, shape, chunks=None):
"""Get the noise correction array."""
data_items = self.root.findall('.//noiseVector')
(data, low_res_coords) = self.read_xml_array(data_items, 'noiseLut')
if not data_items:
data_items = self.root.findall('.//noiseRangeVector')
(data, low_res_coords) = self.read_xml_array(data_items, 'noiseRangeLut')
range_noise = self.interpolate_xml_array(data, low_res_coords, shape, chunks=chunks)
data_items = self.root.findall('.//noiseAzimuthVector')
(data, low_res_coords) = self.read_azimuth_noise_array(data_items)
azimuth_noise = self.interpolate_xml_array(data, low_res_coords, shape, chunks=chunks)
noise = range_noise * azimuth_noise # depends on [control=['if'], data=[]]
else:
noise = self.interpolate_xml_array(data, low_res_coords, shape, chunks=chunks)
return noise |
def transform_action(self, obs, func_call, skip_available=False):
    """Tranform an agent-style action to one that SC2 can consume.

    Args:
      obs: a `sc_pb.Observation` from the previous frame.
      func_call: a `FunctionCall` to be turned into a `sc_pb.Action`.
      skip_available: If True, assume the action is available. This should only
        be used for testing or if you expect to make actions that weren't
        valid at the last observation.

    Returns:
      a corresponding `sc_pb.Action`.

    Raises:
      ValueError: if the action doesn't pass validation.
    """
    func_id = func_call.function
    try:
        func = actions.FUNCTIONS[func_id]
    except KeyError:
        raise ValueError("Invalid function id: %s." % func_id)

    # Available? Reject functions the last observation marked unavailable,
    # unless the caller explicitly opted out of this check.
    if not (skip_available or func_id in self.available_actions(obs)):
        raise ValueError("Function %s/%s is currently not available" % (
            func_id, func.name))

    # Right number of args?
    if len(func_call.arguments) != len(func.args):
        raise ValueError(
            "Wrong number of arguments for function: %s, got: %s" % (
                func, func_call.arguments))

    # Args are valid? Spatial arguments (screen/minimap coordinates) get
    # their valid ranges from the configured action dimensions; all other
    # argument types carry their own size spec.
    aif = self._agent_interface_format
    for t, arg in zip(func.args, func_call.arguments):
        if t.name in ("screen", "screen2"):
            sizes = aif.action_dimensions.screen
        elif t.name == "minimap":
            sizes = aif.action_dimensions.minimap
        else:
            sizes = t.sizes
        if len(sizes) != len(arg):
            raise ValueError(
                "Wrong number of values for argument of %s, got: %s" % (
                    func, func_call.arguments))
        # Each component must lie in the half-open range [0, size).
        for s, a in zip(sizes, arg):
            if not 0 <= a < s:
                raise ValueError("Argument is out of range for %s, got: %s" % (
                    func, func_call.arguments))

    # Convert them to python types.
    kwargs = {type_.name: type_.fn(a)
              for type_, a in zip(func.args, func_call.arguments)}

    # Call the right callback to get an SC2 action proto. The callback
    # mutates `sc2_action` in place, so the filled proto is returned below.
    sc2_action = sc_pb.Action()
    kwargs["action"] = sc2_action
    kwargs["action_space"] = aif.action_space
    if func.ability_id:
        kwargs["ability_id"] = func.ability_id
    actions.FUNCTIONS[func_id].function_type(**kwargs)
    return sc2_action
constant[Tranform an agent-style action to one that SC2 can consume.
Args:
obs: a `sc_pb.Observation` from the previous frame.
func_call: a `FunctionCall` to be turned into a `sc_pb.Action`.
skip_available: If True, assume the action is available. This should only
be used for testing or if you expect to make actions that weren't
valid at the last observation.
Returns:
a corresponding `sc_pb.Action`.
Raises:
ValueError: if the action doesn't pass validation.
]
variable[func_id] assign[=] name[func_call].function
<ast.Try object at 0x7da2046205e0>
if <ast.UnaryOp object at 0x7da2046233d0> begin[:]
<ast.Raise object at 0x7da204620a30>
if compare[call[name[len], parameter[name[func_call].arguments]] not_equal[!=] call[name[len], parameter[name[func].args]]] begin[:]
<ast.Raise object at 0x7da204623910>
variable[aif] assign[=] name[self]._agent_interface_format
for taget[tuple[[<ast.Name object at 0x7da204621420>, <ast.Name object at 0x7da204620130>]]] in starred[call[name[zip], parameter[name[func].args, name[func_call].arguments]]] begin[:]
if compare[name[t].name in tuple[[<ast.Constant object at 0x7da2046217b0>, <ast.Constant object at 0x7da204620400>]]] begin[:]
variable[sizes] assign[=] name[aif].action_dimensions.screen
if compare[call[name[len], parameter[name[sizes]]] not_equal[!=] call[name[len], parameter[name[arg]]]] begin[:]
<ast.Raise object at 0x7da204623d90>
for taget[tuple[[<ast.Name object at 0x7da204623970>, <ast.Name object at 0x7da204622aa0>]]] in starred[call[name[zip], parameter[name[sizes], name[arg]]]] begin[:]
if <ast.UnaryOp object at 0x7da204621930> begin[:]
<ast.Raise object at 0x7da2046208e0>
variable[kwargs] assign[=] <ast.DictComp object at 0x7da20cabe050>
variable[sc2_action] assign[=] call[name[sc_pb].Action, parameter[]]
call[name[kwargs]][constant[action]] assign[=] name[sc2_action]
call[name[kwargs]][constant[action_space]] assign[=] name[aif].action_space
if name[func].ability_id begin[:]
call[name[kwargs]][constant[ability_id]] assign[=] name[func].ability_id
call[call[name[actions].FUNCTIONS][name[func_id]].function_type, parameter[]]
return[name[sc2_action]] | keyword[def] identifier[transform_action] ( identifier[self] , identifier[obs] , identifier[func_call] , identifier[skip_available] = keyword[False] ):
literal[string]
identifier[func_id] = identifier[func_call] . identifier[function]
keyword[try] :
identifier[func] = identifier[actions] . identifier[FUNCTIONS] [ identifier[func_id] ]
keyword[except] identifier[KeyError] :
keyword[raise] identifier[ValueError] ( literal[string] % identifier[func_id] )
keyword[if] keyword[not] ( identifier[skip_available] keyword[or] identifier[func_id] keyword[in] identifier[self] . identifier[available_actions] ( identifier[obs] )):
keyword[raise] identifier[ValueError] ( literal[string] %(
identifier[func_id] , identifier[func] . identifier[name] ))
keyword[if] identifier[len] ( identifier[func_call] . identifier[arguments] )!= identifier[len] ( identifier[func] . identifier[args] ):
keyword[raise] identifier[ValueError] (
literal[string] %(
identifier[func] , identifier[func_call] . identifier[arguments] ))
identifier[aif] = identifier[self] . identifier[_agent_interface_format]
keyword[for] identifier[t] , identifier[arg] keyword[in] identifier[zip] ( identifier[func] . identifier[args] , identifier[func_call] . identifier[arguments] ):
keyword[if] identifier[t] . identifier[name] keyword[in] ( literal[string] , literal[string] ):
identifier[sizes] = identifier[aif] . identifier[action_dimensions] . identifier[screen]
keyword[elif] identifier[t] . identifier[name] == literal[string] :
identifier[sizes] = identifier[aif] . identifier[action_dimensions] . identifier[minimap]
keyword[else] :
identifier[sizes] = identifier[t] . identifier[sizes]
keyword[if] identifier[len] ( identifier[sizes] )!= identifier[len] ( identifier[arg] ):
keyword[raise] identifier[ValueError] (
literal[string] %(
identifier[func] , identifier[func_call] . identifier[arguments] ))
keyword[for] identifier[s] , identifier[a] keyword[in] identifier[zip] ( identifier[sizes] , identifier[arg] ):
keyword[if] keyword[not] literal[int] <= identifier[a] < identifier[s] :
keyword[raise] identifier[ValueError] ( literal[string] %(
identifier[func] , identifier[func_call] . identifier[arguments] ))
identifier[kwargs] ={ identifier[type_] . identifier[name] : identifier[type_] . identifier[fn] ( identifier[a] )
keyword[for] identifier[type_] , identifier[a] keyword[in] identifier[zip] ( identifier[func] . identifier[args] , identifier[func_call] . identifier[arguments] )}
identifier[sc2_action] = identifier[sc_pb] . identifier[Action] ()
identifier[kwargs] [ literal[string] ]= identifier[sc2_action]
identifier[kwargs] [ literal[string] ]= identifier[aif] . identifier[action_space]
keyword[if] identifier[func] . identifier[ability_id] :
identifier[kwargs] [ literal[string] ]= identifier[func] . identifier[ability_id]
identifier[actions] . identifier[FUNCTIONS] [ identifier[func_id] ]. identifier[function_type] (** identifier[kwargs] )
keyword[return] identifier[sc2_action] | def transform_action(self, obs, func_call, skip_available=False):
"""Tranform an agent-style action to one that SC2 can consume.
Args:
obs: a `sc_pb.Observation` from the previous frame.
func_call: a `FunctionCall` to be turned into a `sc_pb.Action`.
skip_available: If True, assume the action is available. This should only
be used for testing or if you expect to make actions that weren't
valid at the last observation.
Returns:
a corresponding `sc_pb.Action`.
Raises:
ValueError: if the action doesn't pass validation.
"""
func_id = func_call.function
try:
func = actions.FUNCTIONS[func_id] # depends on [control=['try'], data=[]]
except KeyError:
raise ValueError('Invalid function id: %s.' % func_id) # depends on [control=['except'], data=[]]
# Available?
if not (skip_available or func_id in self.available_actions(obs)):
raise ValueError('Function %s/%s is currently not available' % (func_id, func.name)) # depends on [control=['if'], data=[]]
# Right number of args?
if len(func_call.arguments) != len(func.args):
raise ValueError('Wrong number of arguments for function: %s, got: %s' % (func, func_call.arguments)) # depends on [control=['if'], data=[]]
# Args are valid?
aif = self._agent_interface_format
for (t, arg) in zip(func.args, func_call.arguments):
if t.name in ('screen', 'screen2'):
sizes = aif.action_dimensions.screen # depends on [control=['if'], data=[]]
elif t.name == 'minimap':
sizes = aif.action_dimensions.minimap # depends on [control=['if'], data=[]]
else:
sizes = t.sizes
if len(sizes) != len(arg):
raise ValueError('Wrong number of values for argument of %s, got: %s' % (func, func_call.arguments)) # depends on [control=['if'], data=[]]
for (s, a) in zip(sizes, arg):
if not 0 <= a < s:
raise ValueError('Argument is out of range for %s, got: %s' % (func, func_call.arguments)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['for'], data=[]]
# Convert them to python types.
kwargs = {type_.name: type_.fn(a) for (type_, a) in zip(func.args, func_call.arguments)}
# Call the right callback to get an SC2 action proto.
sc2_action = sc_pb.Action()
kwargs['action'] = sc2_action
kwargs['action_space'] = aif.action_space
if func.ability_id:
kwargs['ability_id'] = func.ability_id # depends on [control=['if'], data=[]]
actions.FUNCTIONS[func_id].function_type(**kwargs)
return sc2_action |
def format_code(source, preferred_quote="'"):
    """Return source code with quotes unified.

    If *source* cannot be tokenized, it is returned unchanged.
    """
    try:
        unified = _format_code(source, preferred_quote)
    except (tokenize.TokenError, IndentationError):
        return source
    return unified
constant[Return source code with quotes unified.]
<ast.Try object at 0x7da1b2648880> | keyword[def] identifier[format_code] ( identifier[source] , identifier[preferred_quote] = literal[string] ):
literal[string]
keyword[try] :
keyword[return] identifier[_format_code] ( identifier[source] , identifier[preferred_quote] )
keyword[except] ( identifier[tokenize] . identifier[TokenError] , identifier[IndentationError] ):
keyword[return] identifier[source] | def format_code(source, preferred_quote="'"):
"""Return source code with quotes unified."""
try:
return _format_code(source, preferred_quote) # depends on [control=['try'], data=[]]
except (tokenize.TokenError, IndentationError):
return source # depends on [control=['except'], data=[]] |
def convert_to_xml(cls, value):
    """
    Convert a signed angle in degrees (e.g. -42.42) to the XML string
    form: 60000ths of a degree, normalized into the positive range.
    """
    sixtieths = int(round(value * cls.DEGREE_INCREMENTS))
    # Modulo folds negative and >360-degree angles into [0, THREE_SIXTY).
    normalized = sixtieths % cls.THREE_SIXTY
    return str(normalized)
constant[
Convert signed angle float like -42.42 to int 60000 per degree,
normalized to positive value.
]
variable[rot] assign[=] binary_operation[call[name[int], parameter[call[name[round], parameter[binary_operation[name[value] * name[cls].DEGREE_INCREMENTS]]]]] <ast.Mod object at 0x7da2590d6920> name[cls].THREE_SIXTY]
return[call[name[str], parameter[name[rot]]]] | keyword[def] identifier[convert_to_xml] ( identifier[cls] , identifier[value] ):
literal[string]
identifier[rot] = identifier[int] ( identifier[round] ( identifier[value] * identifier[cls] . identifier[DEGREE_INCREMENTS] ))% identifier[cls] . identifier[THREE_SIXTY]
keyword[return] identifier[str] ( identifier[rot] ) | def convert_to_xml(cls, value):
"""
Convert signed angle float like -42.42 to int 60000 per degree,
normalized to positive value.
"""
# modulo normalizes negative and >360 degree values
rot = int(round(value * cls.DEGREE_INCREMENTS)) % cls.THREE_SIXTY
return str(rot) |
def strong(node):
    """
    Render *node*'s markdown children inside a bold (strong) element.
    """
    bold = nodes.strong()
    for child in MarkDown(node):
        bold += child
    return bold
constant[
A bolded section
]
variable[o] assign[=] call[name[nodes].strong, parameter[]]
for taget[name[n]] in starred[call[name[MarkDown], parameter[name[node]]]] begin[:]
<ast.AugAssign object at 0x7da18f00edd0>
return[name[o]] | keyword[def] identifier[strong] ( identifier[node] ):
literal[string]
identifier[o] = identifier[nodes] . identifier[strong] ()
keyword[for] identifier[n] keyword[in] identifier[MarkDown] ( identifier[node] ):
identifier[o] += identifier[n]
keyword[return] identifier[o] | def strong(node):
"""
A bolded section
"""
o = nodes.strong()
for n in MarkDown(node):
o += n # depends on [control=['for'], data=['n']]
return o |
def blockreplace(marker_start,
                 marker_end,
                 content='',
                 append_if_not_found=False,
                 prepend_if_not_found=False,
                 show_changes=True,
                 append_newline=False,
                 source='running',
                 path=None,
                 test=False,
                 commit=True,
                 debug=False,
                 replace=True):
    '''
    .. versionadded:: 2019.2.0
    Replace content of the configuration source, delimited by the line markers.
    A block of content delimited by comments can help you manage several lines
    without worrying about old entries removal.
    marker_start
        The line content identifying a line as the start of the content block.
        Note that the whole line containing this marker will be considered,
        so whitespace or extra content before or after the marker is included
        in final output.
    marker_end
        The line content identifying a line as the end of the content block.
        Note that the whole line containing this marker will be considered,
        so whitespace or extra content before or after the marker is included
        in final output.
    content
        The content to be used between the two lines identified by
        ``marker_start`` and ``marker_stop``.
    append_if_not_found: ``False``
        If markers are not found and set to True then, the markers and content
        will be appended to the file.
    prepend_if_not_found: ``False``
        If markers are not found and set to True then, the markers and content
        will be prepended to the file.
    append_newline: ``False``
        Controls whether or not a newline is appended to the content block.
        If the value of this argument is ``True`` then a newline will be added
        to the content block. If it is ``False``, then a newline will not be
        added to the content block. If it is ``None`` then a newline will only
        be added to the content block if it does not already end in a newline.
    show_changes: ``True``
        Controls how changes are presented. If ``True``, this function will
        return the of the changes made.
        If ``False``, then it will return a boolean (``True`` if any changes
        were made, otherwise False).
    source: ``running``
        The configuration source. Choose from: ``running``, ``candidate``, or
        ``startup``. Default: ``running``.
    path: ``None``
        Save the temporary configuration to a specific path, then read from
        there. This argument is optional, can be used when you prefers a
        particular location of the temporary file.
    test: ``False``
        Dry run? If set as ``True``, will apply the config, discard and return
        the changes. Default: ``False`` and will commit the changes on the
        device.
    commit: ``True``
        Commit the configuration changes? Default: ``True``.
    debug: ``False``
        Debug mode. Will insert a new key in the output dictionary, as
        ``loaded_config`` containing the raw configuration loaded on the device.
    replace: ``True``
        Load and replace the configuration. Default: ``True``.
    CLI Example:
    .. code-block:: bash
        salt '*' net.blockreplace 'ntp' 'interface' ''
    '''
    # Dump the requested configuration source to a temporary file.
    config_saved = save_config(source=source, path=path)
    if not config_saved or not config_saved['result']:
        # Propagate the failure dictionary unchanged so the caller sees
        # the original error message.
        return config_saved
    path = config_saved['out']
    # file.blockreplace edits the temporary file in place; its return
    # value (the diff / changed flag) is not needed here, so it is not
    # bound to a local name.
    __salt__['file.blockreplace'](path,
                                  marker_start=marker_start,
                                  marker_end=marker_end,
                                  content=content,
                                  append_if_not_found=append_if_not_found,
                                  prepend_if_not_found=prepend_if_not_found,
                                  show_changes=show_changes,
                                  append_newline=append_newline)
    # Read back the edited file and load it onto the device.
    with salt.utils.files.fopen(path, 'r') as fh_:
        updated_config = fh_.read()
    return __salt__['net.load_config'](text=updated_config,
                                       test=test,
                                       debug=debug,
                                       replace=replace,
                                       commit=commit)
constant[
.. versionadded:: 2019.2.0
Replace content of the configuration source, delimited by the line markers.
A block of content delimited by comments can help you manage several lines
without worrying about old entries removal.
marker_start
The line content identifying a line as the start of the content block.
Note that the whole line containing this marker will be considered,
so whitespace or extra content before or after the marker is included
in final output.
marker_end
The line content identifying a line as the end of the content block.
Note that the whole line containing this marker will be considered,
so whitespace or extra content before or after the marker is included
in final output.
content
The content to be used between the two lines identified by
``marker_start`` and ``marker_stop``.
append_if_not_found: ``False``
If markers are not found and set to True then, the markers and content
will be appended to the file.
prepend_if_not_found: ``False``
If markers are not found and set to True then, the markers and content
will be prepended to the file.
append_newline: ``False``
Controls whether or not a newline is appended to the content block.
If the value of this argument is ``True`` then a newline will be added
to the content block. If it is ``False``, then a newline will not be
added to the content block. If it is ``None`` then a newline will only
be added to the content block if it does not already end in a newline.
show_changes: ``True``
Controls how changes are presented. If ``True``, this function will
return the of the changes made.
If ``False``, then it will return a boolean (``True`` if any changes
were made, otherwise False).
source: ``running``
The configuration source. Choose from: ``running``, ``candidate``, or
``startup``. Default: ``running``.
path: ``None``
Save the temporary configuration to a specific path, then read from
there. This argument is optional, can be used when you prefers a
particular location of the temporary file.
test: ``False``
Dry run? If set as ``True``, will apply the config, discard and return
the changes. Default: ``False`` and will commit the changes on the
device.
commit: ``True``
Commit the configuration changes? Default: ``True``.
debug: ``False``
Debug mode. Will insert a new key in the output dictionary, as
``loaded_config`` containing the raw configuration loaded on the device.
replace: ``True``
Load and replace the configuration. Default: ``True``.
CLI Example:
.. code-block:: bash
salt '*' net.blockreplace 'ntp' 'interface' ''
]
variable[config_saved] assign[=] call[name[save_config], parameter[]]
if <ast.BoolOp object at 0x7da1b1f776d0> begin[:]
return[name[config_saved]]
variable[path] assign[=] call[name[config_saved]][constant[out]]
variable[replace_pattern] assign[=] call[call[name[__salt__]][constant[file.blockreplace]], parameter[name[path]]]
with call[name[salt].utils.files.fopen, parameter[name[path], constant[r]]] begin[:]
variable[updated_config] assign[=] call[name[fh_].read, parameter[]]
return[call[call[name[__salt__]][constant[net.load_config]], parameter[]]] | keyword[def] identifier[blockreplace] ( identifier[marker_start] ,
identifier[marker_end] ,
identifier[content] = literal[string] ,
identifier[append_if_not_found] = keyword[False] ,
identifier[prepend_if_not_found] = keyword[False] ,
identifier[show_changes] = keyword[True] ,
identifier[append_newline] = keyword[False] ,
identifier[source] = literal[string] ,
identifier[path] = keyword[None] ,
identifier[test] = keyword[False] ,
identifier[commit] = keyword[True] ,
identifier[debug] = keyword[False] ,
identifier[replace] = keyword[True] ):
literal[string]
identifier[config_saved] = identifier[save_config] ( identifier[source] = identifier[source] , identifier[path] = identifier[path] )
keyword[if] keyword[not] identifier[config_saved] keyword[or] keyword[not] identifier[config_saved] [ literal[string] ]:
keyword[return] identifier[config_saved]
identifier[path] = identifier[config_saved] [ literal[string] ]
identifier[replace_pattern] = identifier[__salt__] [ literal[string] ]( identifier[path] ,
identifier[marker_start] = identifier[marker_start] ,
identifier[marker_end] = identifier[marker_end] ,
identifier[content] = identifier[content] ,
identifier[append_if_not_found] = identifier[append_if_not_found] ,
identifier[prepend_if_not_found] = identifier[prepend_if_not_found] ,
identifier[show_changes] = identifier[show_changes] ,
identifier[append_newline] = identifier[append_newline] )
keyword[with] identifier[salt] . identifier[utils] . identifier[files] . identifier[fopen] ( identifier[path] , literal[string] ) keyword[as] identifier[fh_] :
identifier[updated_config] = identifier[fh_] . identifier[read] ()
keyword[return] identifier[__salt__] [ literal[string] ]( identifier[text] = identifier[updated_config] ,
identifier[test] = identifier[test] ,
identifier[debug] = identifier[debug] ,
identifier[replace] = identifier[replace] ,
identifier[commit] = identifier[commit] ) | def blockreplace(marker_start, marker_end, content='', append_if_not_found=False, prepend_if_not_found=False, show_changes=True, append_newline=False, source='running', path=None, test=False, commit=True, debug=False, replace=True):
"""
.. versionadded:: 2019.2.0
Replace content of the configuration source, delimited by the line markers.
A block of content delimited by comments can help you manage several lines
without worrying about old entries removal.
marker_start
The line content identifying a line as the start of the content block.
Note that the whole line containing this marker will be considered,
so whitespace or extra content before or after the marker is included
in final output.
marker_end
The line content identifying a line as the end of the content block.
Note that the whole line containing this marker will be considered,
so whitespace or extra content before or after the marker is included
in final output.
content
The content to be used between the two lines identified by
``marker_start`` and ``marker_stop``.
append_if_not_found: ``False``
If markers are not found and set to True then, the markers and content
will be appended to the file.
prepend_if_not_found: ``False``
If markers are not found and set to True then, the markers and content
will be prepended to the file.
append_newline: ``False``
Controls whether or not a newline is appended to the content block.
If the value of this argument is ``True`` then a newline will be added
to the content block. If it is ``False``, then a newline will not be
added to the content block. If it is ``None`` then a newline will only
be added to the content block if it does not already end in a newline.
show_changes: ``True``
Controls how changes are presented. If ``True``, this function will
return the of the changes made.
If ``False``, then it will return a boolean (``True`` if any changes
were made, otherwise False).
source: ``running``
The configuration source. Choose from: ``running``, ``candidate``, or
``startup``. Default: ``running``.
path: ``None``
Save the temporary configuration to a specific path, then read from
there. This argument is optional, can be used when you prefers a
particular location of the temporary file.
test: ``False``
Dry run? If set as ``True``, will apply the config, discard and return
the changes. Default: ``False`` and will commit the changes on the
device.
commit: ``True``
Commit the configuration changes? Default: ``True``.
debug: ``False``
Debug mode. Will insert a new key in the output dictionary, as
``loaded_config`` containing the raw configuration loaded on the device.
replace: ``True``
Load and replace the configuration. Default: ``True``.
CLI Example:
.. code-block:: bash
salt '*' net.blockreplace 'ntp' 'interface' ''
"""
config_saved = save_config(source=source, path=path)
if not config_saved or not config_saved['result']:
return config_saved # depends on [control=['if'], data=[]]
path = config_saved['out']
replace_pattern = __salt__['file.blockreplace'](path, marker_start=marker_start, marker_end=marker_end, content=content, append_if_not_found=append_if_not_found, prepend_if_not_found=prepend_if_not_found, show_changes=show_changes, append_newline=append_newline)
with salt.utils.files.fopen(path, 'r') as fh_:
updated_config = fh_.read() # depends on [control=['with'], data=['fh_']]
return __salt__['net.load_config'](text=updated_config, test=test, debug=debug, replace=replace, commit=commit) |
def srid(self):
    """Return the EPSG ID as an int, or None if it cannot be determined."""
    # Prefer the projected CS authority code, falling back to the
    # geographic CS code when the former is absent.
    epsg_id = (self.GetAuthorityCode('PROJCS') or
               self.GetAuthorityCode('GEOGCS'))
    try:
        return int(epsg_id)
    except (TypeError, ValueError):
        # TypeError: no authority code found (epsg_id is None).
        # ValueError: the authority code is not a numeric string.
        return None
constant[Returns the EPSG ID as int if it exists.]
variable[epsg_id] assign[=] <ast.BoolOp object at 0x7da1b021c040>
<ast.Try object at 0x7da1b021e290> | keyword[def] identifier[srid] ( identifier[self] ):
literal[string]
identifier[epsg_id] =( identifier[self] . identifier[GetAuthorityCode] ( literal[string] ) keyword[or]
identifier[self] . identifier[GetAuthorityCode] ( literal[string] ))
keyword[try] :
keyword[return] identifier[int] ( identifier[epsg_id] )
keyword[except] identifier[TypeError] :
keyword[return] | def srid(self):
"""Returns the EPSG ID as int if it exists."""
epsg_id = self.GetAuthorityCode('PROJCS') or self.GetAuthorityCode('GEOGCS')
try:
return int(epsg_id) # depends on [control=['try'], data=[]]
except TypeError:
return # depends on [control=['except'], data=[]] |
def jacard_similarity_from_nested_dicts(self, nested_dictionaries):
    """
    Compute the continuous Jaccard similarity between all pairs
    of keys in the dictionary-of-dictionaries given as an input.
    :Parameters:
        nested_dictionaries : dict
            Maps outer key -> {inner row name -> float value}.
    Returns three element tuple:
        - similarity dictionary: (key, key) -> float
        - overlap count dictionary: (key, key) -> int
        - weight dictionary: (key, key) -> float
    Pairs whose weight falls below ``self.min_weight_for_similarity``
    or whose overlap count falls below ``self.min_count_for_similarity``
    are omitted from the similarity dictionary (but still appear in the
    overlap and weight dictionaries).
    """
    sims = {}
    overlaps = {}
    weights = {}
    for a, column_dict_a in nested_dictionaries.items():
        row_set_a = set(column_dict_a.keys())
        for b, column_dict_b in nested_dictionaries.items():
            common_rows = row_set_a.intersection(column_dict_b.keys())
            n_overlap = len(common_rows)
            overlaps[(a, b)] = n_overlap
            # Continuous Jaccard: sum of elementwise minima over sum of
            # elementwise maxima, restricted to the shared rows.
            total = 0.0
            weight = 0.0
            for row_name in common_rows:
                value_a = column_dict_a[row_name]
                value_b = column_dict_b[row_name]
                total += min(value_a, value_b)
                weight += max(value_a, value_b)
            weights[(a, b)] = weight
            # Skip zero weights explicitly: previously a pair with no
            # shared weight raised ZeroDivisionError whenever
            # min_weight_for_similarity <= 0.
            if weight <= 0 or weight < self.min_weight_for_similarity:
                continue
            if n_overlap < self.min_count_for_similarity:
                continue
            sims[(a, b)] = total / weight
    return sims, overlaps, weights
constant[
Compute the continuous Jacard similarity between all pairs
of keys in dictionary-of-dictionaries given as an input.
Returns three element tuple:
- similarity dictionary: (key, key) -> float
- overlap count dictionary: key -> key -> int
- weight dictionary: key -> key -> float
]
variable[sims] assign[=] dictionary[[], []]
variable[overlaps] assign[=] dictionary[[], []]
variable[weights] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da18ede66b0>, <ast.Name object at 0x7da18ede6d10>]]] in starred[call[name[nested_dictionaries].items, parameter[]]] begin[:]
variable[row_set_a] assign[=] call[name[set], parameter[call[name[column_dict_a].keys, parameter[]]]]
for taget[tuple[[<ast.Name object at 0x7da18ede5420>, <ast.Name object at 0x7da18ede42e0>]]] in starred[call[name[nested_dictionaries].items, parameter[]]] begin[:]
variable[row_set_b] assign[=] call[name[set], parameter[call[name[column_dict_b].keys, parameter[]]]]
variable[common_rows] assign[=] call[name[row_set_a].intersection, parameter[name[row_set_b]]]
variable[n_overlap] assign[=] call[name[len], parameter[name[common_rows]]]
call[name[overlaps]][tuple[[<ast.Name object at 0x7da20c7cbeb0>, <ast.Name object at 0x7da20c7ca500>]]] assign[=] name[n_overlap]
variable[total] assign[=] constant[0.0]
variable[weight] assign[=] constant[0.0]
for taget[name[row_name]] in starred[name[common_rows]] begin[:]
variable[value_a] assign[=] call[name[column_dict_a]][name[row_name]]
variable[value_b] assign[=] call[name[column_dict_b]][name[row_name]]
variable[minval] assign[=] call[name[min], parameter[name[value_a], name[value_b]]]
variable[maxval] assign[=] call[name[max], parameter[name[value_a], name[value_b]]]
<ast.AugAssign object at 0x7da20c7cae90>
<ast.AugAssign object at 0x7da20c7c8730>
call[name[weights]][tuple[[<ast.Name object at 0x7da20c7cada0>, <ast.Name object at 0x7da20c7c8460>]]] assign[=] name[weight]
if compare[name[weight] less[<] name[self].min_weight_for_similarity] begin[:]
continue
if compare[name[n_overlap] less[<] name[self].min_count_for_similarity] begin[:]
continue
call[name[sims]][tuple[[<ast.Name object at 0x7da18ede5ed0>, <ast.Name object at 0x7da18ede7100>]]] assign[=] binary_operation[name[total] / name[weight]]
return[tuple[[<ast.Name object at 0x7da18ede78b0>, <ast.Name object at 0x7da18ede59f0>, <ast.Name object at 0x7da18ede7280>]]] | keyword[def] identifier[jacard_similarity_from_nested_dicts] ( identifier[self] , identifier[nested_dictionaries] ):
literal[string]
identifier[sims] ={}
identifier[overlaps] ={}
identifier[weights] ={}
keyword[for] identifier[a] , identifier[column_dict_a] keyword[in] identifier[nested_dictionaries] . identifier[items] ():
identifier[row_set_a] = identifier[set] ( identifier[column_dict_a] . identifier[keys] ())
keyword[for] identifier[b] , identifier[column_dict_b] keyword[in] identifier[nested_dictionaries] . identifier[items] ():
identifier[row_set_b] = identifier[set] ( identifier[column_dict_b] . identifier[keys] ())
identifier[common_rows] = identifier[row_set_a] . identifier[intersection] ( identifier[row_set_b] )
identifier[n_overlap] = identifier[len] ( identifier[common_rows] )
identifier[overlaps] [( identifier[a] , identifier[b] )]= identifier[n_overlap]
identifier[total] = literal[int]
identifier[weight] = literal[int]
keyword[for] identifier[row_name] keyword[in] identifier[common_rows] :
identifier[value_a] = identifier[column_dict_a] [ identifier[row_name] ]
identifier[value_b] = identifier[column_dict_b] [ identifier[row_name] ]
identifier[minval] = identifier[min] ( identifier[value_a] , identifier[value_b] )
identifier[maxval] = identifier[max] ( identifier[value_a] , identifier[value_b] )
identifier[total] += identifier[minval]
identifier[weight] += identifier[maxval]
identifier[weights] [( identifier[a] , identifier[b] )]= identifier[weight]
keyword[if] identifier[weight] < identifier[self] . identifier[min_weight_for_similarity] :
keyword[continue]
keyword[if] identifier[n_overlap] < identifier[self] . identifier[min_count_for_similarity] :
keyword[continue]
identifier[sims] [( identifier[a] , identifier[b] )]= identifier[total] / identifier[weight]
keyword[return] identifier[sims] , identifier[overlaps] , identifier[weights] | def jacard_similarity_from_nested_dicts(self, nested_dictionaries):
"""
Compute the continuous Jacard similarity between all pairs
of keys in dictionary-of-dictionaries given as an input.
Returns three element tuple:
- similarity dictionary: (key, key) -> float
- overlap count dictionary: key -> key -> int
- weight dictionary: key -> key -> float
"""
sims = {}
overlaps = {}
weights = {}
for (a, column_dict_a) in nested_dictionaries.items():
row_set_a = set(column_dict_a.keys())
for (b, column_dict_b) in nested_dictionaries.items():
row_set_b = set(column_dict_b.keys())
common_rows = row_set_a.intersection(row_set_b)
n_overlap = len(common_rows)
overlaps[a, b] = n_overlap
total = 0.0
weight = 0.0
for row_name in common_rows:
value_a = column_dict_a[row_name]
value_b = column_dict_b[row_name]
minval = min(value_a, value_b)
maxval = max(value_a, value_b)
total += minval
weight += maxval # depends on [control=['for'], data=['row_name']]
weights[a, b] = weight
if weight < self.min_weight_for_similarity:
continue # depends on [control=['if'], data=[]]
if n_overlap < self.min_count_for_similarity:
continue # depends on [control=['if'], data=[]]
sims[a, b] = total / weight # depends on [control=['for'], data=[]] # depends on [control=['for'], data=[]]
return (sims, overlaps, weights) |
def calcHorizonPoints(self):
    '''Recompute and apply the vertices of the sky and ground patches.'''
    # Horizon line endpoints: roll tilts the line, pitch shifts it down.
    slope = math.tan(math.radians(-self.roll)) * float(self.ratio)
    pitch_offset = self.dist10deg * (self.pitch / 10.0)
    left_y = slope - pitch_offset
    right_y = -slope - pitch_offset
    half_width = self.ratio
    # Sky Polygon (closed: first vertex repeated at the end)
    self.topPolygon.set_xy([
        (-half_width, left_y),
        (-half_width, 1),
        (half_width, 1),
        (half_width, right_y),
        (-half_width, left_y),
    ])
    # Ground Polygon
    self.botPolygon.set_xy([
        (-half_width, left_y),
        (-half_width, -1),
        (half_width, -1),
        (half_width, right_y),
        (-half_width, left_y),
    ])
constant[Updates the verticies of the patches for the ground and sky.]
variable[ydiff] assign[=] binary_operation[call[name[math].tan, parameter[call[name[math].radians, parameter[<ast.UnaryOp object at 0x7da20c76ff40>]]]] * call[name[float], parameter[name[self].ratio]]]
variable[pitchdiff] assign[=] binary_operation[name[self].dist10deg * binary_operation[name[self].pitch / constant[10.0]]]
variable[vertsTop] assign[=] list[[<ast.Tuple object at 0x7da20c76faf0>, <ast.Tuple object at 0x7da20c76f160>, <ast.Tuple object at 0x7da20c76edd0>, <ast.Tuple object at 0x7da20c76c370>, <ast.Tuple object at 0x7da20c76fd90>]]
call[name[self].topPolygon.set_xy, parameter[name[vertsTop]]]
variable[vertsBot] assign[=] list[[<ast.Tuple object at 0x7da20c76e080>, <ast.Tuple object at 0x7da20c76c1c0>, <ast.Tuple object at 0x7da20c76c520>, <ast.Tuple object at 0x7da20c76df00>, <ast.Tuple object at 0x7da20c76da50>]]
call[name[self].botPolygon.set_xy, parameter[name[vertsBot]]] | keyword[def] identifier[calcHorizonPoints] ( identifier[self] ):
literal[string]
identifier[ydiff] = identifier[math] . identifier[tan] ( identifier[math] . identifier[radians] (- identifier[self] . identifier[roll] ))* identifier[float] ( identifier[self] . identifier[ratio] )
identifier[pitchdiff] = identifier[self] . identifier[dist10deg] *( identifier[self] . identifier[pitch] / literal[int] )
identifier[vertsTop] =[(- identifier[self] . identifier[ratio] , identifier[ydiff] - identifier[pitchdiff] ),(- identifier[self] . identifier[ratio] , literal[int] ),( identifier[self] . identifier[ratio] , literal[int] ),( identifier[self] . identifier[ratio] ,- identifier[ydiff] - identifier[pitchdiff] ),(- identifier[self] . identifier[ratio] , identifier[ydiff] - identifier[pitchdiff] )]
identifier[self] . identifier[topPolygon] . identifier[set_xy] ( identifier[vertsTop] )
identifier[vertsBot] =[(- identifier[self] . identifier[ratio] , identifier[ydiff] - identifier[pitchdiff] ),(- identifier[self] . identifier[ratio] ,- literal[int] ),( identifier[self] . identifier[ratio] ,- literal[int] ),( identifier[self] . identifier[ratio] ,- identifier[ydiff] - identifier[pitchdiff] ),(- identifier[self] . identifier[ratio] , identifier[ydiff] - identifier[pitchdiff] )]
identifier[self] . identifier[botPolygon] . identifier[set_xy] ( identifier[vertsBot] ) | def calcHorizonPoints(self):
"""Updates the verticies of the patches for the ground and sky."""
ydiff = math.tan(math.radians(-self.roll)) * float(self.ratio)
pitchdiff = self.dist10deg * (self.pitch / 10.0)
# Sky Polygon
vertsTop = [(-self.ratio, ydiff - pitchdiff), (-self.ratio, 1), (self.ratio, 1), (self.ratio, -ydiff - pitchdiff), (-self.ratio, ydiff - pitchdiff)]
self.topPolygon.set_xy(vertsTop)
# Ground Polygon
vertsBot = [(-self.ratio, ydiff - pitchdiff), (-self.ratio, -1), (self.ratio, -1), (self.ratio, -ydiff - pitchdiff), (-self.ratio, ydiff - pitchdiff)]
self.botPolygon.set_xy(vertsBot) |
async def get_proxies(self):
    """Receive proxies from the provider and return them.
    :return: :attr:`.proxies`
    """
    log.debug('Try to get proxies from %s' % self.domain)
    session_kwargs = dict(
        headers=get_headers(), cookies=self._cookies, loop=self._loop)
    # Bind the session to self._session so _pipe() can reuse it.
    async with aiohttp.ClientSession(**session_kwargs) as self._session:
        await self._pipe()
    log.debug(
        '%d proxies received from %s: %s'
        % (len(self.proxies), self.domain, self.proxies))
    return self.proxies
literal[string]
identifier[log] . identifier[debug] ( literal[string] % identifier[self] . identifier[domain] )
keyword[async] keyword[with] identifier[aiohttp] . identifier[ClientSession] (
identifier[headers] = identifier[get_headers] (), identifier[cookies] = identifier[self] . identifier[_cookies] , identifier[loop] = identifier[self] . identifier[_loop]
) keyword[as] identifier[self] . identifier[_session] :
keyword[await] identifier[self] . identifier[_pipe] ()
identifier[log] . identifier[debug] (
literal[string]
%( identifier[len] ( identifier[self] . identifier[proxies] ), identifier[self] . identifier[domain] , identifier[self] . identifier[proxies] )
)
keyword[return] identifier[self] . identifier[proxies] | async def get_proxies(self):
"""Receive proxies from the provider and return them.
:return: :attr:`.proxies`
"""
log.debug('Try to get proxies from %s' % self.domain)
async with aiohttp.ClientSession(headers=get_headers(), cookies=self._cookies, loop=self._loop) as self._session:
await self._pipe()
log.debug('%d proxies received from %s: %s' % (len(self.proxies), self.domain, self.proxies))
return self.proxies |
def _update_prx(self):
    """Update `prx` from `phi`, `pi_codon`, and `beta`."""
    # Per-codon product of nucleotide frequencies over the three positions.
    nt_weights = scipy.ones(N_CODON, dtype='float')
    for pos in range(3):
        for nt in range(N_NT):
            nt_weights[CODON_NT[pos][nt]] *= self.phi[nt]
    # Combine with the beta-scaled codon preferences.
    self.prx = (self.pi_codon**self.beta) * nt_weights
    # Normalize each site's distribution; raise on any FP anomaly so
    # numerical problems surface immediately.
    with scipy.errstate(divide='raise', under='raise', over='raise',
                        invalid='raise'):
        for site in range(self.nsites):
            self.prx[site] /= self.prx[site].sum()
constant[Update `prx` from `phi`, `pi_codon`, and `beta`.]
variable[qx] assign[=] call[name[scipy].ones, parameter[name[N_CODON]]]
for taget[name[j]] in starred[call[name[range], parameter[constant[3]]]] begin[:]
for taget[name[w]] in starred[call[name[range], parameter[name[N_NT]]]] begin[:]
<ast.AugAssign object at 0x7da20e9b0490>
variable[frx] assign[=] binary_operation[name[self].pi_codon ** name[self].beta]
name[self].prx assign[=] binary_operation[name[frx] * name[qx]]
with call[name[scipy].errstate, parameter[]] begin[:]
for taget[name[r]] in starred[call[name[range], parameter[name[self].nsites]]] begin[:]
<ast.AugAssign object at 0x7da20e9b2f50> | keyword[def] identifier[_update_prx] ( identifier[self] ):
literal[string]
identifier[qx] = identifier[scipy] . identifier[ones] ( identifier[N_CODON] , identifier[dtype] = literal[string] )
keyword[for] identifier[j] keyword[in] identifier[range] ( literal[int] ):
keyword[for] identifier[w] keyword[in] identifier[range] ( identifier[N_NT] ):
identifier[qx] [ identifier[CODON_NT] [ identifier[j] ][ identifier[w] ]]*= identifier[self] . identifier[phi] [ identifier[w] ]
identifier[frx] = identifier[self] . identifier[pi_codon] ** identifier[self] . identifier[beta]
identifier[self] . identifier[prx] = identifier[frx] * identifier[qx]
keyword[with] identifier[scipy] . identifier[errstate] ( identifier[divide] = literal[string] , identifier[under] = literal[string] , identifier[over] = literal[string] ,
identifier[invalid] = literal[string] ):
keyword[for] identifier[r] keyword[in] identifier[range] ( identifier[self] . identifier[nsites] ):
identifier[self] . identifier[prx] [ identifier[r] ]/= identifier[self] . identifier[prx] [ identifier[r] ]. identifier[sum] () | def _update_prx(self):
"""Update `prx` from `phi`, `pi_codon`, and `beta`."""
qx = scipy.ones(N_CODON, dtype='float')
for j in range(3):
for w in range(N_NT):
qx[CODON_NT[j][w]] *= self.phi[w] # depends on [control=['for'], data=['w']] # depends on [control=['for'], data=['j']]
frx = self.pi_codon ** self.beta
self.prx = frx * qx
with scipy.errstate(divide='raise', under='raise', over='raise', invalid='raise'):
for r in range(self.nsites):
self.prx[r] /= self.prx[r].sum() # depends on [control=['for'], data=['r']] # depends on [control=['with'], data=[]] |
def summary(self, alpha=0.05, start=0, batches=100, chain=None, roundto=3):
    """
    Generate a pretty-printed summary of the node.

    The summary (mean, standard deviation, MC error, posterior interval
    and quantiles) is computed from ``self.stats`` and written to standard
    output via ``print_``; nothing is returned.

    :Parameters:
    alpha : float
      The alpha level for generating posterior intervals. Defaults to
      0.05.
    start : int
      The starting index from which to summarize (each) chain. Defaults
      to zero.
    batches : int
      Batch size for calculating standard deviation for non-independent
      samples. Defaults to 100.
    chain : int
      The index for which chain to summarize. Defaults to None (all
      chains).
    roundto : int
      The number of digits to round posterior statistics.
    """
    # Calculate statistics for Node
    statdict = self.stats(
        alpha=alpha,
        start=start,
        batches=batches,
        chain=chain)
    # Number of scalar elements in the node (1 for scalars, >1 for arrays).
    size = np.size(statdict['mean'])
    print_('\n%s:' % self.__name__)
    print_(' ')
    # Initialize buffer of output lines; joined and printed in one go at the end.
    buffer = []
    # Index to interval label: find the stats key whose last word is
    # 'interval' (the posterior credible-interval entry); its exact name
    # depends on `alpha`, so it is located rather than hard-coded.
    iindex = [key.split()[-1] for key in statdict.keys()].index('interval')
    interval = list(statdict.keys())[iindex]
    # Print basic stats: header row followed by a matching-width rule.
    buffer += [
        'Mean SD MC Error %s' %
        interval]
    buffer += ['-' * len(buffer[-1])]
    indices = range(size)
    if len(indices) == 1:
        # Scalar node: use a single None index so ravel()[None] keeps the
        # full (1-element) array instead of selecting an element.
        indices = [None]
    # Round element i of array x and render it as a string.
    _format_str = lambda x, i=None, roundto=2: str(np.round(x.ravel()[i].squeeze(), roundto))
    for index in indices:
        # Extract statistics and convert to string
        m = _format_str(statdict['mean'], index, roundto)
        sd = _format_str(statdict['standard deviation'], index, roundto)
        mce = _format_str(statdict['mc error'], index, roundto)
        # Interval bounds are stored stacked; reshape to (2, size) to pick
        # the (lower, upper) pair for this element.
        hpd = str(statdict[interval].reshape(
            (2, size))[:,index].squeeze().round(roundto))
        # Build up string buffer of values: fixed 17-character columns,
        # with the interval right-aligned to the header rule width.
        valstr = m
        valstr += ' ' * (17 - len(m)) + sd
        valstr += ' ' * (17 - len(sd)) + mce
        valstr += ' ' * (len(buffer[-1]) - len(valstr) - len(hpd)) + hpd
        buffer += [valstr]
    buffer += [''] * 2
    # Print quantiles under a fixed "ruler" depicting the 2.5-97.5 range.
    buffer += ['Posterior quantiles:', '']
    buffer += [
        '2.5 25 50 75 97.5']
    buffer += [
        ' |---------------|===============|===============|---------------|']
    for index in indices:
        quantile_str = ''
        for i, q in enumerate((2.5, 25, 50, 75, 97.5)):
            qstr = _format_str(statdict['quantiles'][q], index, roundto)
            # Column width shrinks by one per column to stay aligned with
            # the ruler's pipe positions.
            quantile_str += qstr + ' ' * (17 - i - len(qstr))
        buffer += [quantile_str.strip()]
    buffer += ['']
    # Indent every line with a tab and emit the whole summary at once.
    print_('\t' + '\n\t'.join(buffer))
constant[
Generate a pretty-printed summary of the node.
:Parameters:
alpha : float
The alpha level for generating posterior intervals. Defaults to
0.05.
start : int
The starting index from which to summarize (each) chain. Defaults
to zero.
batches : int
Batch size for calculating standard deviation for non-independent
samples. Defaults to 100.
chain : int
The index for which chain to summarize. Defaults to None (all
chains).
roundto : int
The number of digits to round posterior statistics.
]
variable[statdict] assign[=] call[name[self].stats, parameter[]]
variable[size] assign[=] call[name[np].size, parameter[call[name[statdict]][constant[mean]]]]
call[name[print_], parameter[binary_operation[constant[
%s:] <ast.Mod object at 0x7da2590d6920> name[self].__name__]]]
call[name[print_], parameter[constant[ ]]]
variable[buffer] assign[=] list[[]]
variable[iindex] assign[=] call[<ast.ListComp object at 0x7da2041db9a0>.index, parameter[constant[interval]]]
variable[interval] assign[=] call[call[name[list], parameter[call[name[statdict].keys, parameter[]]]]][name[iindex]]
<ast.AugAssign object at 0x7da2041d9d20>
<ast.AugAssign object at 0x7da2041d8bb0>
variable[indices] assign[=] call[name[range], parameter[name[size]]]
if compare[call[name[len], parameter[name[indices]]] equal[==] constant[1]] begin[:]
variable[indices] assign[=] list[[<ast.Constant object at 0x7da2041d9150>]]
variable[_format_str] assign[=] <ast.Lambda object at 0x7da2041da350>
for taget[name[index]] in starred[name[indices]] begin[:]
variable[m] assign[=] call[name[_format_str], parameter[call[name[statdict]][constant[mean]], name[index], name[roundto]]]
variable[sd] assign[=] call[name[_format_str], parameter[call[name[statdict]][constant[standard deviation]], name[index], name[roundto]]]
variable[mce] assign[=] call[name[_format_str], parameter[call[name[statdict]][constant[mc error]], name[index], name[roundto]]]
variable[hpd] assign[=] call[name[str], parameter[call[call[call[call[call[name[statdict]][name[interval]].reshape, parameter[tuple[[<ast.Constant object at 0x7da18f58d870>, <ast.Name object at 0x7da18f58d0c0>]]]]][tuple[[<ast.Slice object at 0x7da18f58f610>, <ast.Name object at 0x7da18f58cdf0>]]].squeeze, parameter[]].round, parameter[name[roundto]]]]]
variable[valstr] assign[=] name[m]
<ast.AugAssign object at 0x7da18f58f5e0>
<ast.AugAssign object at 0x7da18f58cca0>
<ast.AugAssign object at 0x7da18f58dd80>
<ast.AugAssign object at 0x7da18f58dea0>
<ast.AugAssign object at 0x7da18f58dcf0>
<ast.AugAssign object at 0x7da18f58c7c0>
<ast.AugAssign object at 0x7da18f58e200>
<ast.AugAssign object at 0x7da18f58f2b0>
for taget[name[index]] in starred[name[indices]] begin[:]
variable[quantile_str] assign[=] constant[]
for taget[tuple[[<ast.Name object at 0x7da18f58e2f0>, <ast.Name object at 0x7da18f58f490>]]] in starred[call[name[enumerate], parameter[tuple[[<ast.Constant object at 0x7da18f58cc40>, <ast.Constant object at 0x7da18f58e800>, <ast.Constant object at 0x7da18f58c8b0>, <ast.Constant object at 0x7da18f58ec50>, <ast.Constant object at 0x7da18f58cd30>]]]]] begin[:]
variable[qstr] assign[=] call[name[_format_str], parameter[call[call[name[statdict]][constant[quantiles]]][name[q]], name[index], name[roundto]]]
<ast.AugAssign object at 0x7da18f58f760>
<ast.AugAssign object at 0x7da18f58ca60>
<ast.AugAssign object at 0x7da18f58e440>
call[name[print_], parameter[binary_operation[constant[ ] + call[constant[
].join, parameter[name[buffer]]]]]] | keyword[def] identifier[summary] ( identifier[self] , identifier[alpha] = literal[int] , identifier[start] = literal[int] , identifier[batches] = literal[int] , identifier[chain] = keyword[None] , identifier[roundto] = literal[int] ):
literal[string]
identifier[statdict] = identifier[self] . identifier[stats] (
identifier[alpha] = identifier[alpha] ,
identifier[start] = identifier[start] ,
identifier[batches] = identifier[batches] ,
identifier[chain] = identifier[chain] )
identifier[size] = identifier[np] . identifier[size] ( identifier[statdict] [ literal[string] ])
identifier[print_] ( literal[string] % identifier[self] . identifier[__name__] )
identifier[print_] ( literal[string] )
identifier[buffer] =[]
identifier[iindex] =[ identifier[key] . identifier[split] ()[- literal[int] ] keyword[for] identifier[key] keyword[in] identifier[statdict] . identifier[keys] ()]. identifier[index] ( literal[string] )
identifier[interval] = identifier[list] ( identifier[statdict] . identifier[keys] ())[ identifier[iindex] ]
identifier[buffer] +=[
literal[string] %
identifier[interval] ]
identifier[buffer] +=[ literal[string] * identifier[len] ( identifier[buffer] [- literal[int] ])]
identifier[indices] = identifier[range] ( identifier[size] )
keyword[if] identifier[len] ( identifier[indices] )== literal[int] :
identifier[indices] =[ keyword[None] ]
identifier[_format_str] = keyword[lambda] identifier[x] , identifier[i] = keyword[None] , identifier[roundto] = literal[int] : identifier[str] ( identifier[np] . identifier[round] ( identifier[x] . identifier[ravel] ()[ identifier[i] ]. identifier[squeeze] (), identifier[roundto] ))
keyword[for] identifier[index] keyword[in] identifier[indices] :
identifier[m] = identifier[_format_str] ( identifier[statdict] [ literal[string] ], identifier[index] , identifier[roundto] )
identifier[sd] = identifier[_format_str] ( identifier[statdict] [ literal[string] ], identifier[index] , identifier[roundto] )
identifier[mce] = identifier[_format_str] ( identifier[statdict] [ literal[string] ], identifier[index] , identifier[roundto] )
identifier[hpd] = identifier[str] ( identifier[statdict] [ identifier[interval] ]. identifier[reshape] (
( literal[int] , identifier[size] ))[:, identifier[index] ]. identifier[squeeze] (). identifier[round] ( identifier[roundto] ))
identifier[valstr] = identifier[m]
identifier[valstr] += literal[string] *( literal[int] - identifier[len] ( identifier[m] ))+ identifier[sd]
identifier[valstr] += literal[string] *( literal[int] - identifier[len] ( identifier[sd] ))+ identifier[mce]
identifier[valstr] += literal[string] *( identifier[len] ( identifier[buffer] [- literal[int] ])- identifier[len] ( identifier[valstr] )- identifier[len] ( identifier[hpd] ))+ identifier[hpd]
identifier[buffer] +=[ identifier[valstr] ]
identifier[buffer] +=[ literal[string] ]* literal[int]
identifier[buffer] +=[ literal[string] , literal[string] ]
identifier[buffer] +=[
literal[string] ]
identifier[buffer] +=[
literal[string] ]
keyword[for] identifier[index] keyword[in] identifier[indices] :
identifier[quantile_str] = literal[string]
keyword[for] identifier[i] , identifier[q] keyword[in] identifier[enumerate] (( literal[int] , literal[int] , literal[int] , literal[int] , literal[int] )):
identifier[qstr] = identifier[_format_str] ( identifier[statdict] [ literal[string] ][ identifier[q] ], identifier[index] , identifier[roundto] )
identifier[quantile_str] += identifier[qstr] + literal[string] *( literal[int] - identifier[i] - identifier[len] ( identifier[qstr] ))
identifier[buffer] +=[ identifier[quantile_str] . identifier[strip] ()]
identifier[buffer] +=[ literal[string] ]
identifier[print_] ( literal[string] + literal[string] . identifier[join] ( identifier[buffer] )) | def summary(self, alpha=0.05, start=0, batches=100, chain=None, roundto=3):
"""
Generate a pretty-printed summary of the node.
:Parameters:
alpha : float
The alpha level for generating posterior intervals. Defaults to
0.05.
start : int
The starting index from which to summarize (each) chain. Defaults
to zero.
batches : int
Batch size for calculating standard deviation for non-independent
samples. Defaults to 100.
chain : int
The index for which chain to summarize. Defaults to None (all
chains).
roundto : int
The number of digits to round posterior statistics.
"""
# Calculate statistics for Node
statdict = self.stats(alpha=alpha, start=start, batches=batches, chain=chain)
size = np.size(statdict['mean'])
print_('\n%s:' % self.__name__)
print_(' ')
# Initialize buffer
buffer = []
# Index to interval label
iindex = [key.split()[-1] for key in statdict.keys()].index('interval')
interval = list(statdict.keys())[iindex]
# Print basic stats
buffer += ['Mean SD MC Error %s' % interval]
buffer += ['-' * len(buffer[-1])]
indices = range(size)
if len(indices) == 1:
indices = [None] # depends on [control=['if'], data=[]]
_format_str = lambda x, i=None, roundto=2: str(np.round(x.ravel()[i].squeeze(), roundto))
for index in indices:
# Extract statistics and convert to string
m = _format_str(statdict['mean'], index, roundto)
sd = _format_str(statdict['standard deviation'], index, roundto)
mce = _format_str(statdict['mc error'], index, roundto)
hpd = str(statdict[interval].reshape((2, size))[:, index].squeeze().round(roundto))
# Build up string buffer of values
valstr = m
valstr += ' ' * (17 - len(m)) + sd
valstr += ' ' * (17 - len(sd)) + mce
valstr += ' ' * (len(buffer[-1]) - len(valstr) - len(hpd)) + hpd
buffer += [valstr] # depends on [control=['for'], data=['index']]
buffer += [''] * 2
# Print quantiles
buffer += ['Posterior quantiles:', '']
buffer += ['2.5 25 50 75 97.5']
buffer += [' |---------------|===============|===============|---------------|']
for index in indices:
quantile_str = ''
for (i, q) in enumerate((2.5, 25, 50, 75, 97.5)):
qstr = _format_str(statdict['quantiles'][q], index, roundto)
quantile_str += qstr + ' ' * (17 - i - len(qstr)) # depends on [control=['for'], data=[]]
buffer += [quantile_str.strip()] # depends on [control=['for'], data=['index']]
buffer += ['']
print_('\t' + '\n\t'.join(buffer)) |
def iterate_dictionary(d, path, squash_single=False):
    """
    Takes a dict, and a path delimited with slashes like A/B/C/D, and returns a list of objects found at all leaf nodes at all trajectories `dict[A][B][C][D]`. It does this using BFS not DFS.

    The word "leaf" hereby refers to an item at the search path level. That is, upon calling the function

        iterate_dictionary(d_to_search, "A/B/C/D")

    If `d_to_search` has five levels A/B/C/D/E, then D is the "leaf node level". Since `[E]` exists, then at least one object in the return list will be a dictionary.

    Rules
    ===========================
    Each node can be either
        1) an arbitrary non-list, non-dictionary object
        2) a dictionary
        3) a list of arbitrary objects
    All nodes of type 3 at each level are searched for nodes of type 1 and 2. Nodes of type 2 are the ones iterated in this tree search.
    At the current time, nodes of type 1 are *not* inspected. They are returned in a list if they are at the search path and ignored otherwise.

    Returns
    ===========================
    1) If the path is an empty string, returns the original dict
    2) *If* at least one object exists at the search path, it returns a list of all items at the search path. Using the above example terminology, a list of all objects at all trajectories `"A/B/C/D"`.
       *Special Parameter*: If the optional Boolean parameter `squash_single` is True, and the return list contains only one object, the object is returned (*not* a list), else a list with that one object is returned. This optional flag is useful so that [0] does not have to be indexed on the return list in the case where only one item is expected.
    3) None in the case that there are no objects at the search path.
    """
    # BUG FIX: the old check `len(path.split("/")) == 0` was never true
    # ("".split("/") yields [""]), so the documented empty-path behavior
    # (return the original dict) was dead code. Test the string directly.
    if not path:
        return d
    path_parts = path.split("/")
    found = []
    frontier = [d]  # BFS frontier: dict nodes whose children are still to be expanded
    try:
        for depth, part in enumerate(path_parts):
            is_leaf_level = (depth == len(path_parts) - 1)
            next_frontier = []
            for node in frontier:
                if part not in node:
                    # This branch of the tree does not follow the search path.
                    continue
                value = node[part]
                # A list node contributes each of its elements individually;
                # any other node is treated as a single child.
                children = value if isinstance(value, list) else [value]
                for child in children:
                    if is_leaf_level:
                        # At the search-path level: collect everything found.
                        found.append(child)
                    elif isinstance(child, dict):
                        # Interior level: only dicts can be descended into.
                        next_frontier.append(child)
            frontier = next_frontier
    except TypeError:
        # A non-dict, non-container node was reached where a mapping was
        # expected (e.g. `part in 5`); treat malformed input as "not found".
        # (Narrowed from the old bare `except:` so programming errors in
        # callers are no longer silently swallowed.)
        return None
    if not found:
        return None
    if squash_single and len(found) == 1:
        return found[0]
    return found
constant[
Takes a dict, and a path delimited with slashes like A/B/C/D, and returns a list of objects found at all leaf nodes at all trajectories `dict[A][B][C][D]`. It does this using BFS not DFS.
The word "leaf" hereby refers to an item at the search path level. That is, upon calling the function
iterate_dictionary(d_to_search, "A/B/C/D")
If `d_to_search` has five levels A/B/C/D/E, then D is the "leaf node level". Since `[E]` exists, then at least one object in the return list will be a dictionary.
Rules
===========================
Each node can be either
1) an arbitrary non-list, non-dictionary object
2) a dictionary
3) a list of arbitrary objects
All nodes of type 3 at each level are searched for nodes of type 1 and 2. Nodes of type 2 are the ones iterated in this tree search.
At the current time, nodes of type 1 are *not* inspected. They are returned in a list if they are at the search path and ignored otherwise.
Returns
===========================
1) If the path is an empty string, returns the original dict
2) *If* at least one object exists at the search path, it returns a list of all items at the search path. Using the above example terminology, a list of all objects at all trajectories `"A/B/C/D"`.
*Special Parameter*: If the optional Boolean parameter `squash_single` is True, and the return list contains only one object, the object is returned (*not* a list), else a list with that one object is returned. This optional flag is useful so that [0] does not have to be indexed on the return list in the case where only one item is expected.
3) None in the case that there are no objects at the search path.
]
variable[path_parts] assign[=] call[name[path].split, parameter[constant[/]]]
variable[return_list] assign[=] list[[]]
if compare[call[name[len], parameter[name[path_parts]]] equal[==] constant[0]] begin[:]
return[name[d]] | keyword[def] identifier[iterate_dictionary] ( identifier[d] , identifier[path] , identifier[squash_single] = keyword[False] ):
literal[string]
identifier[path_parts] = identifier[path] . identifier[split] ( literal[string] )
identifier[return_list] =[]
keyword[if] identifier[len] ( identifier[path_parts] )== literal[int] :
keyword[return] identifier[d]
keyword[else] :
keyword[try] :
identifier[sub_dicts] =[ identifier[d] ]
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[len] ( identifier[path_parts] )):
identifier[new_sub_dicts] =[]
keyword[for] identifier[s] keyword[in] identifier[sub_dicts] :
keyword[if] identifier[path_parts] [ identifier[i] ] keyword[in] identifier[s] :
identifier[the_list] = identifier[s] [ identifier[path_parts] [ identifier[i] ]] keyword[if] identifier[isinstance] ( identifier[s] [ identifier[path_parts] [ identifier[i] ]], identifier[list] ) keyword[else] [ identifier[s] [ identifier[path_parts] [ identifier[i] ]]]
keyword[for] identifier[j] keyword[in] identifier[the_list] :
keyword[if] identifier[i] < identifier[len] ( identifier[path_parts] )- literal[int] :
keyword[if] identifier[isinstance] ( identifier[j] , identifier[dict] ):
identifier[new_sub_dicts] . identifier[append] ( identifier[j] )
keyword[else] :
identifier[return_list] . identifier[append] ( identifier[j] )
identifier[sub_dicts] = identifier[new_sub_dicts]
keyword[return] identifier[return_list] [ literal[int] ] keyword[if] identifier[squash_single] keyword[and] identifier[len] ( identifier[return_list] )== literal[int] keyword[else] identifier[return_list] keyword[if] identifier[len] ( identifier[return_list] )>= literal[int] keyword[else] keyword[None]
keyword[except] :
keyword[return] keyword[None] | def iterate_dictionary(d, path, squash_single=False):
"""
Takes a dict, and a path delimited with slashes like A/B/C/D, and returns a list of objects found at all leaf nodes at all trajectories `dict[A][B][C][D]`. It does this using BFS not DFS.
The word "leaf" hereby refers to an item at the search path level. That is, upon calling the function
iterate_dictionary(d_to_search, "A/B/C/D")
If `d_to_search` has five levels A/B/C/D/E, then D is the "leaf node level". Since `[E]` exists, then at least one object in the return list will be a dictionary.
Rules
===========================
Each node can be either
1) an arbitrary non-list, non-dictionary object
2) a dictionary
3) a list of arbitrary objects
All nodes of type 3 at each level are searched for nodes of type 1 and 2. Nodes of type 2 are the ones iterated in this tree search.
At the current time, nodes of type 1 are *not* inspected. They are returned in a list if they are at the search path and ignored otherwise.
Returns
===========================
1) If the path is an empty string, returns the original dict
2) *If* at least one object exists at the search path, it returns a list of all items at the search path. Using the above example terminology, a list of all objects at all trajectories `"A/B/C/D"`.
*Special Parameter*: If the optional Boolean parameter `squash_single` is True, and the return list contains only one object, the object is returned (*not* a list), else a list with that one object is returned. This optional flag is useful so that [0] does not have to be indexed on the return list in the case where only one item is expected.
3) None in the case that there are no objects at the search path.
"""
path_parts = path.split('/')
return_list = []
if len(path_parts) == 0: #no search string
return d # depends on [control=['if'], data=[]]
else:
try:
sub_dicts = [d] #BFS, start with root node
for i in range(0, len(path_parts)): #BFS
new_sub_dicts = []
for s in sub_dicts:
if path_parts[i] in s: #this tree node is part of the search path
the_list = s[path_parts[i]] if isinstance(s[path_parts[i]], list) else [s[path_parts[i]]]
for j in the_list:
if i < len(path_parts) - 1: #not a leaf node; check level
if isinstance(j, dict): #skip this non-leaf node if not a dict
new_sub_dicts.append(j) #BFS expansion # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else: #leaf node at the desired path; add to final return list
return_list.append(j) # depends on [control=['for'], data=['j']] # depends on [control=['if'], data=['s']] # depends on [control=['for'], data=['s']]
sub_dicts = new_sub_dicts # depends on [control=['for'], data=['i']] #return
return return_list[0] if squash_single and len(return_list) == 1 else return_list if len(return_list) >= 1 else None # depends on [control=['try'], data=[]]
except:
return None # depends on [control=['except'], data=[]] |
def list_move_to_back(l, value='other'):
    """If *value* occurs in the sequence, move its first occurrence to the end.

    Always returns a new list built from *l*; the input is left untouched.
    When *value* is absent the copy is returned unchanged.
    """
    result = list(l)
    try:
        result.remove(value)
    except ValueError:
        # Value not present: nothing to move.
        return result
    result.append(value)
    return result
constant[if the value is in the list, move it to the back and return it.]
variable[l] assign[=] call[name[list], parameter[name[l]]]
if compare[name[value] in name[l]] begin[:]
call[name[l].remove, parameter[name[value]]]
call[name[l].append, parameter[name[value]]]
return[name[l]] | keyword[def] identifier[list_move_to_back] ( identifier[l] , identifier[value] = literal[string] ):
literal[string]
identifier[l] = identifier[list] ( identifier[l] )
keyword[if] identifier[value] keyword[in] identifier[l] :
identifier[l] . identifier[remove] ( identifier[value] )
identifier[l] . identifier[append] ( identifier[value] )
keyword[return] identifier[l] | def list_move_to_back(l, value='other'):
"""if the value is in the list, move it to the back and return it."""
l = list(l)
if value in l:
l.remove(value)
l.append(value) # depends on [control=['if'], data=['value', 'l']]
return l |
def length_prefix(length, offset):
    """Construct the prefix to lists or strings denoting their length.

    :param length: the length of the item in bytes
    :param offset: ``0x80`` when encoding raw bytes, ``0xc0`` when encoding a
                   list
    :raises ValueError: if ``length`` is not representable (>= ``LONG_LENGTH``)
    """
    if length >= LONG_LENGTH:
        raise ValueError('Length greater than 256**8')
    if length < 56:
        # Short item: the length fits directly into the single prefix byte.
        return ALL_BYTES[offset + length]
    # Long item: one marker byte encoding the byte-width of the length,
    # followed by the length itself in big-endian form.
    encoded_length = int_to_big_endian(length)
    return ALL_BYTES[offset + 55 + len(encoded_length)] + encoded_length
constant[Construct the prefix to lists or strings denoting their length.
:param length: the length of the item in bytes
:param offset: ``0x80`` when encoding raw bytes, ``0xc0`` when encoding a
list
]
if compare[name[length] less[<] constant[56]] begin[:]
return[call[name[ALL_BYTES]][binary_operation[name[offset] + name[length]]]] | keyword[def] identifier[length_prefix] ( identifier[length] , identifier[offset] ):
literal[string]
keyword[if] identifier[length] < literal[int] :
keyword[return] identifier[ALL_BYTES] [ identifier[offset] + identifier[length] ]
keyword[elif] identifier[length] < identifier[LONG_LENGTH] :
identifier[length_string] = identifier[int_to_big_endian] ( identifier[length] )
keyword[return] identifier[ALL_BYTES] [ identifier[offset] + literal[int] - literal[int] + identifier[len] ( identifier[length_string] )]+ identifier[length_string]
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] ) | def length_prefix(length, offset):
"""Construct the prefix to lists or strings denoting their length.
:param length: the length of the item in bytes
:param offset: ``0x80`` when encoding raw bytes, ``0xc0`` when encoding a
list
"""
if length < 56:
return ALL_BYTES[offset + length] # depends on [control=['if'], data=['length']]
elif length < LONG_LENGTH:
length_string = int_to_big_endian(length)
return ALL_BYTES[offset + 56 - 1 + len(length_string)] + length_string # depends on [control=['if'], data=['length']]
else:
raise ValueError('Length greater than 256**8') |
def with_stmt__26(self, with_loc, context, with_var, colon_loc, body):
    """(2.6, 3.0) with_stmt: 'with' test [ with_var ] ':' suite"""
    # Build the single with-item: the optional "as <target>" part determines
    # both the item's variables and how far its source location extends.
    if with_var:
        as_loc, optional_vars = with_var
        item_loc = context.loc.join(optional_vars.loc)
    else:
        as_loc = optional_vars = None
        item_loc = context.loc
    item = ast.withitem(context_expr=context, optional_vars=optional_vars,
                        as_loc=as_loc, loc=item_loc)
    # Pre-3.1 grammar allows exactly one context manager per statement.
    return ast.With(items=[item], body=body,
                    keyword_loc=with_loc, colon_loc=colon_loc,
                    loc=with_loc.join(body[-1].loc))
constant[(2.6, 3.0) with_stmt: 'with' test [ with_var ] ':' suite]
if name[with_var] begin[:]
<ast.Tuple object at 0x7da20e9b1300> assign[=] name[with_var]
variable[item] assign[=] call[name[ast].withitem, parameter[]]
return[call[name[ast].With, parameter[]]] | keyword[def] identifier[with_stmt__26] ( identifier[self] , identifier[with_loc] , identifier[context] , identifier[with_var] , identifier[colon_loc] , identifier[body] ):
literal[string]
keyword[if] identifier[with_var] :
identifier[as_loc] , identifier[optional_vars] = identifier[with_var]
identifier[item] = identifier[ast] . identifier[withitem] ( identifier[context_expr] = identifier[context] , identifier[optional_vars] = identifier[optional_vars] ,
identifier[as_loc] = identifier[as_loc] , identifier[loc] = identifier[context] . identifier[loc] . identifier[join] ( identifier[optional_vars] . identifier[loc] ))
keyword[else] :
identifier[item] = identifier[ast] . identifier[withitem] ( identifier[context_expr] = identifier[context] , identifier[optional_vars] = keyword[None] ,
identifier[as_loc] = keyword[None] , identifier[loc] = identifier[context] . identifier[loc] )
keyword[return] identifier[ast] . identifier[With] ( identifier[items] =[ identifier[item] ], identifier[body] = identifier[body] ,
identifier[keyword_loc] = identifier[with_loc] , identifier[colon_loc] = identifier[colon_loc] ,
identifier[loc] = identifier[with_loc] . identifier[join] ( identifier[body] [- literal[int] ]. identifier[loc] )) | def with_stmt__26(self, with_loc, context, with_var, colon_loc, body):
"""(2.6, 3.0) with_stmt: 'with' test [ with_var ] ':' suite"""
if with_var:
(as_loc, optional_vars) = with_var
item = ast.withitem(context_expr=context, optional_vars=optional_vars, as_loc=as_loc, loc=context.loc.join(optional_vars.loc)) # depends on [control=['if'], data=[]]
else:
item = ast.withitem(context_expr=context, optional_vars=None, as_loc=None, loc=context.loc)
return ast.With(items=[item], body=body, keyword_loc=with_loc, colon_loc=colon_loc, loc=with_loc.join(body[-1].loc)) |
def get_languages_from_model(app_label, model_label):
    """
    Get the languages configured for the current model

    :param model_label:
    :param app_label:
    :return: list of language codes configured for the model, or an empty
             list when no configuration row exists
    """
    # Configuration rows are keyed by the "app - model" composite string.
    model_key = '{} - {}'.format(app_label, model_label)
    try:
        configured = TransModelLanguage.objects.filter(model=model_key).get()
        return [language.code for language in configured.languages.all()]
    except TransModelLanguage.DoesNotExist:
        return []
constant[
Get the languages configured for the current model
:param model_label:
:param app_label:
:return:
]
<ast.Try object at 0x7da204566a40> | keyword[def] identifier[get_languages_from_model] ( identifier[app_label] , identifier[model_label] ):
literal[string]
keyword[try] :
identifier[mod_lan] = identifier[TransModelLanguage] . identifier[objects] . identifier[filter] ( identifier[model] = literal[string] . identifier[format] ( identifier[app_label] , identifier[model_label] )). identifier[get] ()
identifier[languages] =[ identifier[lang] . identifier[code] keyword[for] identifier[lang] keyword[in] identifier[mod_lan] . identifier[languages] . identifier[all] ()]
keyword[return] identifier[languages]
keyword[except] identifier[TransModelLanguage] . identifier[DoesNotExist] :
keyword[return] [] | def get_languages_from_model(app_label, model_label):
"""
Get the languages configured for the current model
:param model_label:
:param app_label:
:return:
"""
try:
mod_lan = TransModelLanguage.objects.filter(model='{} - {}'.format(app_label, model_label)).get()
languages = [lang.code for lang in mod_lan.languages.all()]
return languages # depends on [control=['try'], data=[]]
except TransModelLanguage.DoesNotExist:
return [] # depends on [control=['except'], data=[]] |
def _property_search(self, fobj):
    """Return full name if object is a class property, otherwise return None.

    The frame's ``self`` local identifies the owning class; the frame's
    code object is then compared against the getter/setter/deleter of
    every property defined on that class.

    :param fobj: frame object to classify
    :returns: a string like ``"ClassName.prop(getter)"`` (or ``setter`` /
        ``deleter``) when the frame executes a property action, else ``None``
    """
    # Get class object from the frame's "self" local; only bound-method
    # frames can possibly belong to a property action.
    scontext = fobj.f_locals.get("self", None)
    class_obj = scontext.__class__ if scontext is not None else None
    if not class_obj:
        # NOTE(review): the explicit del statements throughout presumably
        # drop frame references early to break reference cycles -- confirm.
        del fobj, scontext, class_obj
        return None
    # Get class properties objects
    class_props = [
        (member_name, member_obj)
        for member_name, member_obj in inspect.getmembers(class_obj)
        if isinstance(member_obj, property)
    ]
    if not class_props:
        del fobj, scontext, class_obj
        return None
    # Map a compiled .pyc path back to its .py source so the callables
    # database can be queried by (file, line).
    class_file = inspect.getfile(class_obj).replace(".pyc", ".py")
    class_name = self._callables_obj.get_callable_from_line(
        class_file, inspect.getsourcelines(class_obj)[1]
    )
    # Get properties actions: for each property record the code-object ids
    # of its fget/fset/fdel, both as-is and fully unwrapped.
    prop_actions_dicts = {}
    for prop_name, prop_obj in class_props:
        prop_dict = {"fdel": None, "fget": None, "fset": None}
        for action in prop_dict:
            action_obj = getattr(prop_obj, action)
            if action_obj:
                # Unwrap action object. Contracts match the wrapped
                # code object while exceptions registered in the
                # body of the function/method which has decorators
                # match the unwrapped object
                prev_func_obj, next_func_obj = (
                    action_obj,
                    getattr(action_obj, "__wrapped__", None),
                )
                while next_func_obj:
                    prev_func_obj, next_func_obj = (
                        next_func_obj,
                        getattr(next_func_obj, "__wrapped__", None),
                    )
                prop_dict[action] = [
                    id(_get_func_code(action_obj)),
                    id(_get_func_code(prev_func_obj)),
                ]
        prop_actions_dicts[prop_name] = prop_dict
    # Match the frame's code-object id against the directory of property
    # actions built above.
    func_id = id(fobj.f_code)
    desc_dict = {"fget": "getter", "fset": "setter", "fdel": "deleter"}
    for prop_name, prop_actions_dict in prop_actions_dicts.items():
        for action_name, action_id_list in prop_actions_dict.items():
            if action_id_list and (func_id in action_id_list):
                prop_name = ".".join([class_name, prop_name])
                del fobj, scontext, class_obj, class_props
                return "{prop_name}({prop_action})".format(
                    prop_name=prop_name, prop_action=desc_dict[action_name]
                )
    return None
constant[Return full name if object is a class property, otherwise return None.]
variable[scontext] assign[=] call[name[fobj].f_locals.get, parameter[constant[self], constant[None]]]
variable[class_obj] assign[=] <ast.IfExp object at 0x7da20c6e50f0>
if <ast.UnaryOp object at 0x7da20c6e4d60> begin[:]
<ast.Delete object at 0x7da20c6e4850>
return[constant[None]]
variable[class_props] assign[=] <ast.ListComp object at 0x7da20c6e43d0>
if <ast.UnaryOp object at 0x7da20c6e7c40> begin[:]
<ast.Delete object at 0x7da20c6e5c60>
return[constant[None]]
variable[class_file] assign[=] call[call[name[inspect].getfile, parameter[name[class_obj]]].replace, parameter[constant[.pyc], constant[.py]]]
variable[class_name] assign[=] call[name[self]._callables_obj.get_callable_from_line, parameter[name[class_file], call[call[name[inspect].getsourcelines, parameter[name[class_obj]]]][constant[1]]]]
variable[prop_actions_dicts] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da20c6e5930>, <ast.Name object at 0x7da20c6e4730>]]] in starred[name[class_props]] begin[:]
variable[prop_dict] assign[=] dictionary[[<ast.Constant object at 0x7da20c6e6860>, <ast.Constant object at 0x7da20c6e7a60>, <ast.Constant object at 0x7da20c6e52a0>], [<ast.Constant object at 0x7da20c6e4d30>, <ast.Constant object at 0x7da20c6e78e0>, <ast.Constant object at 0x7da20c6e7820>]]
for taget[name[action]] in starred[name[prop_dict]] begin[:]
variable[action_obj] assign[=] call[name[getattr], parameter[name[prop_obj], name[action]]]
if name[action_obj] begin[:]
<ast.Tuple object at 0x7da20c6e7c10> assign[=] tuple[[<ast.Name object at 0x7da20c6e4970>, <ast.Call object at 0x7da20c6e4ac0>]]
while name[next_func_obj] begin[:]
<ast.Tuple object at 0x7da20c6c48b0> assign[=] tuple[[<ast.Name object at 0x7da20c6c4bb0>, <ast.Call object at 0x7da20c6c6f20>]]
call[name[prop_dict]][name[action]] assign[=] list[[<ast.Call object at 0x7da20c6c6620>, <ast.Call object at 0x7da20c6c6650>]]
call[name[prop_actions_dicts]][name[prop_name]] assign[=] name[prop_dict]
variable[func_id] assign[=] call[name[id], parameter[name[fobj].f_code]]
variable[desc_dict] assign[=] dictionary[[<ast.Constant object at 0x7da20c6c5930>, <ast.Constant object at 0x7da20c6c5990>, <ast.Constant object at 0x7da20c6c7160>], [<ast.Constant object at 0x7da20c6c76a0>, <ast.Constant object at 0x7da20c6c4670>, <ast.Constant object at 0x7da20c6c7a00>]]
for taget[tuple[[<ast.Name object at 0x7da20c6c7b80>, <ast.Name object at 0x7da20c6c77f0>]]] in starred[call[name[prop_actions_dicts].items, parameter[]]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da20c6c5b10>, <ast.Name object at 0x7da20c6c6e00>]]] in starred[call[name[prop_actions_dict].items, parameter[]]] begin[:]
if <ast.BoolOp object at 0x7da20c6c4f10> begin[:]
variable[prop_name] assign[=] call[constant[.].join, parameter[list[[<ast.Name object at 0x7da20c6c58d0>, <ast.Name object at 0x7da20c6c7010>]]]]
<ast.Delete object at 0x7da20c6c5180>
return[call[constant[{prop_name}({prop_action})].format, parameter[]]]
return[constant[None]] | keyword[def] identifier[_property_search] ( identifier[self] , identifier[fobj] ):
literal[string]
identifier[scontext] = identifier[fobj] . identifier[f_locals] . identifier[get] ( literal[string] , keyword[None] )
identifier[class_obj] = identifier[scontext] . identifier[__class__] keyword[if] identifier[scontext] keyword[is] keyword[not] keyword[None] keyword[else] keyword[None]
keyword[if] keyword[not] identifier[class_obj] :
keyword[del] identifier[fobj] , identifier[scontext] , identifier[class_obj]
keyword[return] keyword[None]
identifier[class_props] =[
( identifier[member_name] , identifier[member_obj] )
keyword[for] identifier[member_name] , identifier[member_obj] keyword[in] identifier[inspect] . identifier[getmembers] ( identifier[class_obj] )
keyword[if] identifier[isinstance] ( identifier[member_obj] , identifier[property] )
]
keyword[if] keyword[not] identifier[class_props] :
keyword[del] identifier[fobj] , identifier[scontext] , identifier[class_obj]
keyword[return] keyword[None]
identifier[class_file] = identifier[inspect] . identifier[getfile] ( identifier[class_obj] ). identifier[replace] ( literal[string] , literal[string] )
identifier[class_name] = identifier[self] . identifier[_callables_obj] . identifier[get_callable_from_line] (
identifier[class_file] , identifier[inspect] . identifier[getsourcelines] ( identifier[class_obj] )[ literal[int] ]
)
identifier[prop_actions_dicts] ={}
keyword[for] identifier[prop_name] , identifier[prop_obj] keyword[in] identifier[class_props] :
identifier[prop_dict] ={ literal[string] : keyword[None] , literal[string] : keyword[None] , literal[string] : keyword[None] }
keyword[for] identifier[action] keyword[in] identifier[prop_dict] :
identifier[action_obj] = identifier[getattr] ( identifier[prop_obj] , identifier[action] )
keyword[if] identifier[action_obj] :
identifier[prev_func_obj] , identifier[next_func_obj] =(
identifier[action_obj] ,
identifier[getattr] ( identifier[action_obj] , literal[string] , keyword[None] ),
)
keyword[while] identifier[next_func_obj] :
identifier[prev_func_obj] , identifier[next_func_obj] =(
identifier[next_func_obj] ,
identifier[getattr] ( identifier[next_func_obj] , literal[string] , keyword[None] ),
)
identifier[prop_dict] [ identifier[action] ]=[
identifier[id] ( identifier[_get_func_code] ( identifier[action_obj] )),
identifier[id] ( identifier[_get_func_code] ( identifier[prev_func_obj] )),
]
identifier[prop_actions_dicts] [ identifier[prop_name] ]= identifier[prop_dict]
identifier[func_id] = identifier[id] ( identifier[fobj] . identifier[f_code] )
identifier[desc_dict] ={ literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] }
keyword[for] identifier[prop_name] , identifier[prop_actions_dict] keyword[in] identifier[prop_actions_dicts] . identifier[items] ():
keyword[for] identifier[action_name] , identifier[action_id_list] keyword[in] identifier[prop_actions_dict] . identifier[items] ():
keyword[if] identifier[action_id_list] keyword[and] ( identifier[func_id] keyword[in] identifier[action_id_list] ):
identifier[prop_name] = literal[string] . identifier[join] ([ identifier[class_name] , identifier[prop_name] ])
keyword[del] identifier[fobj] , identifier[scontext] , identifier[class_obj] , identifier[class_props]
keyword[return] literal[string] . identifier[format] (
identifier[prop_name] = identifier[prop_name] , identifier[prop_action] = identifier[desc_dict] [ identifier[action_name] ]
)
keyword[return] keyword[None] | def _property_search(self, fobj):
"""Return full name if object is a class property, otherwise return None."""
# Get class object
scontext = fobj.f_locals.get('self', None)
class_obj = scontext.__class__ if scontext is not None else None
if not class_obj:
del fobj, scontext, class_obj
return None # depends on [control=['if'], data=[]]
# Get class properties objects
class_props = [(member_name, member_obj) for (member_name, member_obj) in inspect.getmembers(class_obj) if isinstance(member_obj, property)]
if not class_props:
del fobj, scontext, class_obj
return None # depends on [control=['if'], data=[]]
class_file = inspect.getfile(class_obj).replace('.pyc', '.py')
class_name = self._callables_obj.get_callable_from_line(class_file, inspect.getsourcelines(class_obj)[1])
# Get properties actions
prop_actions_dicts = {}
for (prop_name, prop_obj) in class_props:
prop_dict = {'fdel': None, 'fget': None, 'fset': None}
for action in prop_dict:
action_obj = getattr(prop_obj, action)
if action_obj:
# Unwrap action object. Contracts match the wrapped
# code object while exceptions registered in the
# body of the function/method which has decorators
# match the unwrapped object
(prev_func_obj, next_func_obj) = (action_obj, getattr(action_obj, '__wrapped__', None))
while next_func_obj:
(prev_func_obj, next_func_obj) = (next_func_obj, getattr(next_func_obj, '__wrapped__', None)) # depends on [control=['while'], data=[]]
prop_dict[action] = [id(_get_func_code(action_obj)), id(_get_func_code(prev_func_obj))] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['action']]
prop_actions_dicts[prop_name] = prop_dict # depends on [control=['for'], data=[]]
# Create properties directory
func_id = id(fobj.f_code)
desc_dict = {'fget': 'getter', 'fset': 'setter', 'fdel': 'deleter'}
for (prop_name, prop_actions_dict) in prop_actions_dicts.items():
for (action_name, action_id_list) in prop_actions_dict.items():
if action_id_list and func_id in action_id_list:
prop_name = '.'.join([class_name, prop_name])
del fobj, scontext, class_obj, class_props
return '{prop_name}({prop_action})'.format(prop_name=prop_name, prop_action=desc_dict[action_name]) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['for'], data=[]]
return None |
def register(linter):
    """Required method to auto register this checker with pylint."""
    for checker_cls in (StringCurlyBracesFormatIndexChecker, StringLiteralChecker):
        linter.register_checker(checker_cls(linter))
constant[required method to auto register this checker ]
call[name[linter].register_checker, parameter[call[name[StringCurlyBracesFormatIndexChecker], parameter[name[linter]]]]]
call[name[linter].register_checker, parameter[call[name[StringLiteralChecker], parameter[name[linter]]]]] | keyword[def] identifier[register] ( identifier[linter] ):
literal[string]
identifier[linter] . identifier[register_checker] ( identifier[StringCurlyBracesFormatIndexChecker] ( identifier[linter] ))
identifier[linter] . identifier[register_checker] ( identifier[StringLiteralChecker] ( identifier[linter] )) | def register(linter):
"""required method to auto register this checker """
linter.register_checker(StringCurlyBracesFormatIndexChecker(linter))
linter.register_checker(StringLiteralChecker(linter)) |
def retry(self):
    """Restart every failed task of this job."""
    logger.info('Job {0} retrying all failed tasks'.format(self.name))
    self.initialize_snapshot()
    # Collect the names of all tasks whose run-log entry records a failure.
    failed = [
        name
        for name, entry in self.run_log['tasks'].items()
        if entry.get('success', True) == False
    ]
    if not failed:
        raise DagobahError('no failed tasks to retry')
    self._set_status('running')
    self.run_log['last_retry_time'] = datetime.utcnow()
    logger.debug('Job {0} seeding run logs'.format(self.name))
    for name in failed:
        self._put_task_in_run_log(name)
        self.tasks[name].start()
    self._commit_run_log()
constant[ Restarts failed tasks of a job. ]
call[name[logger].info, parameter[call[constant[Job {0} retrying all failed tasks].format, parameter[name[self].name]]]]
call[name[self].initialize_snapshot, parameter[]]
variable[failed_task_names] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1b0e3ab90>, <ast.Name object at 0x7da1b0e39690>]]] in starred[call[call[name[self].run_log][constant[tasks]].items, parameter[]]] begin[:]
if compare[call[name[log].get, parameter[constant[success], constant[True]]] equal[==] constant[False]] begin[:]
call[name[failed_task_names].append, parameter[name[task_name]]]
if compare[call[name[len], parameter[name[failed_task_names]]] equal[==] constant[0]] begin[:]
<ast.Raise object at 0x7da1b0e393c0>
call[name[self]._set_status, parameter[constant[running]]]
call[name[self].run_log][constant[last_retry_time]] assign[=] call[name[datetime].utcnow, parameter[]]
call[name[logger].debug, parameter[call[constant[Job {0} seeding run logs].format, parameter[name[self].name]]]]
for taget[name[task_name]] in starred[name[failed_task_names]] begin[:]
call[name[self]._put_task_in_run_log, parameter[name[task_name]]]
call[call[name[self].tasks][name[task_name]].start, parameter[]]
call[name[self]._commit_run_log, parameter[]] | keyword[def] identifier[retry] ( identifier[self] ):
literal[string]
identifier[logger] . identifier[info] ( literal[string] . identifier[format] ( identifier[self] . identifier[name] ))
identifier[self] . identifier[initialize_snapshot] ()
identifier[failed_task_names] =[]
keyword[for] identifier[task_name] , identifier[log] keyword[in] identifier[self] . identifier[run_log] [ literal[string] ]. identifier[items] ():
keyword[if] identifier[log] . identifier[get] ( literal[string] , keyword[True] )== keyword[False] :
identifier[failed_task_names] . identifier[append] ( identifier[task_name] )
keyword[if] identifier[len] ( identifier[failed_task_names] )== literal[int] :
keyword[raise] identifier[DagobahError] ( literal[string] )
identifier[self] . identifier[_set_status] ( literal[string] )
identifier[self] . identifier[run_log] [ literal[string] ]= identifier[datetime] . identifier[utcnow] ()
identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[self] . identifier[name] ))
keyword[for] identifier[task_name] keyword[in] identifier[failed_task_names] :
identifier[self] . identifier[_put_task_in_run_log] ( identifier[task_name] )
identifier[self] . identifier[tasks] [ identifier[task_name] ]. identifier[start] ()
identifier[self] . identifier[_commit_run_log] () | def retry(self):
""" Restarts failed tasks of a job. """
logger.info('Job {0} retrying all failed tasks'.format(self.name))
self.initialize_snapshot()
failed_task_names = []
for (task_name, log) in self.run_log['tasks'].items():
if log.get('success', True) == False:
failed_task_names.append(task_name) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
if len(failed_task_names) == 0:
raise DagobahError('no failed tasks to retry') # depends on [control=['if'], data=[]]
self._set_status('running')
self.run_log['last_retry_time'] = datetime.utcnow()
logger.debug('Job {0} seeding run logs'.format(self.name))
for task_name in failed_task_names:
self._put_task_in_run_log(task_name)
self.tasks[task_name].start() # depends on [control=['for'], data=['task_name']]
self._commit_run_log() |
def register(self, observer):
    """Subscribe *observer* so it is notified when changes occur.

    For more details, see :class:`.UpdatesObserver`

    :param UpdatesObserver observer: observer to be notified of changes
    """
    observer.manager = self
    self.observer_manager.append(observer)
constant[
Register an observer for it be notified when occurs changes.
For more details, see :class:`.UpdatesObserver`
:param UpdatesObserver observer: Observer that will be notified then occurs changes
]
call[name[self].observer_manager.append, parameter[name[observer]]]
name[observer].manager assign[=] name[self] | keyword[def] identifier[register] ( identifier[self] , identifier[observer] ):
literal[string]
identifier[self] . identifier[observer_manager] . identifier[append] ( identifier[observer] )
identifier[observer] . identifier[manager] = identifier[self] | def register(self, observer):
"""
Register an observer for it be notified when occurs changes.
For more details, see :class:`.UpdatesObserver`
:param UpdatesObserver observer: Observer that will be notified then occurs changes
"""
self.observer_manager.append(observer)
observer.manager = self |
def filter_metadata(metadata, user_filter, default_filter):
    """Filter the cell or notebook metadata in place, per the user preference.

    Keys not accepted by the combined user/default filters are removed
    from *metadata*; the (mutated) mapping is returned for convenience.
    """
    present = set(metadata)
    allowed = apply_metadata_filters(user_filter, default_filter, present)
    for key in present:
        if key not in allowed:
            del metadata[key]
    return metadata
constant[Filter the cell or notebook metadata, according to the user preference]
variable[actual_keys] assign[=] call[name[set], parameter[call[name[metadata].keys, parameter[]]]]
variable[keep_keys] assign[=] call[name[apply_metadata_filters], parameter[name[user_filter], name[default_filter], name[actual_keys]]]
for taget[name[key]] in starred[name[actual_keys]] begin[:]
if compare[name[key] <ast.NotIn object at 0x7da2590d7190> name[keep_keys]] begin[:]
call[name[metadata].pop, parameter[name[key]]]
return[name[metadata]] | keyword[def] identifier[filter_metadata] ( identifier[metadata] , identifier[user_filter] , identifier[default_filter] ):
literal[string]
identifier[actual_keys] = identifier[set] ( identifier[metadata] . identifier[keys] ())
identifier[keep_keys] = identifier[apply_metadata_filters] ( identifier[user_filter] , identifier[default_filter] , identifier[actual_keys] )
keyword[for] identifier[key] keyword[in] identifier[actual_keys] :
keyword[if] identifier[key] keyword[not] keyword[in] identifier[keep_keys] :
identifier[metadata] . identifier[pop] ( identifier[key] )
keyword[return] identifier[metadata] | def filter_metadata(metadata, user_filter, default_filter):
"""Filter the cell or notebook metadata, according to the user preference"""
actual_keys = set(metadata.keys())
keep_keys = apply_metadata_filters(user_filter, default_filter, actual_keys)
for key in actual_keys:
if key not in keep_keys:
metadata.pop(key) # depends on [control=['if'], data=['key']] # depends on [control=['for'], data=['key']]
return metadata |
def _remove(self, telegram):
"""
Remove telegram from buffer and incomplete data preceding it. This
is easier than validating the data before adding it to the buffer.
:param str telegram:
:return:
"""
# Remove data leading up to the telegram and the telegram itself.
index = self._buffer.index(telegram) + len(telegram)
self._buffer = self._buffer[index:] | def function[_remove, parameter[self, telegram]]:
constant[
Remove telegram from buffer and incomplete data preceding it. This
is easier than validating the data before adding it to the buffer.
:param str telegram:
:return:
]
variable[index] assign[=] binary_operation[call[name[self]._buffer.index, parameter[name[telegram]]] + call[name[len], parameter[name[telegram]]]]
name[self]._buffer assign[=] call[name[self]._buffer][<ast.Slice object at 0x7da18ede4700>] | keyword[def] identifier[_remove] ( identifier[self] , identifier[telegram] ):
literal[string]
identifier[index] = identifier[self] . identifier[_buffer] . identifier[index] ( identifier[telegram] )+ identifier[len] ( identifier[telegram] )
identifier[self] . identifier[_buffer] = identifier[self] . identifier[_buffer] [ identifier[index] :] | def _remove(self, telegram):
"""
Remove telegram from buffer and incomplete data preceding it. This
is easier than validating the data before adding it to the buffer.
:param str telegram:
:return:
"""
# Remove data leading up to the telegram and the telegram itself.
index = self._buffer.index(telegram) + len(telegram)
self._buffer = self._buffer[index:] |
def get_nn(unit):
    """Count the Arabic-numeral characters in a text line.

    Keyword arguments:
    unit -- the text line
    Return:
    nn -- number of digit characters found
    """
    digit_count = 0
    found = re.findall(number, unit)
    if found:
        digit_count = len(''.join(found))
    return int(digit_count)
constant[获取文本行中阿拉伯数字数的个数
Keyword arguments:
unit -- 文本行
Return:
nn -- 数字数
]
variable[nn] assign[=] constant[0]
variable[match_re] assign[=] call[name[re].findall, parameter[name[number], name[unit]]]
if name[match_re] begin[:]
variable[string] assign[=] call[constant[].join, parameter[name[match_re]]]
variable[nn] assign[=] call[name[len], parameter[name[string]]]
return[call[name[int], parameter[name[nn]]]] | keyword[def] identifier[get_nn] ( identifier[unit] ):
literal[string]
identifier[nn] = literal[int]
identifier[match_re] = identifier[re] . identifier[findall] ( identifier[number] , identifier[unit] )
keyword[if] identifier[match_re] :
identifier[string] = literal[string] . identifier[join] ( identifier[match_re] )
identifier[nn] = identifier[len] ( identifier[string] )
keyword[return] identifier[int] ( identifier[nn] ) | def get_nn(unit):
"""获取文本行中阿拉伯数字数的个数
Keyword arguments:
unit -- 文本行
Return:
nn -- 数字数
"""
nn = 0
match_re = re.findall(number, unit)
if match_re:
string = ''.join(match_re)
nn = len(string) # depends on [control=['if'], data=[]]
return int(nn) |
def add_package_repo(
        repo_name,
        repo_url,
        index=None,
        wait_for_package=None,
        expect_prev_version=None):
    """ Add a repository to the list of package sources
    :param repo_name: name of the repository to add
    :type repo_name: str
    :param repo_url: location of the repository to add
    :type repo_url: str
    :param index: index (precedence) for this repository
    :type index: int
    :param wait_for_package: the package whose version should change after the repo is added
    :type wait_for_package: str, or None
    :param expect_prev_version: accepted for call compatibility; not used
        within this function
    :return: True if successful, False otherwise
    :rtype: bool
    """
    manager = _get_package_manager()
    prev_version = None
    if wait_for_package:
        # Snapshot the current version so we can detect the change later.
        prev_version = manager.get_package_version(wait_for_package, None)
    if not manager.add_repo(repo_name, repo_url, index):
        return False
    if not wait_for_package:
        return True
    try:
        spinner.time_wait(
            lambda: package_version_changed_predicate(
                manager, wait_for_package, prev_version))
    except TimeoutExpired:
        return False
    return True
constant[ Add a repository to the list of package sources
:param repo_name: name of the repository to add
:type repo_name: str
:param repo_url: location of the repository to add
:type repo_url: str
:param index: index (precedence) for this repository
:type index: int
:param wait_for_package: the package whose version should change after the repo is added
:type wait_for_package: str, or None
:return: True if successful, False otherwise
:rtype: bool
]
variable[package_manager] assign[=] call[name[_get_package_manager], parameter[]]
if name[wait_for_package] begin[:]
variable[prev_version] assign[=] call[name[package_manager].get_package_version, parameter[name[wait_for_package], constant[None]]]
if <ast.UnaryOp object at 0x7da18c4cc760> begin[:]
return[constant[False]]
if name[wait_for_package] begin[:]
<ast.Try object at 0x7da18c4ce9e0>
return[constant[True]] | keyword[def] identifier[add_package_repo] (
identifier[repo_name] ,
identifier[repo_url] ,
identifier[index] = keyword[None] ,
identifier[wait_for_package] = keyword[None] ,
identifier[expect_prev_version] = keyword[None] ):
literal[string]
identifier[package_manager] = identifier[_get_package_manager] ()
keyword[if] identifier[wait_for_package] :
identifier[prev_version] = identifier[package_manager] . identifier[get_package_version] ( identifier[wait_for_package] , keyword[None] )
keyword[if] keyword[not] identifier[package_manager] . identifier[add_repo] ( identifier[repo_name] , identifier[repo_url] , identifier[index] ):
keyword[return] keyword[False]
keyword[if] identifier[wait_for_package] :
keyword[try] :
identifier[spinner] . identifier[time_wait] ( keyword[lambda] : identifier[package_version_changed_predicate] ( identifier[package_manager] , identifier[wait_for_package] , identifier[prev_version] ))
keyword[except] identifier[TimeoutExpired] :
keyword[return] keyword[False]
keyword[return] keyword[True] | def add_package_repo(repo_name, repo_url, index=None, wait_for_package=None, expect_prev_version=None):
""" Add a repository to the list of package sources
:param repo_name: name of the repository to add
:type repo_name: str
:param repo_url: location of the repository to add
:type repo_url: str
:param index: index (precedence) for this repository
:type index: int
:param wait_for_package: the package whose version should change after the repo is added
:type wait_for_package: str, or None
:return: True if successful, False otherwise
:rtype: bool
"""
package_manager = _get_package_manager()
if wait_for_package:
prev_version = package_manager.get_package_version(wait_for_package, None) # depends on [control=['if'], data=[]]
if not package_manager.add_repo(repo_name, repo_url, index):
return False # depends on [control=['if'], data=[]]
if wait_for_package:
try:
spinner.time_wait(lambda : package_version_changed_predicate(package_manager, wait_for_package, prev_version)) # depends on [control=['try'], data=[]]
except TimeoutExpired:
return False # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
return True |
def isclose(a, b, *, rel_tol=1e-09, abs_tol=0.0):
    """Relative/absolute float comparison, delegating to ``math.isclose``.

    Python 3.4 lacks ``math.isclose``; on such interpreters an
    equivalent hand-rolled fallback (PEP 485 semantics) is used instead.
    """
    try:
        return math.isclose(a, b, rel_tol=rel_tol, abs_tol=abs_tol)
    except AttributeError:
        # Fallback for interpreters without math.isclose.
        if rel_tol < 0.0 or abs_tol < 0.0:
            raise ValueError("Tolerances must be non-negative, but are rel_tol: {} and abs_tol: {}".format(rel_tol, abs_tol))
        if math.isnan(a) or math.isnan(b):
            # NaNs are never close to anything, even other NaNs.
            return False
        if a == b:
            # Also covers two equal infinities.
            return True
        if math.isinf(a) or math.isinf(b):
            # Infinity is only close to itself, handled above.
            return False
        gap = abs(a - b)
        # Equivalent to the PEP 485 or-chain: close if within the
        # relative tolerance of either operand, or the absolute tolerance.
        return gap <= max(rel_tol * abs(a), rel_tol * abs(b), abs_tol)
constant[
Python 3.4 does not have math.isclose, so we need to steal it and add it here.
]
<ast.Try object at 0x7da1b0550af0> | keyword[def] identifier[isclose] ( identifier[a] , identifier[b] ,*, identifier[rel_tol] = literal[int] , identifier[abs_tol] = literal[int] ):
literal[string]
keyword[try] :
keyword[return] identifier[math] . identifier[isclose] ( identifier[a] , identifier[b] , identifier[rel_tol] = identifier[rel_tol] , identifier[abs_tol] = identifier[abs_tol] )
keyword[except] identifier[AttributeError] :
keyword[if] ( identifier[rel_tol] < literal[int] ) keyword[or] ( identifier[abs_tol] < literal[int] ):
keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[rel_tol] , identifier[abs_tol] ))
keyword[if] identifier[math] . identifier[isnan] ( identifier[a] ) keyword[or] identifier[math] . identifier[isnan] ( identifier[b] ):
keyword[return] keyword[False]
keyword[if] ( identifier[a] == identifier[b] ):
keyword[return] keyword[True]
keyword[if] identifier[math] . identifier[isinf] ( identifier[a] ) keyword[or] identifier[math] . identifier[isinf] ( identifier[b] ):
keyword[return] keyword[False]
identifier[diff] = identifier[abs] ( identifier[a] - identifier[b] )
keyword[return] ( identifier[diff] <= identifier[rel_tol] * identifier[abs] ( identifier[b] )) keyword[or] ( identifier[diff] <= identifier[rel_tol] * identifier[abs] ( identifier[a] )) keyword[or] ( identifier[diff] <= identifier[abs_tol] ) | def isclose(a, b, *, rel_tol=1e-09, abs_tol=0.0):
"""
Python 3.4 does not have math.isclose, so we need to steal it and add it here.
"""
try:
return math.isclose(a, b, rel_tol=rel_tol, abs_tol=abs_tol) # depends on [control=['try'], data=[]]
except AttributeError:
# Running on older version of python, fall back to hand-rolled implementation
if rel_tol < 0.0 or abs_tol < 0.0:
raise ValueError('Tolerances must be non-negative, but are rel_tol: {} and abs_tol: {}'.format(rel_tol, abs_tol)) # depends on [control=['if'], data=[]]
if math.isnan(a) or math.isnan(b):
return False # NaNs are never close to anything, even other NaNs # depends on [control=['if'], data=[]]
if a == b:
return True # depends on [control=['if'], data=[]]
if math.isinf(a) or math.isinf(b):
return False # Infinity is only close to itself, and we already handled that case # depends on [control=['if'], data=[]]
diff = abs(a - b)
return diff <= rel_tol * abs(b) or diff <= rel_tol * abs(a) or diff <= abs_tol # depends on [control=['except'], data=[]] |
def getcharcount(self, window_name, object_name):
    """
    Get character count
    @param window_name: Window name to type in, either full name,
    LDTP's name convention, or a Unix glob.
    @type window_name: string
    @param object_name: Object name to type in, either full name,
    LDTP's name convention, or a Unix glob.
    @type object_name: string
    @return: 1 on success.
    @rtype: integer
    """
    handle = self._get_object_handle(window_name, object_name)
    if handle.AXEnabled:
        return handle.AXNumberOfCharacters
    raise LdtpServerException(u"Object %s state disabled" % object_name)
constant[
Get character count
@param window_name: Window name to type in, either full name,
LDTP's name convention, or a Unix glob.
@type window_name: string
@param object_name: Object name to type in, either full name,
LDTP's name convention, or a Unix glob.
@type object_name: string
@return: 1 on success.
@rtype: integer
]
variable[object_handle] assign[=] call[name[self]._get_object_handle, parameter[name[window_name], name[object_name]]]
if <ast.UnaryOp object at 0x7da18f812a40> begin[:]
<ast.Raise object at 0x7da18f810a30>
return[name[object_handle].AXNumberOfCharacters] | keyword[def] identifier[getcharcount] ( identifier[self] , identifier[window_name] , identifier[object_name] ):
literal[string]
identifier[object_handle] = identifier[self] . identifier[_get_object_handle] ( identifier[window_name] , identifier[object_name] )
keyword[if] keyword[not] identifier[object_handle] . identifier[AXEnabled] :
keyword[raise] identifier[LdtpServerException] ( literal[string] % identifier[object_name] )
keyword[return] identifier[object_handle] . identifier[AXNumberOfCharacters] | def getcharcount(self, window_name, object_name):
"""
Get character count
@param window_name: Window name to type in, either full name,
LDTP's name convention, or a Unix glob.
@type window_name: string
@param object_name: Object name to type in, either full name,
LDTP's name convention, or a Unix glob.
@type object_name: string
@return: 1 on success.
@rtype: integer
"""
object_handle = self._get_object_handle(window_name, object_name)
if not object_handle.AXEnabled:
raise LdtpServerException(u'Object %s state disabled' % object_name) # depends on [control=['if'], data=[]]
return object_handle.AXNumberOfCharacters |
def resume_all(self):
    """Resumes all service instances.

    Iterates over every registered service in ``self._service_objects``
    and resumes it inside an ``expects.expect_no_raises`` context, so a
    failure in one service does not prevent the remaining services from
    being resumed.
    """
    for alias, service in self._service_objects.items():
        # Bug fix: the message previously said "pause" — a copy-paste
        # slip from pause_all. This block resumes services.
        with expects.expect_no_raises(
                'Failed to resume service "%s".' % alias):
            service.resume()
constant[Resumes all service instances.]
for taget[tuple[[<ast.Name object at 0x7da1b086beb0>, <ast.Name object at 0x7da1b0868130>]]] in starred[call[name[self]._service_objects.items, parameter[]]] begin[:]
with call[name[expects].expect_no_raises, parameter[binary_operation[constant[Failed to pause service "%s".] <ast.Mod object at 0x7da2590d6920> name[alias]]]] begin[:]
call[name[service].resume, parameter[]] | keyword[def] identifier[resume_all] ( identifier[self] ):
literal[string]
keyword[for] identifier[alias] , identifier[service] keyword[in] identifier[self] . identifier[_service_objects] . identifier[items] ():
keyword[with] identifier[expects] . identifier[expect_no_raises] (
literal[string] % identifier[alias] ):
identifier[service] . identifier[resume] () | def resume_all(self):
"""Resumes all service instances."""
for (alias, service) in self._service_objects.items():
with expects.expect_no_raises('Failed to pause service "%s".' % alias):
service.resume() # depends on [control=['with'], data=[]] # depends on [control=['for'], data=[]] |
def get_credits():
    """Extract credits from `AUTHORS.rst`"""
    author_lines = read(os.path.join(_HERE, "AUTHORS.rst")).split("\n")
    # Skip the "Active Contributors" heading plus its underline (2 lines);
    # everything after that is the credits body.
    start = author_lines.index("Active Contributors") + 2
    return "\n".join(author_lines[start:])
constant[Extract credits from `AUTHORS.rst`]
variable[credits] assign[=] call[call[name[read], parameter[call[name[os].path.join, parameter[name[_HERE], constant[AUTHORS.rst]]]]].split, parameter[constant[
]]]
variable[from_index] assign[=] call[name[credits].index, parameter[constant[Active Contributors]]]
variable[credits] assign[=] call[constant[
].join, parameter[call[name[credits]][<ast.Slice object at 0x7da20c990940>]]]
return[name[credits]] | keyword[def] identifier[get_credits] ():
literal[string]
identifier[credits] = identifier[read] ( identifier[os] . identifier[path] . identifier[join] ( identifier[_HERE] , literal[string] )). identifier[split] ( literal[string] )
identifier[from_index] = identifier[credits] . identifier[index] ( literal[string] )
identifier[credits] = literal[string] . identifier[join] ( identifier[credits] [ identifier[from_index] + literal[int] :])
keyword[return] identifier[credits] | def get_credits():
"""Extract credits from `AUTHORS.rst`"""
credits = read(os.path.join(_HERE, 'AUTHORS.rst')).split('\n')
from_index = credits.index('Active Contributors')
credits = '\n'.join(credits[from_index + 2:])
return credits |
def get_firmware(self):
    """Retrieve the firmware installed on this logical interconnect.

    Returns:
        dict: LIFirmware.
    """
    # Build the firmware sub-resource URI from this resource's own URI,
    # then fetch it through the REST helper.
    subresource_uri = self._helper.build_subresource_uri(
        self.data["uri"], subresource_path=self.FIRMWARE_PATH)
    return self._helper.do_get(subresource_uri)
constant[
Gets the installed firmware for a logical interconnect.
Returns:
dict: LIFirmware.
]
variable[firmware_uri] assign[=] call[name[self]._helper.build_subresource_uri, parameter[call[name[self].data][constant[uri]]]]
return[call[name[self]._helper.do_get, parameter[name[firmware_uri]]]] | keyword[def] identifier[get_firmware] ( identifier[self] ):
literal[string]
identifier[firmware_uri] = identifier[self] . identifier[_helper] . identifier[build_subresource_uri] ( identifier[self] . identifier[data] [ literal[string] ], identifier[subresource_path] = identifier[self] . identifier[FIRMWARE_PATH] )
keyword[return] identifier[self] . identifier[_helper] . identifier[do_get] ( identifier[firmware_uri] ) | def get_firmware(self):
"""
Gets the installed firmware for a logical interconnect.
Returns:
dict: LIFirmware.
"""
firmware_uri = self._helper.build_subresource_uri(self.data['uri'], subresource_path=self.FIRMWARE_PATH)
return self._helper.do_get(firmware_uri) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.