code stringlengths 75 104k | code_sememe stringlengths 47 309k | token_type stringlengths 215 214k | code_dependency stringlengths 75 155k |
|---|---|---|---|
def labels(self):
"""Retrieve or set labels assigned to this bucket.
See
https://cloud.google.com/storage/docs/json_api/v1/buckets#labels
.. note::
The getter for this property returns a dict which is a *copy*
of the bucket's labels. Mutating that dict has no effect unless
you then re-assign the dict via the setter. E.g.:
>>> labels = bucket.labels
>>> labels['new_key'] = 'some-label'
>>> del labels['old_key']
>>> bucket.labels = labels
>>> bucket.update()
:setter: Set labels for this bucket.
:getter: Gets the labels for this bucket.
:rtype: :class:`dict`
:returns: Name-value pairs (string->string) labelling the bucket.
"""
labels = self._properties.get("labels")
if labels is None:
return {}
return copy.deepcopy(labels) | def function[labels, parameter[self]]:
constant[Retrieve or set labels assigned to this bucket.
See
https://cloud.google.com/storage/docs/json_api/v1/buckets#labels
.. note::
The getter for this property returns a dict which is a *copy*
of the bucket's labels. Mutating that dict has no effect unless
you then re-assign the dict via the setter. E.g.:
>>> labels = bucket.labels
>>> labels['new_key'] = 'some-label'
>>> del labels['old_key']
>>> bucket.labels = labels
>>> bucket.update()
:setter: Set labels for this bucket.
:getter: Gets the labels for this bucket.
:rtype: :class:`dict`
:returns: Name-value pairs (string->string) labelling the bucket.
]
variable[labels] assign[=] call[name[self]._properties.get, parameter[constant[labels]]]
if compare[name[labels] is constant[None]] begin[:]
return[dictionary[[], []]]
return[call[name[copy].deepcopy, parameter[name[labels]]]] | keyword[def] identifier[labels] ( identifier[self] ):
literal[string]
identifier[labels] = identifier[self] . identifier[_properties] . identifier[get] ( literal[string] )
keyword[if] identifier[labels] keyword[is] keyword[None] :
keyword[return] {}
keyword[return] identifier[copy] . identifier[deepcopy] ( identifier[labels] ) | def labels(self):
"""Retrieve or set labels assigned to this bucket.
See
https://cloud.google.com/storage/docs/json_api/v1/buckets#labels
.. note::
The getter for this property returns a dict which is a *copy*
of the bucket's labels. Mutating that dict has no effect unless
you then re-assign the dict via the setter. E.g.:
>>> labels = bucket.labels
>>> labels['new_key'] = 'some-label'
>>> del labels['old_key']
>>> bucket.labels = labels
>>> bucket.update()
:setter: Set labels for this bucket.
:getter: Gets the labels for this bucket.
:rtype: :class:`dict`
:returns: Name-value pairs (string->string) labelling the bucket.
"""
labels = self._properties.get('labels')
if labels is None:
return {} # depends on [control=['if'], data=[]]
return copy.deepcopy(labels) |
def deleteOverlapping(self, targetList):
'''
Erase points from another list that overlap with points in this list
'''
start = self.pointList[0][0]
stop = self.pointList[-1][0]
if self.netLeftShift < 0:
start += self.netLeftShift
if self.netRightShift > 0:
stop += self.netRightShift
targetList = _deletePoints(targetList, start, stop)
return targetList | def function[deleteOverlapping, parameter[self, targetList]]:
constant[
Erase points from another list that overlap with points in this list
]
variable[start] assign[=] call[call[name[self].pointList][constant[0]]][constant[0]]
variable[stop] assign[=] call[call[name[self].pointList][<ast.UnaryOp object at 0x7da1b1106b00>]][constant[0]]
if compare[name[self].netLeftShift less[<] constant[0]] begin[:]
<ast.AugAssign object at 0x7da1b10d7d00>
if compare[name[self].netRightShift greater[>] constant[0]] begin[:]
<ast.AugAssign object at 0x7da1b10d6d70>
variable[targetList] assign[=] call[name[_deletePoints], parameter[name[targetList], name[start], name[stop]]]
return[name[targetList]] | keyword[def] identifier[deleteOverlapping] ( identifier[self] , identifier[targetList] ):
literal[string]
identifier[start] = identifier[self] . identifier[pointList] [ literal[int] ][ literal[int] ]
identifier[stop] = identifier[self] . identifier[pointList] [- literal[int] ][ literal[int] ]
keyword[if] identifier[self] . identifier[netLeftShift] < literal[int] :
identifier[start] += identifier[self] . identifier[netLeftShift]
keyword[if] identifier[self] . identifier[netRightShift] > literal[int] :
identifier[stop] += identifier[self] . identifier[netRightShift]
identifier[targetList] = identifier[_deletePoints] ( identifier[targetList] , identifier[start] , identifier[stop] )
keyword[return] identifier[targetList] | def deleteOverlapping(self, targetList):
"""
Erase points from another list that overlap with points in this list
"""
start = self.pointList[0][0]
stop = self.pointList[-1][0]
if self.netLeftShift < 0:
start += self.netLeftShift # depends on [control=['if'], data=[]]
if self.netRightShift > 0:
stop += self.netRightShift # depends on [control=['if'], data=[]]
targetList = _deletePoints(targetList, start, stop)
return targetList |
def _mod(field, value, document):
"""
Performs a mod on a document field. Value must be a list or tuple with
two values divisor and remainder (i.e. [2, 0]). This will essentially
perform the following:
document[field] % divisor == remainder
If the value does not contain integers or is not a two-item list/tuple,
a MalformedQueryException will be raised. If the value of document[field]
cannot be converted to an integer, this will return False.
"""
try:
divisor, remainder = map(int, value)
except (TypeError, ValueError):
raise MalformedQueryException("'$mod' must accept an iterable: [divisor, remainder]")
try:
return int(document.get(field, None)) % divisor == remainder
except (TypeError, ValueError):
return False | def function[_mod, parameter[field, value, document]]:
constant[
Performs a mod on a document field. Value must be a list or tuple with
two values divisor and remainder (i.e. [2, 0]). This will essentially
perform the following:
document[field] % divisor == remainder
If the value does not contain integers or is not a two-item list/tuple,
a MalformedQueryException will be raised. If the value of document[field]
cannot be converted to an integer, this will return False.
]
<ast.Try object at 0x7da1b0ae30a0>
<ast.Try object at 0x7da1b0ae3400> | keyword[def] identifier[_mod] ( identifier[field] , identifier[value] , identifier[document] ):
literal[string]
keyword[try] :
identifier[divisor] , identifier[remainder] = identifier[map] ( identifier[int] , identifier[value] )
keyword[except] ( identifier[TypeError] , identifier[ValueError] ):
keyword[raise] identifier[MalformedQueryException] ( literal[string] )
keyword[try] :
keyword[return] identifier[int] ( identifier[document] . identifier[get] ( identifier[field] , keyword[None] ))% identifier[divisor] == identifier[remainder]
keyword[except] ( identifier[TypeError] , identifier[ValueError] ):
keyword[return] keyword[False] | def _mod(field, value, document):
"""
Performs a mod on a document field. Value must be a list or tuple with
two values divisor and remainder (i.e. [2, 0]). This will essentially
perform the following:
document[field] % divisor == remainder
If the value does not contain integers or is not a two-item list/tuple,
a MalformedQueryException will be raised. If the value of document[field]
cannot be converted to an integer, this will return False.
"""
try:
(divisor, remainder) = map(int, value) # depends on [control=['try'], data=[]]
except (TypeError, ValueError):
raise MalformedQueryException("'$mod' must accept an iterable: [divisor, remainder]") # depends on [control=['except'], data=[]]
try:
return int(document.get(field, None)) % divisor == remainder # depends on [control=['try'], data=[]]
except (TypeError, ValueError):
return False # depends on [control=['except'], data=[]] |
def findBest(self, pattern):
""" Returns the *best* match in the region (instead of the first match) """
findFailedRetry = True
while findFailedRetry:
best_match = None
all_matches = self.findAll(pattern)
for match in all_matches:
if best_match is None or best_match.getScore() < match.getScore():
best_match = match
self._lastMatch = best_match
if best_match is not None:
break
path = pattern.path if isinstance(pattern, Pattern) else pattern
findFailedRetry = self._raiseFindFailed("Could not find pattern '{}'".format(path))
if findFailedRetry:
time.sleep(self._repeatWaitTime)
return best_match | def function[findBest, parameter[self, pattern]]:
constant[ Returns the *best* match in the region (instead of the first match) ]
variable[findFailedRetry] assign[=] constant[True]
while name[findFailedRetry] begin[:]
variable[best_match] assign[=] constant[None]
variable[all_matches] assign[=] call[name[self].findAll, parameter[name[pattern]]]
for taget[name[match]] in starred[name[all_matches]] begin[:]
if <ast.BoolOp object at 0x7da1b1113640> begin[:]
variable[best_match] assign[=] name[match]
name[self]._lastMatch assign[=] name[best_match]
if compare[name[best_match] is_not constant[None]] begin[:]
break
variable[path] assign[=] <ast.IfExp object at 0x7da1b1111db0>
variable[findFailedRetry] assign[=] call[name[self]._raiseFindFailed, parameter[call[constant[Could not find pattern '{}'].format, parameter[name[path]]]]]
if name[findFailedRetry] begin[:]
call[name[time].sleep, parameter[name[self]._repeatWaitTime]]
return[name[best_match]] | keyword[def] identifier[findBest] ( identifier[self] , identifier[pattern] ):
literal[string]
identifier[findFailedRetry] = keyword[True]
keyword[while] identifier[findFailedRetry] :
identifier[best_match] = keyword[None]
identifier[all_matches] = identifier[self] . identifier[findAll] ( identifier[pattern] )
keyword[for] identifier[match] keyword[in] identifier[all_matches] :
keyword[if] identifier[best_match] keyword[is] keyword[None] keyword[or] identifier[best_match] . identifier[getScore] ()< identifier[match] . identifier[getScore] ():
identifier[best_match] = identifier[match]
identifier[self] . identifier[_lastMatch] = identifier[best_match]
keyword[if] identifier[best_match] keyword[is] keyword[not] keyword[None] :
keyword[break]
identifier[path] = identifier[pattern] . identifier[path] keyword[if] identifier[isinstance] ( identifier[pattern] , identifier[Pattern] ) keyword[else] identifier[pattern]
identifier[findFailedRetry] = identifier[self] . identifier[_raiseFindFailed] ( literal[string] . identifier[format] ( identifier[path] ))
keyword[if] identifier[findFailedRetry] :
identifier[time] . identifier[sleep] ( identifier[self] . identifier[_repeatWaitTime] )
keyword[return] identifier[best_match] | def findBest(self, pattern):
""" Returns the *best* match in the region (instead of the first match) """
findFailedRetry = True
while findFailedRetry:
best_match = None
all_matches = self.findAll(pattern)
for match in all_matches:
if best_match is None or best_match.getScore() < match.getScore():
best_match = match # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['match']]
self._lastMatch = best_match
if best_match is not None:
break # depends on [control=['if'], data=[]]
path = pattern.path if isinstance(pattern, Pattern) else pattern
findFailedRetry = self._raiseFindFailed("Could not find pattern '{}'".format(path))
if findFailedRetry:
time.sleep(self._repeatWaitTime) # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]]
return best_match |
def parse_name(lexer: Lexer) -> NameNode:
"""Convert a name lex token into a name parse node."""
token = expect_token(lexer, TokenKind.NAME)
return NameNode(value=token.value, loc=loc(lexer, token)) | def function[parse_name, parameter[lexer]]:
constant[Convert a name lex token into a name parse node.]
variable[token] assign[=] call[name[expect_token], parameter[name[lexer], name[TokenKind].NAME]]
return[call[name[NameNode], parameter[]]] | keyword[def] identifier[parse_name] ( identifier[lexer] : identifier[Lexer] )-> identifier[NameNode] :
literal[string]
identifier[token] = identifier[expect_token] ( identifier[lexer] , identifier[TokenKind] . identifier[NAME] )
keyword[return] identifier[NameNode] ( identifier[value] = identifier[token] . identifier[value] , identifier[loc] = identifier[loc] ( identifier[lexer] , identifier[token] )) | def parse_name(lexer: Lexer) -> NameNode:
"""Convert a name lex token into a name parse node."""
token = expect_token(lexer, TokenKind.NAME)
return NameNode(value=token.value, loc=loc(lexer, token)) |
def waitPuppetCatalogToBeApplied(self, key, sleepTime=5):
""" Function waitPuppetCatalogToBeApplied
Wait for puppet catalog to be applied
@param key: The host name or ID
@return RETURN: None
"""
# Wait for puppet catalog to be applied
loop_stop = False
while not loop_stop:
status = self[key].getStatus()
if status == 'No Changes' or status == 'Active':
self.__printProgression__(True,
key + ' creation: provisioning OK')
loop_stop = True
elif status == 'Error':
self.__printProgression__(False,
key + ' creation: Error - '
'Error during provisioning')
loop_stop = True
return False
else:
self.__printProgression__('In progress',
key + ' creation: provisioning ({})'
.format(status),
eol='\r')
time.sleep(sleepTime) | def function[waitPuppetCatalogToBeApplied, parameter[self, key, sleepTime]]:
constant[ Function waitPuppetCatalogToBeApplied
Wait for puppet catalog to be applied
@param key: The host name or ID
@return RETURN: None
]
variable[loop_stop] assign[=] constant[False]
while <ast.UnaryOp object at 0x7da1b10698a0> begin[:]
variable[status] assign[=] call[call[name[self]][name[key]].getStatus, parameter[]]
if <ast.BoolOp object at 0x7da1b1069ba0> begin[:]
call[name[self].__printProgression__, parameter[constant[True], binary_operation[name[key] + constant[ creation: provisioning OK]]]]
variable[loop_stop] assign[=] constant[True]
call[name[time].sleep, parameter[name[sleepTime]]] | keyword[def] identifier[waitPuppetCatalogToBeApplied] ( identifier[self] , identifier[key] , identifier[sleepTime] = literal[int] ):
literal[string]
identifier[loop_stop] = keyword[False]
keyword[while] keyword[not] identifier[loop_stop] :
identifier[status] = identifier[self] [ identifier[key] ]. identifier[getStatus] ()
keyword[if] identifier[status] == literal[string] keyword[or] identifier[status] == literal[string] :
identifier[self] . identifier[__printProgression__] ( keyword[True] ,
identifier[key] + literal[string] )
identifier[loop_stop] = keyword[True]
keyword[elif] identifier[status] == literal[string] :
identifier[self] . identifier[__printProgression__] ( keyword[False] ,
identifier[key] + literal[string]
literal[string] )
identifier[loop_stop] = keyword[True]
keyword[return] keyword[False]
keyword[else] :
identifier[self] . identifier[__printProgression__] ( literal[string] ,
identifier[key] + literal[string]
. identifier[format] ( identifier[status] ),
identifier[eol] = literal[string] )
identifier[time] . identifier[sleep] ( identifier[sleepTime] ) | def waitPuppetCatalogToBeApplied(self, key, sleepTime=5):
""" Function waitPuppetCatalogToBeApplied
Wait for puppet catalog to be applied
@param key: The host name or ID
@return RETURN: None
"""
# Wait for puppet catalog to be applied
loop_stop = False
while not loop_stop:
status = self[key].getStatus()
if status == 'No Changes' or status == 'Active':
self.__printProgression__(True, key + ' creation: provisioning OK')
loop_stop = True # depends on [control=['if'], data=[]]
elif status == 'Error':
self.__printProgression__(False, key + ' creation: Error - Error during provisioning')
loop_stop = True
return False # depends on [control=['if'], data=[]]
else:
self.__printProgression__('In progress', key + ' creation: provisioning ({})'.format(status), eol='\r')
time.sleep(sleepTime) # depends on [control=['while'], data=[]] |
def new_chain(table='filter', chain=None, family='ipv4'):
'''
.. versionadded:: 2014.1.0
Create new custom chain to the specified table.
CLI Example:
.. code-block:: bash
salt '*' iptables.new_chain filter CUSTOM_CHAIN
IPv6:
salt '*' iptables.new_chain filter CUSTOM_CHAIN family=ipv6
'''
if not chain:
return 'Error: Chain needs to be specified'
wait = '--wait' if _has_option('--wait', family) else ''
cmd = '{0} {1} -t {2} -N {3}'.format(
_iptables_cmd(family), wait, table, chain)
out = __salt__['cmd.run'](cmd)
if not out:
out = True
return out | def function[new_chain, parameter[table, chain, family]]:
constant[
.. versionadded:: 2014.1.0
Create new custom chain to the specified table.
CLI Example:
.. code-block:: bash
salt '*' iptables.new_chain filter CUSTOM_CHAIN
IPv6:
salt '*' iptables.new_chain filter CUSTOM_CHAIN family=ipv6
]
if <ast.UnaryOp object at 0x7da1b21e17e0> begin[:]
return[constant[Error: Chain needs to be specified]]
variable[wait] assign[=] <ast.IfExp object at 0x7da1b21e2e30>
variable[cmd] assign[=] call[constant[{0} {1} -t {2} -N {3}].format, parameter[call[name[_iptables_cmd], parameter[name[family]]], name[wait], name[table], name[chain]]]
variable[out] assign[=] call[call[name[__salt__]][constant[cmd.run]], parameter[name[cmd]]]
if <ast.UnaryOp object at 0x7da1b21e3b20> begin[:]
variable[out] assign[=] constant[True]
return[name[out]] | keyword[def] identifier[new_chain] ( identifier[table] = literal[string] , identifier[chain] = keyword[None] , identifier[family] = literal[string] ):
literal[string]
keyword[if] keyword[not] identifier[chain] :
keyword[return] literal[string]
identifier[wait] = literal[string] keyword[if] identifier[_has_option] ( literal[string] , identifier[family] ) keyword[else] literal[string]
identifier[cmd] = literal[string] . identifier[format] (
identifier[_iptables_cmd] ( identifier[family] ), identifier[wait] , identifier[table] , identifier[chain] )
identifier[out] = identifier[__salt__] [ literal[string] ]( identifier[cmd] )
keyword[if] keyword[not] identifier[out] :
identifier[out] = keyword[True]
keyword[return] identifier[out] | def new_chain(table='filter', chain=None, family='ipv4'):
"""
.. versionadded:: 2014.1.0
Create new custom chain to the specified table.
CLI Example:
.. code-block:: bash
salt '*' iptables.new_chain filter CUSTOM_CHAIN
IPv6:
salt '*' iptables.new_chain filter CUSTOM_CHAIN family=ipv6
"""
if not chain:
return 'Error: Chain needs to be specified' # depends on [control=['if'], data=[]]
wait = '--wait' if _has_option('--wait', family) else ''
cmd = '{0} {1} -t {2} -N {3}'.format(_iptables_cmd(family), wait, table, chain)
out = __salt__['cmd.run'](cmd)
if not out:
out = True # depends on [control=['if'], data=[]]
return out |
def connection_string_parser(uri: str) -> list:
"""
Parse Connection string to extract host and port.
:param uri: full URI for redis connection in the form of host:port
:returns: list of RedisConnection objects
"""
redis_connections = []
raw_connections = uri.split(',')
connections = [
connection for connection in raw_connections if len(connection) > 0
]
for connection in connections:
raw_connection = connection.split(':')
if len(raw_connection) == 1:
host = raw_connection[0].strip()
port = _DEFAULT_REDIS_PORT
elif len(raw_connection) == 2:
host = raw_connection[0].strip()
port = int(raw_connection[1])
else:
raise RuntimeError(
"Unable to parse redis connection string: {0}".format(
raw_connection
)
)
redis_connection = _RedisConnection(host, port)
redis_connections.append(redis_connection)
return redis_connections | def function[connection_string_parser, parameter[uri]]:
constant[
Parse Connection string to extract host and port.
:param uri: full URI for redis connection in the form of host:port
:returns: list of RedisConnection objects
]
variable[redis_connections] assign[=] list[[]]
variable[raw_connections] assign[=] call[name[uri].split, parameter[constant[,]]]
variable[connections] assign[=] <ast.ListComp object at 0x7da204344250>
for taget[name[connection]] in starred[name[connections]] begin[:]
variable[raw_connection] assign[=] call[name[connection].split, parameter[constant[:]]]
if compare[call[name[len], parameter[name[raw_connection]]] equal[==] constant[1]] begin[:]
variable[host] assign[=] call[call[name[raw_connection]][constant[0]].strip, parameter[]]
variable[port] assign[=] name[_DEFAULT_REDIS_PORT]
variable[redis_connection] assign[=] call[name[_RedisConnection], parameter[name[host], name[port]]]
call[name[redis_connections].append, parameter[name[redis_connection]]]
return[name[redis_connections]] | keyword[def] identifier[connection_string_parser] ( identifier[uri] : identifier[str] )-> identifier[list] :
literal[string]
identifier[redis_connections] =[]
identifier[raw_connections] = identifier[uri] . identifier[split] ( literal[string] )
identifier[connections] =[
identifier[connection] keyword[for] identifier[connection] keyword[in] identifier[raw_connections] keyword[if] identifier[len] ( identifier[connection] )> literal[int]
]
keyword[for] identifier[connection] keyword[in] identifier[connections] :
identifier[raw_connection] = identifier[connection] . identifier[split] ( literal[string] )
keyword[if] identifier[len] ( identifier[raw_connection] )== literal[int] :
identifier[host] = identifier[raw_connection] [ literal[int] ]. identifier[strip] ()
identifier[port] = identifier[_DEFAULT_REDIS_PORT]
keyword[elif] identifier[len] ( identifier[raw_connection] )== literal[int] :
identifier[host] = identifier[raw_connection] [ literal[int] ]. identifier[strip] ()
identifier[port] = identifier[int] ( identifier[raw_connection] [ literal[int] ])
keyword[else] :
keyword[raise] identifier[RuntimeError] (
literal[string] . identifier[format] (
identifier[raw_connection]
)
)
identifier[redis_connection] = identifier[_RedisConnection] ( identifier[host] , identifier[port] )
identifier[redis_connections] . identifier[append] ( identifier[redis_connection] )
keyword[return] identifier[redis_connections] | def connection_string_parser(uri: str) -> list:
"""
Parse Connection string to extract host and port.
:param uri: full URI for redis connection in the form of host:port
:returns: list of RedisConnection objects
"""
redis_connections = []
raw_connections = uri.split(',')
connections = [connection for connection in raw_connections if len(connection) > 0]
for connection in connections:
raw_connection = connection.split(':')
if len(raw_connection) == 1:
host = raw_connection[0].strip()
port = _DEFAULT_REDIS_PORT # depends on [control=['if'], data=[]]
elif len(raw_connection) == 2:
host = raw_connection[0].strip()
port = int(raw_connection[1]) # depends on [control=['if'], data=[]]
else:
raise RuntimeError('Unable to parse redis connection string: {0}'.format(raw_connection))
redis_connection = _RedisConnection(host, port)
redis_connections.append(redis_connection) # depends on [control=['for'], data=['connection']]
return redis_connections |
def logical_chassis_fwdl_sanity_input_host(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
logical_chassis_fwdl_sanity = ET.Element("logical_chassis_fwdl_sanity")
config = logical_chassis_fwdl_sanity
input = ET.SubElement(logical_chassis_fwdl_sanity, "input")
host = ET.SubElement(input, "host")
host.text = kwargs.pop('host')
callback = kwargs.pop('callback', self._callback)
return callback(config) | def function[logical_chassis_fwdl_sanity_input_host, parameter[self]]:
constant[Auto Generated Code
]
variable[config] assign[=] call[name[ET].Element, parameter[constant[config]]]
variable[logical_chassis_fwdl_sanity] assign[=] call[name[ET].Element, parameter[constant[logical_chassis_fwdl_sanity]]]
variable[config] assign[=] name[logical_chassis_fwdl_sanity]
variable[input] assign[=] call[name[ET].SubElement, parameter[name[logical_chassis_fwdl_sanity], constant[input]]]
variable[host] assign[=] call[name[ET].SubElement, parameter[name[input], constant[host]]]
name[host].text assign[=] call[name[kwargs].pop, parameter[constant[host]]]
variable[callback] assign[=] call[name[kwargs].pop, parameter[constant[callback], name[self]._callback]]
return[call[name[callback], parameter[name[config]]]] | keyword[def] identifier[logical_chassis_fwdl_sanity_input_host] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[config] = identifier[ET] . identifier[Element] ( literal[string] )
identifier[logical_chassis_fwdl_sanity] = identifier[ET] . identifier[Element] ( literal[string] )
identifier[config] = identifier[logical_chassis_fwdl_sanity]
identifier[input] = identifier[ET] . identifier[SubElement] ( identifier[logical_chassis_fwdl_sanity] , literal[string] )
identifier[host] = identifier[ET] . identifier[SubElement] ( identifier[input] , literal[string] )
identifier[host] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] )
identifier[callback] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[self] . identifier[_callback] )
keyword[return] identifier[callback] ( identifier[config] ) | def logical_chassis_fwdl_sanity_input_host(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element('config')
logical_chassis_fwdl_sanity = ET.Element('logical_chassis_fwdl_sanity')
config = logical_chassis_fwdl_sanity
input = ET.SubElement(logical_chassis_fwdl_sanity, 'input')
host = ET.SubElement(input, 'host')
host.text = kwargs.pop('host')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def check_email_status(mx_resolver, recipient_address, sender_address, smtp_timeout=10, helo_hostname=None):
"""
Checks if an email might be valid by getting the status from the SMTP server.
:param mx_resolver: MXResolver
:param recipient_address: string
:param sender_address: string
:param smtp_timeout: integer
:param helo_hostname: string
:return: dict
"""
domain = recipient_address[recipient_address.find('@') + 1:]
if helo_hostname is None:
helo_hostname = domain
ret = {'status': 101, 'extended_status': None, 'message': "The server is unable to connect."}
records = []
try:
records = mx_resolver.get_mx_records(helo_hostname)
except socket.gaierror:
ret['status'] = 512
ret['extended_status'] = "5.1.2 Domain name address resolution failed in MX lookup."
smtp = smtplib.SMTP(timeout=smtp_timeout)
for mx in records:
try:
connection_status, connection_message = smtp.connect(mx.exchange)
if connection_status == 220:
smtp.helo(domain)
smtp.mail(sender_address)
status, message = smtp.rcpt(recipient_address)
ret['status'] = status
pattern = re.compile('(\d+\.\d+\.\d+)')
matches = re.match(pattern, message)
if matches:
ret['extended_status'] = matches.group(1)
ret['message'] = message
smtp.quit()
break
except smtplib.SMTPConnectError:
ret['status'] = 111
ret['message'] = "Connection refused or unable to open an SMTP stream."
except smtplib.SMTPServerDisconnected:
ret['status'] = 111
ret['extended_status'] = "SMTP Server disconnected"
except socket.gaierror:
ret['status'] = 512
ret['extended_status'] = "5.1.2 Domain name address resolution failed."
return ret | def function[check_email_status, parameter[mx_resolver, recipient_address, sender_address, smtp_timeout, helo_hostname]]:
constant[
Checks if an email might be valid by getting the status from the SMTP server.
:param mx_resolver: MXResolver
:param recipient_address: string
:param sender_address: string
:param smtp_timeout: integer
:param helo_hostname: string
:return: dict
]
variable[domain] assign[=] call[name[recipient_address]][<ast.Slice object at 0x7da2049633d0>]
if compare[name[helo_hostname] is constant[None]] begin[:]
variable[helo_hostname] assign[=] name[domain]
variable[ret] assign[=] dictionary[[<ast.Constant object at 0x7da2049624d0>, <ast.Constant object at 0x7da204963130>, <ast.Constant object at 0x7da204960cd0>], [<ast.Constant object at 0x7da204960dc0>, <ast.Constant object at 0x7da2049623b0>, <ast.Constant object at 0x7da2049631f0>]]
variable[records] assign[=] list[[]]
<ast.Try object at 0x7da20c6c4970>
variable[smtp] assign[=] call[name[smtplib].SMTP, parameter[]]
for taget[name[mx]] in starred[name[records]] begin[:]
<ast.Try object at 0x7da20c6c7130>
return[name[ret]] | keyword[def] identifier[check_email_status] ( identifier[mx_resolver] , identifier[recipient_address] , identifier[sender_address] , identifier[smtp_timeout] = literal[int] , identifier[helo_hostname] = keyword[None] ):
literal[string]
identifier[domain] = identifier[recipient_address] [ identifier[recipient_address] . identifier[find] ( literal[string] )+ literal[int] :]
keyword[if] identifier[helo_hostname] keyword[is] keyword[None] :
identifier[helo_hostname] = identifier[domain]
identifier[ret] ={ literal[string] : literal[int] , literal[string] : keyword[None] , literal[string] : literal[string] }
identifier[records] =[]
keyword[try] :
identifier[records] = identifier[mx_resolver] . identifier[get_mx_records] ( identifier[helo_hostname] )
keyword[except] identifier[socket] . identifier[gaierror] :
identifier[ret] [ literal[string] ]= literal[int]
identifier[ret] [ literal[string] ]= literal[string]
identifier[smtp] = identifier[smtplib] . identifier[SMTP] ( identifier[timeout] = identifier[smtp_timeout] )
keyword[for] identifier[mx] keyword[in] identifier[records] :
keyword[try] :
identifier[connection_status] , identifier[connection_message] = identifier[smtp] . identifier[connect] ( identifier[mx] . identifier[exchange] )
keyword[if] identifier[connection_status] == literal[int] :
identifier[smtp] . identifier[helo] ( identifier[domain] )
identifier[smtp] . identifier[mail] ( identifier[sender_address] )
identifier[status] , identifier[message] = identifier[smtp] . identifier[rcpt] ( identifier[recipient_address] )
identifier[ret] [ literal[string] ]= identifier[status]
identifier[pattern] = identifier[re] . identifier[compile] ( literal[string] )
identifier[matches] = identifier[re] . identifier[match] ( identifier[pattern] , identifier[message] )
keyword[if] identifier[matches] :
identifier[ret] [ literal[string] ]= identifier[matches] . identifier[group] ( literal[int] )
identifier[ret] [ literal[string] ]= identifier[message]
identifier[smtp] . identifier[quit] ()
keyword[break]
keyword[except] identifier[smtplib] . identifier[SMTPConnectError] :
identifier[ret] [ literal[string] ]= literal[int]
identifier[ret] [ literal[string] ]= literal[string]
keyword[except] identifier[smtplib] . identifier[SMTPServerDisconnected] :
identifier[ret] [ literal[string] ]= literal[int]
identifier[ret] [ literal[string] ]= literal[string]
keyword[except] identifier[socket] . identifier[gaierror] :
identifier[ret] [ literal[string] ]= literal[int]
identifier[ret] [ literal[string] ]= literal[string]
keyword[return] identifier[ret] | def check_email_status(mx_resolver, recipient_address, sender_address, smtp_timeout=10, helo_hostname=None):
"""
Checks if an email might be valid by getting the status from the SMTP server.
:param mx_resolver: MXResolver
:param recipient_address: string
:param sender_address: string
:param smtp_timeout: integer
:param helo_hostname: string
:return: dict
"""
domain = recipient_address[recipient_address.find('@') + 1:]
if helo_hostname is None:
helo_hostname = domain # depends on [control=['if'], data=['helo_hostname']]
ret = {'status': 101, 'extended_status': None, 'message': 'The server is unable to connect.'}
records = []
try:
records = mx_resolver.get_mx_records(helo_hostname) # depends on [control=['try'], data=[]]
except socket.gaierror:
ret['status'] = 512
ret['extended_status'] = '5.1.2 Domain name address resolution failed in MX lookup.' # depends on [control=['except'], data=[]]
smtp = smtplib.SMTP(timeout=smtp_timeout)
for mx in records:
try:
(connection_status, connection_message) = smtp.connect(mx.exchange)
if connection_status == 220:
smtp.helo(domain)
smtp.mail(sender_address)
(status, message) = smtp.rcpt(recipient_address)
ret['status'] = status
pattern = re.compile('(\\d+\\.\\d+\\.\\d+)')
matches = re.match(pattern, message)
if matches:
ret['extended_status'] = matches.group(1) # depends on [control=['if'], data=[]]
ret['message'] = message # depends on [control=['if'], data=[]]
smtp.quit()
break # depends on [control=['try'], data=[]]
except smtplib.SMTPConnectError:
ret['status'] = 111
ret['message'] = 'Connection refused or unable to open an SMTP stream.' # depends on [control=['except'], data=[]]
except smtplib.SMTPServerDisconnected:
ret['status'] = 111
ret['extended_status'] = 'SMTP Server disconnected' # depends on [control=['except'], data=[]]
except socket.gaierror:
ret['status'] = 512
ret['extended_status'] = '5.1.2 Domain name address resolution failed.' # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['mx']]
return ret |
def add_section(self, section, friendly_name = None):
"""Adds a section and optionally gives it a friendly name.."""
if not isinstance(section, BASESTRING): # Make sure the user isn't expecting to use something stupid as a key.
raise ValueError(section)
# See if we've got this section already:
if section in self.config:
raise DuplicateSectionError(section) # Yep... Kick off.
else:
self.config[section] = OrderedDict() # Nope... Ad it
if friendly_name == None:
friendly_name = section.title()
if '&' not in friendly_name:
friendly_name = '&' + friendly_name
self.section_names[section] = friendly_name | def function[add_section, parameter[self, section, friendly_name]]:
constant[Adds a section and optionally gives it a friendly name..]
if <ast.UnaryOp object at 0x7da1b1452740> begin[:]
<ast.Raise object at 0x7da1b1452140>
if compare[name[section] in name[self].config] begin[:]
<ast.Raise object at 0x7da1b1450640> | keyword[def] identifier[add_section] ( identifier[self] , identifier[section] , identifier[friendly_name] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[section] , identifier[BASESTRING] ):
keyword[raise] identifier[ValueError] ( identifier[section] )
keyword[if] identifier[section] keyword[in] identifier[self] . identifier[config] :
keyword[raise] identifier[DuplicateSectionError] ( identifier[section] )
keyword[else] :
identifier[self] . identifier[config] [ identifier[section] ]= identifier[OrderedDict] ()
keyword[if] identifier[friendly_name] == keyword[None] :
identifier[friendly_name] = identifier[section] . identifier[title] ()
keyword[if] literal[string] keyword[not] keyword[in] identifier[friendly_name] :
identifier[friendly_name] = literal[string] + identifier[friendly_name]
identifier[self] . identifier[section_names] [ identifier[section] ]= identifier[friendly_name] | def add_section(self, section, friendly_name=None):
"""Adds a section and optionally gives it a friendly name.."""
if not isinstance(section, BASESTRING): # Make sure the user isn't expecting to use something stupid as a key.
raise ValueError(section) # depends on [control=['if'], data=[]] # See if we've got this section already:
if section in self.config:
raise DuplicateSectionError(section) # Yep... Kick off. # depends on [control=['if'], data=['section']]
else:
self.config[section] = OrderedDict() # Nope... Ad it
if friendly_name == None:
friendly_name = section.title() # depends on [control=['if'], data=['friendly_name']]
if '&' not in friendly_name:
friendly_name = '&' + friendly_name # depends on [control=['if'], data=['friendly_name']]
self.section_names[section] = friendly_name |
def color_map_data(self) -> typing.Optional[numpy.ndarray]:
"""Return the color map data as a uint8 ndarray with shape (256, 3)."""
if self.display_data_shape is None: # is there display data?
return None
else:
return self.__color_map_data if self.__color_map_data is not None else ColorMaps.get_color_map_data_by_id("grayscale") | def function[color_map_data, parameter[self]]:
constant[Return the color map data as a uint8 ndarray with shape (256, 3).]
if compare[name[self].display_data_shape is constant[None]] begin[:]
return[constant[None]] | keyword[def] identifier[color_map_data] ( identifier[self] )-> identifier[typing] . identifier[Optional] [ identifier[numpy] . identifier[ndarray] ]:
literal[string]
keyword[if] identifier[self] . identifier[display_data_shape] keyword[is] keyword[None] :
keyword[return] keyword[None]
keyword[else] :
keyword[return] identifier[self] . identifier[__color_map_data] keyword[if] identifier[self] . identifier[__color_map_data] keyword[is] keyword[not] keyword[None] keyword[else] identifier[ColorMaps] . identifier[get_color_map_data_by_id] ( literal[string] ) | def color_map_data(self) -> typing.Optional[numpy.ndarray]:
"""Return the color map data as a uint8 ndarray with shape (256, 3)."""
if self.display_data_shape is None: # is there display data?
return None # depends on [control=['if'], data=[]]
else:
return self.__color_map_data if self.__color_map_data is not None else ColorMaps.get_color_map_data_by_id('grayscale') |
def has_perm(self, user, perm, obj=None, *args, **kwargs):
"""Test user permissions for a single action and object.
:param user: The user to test.
:type user: ``User``
:param perm: The action to test.
:type perm: ``str``
:param obj: The object path to test.
:type obj: ``tutelary.engine.Object``
:returns: ``bool`` -- is the action permitted?
"""
try:
if not self._obj_ok(obj):
if hasattr(obj, 'get_permissions_object'):
obj = obj.get_permissions_object(perm)
else:
raise InvalidPermissionObjectException
return user.permset_tree.allow(Action(perm), obj)
except ObjectDoesNotExist:
return False | def function[has_perm, parameter[self, user, perm, obj]]:
constant[Test user permissions for a single action and object.
:param user: The user to test.
:type user: ``User``
:param perm: The action to test.
:type perm: ``str``
:param obj: The object path to test.
:type obj: ``tutelary.engine.Object``
:returns: ``bool`` -- is the action permitted?
]
<ast.Try object at 0x7da20c795720> | keyword[def] identifier[has_perm] ( identifier[self] , identifier[user] , identifier[perm] , identifier[obj] = keyword[None] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
keyword[try] :
keyword[if] keyword[not] identifier[self] . identifier[_obj_ok] ( identifier[obj] ):
keyword[if] identifier[hasattr] ( identifier[obj] , literal[string] ):
identifier[obj] = identifier[obj] . identifier[get_permissions_object] ( identifier[perm] )
keyword[else] :
keyword[raise] identifier[InvalidPermissionObjectException]
keyword[return] identifier[user] . identifier[permset_tree] . identifier[allow] ( identifier[Action] ( identifier[perm] ), identifier[obj] )
keyword[except] identifier[ObjectDoesNotExist] :
keyword[return] keyword[False] | def has_perm(self, user, perm, obj=None, *args, **kwargs):
"""Test user permissions for a single action and object.
:param user: The user to test.
:type user: ``User``
:param perm: The action to test.
:type perm: ``str``
:param obj: The object path to test.
:type obj: ``tutelary.engine.Object``
:returns: ``bool`` -- is the action permitted?
"""
try:
if not self._obj_ok(obj):
if hasattr(obj, 'get_permissions_object'):
obj = obj.get_permissions_object(perm) # depends on [control=['if'], data=[]]
else:
raise InvalidPermissionObjectException # depends on [control=['if'], data=[]]
return user.permset_tree.allow(Action(perm), obj) # depends on [control=['try'], data=[]]
except ObjectDoesNotExist:
return False # depends on [control=['except'], data=[]] |
def runlist_add_app(name, app, profile, force, **kwargs):
"""
Add specified application with profile to the specified runlist.
Existence of application or profile is not checked.
"""
ctx = Context(**kwargs)
ctx.execute_action('runlist:add-app', **{
'storage': ctx.repo.create_secure_service('storage'),
'name': name,
'app': app,
'profile': profile,
'force': force
}) | def function[runlist_add_app, parameter[name, app, profile, force]]:
constant[
Add specified application with profile to the specified runlist.
Existence of application or profile is not checked.
]
variable[ctx] assign[=] call[name[Context], parameter[]]
call[name[ctx].execute_action, parameter[constant[runlist:add-app]]] | keyword[def] identifier[runlist_add_app] ( identifier[name] , identifier[app] , identifier[profile] , identifier[force] ,** identifier[kwargs] ):
literal[string]
identifier[ctx] = identifier[Context] (** identifier[kwargs] )
identifier[ctx] . identifier[execute_action] ( literal[string] ,**{
literal[string] : identifier[ctx] . identifier[repo] . identifier[create_secure_service] ( literal[string] ),
literal[string] : identifier[name] ,
literal[string] : identifier[app] ,
literal[string] : identifier[profile] ,
literal[string] : identifier[force]
}) | def runlist_add_app(name, app, profile, force, **kwargs):
"""
Add specified application with profile to the specified runlist.
Existence of application or profile is not checked.
"""
ctx = Context(**kwargs)
ctx.execute_action('runlist:add-app', **{'storage': ctx.repo.create_secure_service('storage'), 'name': name, 'app': app, 'profile': profile, 'force': force}) |
def main():
"""Main"""
bands = MSI_BAND_NAMES['S2A'].values()
bands.sort()
for platform_name in ['Sentinel-2A', ]:
tohdf5(MsiRSR, platform_name, bands)
bands = MSI_BAND_NAMES['S2B'].values()
bands.sort()
for platform_name in ['Sentinel-2B', ]:
tohdf5(MsiRSR, platform_name, bands) | def function[main, parameter[]]:
constant[Main]
variable[bands] assign[=] call[call[name[MSI_BAND_NAMES]][constant[S2A]].values, parameter[]]
call[name[bands].sort, parameter[]]
for taget[name[platform_name]] in starred[list[[<ast.Constant object at 0x7da1b0cb3e20>]]] begin[:]
call[name[tohdf5], parameter[name[MsiRSR], name[platform_name], name[bands]]]
variable[bands] assign[=] call[call[name[MSI_BAND_NAMES]][constant[S2B]].values, parameter[]]
call[name[bands].sort, parameter[]]
for taget[name[platform_name]] in starred[list[[<ast.Constant object at 0x7da1b0cb2d70>]]] begin[:]
call[name[tohdf5], parameter[name[MsiRSR], name[platform_name], name[bands]]] | keyword[def] identifier[main] ():
literal[string]
identifier[bands] = identifier[MSI_BAND_NAMES] [ literal[string] ]. identifier[values] ()
identifier[bands] . identifier[sort] ()
keyword[for] identifier[platform_name] keyword[in] [ literal[string] ,]:
identifier[tohdf5] ( identifier[MsiRSR] , identifier[platform_name] , identifier[bands] )
identifier[bands] = identifier[MSI_BAND_NAMES] [ literal[string] ]. identifier[values] ()
identifier[bands] . identifier[sort] ()
keyword[for] identifier[platform_name] keyword[in] [ literal[string] ,]:
identifier[tohdf5] ( identifier[MsiRSR] , identifier[platform_name] , identifier[bands] ) | def main():
"""Main"""
bands = MSI_BAND_NAMES['S2A'].values()
bands.sort()
for platform_name in ['Sentinel-2A']:
tohdf5(MsiRSR, platform_name, bands) # depends on [control=['for'], data=['platform_name']]
bands = MSI_BAND_NAMES['S2B'].values()
bands.sort()
for platform_name in ['Sentinel-2B']:
tohdf5(MsiRSR, platform_name, bands) # depends on [control=['for'], data=['platform_name']] |
def forward(self):
""" Creates a new message that is a forward this message
:return: new message
:rtype: Message
"""
if not self.object_id or self.__is_draft:
raise RuntimeError("Can't forward this message")
url = self.build_url(
self._endpoints.get('forward_message').format(id=self.object_id))
response = self.con.post(url)
if not response:
return None
message = response.json()
# Everything received from cloud must be passed as self._cloud_data_key
return self.__class__(parent=self, **{self._cloud_data_key: message}) | def function[forward, parameter[self]]:
constant[ Creates a new message that is a forward this message
:return: new message
:rtype: Message
]
if <ast.BoolOp object at 0x7da1b1b2bd60> begin[:]
<ast.Raise object at 0x7da1b1b28340>
variable[url] assign[=] call[name[self].build_url, parameter[call[call[name[self]._endpoints.get, parameter[constant[forward_message]]].format, parameter[]]]]
variable[response] assign[=] call[name[self].con.post, parameter[name[url]]]
if <ast.UnaryOp object at 0x7da1b1b2b5b0> begin[:]
return[constant[None]]
variable[message] assign[=] call[name[response].json, parameter[]]
return[call[name[self].__class__, parameter[]]] | keyword[def] identifier[forward] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[object_id] keyword[or] identifier[self] . identifier[__is_draft] :
keyword[raise] identifier[RuntimeError] ( literal[string] )
identifier[url] = identifier[self] . identifier[build_url] (
identifier[self] . identifier[_endpoints] . identifier[get] ( literal[string] ). identifier[format] ( identifier[id] = identifier[self] . identifier[object_id] ))
identifier[response] = identifier[self] . identifier[con] . identifier[post] ( identifier[url] )
keyword[if] keyword[not] identifier[response] :
keyword[return] keyword[None]
identifier[message] = identifier[response] . identifier[json] ()
keyword[return] identifier[self] . identifier[__class__] ( identifier[parent] = identifier[self] ,**{ identifier[self] . identifier[_cloud_data_key] : identifier[message] }) | def forward(self):
""" Creates a new message that is a forward this message
:return: new message
:rtype: Message
"""
if not self.object_id or self.__is_draft:
raise RuntimeError("Can't forward this message") # depends on [control=['if'], data=[]]
url = self.build_url(self._endpoints.get('forward_message').format(id=self.object_id))
response = self.con.post(url)
if not response:
return None # depends on [control=['if'], data=[]]
message = response.json()
# Everything received from cloud must be passed as self._cloud_data_key
return self.__class__(parent=self, **{self._cloud_data_key: message}) |
def v2(self):
"""Return voltage phasors at the "to buses" (bus2)"""
Vm = self.system.dae.y[self.v]
Va = self.system.dae.y[self.a]
return polar(Vm[self.a2], Va[self.a2]) | def function[v2, parameter[self]]:
constant[Return voltage phasors at the "to buses" (bus2)]
variable[Vm] assign[=] call[name[self].system.dae.y][name[self].v]
variable[Va] assign[=] call[name[self].system.dae.y][name[self].a]
return[call[name[polar], parameter[call[name[Vm]][name[self].a2], call[name[Va]][name[self].a2]]]] | keyword[def] identifier[v2] ( identifier[self] ):
literal[string]
identifier[Vm] = identifier[self] . identifier[system] . identifier[dae] . identifier[y] [ identifier[self] . identifier[v] ]
identifier[Va] = identifier[self] . identifier[system] . identifier[dae] . identifier[y] [ identifier[self] . identifier[a] ]
keyword[return] identifier[polar] ( identifier[Vm] [ identifier[self] . identifier[a2] ], identifier[Va] [ identifier[self] . identifier[a2] ]) | def v2(self):
"""Return voltage phasors at the "to buses" (bus2)"""
Vm = self.system.dae.y[self.v]
Va = self.system.dae.y[self.a]
return polar(Vm[self.a2], Va[self.a2]) |
def exclude(self, scheduled_operation: ScheduledOperation) -> bool:
"""Omits a scheduled operation from the schedule, if present.
Args:
scheduled_operation: The operation to try to remove.
Returns:
True if the operation was present and is now removed, False if it
was already not present.
"""
try:
self.scheduled_operations.remove(scheduled_operation)
return True
except ValueError:
return False | def function[exclude, parameter[self, scheduled_operation]]:
constant[Omits a scheduled operation from the schedule, if present.
Args:
scheduled_operation: The operation to try to remove.
Returns:
True if the operation was present and is now removed, False if it
was already not present.
]
<ast.Try object at 0x7da1b1ce50c0> | keyword[def] identifier[exclude] ( identifier[self] , identifier[scheduled_operation] : identifier[ScheduledOperation] )-> identifier[bool] :
literal[string]
keyword[try] :
identifier[self] . identifier[scheduled_operations] . identifier[remove] ( identifier[scheduled_operation] )
keyword[return] keyword[True]
keyword[except] identifier[ValueError] :
keyword[return] keyword[False] | def exclude(self, scheduled_operation: ScheduledOperation) -> bool:
"""Omits a scheduled operation from the schedule, if present.
Args:
scheduled_operation: The operation to try to remove.
Returns:
True if the operation was present and is now removed, False if it
was already not present.
"""
try:
self.scheduled_operations.remove(scheduled_operation)
return True # depends on [control=['try'], data=[]]
except ValueError:
return False # depends on [control=['except'], data=[]] |
def SetProperties(has_props_cls, input_dict, include_immutable=True):
"""A helper method to set an ``HasProperties`` object's properties from a dictionary"""
props = has_props_cls()
if not isinstance(input_dict, (dict, collections.OrderedDict)):
raise RuntimeError('input_dict invalid: ', input_dict)
for k, v in iter(input_dict.items()):
if (k in has_props_cls._props and (
include_immutable or
any(hasattr(has_props_cls._props[k], att) for att in ('required', 'new_name'))
)
):
p = props._props.get(k)
if isinstance(p, properties.HasProperties):
props._set(k, SetProperties(p, v, include_immutable=include_immutable))
elif isinstance(p, properties.Instance):
props._set(k, SetProperties(p.instance_class, v, include_immutable=include_immutable))
elif isinstance(p, properties.List):
if not isinstance(v, list):
raise RuntimeError('property value mismatch', p, v)
if not isinstance(v[0], properties.HasProperties):
prop = p.prop.instance_class
newlist = []
for i in v:
value = SetProperties(prop, i, include_immutable=include_immutable)
newlist.append(value)
props._set(k, newlist)
else:
props._set(k, v)
else:
props._set(k, p.from_json(v))
# Return others as well
# others_dict = {k: v for k, v in iter(input_dict.items())
# if k not in has_props_cls._props}
return props | def function[SetProperties, parameter[has_props_cls, input_dict, include_immutable]]:
constant[A helper method to set an ``HasProperties`` object's properties from a dictionary]
variable[props] assign[=] call[name[has_props_cls], parameter[]]
if <ast.UnaryOp object at 0x7da1b0ad9ff0> begin[:]
<ast.Raise object at 0x7da1b0adad40>
for taget[tuple[[<ast.Name object at 0x7da1b0adba90>, <ast.Name object at 0x7da1b0ad8e50>]]] in starred[call[name[iter], parameter[call[name[input_dict].items, parameter[]]]]] begin[:]
if <ast.BoolOp object at 0x7da1b0ad9b40> begin[:]
variable[p] assign[=] call[name[props]._props.get, parameter[name[k]]]
if call[name[isinstance], parameter[name[p], name[properties].HasProperties]] begin[:]
call[name[props]._set, parameter[name[k], call[name[SetProperties], parameter[name[p], name[v]]]]]
return[name[props]] | keyword[def] identifier[SetProperties] ( identifier[has_props_cls] , identifier[input_dict] , identifier[include_immutable] = keyword[True] ):
literal[string]
identifier[props] = identifier[has_props_cls] ()
keyword[if] keyword[not] identifier[isinstance] ( identifier[input_dict] ,( identifier[dict] , identifier[collections] . identifier[OrderedDict] )):
keyword[raise] identifier[RuntimeError] ( literal[string] , identifier[input_dict] )
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[iter] ( identifier[input_dict] . identifier[items] ()):
keyword[if] ( identifier[k] keyword[in] identifier[has_props_cls] . identifier[_props] keyword[and] (
identifier[include_immutable] keyword[or]
identifier[any] ( identifier[hasattr] ( identifier[has_props_cls] . identifier[_props] [ identifier[k] ], identifier[att] ) keyword[for] identifier[att] keyword[in] ( literal[string] , literal[string] ))
)
):
identifier[p] = identifier[props] . identifier[_props] . identifier[get] ( identifier[k] )
keyword[if] identifier[isinstance] ( identifier[p] , identifier[properties] . identifier[HasProperties] ):
identifier[props] . identifier[_set] ( identifier[k] , identifier[SetProperties] ( identifier[p] , identifier[v] , identifier[include_immutable] = identifier[include_immutable] ))
keyword[elif] identifier[isinstance] ( identifier[p] , identifier[properties] . identifier[Instance] ):
identifier[props] . identifier[_set] ( identifier[k] , identifier[SetProperties] ( identifier[p] . identifier[instance_class] , identifier[v] , identifier[include_immutable] = identifier[include_immutable] ))
keyword[elif] identifier[isinstance] ( identifier[p] , identifier[properties] . identifier[List] ):
keyword[if] keyword[not] identifier[isinstance] ( identifier[v] , identifier[list] ):
keyword[raise] identifier[RuntimeError] ( literal[string] , identifier[p] , identifier[v] )
keyword[if] keyword[not] identifier[isinstance] ( identifier[v] [ literal[int] ], identifier[properties] . identifier[HasProperties] ):
identifier[prop] = identifier[p] . identifier[prop] . identifier[instance_class]
identifier[newlist] =[]
keyword[for] identifier[i] keyword[in] identifier[v] :
identifier[value] = identifier[SetProperties] ( identifier[prop] , identifier[i] , identifier[include_immutable] = identifier[include_immutable] )
identifier[newlist] . identifier[append] ( identifier[value] )
identifier[props] . identifier[_set] ( identifier[k] , identifier[newlist] )
keyword[else] :
identifier[props] . identifier[_set] ( identifier[k] , identifier[v] )
keyword[else] :
identifier[props] . identifier[_set] ( identifier[k] , identifier[p] . identifier[from_json] ( identifier[v] ))
keyword[return] identifier[props] | def SetProperties(has_props_cls, input_dict, include_immutable=True):
"""A helper method to set an ``HasProperties`` object's properties from a dictionary"""
props = has_props_cls()
if not isinstance(input_dict, (dict, collections.OrderedDict)):
raise RuntimeError('input_dict invalid: ', input_dict) # depends on [control=['if'], data=[]]
for (k, v) in iter(input_dict.items()):
if k in has_props_cls._props and (include_immutable or any((hasattr(has_props_cls._props[k], att) for att in ('required', 'new_name')))):
p = props._props.get(k)
if isinstance(p, properties.HasProperties):
props._set(k, SetProperties(p, v, include_immutable=include_immutable)) # depends on [control=['if'], data=[]]
elif isinstance(p, properties.Instance):
props._set(k, SetProperties(p.instance_class, v, include_immutable=include_immutable)) # depends on [control=['if'], data=[]]
elif isinstance(p, properties.List):
if not isinstance(v, list):
raise RuntimeError('property value mismatch', p, v) # depends on [control=['if'], data=[]]
if not isinstance(v[0], properties.HasProperties):
prop = p.prop.instance_class
newlist = []
for i in v:
value = SetProperties(prop, i, include_immutable=include_immutable)
newlist.append(value) # depends on [control=['for'], data=['i']]
props._set(k, newlist) # depends on [control=['if'], data=[]]
else:
props._set(k, v) # depends on [control=['if'], data=[]]
else:
props._set(k, p.from_json(v)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
# Return others as well
# others_dict = {k: v for k, v in iter(input_dict.items())
# if k not in has_props_cls._props}
return props |
def create_table(self, table, fields):
""" Responsys.createTable call
Accepts:
InteractObject table
list fields
Returns True on success
"""
table = table.get_soap_object(self.client)
return self.call('createTable', table, fields) | def function[create_table, parameter[self, table, fields]]:
constant[ Responsys.createTable call
Accepts:
InteractObject table
list fields
Returns True on success
]
variable[table] assign[=] call[name[table].get_soap_object, parameter[name[self].client]]
return[call[name[self].call, parameter[constant[createTable], name[table], name[fields]]]] | keyword[def] identifier[create_table] ( identifier[self] , identifier[table] , identifier[fields] ):
literal[string]
identifier[table] = identifier[table] . identifier[get_soap_object] ( identifier[self] . identifier[client] )
keyword[return] identifier[self] . identifier[call] ( literal[string] , identifier[table] , identifier[fields] ) | def create_table(self, table, fields):
""" Responsys.createTable call
Accepts:
InteractObject table
list fields
Returns True on success
"""
table = table.get_soap_object(self.client)
return self.call('createTable', table, fields) |
def delete_for_obj(self, entity_model_obj):
"""
Delete the entities associated with a model object.
"""
return self.filter(
entity_type=ContentType.objects.get_for_model(
entity_model_obj, for_concrete_model=False), entity_id=entity_model_obj.id).delete(
force=True) | def function[delete_for_obj, parameter[self, entity_model_obj]]:
constant[
Delete the entities associated with a model object.
]
return[call[call[name[self].filter, parameter[]].delete, parameter[]]] | keyword[def] identifier[delete_for_obj] ( identifier[self] , identifier[entity_model_obj] ):
literal[string]
keyword[return] identifier[self] . identifier[filter] (
identifier[entity_type] = identifier[ContentType] . identifier[objects] . identifier[get_for_model] (
identifier[entity_model_obj] , identifier[for_concrete_model] = keyword[False] ), identifier[entity_id] = identifier[entity_model_obj] . identifier[id] ). identifier[delete] (
identifier[force] = keyword[True] ) | def delete_for_obj(self, entity_model_obj):
"""
Delete the entities associated with a model object.
"""
return self.filter(entity_type=ContentType.objects.get_for_model(entity_model_obj, for_concrete_model=False), entity_id=entity_model_obj.id).delete(force=True) |
def migrate(self, conn):
"""Migrate a database as needed.
This method is safe to call on an up-to-date database, on an old
database, on a newer database, or an uninitialized database
(version 0).
This method is idempotent.
"""
while self.should_migrate(conn):
current_version = get_user_version(conn)
migration = self._get_migration(current_version)
assert migration.from_ver == current_version
logger.info(f'Migrating database from {migration.from_ver}'
f' to {migration.to_ver}')
self._migrate_single(conn, migration)
set_user_version(conn, migration.to_ver)
logger.info(f'Migrated database to {migration.to_ver}') | def function[migrate, parameter[self, conn]]:
constant[Migrate a database as needed.
This method is safe to call on an up-to-date database, on an old
database, on a newer database, or an uninitialized database
(version 0).
This method is idempotent.
]
while call[name[self].should_migrate, parameter[name[conn]]] begin[:]
variable[current_version] assign[=] call[name[get_user_version], parameter[name[conn]]]
variable[migration] assign[=] call[name[self]._get_migration, parameter[name[current_version]]]
assert[compare[name[migration].from_ver equal[==] name[current_version]]]
call[name[logger].info, parameter[<ast.JoinedStr object at 0x7da2041da5f0>]]
call[name[self]._migrate_single, parameter[name[conn], name[migration]]]
call[name[set_user_version], parameter[name[conn], name[migration].to_ver]]
call[name[logger].info, parameter[<ast.JoinedStr object at 0x7da1b1350c10>]] | keyword[def] identifier[migrate] ( identifier[self] , identifier[conn] ):
literal[string]
keyword[while] identifier[self] . identifier[should_migrate] ( identifier[conn] ):
identifier[current_version] = identifier[get_user_version] ( identifier[conn] )
identifier[migration] = identifier[self] . identifier[_get_migration] ( identifier[current_version] )
keyword[assert] identifier[migration] . identifier[from_ver] == identifier[current_version]
identifier[logger] . identifier[info] ( literal[string]
literal[string] )
identifier[self] . identifier[_migrate_single] ( identifier[conn] , identifier[migration] )
identifier[set_user_version] ( identifier[conn] , identifier[migration] . identifier[to_ver] )
identifier[logger] . identifier[info] ( literal[string] ) | def migrate(self, conn):
"""Migrate a database as needed.
This method is safe to call on an up-to-date database, on an old
database, on a newer database, or an uninitialized database
(version 0).
This method is idempotent.
"""
while self.should_migrate(conn):
current_version = get_user_version(conn)
migration = self._get_migration(current_version)
assert migration.from_ver == current_version
logger.info(f'Migrating database from {migration.from_ver} to {migration.to_ver}')
self._migrate_single(conn, migration)
set_user_version(conn, migration.to_ver)
logger.info(f'Migrated database to {migration.to_ver}') # depends on [control=['while'], data=[]] |
def app_state(self):
"""Current service state in current application.
:raise:RuntimeError if working outside application context.
"""
try:
return current_app.extensions[self.name]
except KeyError:
raise ServiceNotRegistered(self.name) | def function[app_state, parameter[self]]:
constant[Current service state in current application.
:raise:RuntimeError if working outside application context.
]
<ast.Try object at 0x7da18f720c40> | keyword[def] identifier[app_state] ( identifier[self] ):
literal[string]
keyword[try] :
keyword[return] identifier[current_app] . identifier[extensions] [ identifier[self] . identifier[name] ]
keyword[except] identifier[KeyError] :
keyword[raise] identifier[ServiceNotRegistered] ( identifier[self] . identifier[name] ) | def app_state(self):
"""Current service state in current application.
:raise:RuntimeError if working outside application context.
"""
try:
return current_app.extensions[self.name] # depends on [control=['try'], data=[]]
except KeyError:
raise ServiceNotRegistered(self.name) # depends on [control=['except'], data=[]] |
def pop(self):
"""Pops the data stack, returning the value."""
try:
return self.data_stack.pop()
except errors.MachineError as e:
raise errors.MachineError("%s: At index %d in code: %s" %
(e, self.instruction_pointer, self.code_string)) | def function[pop, parameter[self]]:
constant[Pops the data stack, returning the value.]
<ast.Try object at 0x7da1b2527250> | keyword[def] identifier[pop] ( identifier[self] ):
literal[string]
keyword[try] :
keyword[return] identifier[self] . identifier[data_stack] . identifier[pop] ()
keyword[except] identifier[errors] . identifier[MachineError] keyword[as] identifier[e] :
keyword[raise] identifier[errors] . identifier[MachineError] ( literal[string] %
( identifier[e] , identifier[self] . identifier[instruction_pointer] , identifier[self] . identifier[code_string] )) | def pop(self):
"""Pops the data stack, returning the value."""
try:
return self.data_stack.pop() # depends on [control=['try'], data=[]]
except errors.MachineError as e:
raise errors.MachineError('%s: At index %d in code: %s' % (e, self.instruction_pointer, self.code_string)) # depends on [control=['except'], data=['e']] |
def parse(self, only_known = False):
'''Ensure all sources are ready to be queried.
Parses ``sys.argv`` with the contained ``argparse.ArgumentParser`` and
sets ``parsed`` to True if ``only_known`` is False. Once ``parsed`` is
set to True, it is inadvisable to add more parameters (cf.
``add_parameter``). Also, if ``parsed`` is not set to True, retrieving
items (cf. ``__getitem__``) will result in a warning that values are
being retrieved from an uparsed Parameters.
**Arguments**
:``only_known``: If True, do not error or fail when unknown parameters
are encountered.
.. note::
If ``only_known`` is True, the ``--help`` and
``-h`` options on the command line (``sys.argv``)
will be ignored during parsing as it is unexpected
that these parameters' default behavior would be
desired at this stage of execution.
'''
self.parsed = not only_known or self.parsed
logger.info('parsing parameters')
logger.debug('sys.argv: %s', sys.argv)
if only_known:
args = [ _ for _ in copy.copy(sys.argv) if not re.match('-h|--help', _) ]
self._group_parsers['default'].parse_known_args(args = args, namespace = self._argument_namespace)
else:
self._group_parsers['default'].parse_args(namespace = self._argument_namespace) | def function[parse, parameter[self, only_known]]:
constant[Ensure all sources are ready to be queried.
Parses ``sys.argv`` with the contained ``argparse.ArgumentParser`` and
sets ``parsed`` to True if ``only_known`` is False. Once ``parsed`` is
set to True, it is inadvisable to add more parameters (cf.
``add_parameter``). Also, if ``parsed`` is not set to True, retrieving
items (cf. ``__getitem__``) will result in a warning that values are
being retrieved from an uparsed Parameters.
**Arguments**
:``only_known``: If True, do not error or fail when unknown parameters
are encountered.
.. note::
If ``only_known`` is True, the ``--help`` and
``-h`` options on the command line (``sys.argv``)
will be ignored during parsing as it is unexpected
that these parameters' default behavior would be
desired at this stage of execution.
]
name[self].parsed assign[=] <ast.BoolOp object at 0x7da18eb56dd0>
call[name[logger].info, parameter[constant[parsing parameters]]]
call[name[logger].debug, parameter[constant[sys.argv: %s], name[sys].argv]]
if name[only_known] begin[:]
variable[args] assign[=] <ast.ListComp object at 0x7da18eb553f0>
call[call[name[self]._group_parsers][constant[default]].parse_known_args, parameter[]] | keyword[def] identifier[parse] ( identifier[self] , identifier[only_known] = keyword[False] ):
literal[string]
identifier[self] . identifier[parsed] = keyword[not] identifier[only_known] keyword[or] identifier[self] . identifier[parsed]
identifier[logger] . identifier[info] ( literal[string] )
identifier[logger] . identifier[debug] ( literal[string] , identifier[sys] . identifier[argv] )
keyword[if] identifier[only_known] :
identifier[args] =[ identifier[_] keyword[for] identifier[_] keyword[in] identifier[copy] . identifier[copy] ( identifier[sys] . identifier[argv] ) keyword[if] keyword[not] identifier[re] . identifier[match] ( literal[string] , identifier[_] )]
identifier[self] . identifier[_group_parsers] [ literal[string] ]. identifier[parse_known_args] ( identifier[args] = identifier[args] , identifier[namespace] = identifier[self] . identifier[_argument_namespace] )
keyword[else] :
identifier[self] . identifier[_group_parsers] [ literal[string] ]. identifier[parse_args] ( identifier[namespace] = identifier[self] . identifier[_argument_namespace] ) | def parse(self, only_known=False):
"""Ensure all sources are ready to be queried.
Parses ``sys.argv`` with the contained ``argparse.ArgumentParser`` and
sets ``parsed`` to True if ``only_known`` is False. Once ``parsed`` is
set to True, it is inadvisable to add more parameters (cf.
``add_parameter``). Also, if ``parsed`` is not set to True, retrieving
items (cf. ``__getitem__``) will result in a warning that values are
being retrieved from an uparsed Parameters.
**Arguments**
:``only_known``: If True, do not error or fail when unknown parameters
are encountered.
.. note::
If ``only_known`` is True, the ``--help`` and
``-h`` options on the command line (``sys.argv``)
will be ignored during parsing as it is unexpected
that these parameters' default behavior would be
desired at this stage of execution.
"""
self.parsed = not only_known or self.parsed
logger.info('parsing parameters')
logger.debug('sys.argv: %s', sys.argv)
if only_known:
args = [_ for _ in copy.copy(sys.argv) if not re.match('-h|--help', _)]
self._group_parsers['default'].parse_known_args(args=args, namespace=self._argument_namespace) # depends on [control=['if'], data=[]]
else:
self._group_parsers['default'].parse_args(namespace=self._argument_namespace) |
def list_namespaced_daemon_set(self, namespace, **kwargs): # noqa: E501
"""list_namespaced_daemon_set # noqa: E501
list or watch objects of kind DaemonSet # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_namespaced_daemon_set(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1beta1DaemonSetList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_namespaced_daemon_set_with_http_info(namespace, **kwargs) # noqa: E501
else:
(data) = self.list_namespaced_daemon_set_with_http_info(namespace, **kwargs) # noqa: E501
return data | def function[list_namespaced_daemon_set, parameter[self, namespace]]:
constant[list_namespaced_daemon_set # noqa: E501
list or watch objects of kind DaemonSet # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_namespaced_daemon_set(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1beta1DaemonSetList
If the method is called asynchronously,
returns the request thread.
]
call[name[kwargs]][constant[_return_http_data_only]] assign[=] constant[True]
if call[name[kwargs].get, parameter[constant[async_req]]] begin[:]
return[call[name[self].list_namespaced_daemon_set_with_http_info, parameter[name[namespace]]]] | keyword[def] identifier[list_namespaced_daemon_set] ( identifier[self] , identifier[namespace] ,** identifier[kwargs] ):
literal[string]
identifier[kwargs] [ literal[string] ]= keyword[True]
keyword[if] identifier[kwargs] . identifier[get] ( literal[string] ):
keyword[return] identifier[self] . identifier[list_namespaced_daemon_set_with_http_info] ( identifier[namespace] ,** identifier[kwargs] )
keyword[else] :
( identifier[data] )= identifier[self] . identifier[list_namespaced_daemon_set_with_http_info] ( identifier[namespace] ,** identifier[kwargs] )
keyword[return] identifier[data] | def list_namespaced_daemon_set(self, namespace, **kwargs): # noqa: E501
'list_namespaced_daemon_set # noqa: E501\n\n list or watch objects of kind DaemonSet # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.list_namespaced_daemon_set(namespace, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param str namespace: object name and auth scope, such as for teams and projects (required)\n :param bool include_uninitialized: If true, partially initialized resources are included in the response.\n :param str pretty: If \'true\', then the output is pretty printed.\n :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.\n :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.\n :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.\n :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.\n :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it\'s 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.\n :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.\n :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.\n :return: V1beta1DaemonSetList\n If the method is called asynchronously,\n returns the request thread.\n '
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_namespaced_daemon_set_with_http_info(namespace, **kwargs) # noqa: E501 # depends on [control=['if'], data=[]]
else:
data = self.list_namespaced_daemon_set_with_http_info(namespace, **kwargs) # noqa: E501
return data |
def modify_object(self, modification, obj):
"""
Modify an object that supports pymatgen's as_dict() and from_dict API.
Args:
modification (dict): Modification must be {action_keyword :
settings}. E.g., {'_set': {'Hello':'Universe', 'Bye': 'World'}}
obj (object): Object to modify
"""
d = obj.as_dict()
self.modify(modification, d)
return obj.from_dict(d) | def function[modify_object, parameter[self, modification, obj]]:
constant[
Modify an object that supports pymatgen's as_dict() and from_dict API.
Args:
modification (dict): Modification must be {action_keyword :
settings}. E.g., {'_set': {'Hello':'Universe', 'Bye': 'World'}}
obj (object): Object to modify
]
variable[d] assign[=] call[name[obj].as_dict, parameter[]]
call[name[self].modify, parameter[name[modification], name[d]]]
return[call[name[obj].from_dict, parameter[name[d]]]] | keyword[def] identifier[modify_object] ( identifier[self] , identifier[modification] , identifier[obj] ):
literal[string]
identifier[d] = identifier[obj] . identifier[as_dict] ()
identifier[self] . identifier[modify] ( identifier[modification] , identifier[d] )
keyword[return] identifier[obj] . identifier[from_dict] ( identifier[d] ) | def modify_object(self, modification, obj):
"""
Modify an object that supports pymatgen's as_dict() and from_dict API.
Args:
modification (dict): Modification must be {action_keyword :
settings}. E.g., {'_set': {'Hello':'Universe', 'Bye': 'World'}}
obj (object): Object to modify
"""
d = obj.as_dict()
self.modify(modification, d)
return obj.from_dict(d) |
def align_texts(source_blocks, target_blocks, params = LanguageIndependent):
"""Creates the sentence alignment of two texts.
Texts can consist of several blocks. Block boundaries cannot be crossed by sentence
alignment links.
Each block consists of a list that contains the lengths (in characters) of the sentences
in this block.
@param source_blocks: The list of blocks in the source text.
@param target_blocks: The list of blocks in the target text.
@param params: the sentence alignment parameters.
@returns: A list of sentence alignment lists
"""
if len(source_blocks) != len(target_blocks):
raise ValueError("Source and target texts do not have the same number of blocks.")
return [align_blocks(source_block, target_block, params)
for source_block, target_block in zip(source_blocks, target_blocks)] | def function[align_texts, parameter[source_blocks, target_blocks, params]]:
constant[Creates the sentence alignment of two texts.
Texts can consist of several blocks. Block boundaries cannot be crossed by sentence
alignment links.
Each block consists of a list that contains the lengths (in characters) of the sentences
in this block.
@param source_blocks: The list of blocks in the source text.
@param target_blocks: The list of blocks in the target text.
@param params: the sentence alignment parameters.
@returns: A list of sentence alignment lists
]
if compare[call[name[len], parameter[name[source_blocks]]] not_equal[!=] call[name[len], parameter[name[target_blocks]]]] begin[:]
<ast.Raise object at 0x7da1b020cc40>
return[<ast.ListComp object at 0x7da1b020c520>] | keyword[def] identifier[align_texts] ( identifier[source_blocks] , identifier[target_blocks] , identifier[params] = identifier[LanguageIndependent] ):
literal[string]
keyword[if] identifier[len] ( identifier[source_blocks] )!= identifier[len] ( identifier[target_blocks] ):
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[return] [ identifier[align_blocks] ( identifier[source_block] , identifier[target_block] , identifier[params] )
keyword[for] identifier[source_block] , identifier[target_block] keyword[in] identifier[zip] ( identifier[source_blocks] , identifier[target_blocks] )] | def align_texts(source_blocks, target_blocks, params=LanguageIndependent):
"""Creates the sentence alignment of two texts.
Texts can consist of several blocks. Block boundaries cannot be crossed by sentence
alignment links.
Each block consists of a list that contains the lengths (in characters) of the sentences
in this block.
@param source_blocks: The list of blocks in the source text.
@param target_blocks: The list of blocks in the target text.
@param params: the sentence alignment parameters.
@returns: A list of sentence alignment lists
"""
if len(source_blocks) != len(target_blocks):
raise ValueError('Source and target texts do not have the same number of blocks.') # depends on [control=['if'], data=[]]
return [align_blocks(source_block, target_block, params) for (source_block, target_block) in zip(source_blocks, target_blocks)] |
def define_baseargs(self, parser):
'''
Define basic command-line arguments required by the script.
@parser is a parser object created using the `argparse` module.
returns: None
'''
parser.add_argument('--name', default=sys.argv[0],
help='Name to identify this instance')
parser.add_argument('--log-level', default=None,
help='Logging level as picked from the logging module')
parser.add_argument('--log-format', default=None,
# TODO add more formats
choices=("json", "pretty",),
help=("Force the format of the logs. By default, if the "
"command is from a terminal, print colorful logs. "
"Otherwise print json."),
)
parser.add_argument('--log-file', default=None,
help='Writes logs to log file if specified, default: %(default)s',
)
parser.add_argument('--quiet', default=False, action="store_true",
help='if true, does not print logs to stderr, default: %(default)s',
)
parser.add_argument('--metric-grouping-interval', default=None, type=int,
help='To group metrics based on time interval ex:10 i.e;(10 sec)',
)
parser.add_argument('--debug', default=False, action="store_true",
help='To run the code in debug mode',
) | def function[define_baseargs, parameter[self, parser]]:
constant[
Define basic command-line arguments required by the script.
@parser is a parser object created using the `argparse` module.
returns: None
]
call[name[parser].add_argument, parameter[constant[--name]]]
call[name[parser].add_argument, parameter[constant[--log-level]]]
call[name[parser].add_argument, parameter[constant[--log-format]]]
call[name[parser].add_argument, parameter[constant[--log-file]]]
call[name[parser].add_argument, parameter[constant[--quiet]]]
call[name[parser].add_argument, parameter[constant[--metric-grouping-interval]]]
call[name[parser].add_argument, parameter[constant[--debug]]] | keyword[def] identifier[define_baseargs] ( identifier[self] , identifier[parser] ):
literal[string]
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[default] = identifier[sys] . identifier[argv] [ literal[int] ],
identifier[help] = literal[string] )
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[default] = keyword[None] ,
identifier[help] = literal[string] )
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[default] = keyword[None] ,
identifier[choices] =( literal[string] , literal[string] ,),
identifier[help] =( literal[string]
literal[string]
literal[string] ),
)
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[default] = keyword[None] ,
identifier[help] = literal[string] ,
)
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[default] = keyword[False] , identifier[action] = literal[string] ,
identifier[help] = literal[string] ,
)
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[default] = keyword[None] , identifier[type] = identifier[int] ,
identifier[help] = literal[string] ,
)
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[default] = keyword[False] , identifier[action] = literal[string] ,
identifier[help] = literal[string] ,
) | def define_baseargs(self, parser):
"""
Define basic command-line arguments required by the script.
@parser is a parser object created using the `argparse` module.
returns: None
"""
parser.add_argument('--name', default=sys.argv[0], help='Name to identify this instance')
parser.add_argument('--log-level', default=None, help='Logging level as picked from the logging module')
# TODO add more formats
parser.add_argument('--log-format', default=None, choices=('json', 'pretty'), help='Force the format of the logs. By default, if the command is from a terminal, print colorful logs. Otherwise print json.')
parser.add_argument('--log-file', default=None, help='Writes logs to log file if specified, default: %(default)s')
parser.add_argument('--quiet', default=False, action='store_true', help='if true, does not print logs to stderr, default: %(default)s')
parser.add_argument('--metric-grouping-interval', default=None, type=int, help='To group metrics based on time interval ex:10 i.e;(10 sec)')
parser.add_argument('--debug', default=False, action='store_true', help='To run the code in debug mode') |
def get_nameday(self, month=None, day=None):
"""Return name(s) as a string based on given date and month.
If no arguments given, use current date"""
if month is None:
month = datetime.now().month
if day is None:
day = datetime.now().day
return self.NAMEDAYS[month-1][day-1] | def function[get_nameday, parameter[self, month, day]]:
constant[Return name(s) as a string based on given date and month.
If no arguments given, use current date]
if compare[name[month] is constant[None]] begin[:]
variable[month] assign[=] call[name[datetime].now, parameter[]].month
if compare[name[day] is constant[None]] begin[:]
variable[day] assign[=] call[name[datetime].now, parameter[]].day
return[call[call[name[self].NAMEDAYS][binary_operation[name[month] - constant[1]]]][binary_operation[name[day] - constant[1]]]] | keyword[def] identifier[get_nameday] ( identifier[self] , identifier[month] = keyword[None] , identifier[day] = keyword[None] ):
literal[string]
keyword[if] identifier[month] keyword[is] keyword[None] :
identifier[month] = identifier[datetime] . identifier[now] (). identifier[month]
keyword[if] identifier[day] keyword[is] keyword[None] :
identifier[day] = identifier[datetime] . identifier[now] (). identifier[day]
keyword[return] identifier[self] . identifier[NAMEDAYS] [ identifier[month] - literal[int] ][ identifier[day] - literal[int] ] | def get_nameday(self, month=None, day=None):
"""Return name(s) as a string based on given date and month.
If no arguments given, use current date"""
if month is None:
month = datetime.now().month # depends on [control=['if'], data=['month']]
if day is None:
day = datetime.now().day # depends on [control=['if'], data=['day']]
return self.NAMEDAYS[month - 1][day - 1] |
def spisend(self, bytes_to_send):
"""Sends bytes via the SPI bus.
:param bytes_to_send: The bytes to send on the SPI device.
:type bytes_to_send: bytes
:returns: bytes -- returned bytes from SPI device
:raises: InitError
"""
# make some buffer space to store reading/writing
wbuffer = ctypes.create_string_buffer(bytes_to_send,
len(bytes_to_send))
rbuffer = ctypes.create_string_buffer(len(bytes_to_send))
# create the spi transfer struct
transfer = spi_ioc_transfer(
tx_buf=ctypes.addressof(wbuffer),
rx_buf=ctypes.addressof(rbuffer),
len=ctypes.sizeof(wbuffer),
speed_hz=ctypes.c_uint32(self.speed_hz)
)
if self.spi_callback is not None:
self.spi_callback(bytes_to_send)
# send the spi command
ioctl(self.fd, SPI_IOC_MESSAGE(1), transfer)
return ctypes.string_at(rbuffer, ctypes.sizeof(rbuffer)) | def function[spisend, parameter[self, bytes_to_send]]:
constant[Sends bytes via the SPI bus.
:param bytes_to_send: The bytes to send on the SPI device.
:type bytes_to_send: bytes
:returns: bytes -- returned bytes from SPI device
:raises: InitError
]
variable[wbuffer] assign[=] call[name[ctypes].create_string_buffer, parameter[name[bytes_to_send], call[name[len], parameter[name[bytes_to_send]]]]]
variable[rbuffer] assign[=] call[name[ctypes].create_string_buffer, parameter[call[name[len], parameter[name[bytes_to_send]]]]]
variable[transfer] assign[=] call[name[spi_ioc_transfer], parameter[]]
if compare[name[self].spi_callback is_not constant[None]] begin[:]
call[name[self].spi_callback, parameter[name[bytes_to_send]]]
call[name[ioctl], parameter[name[self].fd, call[name[SPI_IOC_MESSAGE], parameter[constant[1]]], name[transfer]]]
return[call[name[ctypes].string_at, parameter[name[rbuffer], call[name[ctypes].sizeof, parameter[name[rbuffer]]]]]] | keyword[def] identifier[spisend] ( identifier[self] , identifier[bytes_to_send] ):
literal[string]
identifier[wbuffer] = identifier[ctypes] . identifier[create_string_buffer] ( identifier[bytes_to_send] ,
identifier[len] ( identifier[bytes_to_send] ))
identifier[rbuffer] = identifier[ctypes] . identifier[create_string_buffer] ( identifier[len] ( identifier[bytes_to_send] ))
identifier[transfer] = identifier[spi_ioc_transfer] (
identifier[tx_buf] = identifier[ctypes] . identifier[addressof] ( identifier[wbuffer] ),
identifier[rx_buf] = identifier[ctypes] . identifier[addressof] ( identifier[rbuffer] ),
identifier[len] = identifier[ctypes] . identifier[sizeof] ( identifier[wbuffer] ),
identifier[speed_hz] = identifier[ctypes] . identifier[c_uint32] ( identifier[self] . identifier[speed_hz] )
)
keyword[if] identifier[self] . identifier[spi_callback] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[spi_callback] ( identifier[bytes_to_send] )
identifier[ioctl] ( identifier[self] . identifier[fd] , identifier[SPI_IOC_MESSAGE] ( literal[int] ), identifier[transfer] )
keyword[return] identifier[ctypes] . identifier[string_at] ( identifier[rbuffer] , identifier[ctypes] . identifier[sizeof] ( identifier[rbuffer] )) | def spisend(self, bytes_to_send):
"""Sends bytes via the SPI bus.
:param bytes_to_send: The bytes to send on the SPI device.
:type bytes_to_send: bytes
:returns: bytes -- returned bytes from SPI device
:raises: InitError
"""
# make some buffer space to store reading/writing
wbuffer = ctypes.create_string_buffer(bytes_to_send, len(bytes_to_send))
rbuffer = ctypes.create_string_buffer(len(bytes_to_send))
# create the spi transfer struct
transfer = spi_ioc_transfer(tx_buf=ctypes.addressof(wbuffer), rx_buf=ctypes.addressof(rbuffer), len=ctypes.sizeof(wbuffer), speed_hz=ctypes.c_uint32(self.speed_hz))
if self.spi_callback is not None:
self.spi_callback(bytes_to_send) # depends on [control=['if'], data=[]]
# send the spi command
ioctl(self.fd, SPI_IOC_MESSAGE(1), transfer)
return ctypes.string_at(rbuffer, ctypes.sizeof(rbuffer)) |
def deserialize_model(data, klass):
"""
Deserializes list or dict to model.
:param data: dict, list.
:type data: dict | list
:param klass: class literal.
:return: model object.
"""
instance = klass()
if not instance.swagger_types:
return data
for attr, attr_type in iteritems(instance.swagger_types):
if data is not None \
and instance.attribute_map[attr] in data \
and isinstance(data, (list, dict)):
value = data[instance.attribute_map[attr]]
setattr(instance, attr, _deserialize(value, attr_type))
return instance | def function[deserialize_model, parameter[data, klass]]:
constant[
Deserializes list or dict to model.
:param data: dict, list.
:type data: dict | list
:param klass: class literal.
:return: model object.
]
variable[instance] assign[=] call[name[klass], parameter[]]
if <ast.UnaryOp object at 0x7da1b236c970> begin[:]
return[name[data]]
for taget[tuple[[<ast.Name object at 0x7da1b235e590>, <ast.Name object at 0x7da1b235c070>]]] in starred[call[name[iteritems], parameter[name[instance].swagger_types]]] begin[:]
if <ast.BoolOp object at 0x7da1b235ffa0> begin[:]
variable[value] assign[=] call[name[data]][call[name[instance].attribute_map][name[attr]]]
call[name[setattr], parameter[name[instance], name[attr], call[name[_deserialize], parameter[name[value], name[attr_type]]]]]
return[name[instance]] | keyword[def] identifier[deserialize_model] ( identifier[data] , identifier[klass] ):
literal[string]
identifier[instance] = identifier[klass] ()
keyword[if] keyword[not] identifier[instance] . identifier[swagger_types] :
keyword[return] identifier[data]
keyword[for] identifier[attr] , identifier[attr_type] keyword[in] identifier[iteritems] ( identifier[instance] . identifier[swagger_types] ):
keyword[if] identifier[data] keyword[is] keyword[not] keyword[None] keyword[and] identifier[instance] . identifier[attribute_map] [ identifier[attr] ] keyword[in] identifier[data] keyword[and] identifier[isinstance] ( identifier[data] ,( identifier[list] , identifier[dict] )):
identifier[value] = identifier[data] [ identifier[instance] . identifier[attribute_map] [ identifier[attr] ]]
identifier[setattr] ( identifier[instance] , identifier[attr] , identifier[_deserialize] ( identifier[value] , identifier[attr_type] ))
keyword[return] identifier[instance] | def deserialize_model(data, klass):
"""
Deserializes list or dict to model.
:param data: dict, list.
:type data: dict | list
:param klass: class literal.
:return: model object.
"""
instance = klass()
if not instance.swagger_types:
return data # depends on [control=['if'], data=[]]
for (attr, attr_type) in iteritems(instance.swagger_types):
if data is not None and instance.attribute_map[attr] in data and isinstance(data, (list, dict)):
value = data[instance.attribute_map[attr]]
setattr(instance, attr, _deserialize(value, attr_type)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
return instance |
def generate(self, url, browsers=None, orientation=None, mac_res=None, win_res=None,
quality=None, local=None, wait_time=None, callback_url=None):
"""
Generates screenshots for a URL.
"""
if isinstance(browsers, dict):
browsers = [browsers]
if browsers is None:
browsers = [self.default_browser]
data = dict((key, value) for key, value in locals().items() if value is not None and key != 'self')
return self.execute('POST', '/screenshots', json=data) | def function[generate, parameter[self, url, browsers, orientation, mac_res, win_res, quality, local, wait_time, callback_url]]:
constant[
Generates screenshots for a URL.
]
if call[name[isinstance], parameter[name[browsers], name[dict]]] begin[:]
variable[browsers] assign[=] list[[<ast.Name object at 0x7da18dc06e90>]]
if compare[name[browsers] is constant[None]] begin[:]
variable[browsers] assign[=] list[[<ast.Attribute object at 0x7da18dc05330>]]
variable[data] assign[=] call[name[dict], parameter[<ast.GeneratorExp object at 0x7da18dc06aa0>]]
return[call[name[self].execute, parameter[constant[POST], constant[/screenshots]]]] | keyword[def] identifier[generate] ( identifier[self] , identifier[url] , identifier[browsers] = keyword[None] , identifier[orientation] = keyword[None] , identifier[mac_res] = keyword[None] , identifier[win_res] = keyword[None] ,
identifier[quality] = keyword[None] , identifier[local] = keyword[None] , identifier[wait_time] = keyword[None] , identifier[callback_url] = keyword[None] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[browsers] , identifier[dict] ):
identifier[browsers] =[ identifier[browsers] ]
keyword[if] identifier[browsers] keyword[is] keyword[None] :
identifier[browsers] =[ identifier[self] . identifier[default_browser] ]
identifier[data] = identifier[dict] (( identifier[key] , identifier[value] ) keyword[for] identifier[key] , identifier[value] keyword[in] identifier[locals] (). identifier[items] () keyword[if] identifier[value] keyword[is] keyword[not] keyword[None] keyword[and] identifier[key] != literal[string] )
keyword[return] identifier[self] . identifier[execute] ( literal[string] , literal[string] , identifier[json] = identifier[data] ) | def generate(self, url, browsers=None, orientation=None, mac_res=None, win_res=None, quality=None, local=None, wait_time=None, callback_url=None):
"""
Generates screenshots for a URL.
"""
if isinstance(browsers, dict):
browsers = [browsers] # depends on [control=['if'], data=[]]
if browsers is None:
browsers = [self.default_browser] # depends on [control=['if'], data=['browsers']]
data = dict(((key, value) for (key, value) in locals().items() if value is not None and key != 'self'))
return self.execute('POST', '/screenshots', json=data) |
def cut_edges(graph):
"""
Return the cut-edges of the given graph.
A cut edge, or bridge, is an edge of a graph whose removal increases the number of connected
components in the graph.
@type graph: graph, hypergraph
@param graph: Graph.
@rtype: list
@return: List of cut-edges.
"""
recursionlimit = getrecursionlimit()
setrecursionlimit(max(len(graph.nodes())*2,recursionlimit))
# Dispatch if we have a hypergraph
if 'hypergraph' == graph.__class__.__name__:
return _cut_hyperedges(graph)
pre = {} # Pre-ordering
low = {} # Lowest pre[] reachable from this node going down the spanning tree + one backedge
spanning_tree = {}
reply = []
pre[None] = 0
for each in graph:
if (each not in pre):
spanning_tree[each] = None
_cut_dfs(graph, spanning_tree, pre, low, reply, each)
setrecursionlimit(recursionlimit)
return reply | def function[cut_edges, parameter[graph]]:
constant[
Return the cut-edges of the given graph.
A cut edge, or bridge, is an edge of a graph whose removal increases the number of connected
components in the graph.
@type graph: graph, hypergraph
@param graph: Graph.
@rtype: list
@return: List of cut-edges.
]
variable[recursionlimit] assign[=] call[name[getrecursionlimit], parameter[]]
call[name[setrecursionlimit], parameter[call[name[max], parameter[binary_operation[call[name[len], parameter[call[name[graph].nodes, parameter[]]]] * constant[2]], name[recursionlimit]]]]]
if compare[constant[hypergraph] equal[==] name[graph].__class__.__name__] begin[:]
return[call[name[_cut_hyperedges], parameter[name[graph]]]]
variable[pre] assign[=] dictionary[[], []]
variable[low] assign[=] dictionary[[], []]
variable[spanning_tree] assign[=] dictionary[[], []]
variable[reply] assign[=] list[[]]
call[name[pre]][constant[None]] assign[=] constant[0]
for taget[name[each]] in starred[name[graph]] begin[:]
if compare[name[each] <ast.NotIn object at 0x7da2590d7190> name[pre]] begin[:]
call[name[spanning_tree]][name[each]] assign[=] constant[None]
call[name[_cut_dfs], parameter[name[graph], name[spanning_tree], name[pre], name[low], name[reply], name[each]]]
call[name[setrecursionlimit], parameter[name[recursionlimit]]]
return[name[reply]] | keyword[def] identifier[cut_edges] ( identifier[graph] ):
literal[string]
identifier[recursionlimit] = identifier[getrecursionlimit] ()
identifier[setrecursionlimit] ( identifier[max] ( identifier[len] ( identifier[graph] . identifier[nodes] ())* literal[int] , identifier[recursionlimit] ))
keyword[if] literal[string] == identifier[graph] . identifier[__class__] . identifier[__name__] :
keyword[return] identifier[_cut_hyperedges] ( identifier[graph] )
identifier[pre] ={}
identifier[low] ={}
identifier[spanning_tree] ={}
identifier[reply] =[]
identifier[pre] [ keyword[None] ]= literal[int]
keyword[for] identifier[each] keyword[in] identifier[graph] :
keyword[if] ( identifier[each] keyword[not] keyword[in] identifier[pre] ):
identifier[spanning_tree] [ identifier[each] ]= keyword[None]
identifier[_cut_dfs] ( identifier[graph] , identifier[spanning_tree] , identifier[pre] , identifier[low] , identifier[reply] , identifier[each] )
identifier[setrecursionlimit] ( identifier[recursionlimit] )
keyword[return] identifier[reply] | def cut_edges(graph):
"""
Return the cut-edges of the given graph.
A cut edge, or bridge, is an edge of a graph whose removal increases the number of connected
components in the graph.
@type graph: graph, hypergraph
@param graph: Graph.
@rtype: list
@return: List of cut-edges.
"""
recursionlimit = getrecursionlimit()
setrecursionlimit(max(len(graph.nodes()) * 2, recursionlimit))
# Dispatch if we have a hypergraph
if 'hypergraph' == graph.__class__.__name__:
return _cut_hyperedges(graph) # depends on [control=['if'], data=[]]
pre = {} # Pre-ordering
low = {} # Lowest pre[] reachable from this node going down the spanning tree + one backedge
spanning_tree = {}
reply = []
pre[None] = 0
for each in graph:
if each not in pre:
spanning_tree[each] = None
_cut_dfs(graph, spanning_tree, pre, low, reply, each) # depends on [control=['if'], data=['each', 'pre']] # depends on [control=['for'], data=['each']]
setrecursionlimit(recursionlimit)
return reply |
def _download_rtd_zip(rtd_version=None, **kwargs):
"""
Download and extract HTML ZIP from RTD to installed doc data path.
Download is skipped if content already exists.
Parameters
----------
rtd_version : str or `None`
RTD version to download; e.g., "latest", "stable", or "v2.6.0".
If not given, download closest match to software version.
kwargs : dict
Keywords for ``urlretrieve()``.
Returns
-------
index_html : str
Path to local "index.html".
"""
# https://github.com/ejeschke/ginga/pull/451#issuecomment-298403134
if not toolkit.family.startswith('qt'):
raise ValueError('Downloaded documentation not compatible with {} '
'UI toolkit browser'.format(toolkit.family))
if rtd_version is None:
rtd_version = _find_rtd_version()
data_path = os.path.dirname(
_find_pkg_data_path('help.html', package='ginga.doc'))
index_html = os.path.join(data_path, 'index.html')
# There is a previous download of documentation; Do nothing.
# There is no check if downloaded version is outdated; The idea is that
# this folder would be empty again when installing new version.
if os.path.isfile(index_html):
return index_html
url = ('https://readthedocs.org/projects/ginga/downloads/htmlzip/'
'{}/'.format(rtd_version))
local_path = urllib.request.urlretrieve(url, **kwargs)[0]
with zipfile.ZipFile(local_path, 'r') as zf:
zf.extractall(data_path)
# RTD makes an undesirable sub-directory, so move everything there
# up one level and delete it.
subdir = os.path.join(data_path, 'ginga-{}'.format(rtd_version))
for s in os.listdir(subdir):
src = os.path.join(subdir, s)
if os.path.isfile(src):
shutil.copy(src, data_path)
else: # directory
shutil.copytree(src, os.path.join(data_path, s))
shutil.rmtree(subdir)
if not os.path.isfile(index_html):
raise OSError(
'{} is missing; Ginga doc download failed'.format(index_html))
return index_html | def function[_download_rtd_zip, parameter[rtd_version]]:
constant[
Download and extract HTML ZIP from RTD to installed doc data path.
Download is skipped if content already exists.
Parameters
----------
rtd_version : str or `None`
RTD version to download; e.g., "latest", "stable", or "v2.6.0".
If not given, download closest match to software version.
kwargs : dict
Keywords for ``urlretrieve()``.
Returns
-------
index_html : str
Path to local "index.html".
]
if <ast.UnaryOp object at 0x7da1b0da3fd0> begin[:]
<ast.Raise object at 0x7da1b0da05e0>
if compare[name[rtd_version] is constant[None]] begin[:]
variable[rtd_version] assign[=] call[name[_find_rtd_version], parameter[]]
variable[data_path] assign[=] call[name[os].path.dirname, parameter[call[name[_find_pkg_data_path], parameter[constant[help.html]]]]]
variable[index_html] assign[=] call[name[os].path.join, parameter[name[data_path], constant[index.html]]]
if call[name[os].path.isfile, parameter[name[index_html]]] begin[:]
return[name[index_html]]
variable[url] assign[=] call[constant[https://readthedocs.org/projects/ginga/downloads/htmlzip/{}/].format, parameter[name[rtd_version]]]
variable[local_path] assign[=] call[call[name[urllib].request.urlretrieve, parameter[name[url]]]][constant[0]]
with call[name[zipfile].ZipFile, parameter[name[local_path], constant[r]]] begin[:]
call[name[zf].extractall, parameter[name[data_path]]]
variable[subdir] assign[=] call[name[os].path.join, parameter[name[data_path], call[constant[ginga-{}].format, parameter[name[rtd_version]]]]]
for taget[name[s]] in starred[call[name[os].listdir, parameter[name[subdir]]]] begin[:]
variable[src] assign[=] call[name[os].path.join, parameter[name[subdir], name[s]]]
if call[name[os].path.isfile, parameter[name[src]]] begin[:]
call[name[shutil].copy, parameter[name[src], name[data_path]]]
call[name[shutil].rmtree, parameter[name[subdir]]]
if <ast.UnaryOp object at 0x7da20e955270> begin[:]
<ast.Raise object at 0x7da20e956bc0>
return[name[index_html]] | keyword[def] identifier[_download_rtd_zip] ( identifier[rtd_version] = keyword[None] ,** identifier[kwargs] ):
literal[string]
keyword[if] keyword[not] identifier[toolkit] . identifier[family] . identifier[startswith] ( literal[string] ):
keyword[raise] identifier[ValueError] ( literal[string]
literal[string] . identifier[format] ( identifier[toolkit] . identifier[family] ))
keyword[if] identifier[rtd_version] keyword[is] keyword[None] :
identifier[rtd_version] = identifier[_find_rtd_version] ()
identifier[data_path] = identifier[os] . identifier[path] . identifier[dirname] (
identifier[_find_pkg_data_path] ( literal[string] , identifier[package] = literal[string] ))
identifier[index_html] = identifier[os] . identifier[path] . identifier[join] ( identifier[data_path] , literal[string] )
keyword[if] identifier[os] . identifier[path] . identifier[isfile] ( identifier[index_html] ):
keyword[return] identifier[index_html]
identifier[url] =( literal[string]
literal[string] . identifier[format] ( identifier[rtd_version] ))
identifier[local_path] = identifier[urllib] . identifier[request] . identifier[urlretrieve] ( identifier[url] ,** identifier[kwargs] )[ literal[int] ]
keyword[with] identifier[zipfile] . identifier[ZipFile] ( identifier[local_path] , literal[string] ) keyword[as] identifier[zf] :
identifier[zf] . identifier[extractall] ( identifier[data_path] )
identifier[subdir] = identifier[os] . identifier[path] . identifier[join] ( identifier[data_path] , literal[string] . identifier[format] ( identifier[rtd_version] ))
keyword[for] identifier[s] keyword[in] identifier[os] . identifier[listdir] ( identifier[subdir] ):
identifier[src] = identifier[os] . identifier[path] . identifier[join] ( identifier[subdir] , identifier[s] )
keyword[if] identifier[os] . identifier[path] . identifier[isfile] ( identifier[src] ):
identifier[shutil] . identifier[copy] ( identifier[src] , identifier[data_path] )
keyword[else] :
identifier[shutil] . identifier[copytree] ( identifier[src] , identifier[os] . identifier[path] . identifier[join] ( identifier[data_path] , identifier[s] ))
identifier[shutil] . identifier[rmtree] ( identifier[subdir] )
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[isfile] ( identifier[index_html] ):
keyword[raise] identifier[OSError] (
literal[string] . identifier[format] ( identifier[index_html] ))
keyword[return] identifier[index_html] | def _download_rtd_zip(rtd_version=None, **kwargs):
"""
Download and extract HTML ZIP from RTD to installed doc data path.
Download is skipped if content already exists.
Parameters
----------
rtd_version : str or `None`
RTD version to download; e.g., "latest", "stable", or "v2.6.0".
If not given, download closest match to software version.
kwargs : dict
Keywords for ``urlretrieve()``.
Returns
-------
index_html : str
Path to local "index.html".
"""
# https://github.com/ejeschke/ginga/pull/451#issuecomment-298403134
if not toolkit.family.startswith('qt'):
raise ValueError('Downloaded documentation not compatible with {} UI toolkit browser'.format(toolkit.family)) # depends on [control=['if'], data=[]]
if rtd_version is None:
rtd_version = _find_rtd_version() # depends on [control=['if'], data=['rtd_version']]
data_path = os.path.dirname(_find_pkg_data_path('help.html', package='ginga.doc'))
index_html = os.path.join(data_path, 'index.html')
# There is a previous download of documentation; Do nothing.
# There is no check if downloaded version is outdated; The idea is that
# this folder would be empty again when installing new version.
if os.path.isfile(index_html):
return index_html # depends on [control=['if'], data=[]]
url = 'https://readthedocs.org/projects/ginga/downloads/htmlzip/{}/'.format(rtd_version)
local_path = urllib.request.urlretrieve(url, **kwargs)[0]
with zipfile.ZipFile(local_path, 'r') as zf:
zf.extractall(data_path) # depends on [control=['with'], data=['zf']]
# RTD makes an undesirable sub-directory, so move everything there
# up one level and delete it.
subdir = os.path.join(data_path, 'ginga-{}'.format(rtd_version))
for s in os.listdir(subdir):
src = os.path.join(subdir, s)
if os.path.isfile(src):
shutil.copy(src, data_path) # depends on [control=['if'], data=[]]
else: # directory
shutil.copytree(src, os.path.join(data_path, s)) # depends on [control=['for'], data=['s']]
shutil.rmtree(subdir)
if not os.path.isfile(index_html):
raise OSError('{} is missing; Ginga doc download failed'.format(index_html)) # depends on [control=['if'], data=[]]
return index_html |
def versionString(version):
"""Create version string.
For a sequence containing version information such as (2, 0, 0, 'pre'),
this returns a printable string such as '2.0pre'.
The micro version number is only excluded from the string if it is zero.
"""
ver = list(map(str, version))
numbers, rest = ver[:2 if ver[2] == '0' else 3], ver[3:]
return '.'.join(numbers) + '-'.join(rest) | def function[versionString, parameter[version]]:
constant[Create version string.
For a sequence containing version information such as (2, 0, 0, 'pre'),
this returns a printable string such as '2.0pre'.
The micro version number is only excluded from the string if it is zero.
]
variable[ver] assign[=] call[name[list], parameter[call[name[map], parameter[name[str], name[version]]]]]
<ast.Tuple object at 0x7da20c7c8580> assign[=] tuple[[<ast.Subscript object at 0x7da20c7c9000>, <ast.Subscript object at 0x7da207f02ce0>]]
return[binary_operation[call[constant[.].join, parameter[name[numbers]]] + call[constant[-].join, parameter[name[rest]]]]] | keyword[def] identifier[versionString] ( identifier[version] ):
literal[string]
identifier[ver] = identifier[list] ( identifier[map] ( identifier[str] , identifier[version] ))
identifier[numbers] , identifier[rest] = identifier[ver] [: literal[int] keyword[if] identifier[ver] [ literal[int] ]== literal[string] keyword[else] literal[int] ], identifier[ver] [ literal[int] :]
keyword[return] literal[string] . identifier[join] ( identifier[numbers] )+ literal[string] . identifier[join] ( identifier[rest] ) | def versionString(version):
"""Create version string.
For a sequence containing version information such as (2, 0, 0, 'pre'),
this returns a printable string such as '2.0pre'.
The micro version number is only excluded from the string if it is zero.
"""
ver = list(map(str, version))
(numbers, rest) = (ver[:2 if ver[2] == '0' else 3], ver[3:])
return '.'.join(numbers) + '-'.join(rest) |
def pcklof(filename):
"""
Load a binary PCK file for use by the readers. Return the
handle of the loaded file which is used by other PCK routines to
refer to the file.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/pcklof_c.html
:param filename: Name of the file to be loaded.
:type filename: str
:return: Loaded file's handle.
:rtype: int
"""
filename = stypes.stringToCharP(filename)
handle = ctypes.c_int()
libspice.pcklof_c(filename, ctypes.byref(handle))
return handle.value | def function[pcklof, parameter[filename]]:
constant[
Load a binary PCK file for use by the readers. Return the
handle of the loaded file which is used by other PCK routines to
refer to the file.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/pcklof_c.html
:param filename: Name of the file to be loaded.
:type filename: str
:return: Loaded file's handle.
:rtype: int
]
variable[filename] assign[=] call[name[stypes].stringToCharP, parameter[name[filename]]]
variable[handle] assign[=] call[name[ctypes].c_int, parameter[]]
call[name[libspice].pcklof_c, parameter[name[filename], call[name[ctypes].byref, parameter[name[handle]]]]]
return[name[handle].value] | keyword[def] identifier[pcklof] ( identifier[filename] ):
literal[string]
identifier[filename] = identifier[stypes] . identifier[stringToCharP] ( identifier[filename] )
identifier[handle] = identifier[ctypes] . identifier[c_int] ()
identifier[libspice] . identifier[pcklof_c] ( identifier[filename] , identifier[ctypes] . identifier[byref] ( identifier[handle] ))
keyword[return] identifier[handle] . identifier[value] | def pcklof(filename):
"""
Load a binary PCK file for use by the readers. Return the
handle of the loaded file which is used by other PCK routines to
refer to the file.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/pcklof_c.html
:param filename: Name of the file to be loaded.
:type filename: str
:return: Loaded file's handle.
:rtype: int
"""
filename = stypes.stringToCharP(filename)
handle = ctypes.c_int()
libspice.pcklof_c(filename, ctypes.byref(handle))
return handle.value |
def remove_port_profile_to_delete(self, profile_name, device_id):
"""Removes port profile to be deleted from table."""
with self.session.begin(subtransactions=True):
self.session.query(ucsm_model.PortProfileDelete).filter_by(
profile_id=profile_name, device_id=device_id).delete() | def function[remove_port_profile_to_delete, parameter[self, profile_name, device_id]]:
constant[Removes port profile to be deleted from table.]
with call[name[self].session.begin, parameter[]] begin[:]
call[call[call[name[self].session.query, parameter[name[ucsm_model].PortProfileDelete]].filter_by, parameter[]].delete, parameter[]] | keyword[def] identifier[remove_port_profile_to_delete] ( identifier[self] , identifier[profile_name] , identifier[device_id] ):
literal[string]
keyword[with] identifier[self] . identifier[session] . identifier[begin] ( identifier[subtransactions] = keyword[True] ):
identifier[self] . identifier[session] . identifier[query] ( identifier[ucsm_model] . identifier[PortProfileDelete] ). identifier[filter_by] (
identifier[profile_id] = identifier[profile_name] , identifier[device_id] = identifier[device_id] ). identifier[delete] () | def remove_port_profile_to_delete(self, profile_name, device_id):
"""Removes port profile to be deleted from table."""
with self.session.begin(subtransactions=True):
self.session.query(ucsm_model.PortProfileDelete).filter_by(profile_id=profile_name, device_id=device_id).delete() # depends on [control=['with'], data=[]] |
def save_minions(jid, minions, syndic_id=None):
'''
Save/update the serialized list of minions for a given job
'''
# Ensure we have a list for Python 3 compatability
minions = list(minions)
log.debug(
'Adding minions for job %s%s: %s',
jid,
' from syndic master \'{0}\''.format(syndic_id) if syndic_id else '',
minions
)
serial = salt.payload.Serial(__opts__)
jid_dir = salt.utils.jid.jid_dir(jid, _job_dir(), __opts__['hash_type'])
try:
if not os.path.exists(jid_dir):
os.makedirs(jid_dir)
except OSError as exc:
if exc.errno == errno.EEXIST:
# rarely, the directory can be already concurrently created between
# the os.path.exists and the os.makedirs lines above
pass
else:
raise
if syndic_id is not None:
minions_path = os.path.join(
jid_dir,
SYNDIC_MINIONS_P.format(syndic_id)
)
else:
minions_path = os.path.join(jid_dir, MINIONS_P)
try:
if not os.path.exists(jid_dir):
try:
os.makedirs(jid_dir)
except OSError:
pass
with salt.utils.files.fopen(minions_path, 'w+b') as wfh:
serial.dump(minions, wfh)
except IOError as exc:
log.error(
'Failed to write minion list %s to job cache file %s: %s',
minions, minions_path, exc
) | def function[save_minions, parameter[jid, minions, syndic_id]]:
constant[
Save/update the serialized list of minions for a given job
]
variable[minions] assign[=] call[name[list], parameter[name[minions]]]
call[name[log].debug, parameter[constant[Adding minions for job %s%s: %s], name[jid], <ast.IfExp object at 0x7da1b2344970>, name[minions]]]
variable[serial] assign[=] call[name[salt].payload.Serial, parameter[name[__opts__]]]
variable[jid_dir] assign[=] call[name[salt].utils.jid.jid_dir, parameter[name[jid], call[name[_job_dir], parameter[]], call[name[__opts__]][constant[hash_type]]]]
<ast.Try object at 0x7da1b2345240>
if compare[name[syndic_id] is_not constant[None]] begin[:]
variable[minions_path] assign[=] call[name[os].path.join, parameter[name[jid_dir], call[name[SYNDIC_MINIONS_P].format, parameter[name[syndic_id]]]]]
<ast.Try object at 0x7da1b23447f0> | keyword[def] identifier[save_minions] ( identifier[jid] , identifier[minions] , identifier[syndic_id] = keyword[None] ):
literal[string]
identifier[minions] = identifier[list] ( identifier[minions] )
identifier[log] . identifier[debug] (
literal[string] ,
identifier[jid] ,
literal[string] . identifier[format] ( identifier[syndic_id] ) keyword[if] identifier[syndic_id] keyword[else] literal[string] ,
identifier[minions]
)
identifier[serial] = identifier[salt] . identifier[payload] . identifier[Serial] ( identifier[__opts__] )
identifier[jid_dir] = identifier[salt] . identifier[utils] . identifier[jid] . identifier[jid_dir] ( identifier[jid] , identifier[_job_dir] (), identifier[__opts__] [ literal[string] ])
keyword[try] :
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[jid_dir] ):
identifier[os] . identifier[makedirs] ( identifier[jid_dir] )
keyword[except] identifier[OSError] keyword[as] identifier[exc] :
keyword[if] identifier[exc] . identifier[errno] == identifier[errno] . identifier[EEXIST] :
keyword[pass]
keyword[else] :
keyword[raise]
keyword[if] identifier[syndic_id] keyword[is] keyword[not] keyword[None] :
identifier[minions_path] = identifier[os] . identifier[path] . identifier[join] (
identifier[jid_dir] ,
identifier[SYNDIC_MINIONS_P] . identifier[format] ( identifier[syndic_id] )
)
keyword[else] :
identifier[minions_path] = identifier[os] . identifier[path] . identifier[join] ( identifier[jid_dir] , identifier[MINIONS_P] )
keyword[try] :
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[jid_dir] ):
keyword[try] :
identifier[os] . identifier[makedirs] ( identifier[jid_dir] )
keyword[except] identifier[OSError] :
keyword[pass]
keyword[with] identifier[salt] . identifier[utils] . identifier[files] . identifier[fopen] ( identifier[minions_path] , literal[string] ) keyword[as] identifier[wfh] :
identifier[serial] . identifier[dump] ( identifier[minions] , identifier[wfh] )
keyword[except] identifier[IOError] keyword[as] identifier[exc] :
identifier[log] . identifier[error] (
literal[string] ,
identifier[minions] , identifier[minions_path] , identifier[exc]
) | def save_minions(jid, minions, syndic_id=None):
"""
Save/update the serialized list of minions for a given job
"""
# Ensure we have a list for Python 3 compatability
minions = list(minions)
log.debug('Adding minions for job %s%s: %s', jid, " from syndic master '{0}'".format(syndic_id) if syndic_id else '', minions)
serial = salt.payload.Serial(__opts__)
jid_dir = salt.utils.jid.jid_dir(jid, _job_dir(), __opts__['hash_type'])
try:
if not os.path.exists(jid_dir):
os.makedirs(jid_dir) # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except OSError as exc:
if exc.errno == errno.EEXIST:
# rarely, the directory can be already concurrently created between
# the os.path.exists and the os.makedirs lines above
pass # depends on [control=['if'], data=[]]
else:
raise # depends on [control=['except'], data=['exc']]
if syndic_id is not None:
minions_path = os.path.join(jid_dir, SYNDIC_MINIONS_P.format(syndic_id)) # depends on [control=['if'], data=['syndic_id']]
else:
minions_path = os.path.join(jid_dir, MINIONS_P)
try:
if not os.path.exists(jid_dir):
try:
os.makedirs(jid_dir) # depends on [control=['try'], data=[]]
except OSError:
pass # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
with salt.utils.files.fopen(minions_path, 'w+b') as wfh:
serial.dump(minions, wfh) # depends on [control=['with'], data=['wfh']] # depends on [control=['try'], data=[]]
except IOError as exc:
log.error('Failed to write minion list %s to job cache file %s: %s', minions, minions_path, exc) # depends on [control=['except'], data=['exc']] |
def _from_dict(cls, _dict):
"""Initialize a Grammars object from a json dictionary."""
args = {}
if 'grammars' in _dict:
args['grammars'] = [
Grammar._from_dict(x) for x in (_dict.get('grammars'))
]
else:
raise ValueError(
'Required property \'grammars\' not present in Grammars JSON')
return cls(**args) | def function[_from_dict, parameter[cls, _dict]]:
constant[Initialize a Grammars object from a json dictionary.]
variable[args] assign[=] dictionary[[], []]
if compare[constant[grammars] in name[_dict]] begin[:]
call[name[args]][constant[grammars]] assign[=] <ast.ListComp object at 0x7da1b2345780>
return[call[name[cls], parameter[]]] | keyword[def] identifier[_from_dict] ( identifier[cls] , identifier[_dict] ):
literal[string]
identifier[args] ={}
keyword[if] literal[string] keyword[in] identifier[_dict] :
identifier[args] [ literal[string] ]=[
identifier[Grammar] . identifier[_from_dict] ( identifier[x] ) keyword[for] identifier[x] keyword[in] ( identifier[_dict] . identifier[get] ( literal[string] ))
]
keyword[else] :
keyword[raise] identifier[ValueError] (
literal[string] )
keyword[return] identifier[cls] (** identifier[args] ) | def _from_dict(cls, _dict):
"""Initialize a Grammars object from a json dictionary."""
args = {}
if 'grammars' in _dict:
args['grammars'] = [Grammar._from_dict(x) for x in _dict.get('grammars')] # depends on [control=['if'], data=['_dict']]
else:
raise ValueError("Required property 'grammars' not present in Grammars JSON")
return cls(**args) |
def cms_check(migrate_cmd=False):
"""
Runs the django CMS ``cms check`` command
"""
from django.core.management import call_command
try:
import cms # NOQA # nopyflakes
_create_db(migrate_cmd)
call_command('cms', 'check')
except ImportError:
print('cms_check available only if django CMS is installed') | def function[cms_check, parameter[migrate_cmd]]:
constant[
Runs the django CMS ``cms check`` command
]
from relative_module[django.core.management] import module[call_command]
<ast.Try object at 0x7da18f00e0e0> | keyword[def] identifier[cms_check] ( identifier[migrate_cmd] = keyword[False] ):
literal[string]
keyword[from] identifier[django] . identifier[core] . identifier[management] keyword[import] identifier[call_command]
keyword[try] :
keyword[import] identifier[cms]
identifier[_create_db] ( identifier[migrate_cmd] )
identifier[call_command] ( literal[string] , literal[string] )
keyword[except] identifier[ImportError] :
identifier[print] ( literal[string] ) | def cms_check(migrate_cmd=False):
"""
Runs the django CMS ``cms check`` command
"""
from django.core.management import call_command
try:
import cms # NOQA # nopyflakes
_create_db(migrate_cmd)
call_command('cms', 'check') # depends on [control=['try'], data=[]]
except ImportError:
print('cms_check available only if django CMS is installed') # depends on [control=['except'], data=[]] |
def sim(self, src, tar, qval=2, alpha=1, beta=1, bias=None):
"""Return the Tversky index of two strings.
Parameters
----------
src : str
Source string (or QGrams/Counter objects) for comparison
tar : str
Target string (or QGrams/Counter objects) for comparison
qval : int
The length of each q-gram; 0 for non-q-gram version
alpha : float
Tversky index parameter as described above
beta : float
Tversky index parameter as described above
bias : float
The symmetric Tversky index bias parameter
Returns
-------
float
Tversky similarity
Raises
------
ValueError
Unsupported weight assignment; alpha and beta must be greater than
or equal to 0.
Examples
--------
>>> cmp = Tversky()
>>> cmp.sim('cat', 'hat')
0.3333333333333333
>>> cmp.sim('Niall', 'Neil')
0.2222222222222222
>>> cmp.sim('aluminum', 'Catalan')
0.0625
>>> cmp.sim('ATCG', 'TAGC')
0.0
"""
if alpha < 0 or beta < 0:
raise ValueError(
'Unsupported weight assignment; alpha and beta '
+ 'must be greater than or equal to 0.'
)
if src == tar:
return 1.0
elif not src or not tar:
return 0.0
q_src, q_tar = self._get_qgrams(src, tar, qval)
q_src_mag = sum(q_src.values())
q_tar_mag = sum(q_tar.values())
q_intersection_mag = sum((q_src & q_tar).values())
if not q_src or not q_tar:
return 0.0
if bias is None:
return q_intersection_mag / (
q_intersection_mag
+ alpha * (q_src_mag - q_intersection_mag)
+ beta * (q_tar_mag - q_intersection_mag)
)
a_val = min(
q_src_mag - q_intersection_mag, q_tar_mag - q_intersection_mag
)
b_val = max(
q_src_mag - q_intersection_mag, q_tar_mag - q_intersection_mag
)
c_val = q_intersection_mag + bias
return c_val / (beta * (alpha * a_val + (1 - alpha) * b_val) + c_val) | def function[sim, parameter[self, src, tar, qval, alpha, beta, bias]]:
constant[Return the Tversky index of two strings.
Parameters
----------
src : str
Source string (or QGrams/Counter objects) for comparison
tar : str
Target string (or QGrams/Counter objects) for comparison
qval : int
The length of each q-gram; 0 for non-q-gram version
alpha : float
Tversky index parameter as described above
beta : float
Tversky index parameter as described above
bias : float
The symmetric Tversky index bias parameter
Returns
-------
float
Tversky similarity
Raises
------
ValueError
Unsupported weight assignment; alpha and beta must be greater than
or equal to 0.
Examples
--------
>>> cmp = Tversky()
>>> cmp.sim('cat', 'hat')
0.3333333333333333
>>> cmp.sim('Niall', 'Neil')
0.2222222222222222
>>> cmp.sim('aluminum', 'Catalan')
0.0625
>>> cmp.sim('ATCG', 'TAGC')
0.0
]
if <ast.BoolOp object at 0x7da20c7c91e0> begin[:]
<ast.Raise object at 0x7da20c7c95d0>
if compare[name[src] equal[==] name[tar]] begin[:]
return[constant[1.0]]
<ast.Tuple object at 0x7da20c7cbb20> assign[=] call[name[self]._get_qgrams, parameter[name[src], name[tar], name[qval]]]
variable[q_src_mag] assign[=] call[name[sum], parameter[call[name[q_src].values, parameter[]]]]
variable[q_tar_mag] assign[=] call[name[sum], parameter[call[name[q_tar].values, parameter[]]]]
variable[q_intersection_mag] assign[=] call[name[sum], parameter[call[binary_operation[name[q_src] <ast.BitAnd object at 0x7da2590d6b60> name[q_tar]].values, parameter[]]]]
if <ast.BoolOp object at 0x7da20c7c9de0> begin[:]
return[constant[0.0]]
if compare[name[bias] is constant[None]] begin[:]
return[binary_operation[name[q_intersection_mag] / binary_operation[binary_operation[name[q_intersection_mag] + binary_operation[name[alpha] * binary_operation[name[q_src_mag] - name[q_intersection_mag]]]] + binary_operation[name[beta] * binary_operation[name[q_tar_mag] - name[q_intersection_mag]]]]]]
variable[a_val] assign[=] call[name[min], parameter[binary_operation[name[q_src_mag] - name[q_intersection_mag]], binary_operation[name[q_tar_mag] - name[q_intersection_mag]]]]
variable[b_val] assign[=] call[name[max], parameter[binary_operation[name[q_src_mag] - name[q_intersection_mag]], binary_operation[name[q_tar_mag] - name[q_intersection_mag]]]]
variable[c_val] assign[=] binary_operation[name[q_intersection_mag] + name[bias]]
return[binary_operation[name[c_val] / binary_operation[binary_operation[name[beta] * binary_operation[binary_operation[name[alpha] * name[a_val]] + binary_operation[binary_operation[constant[1] - name[alpha]] * name[b_val]]]] + name[c_val]]]] | keyword[def] identifier[sim] ( identifier[self] , identifier[src] , identifier[tar] , identifier[qval] = literal[int] , identifier[alpha] = literal[int] , identifier[beta] = literal[int] , identifier[bias] = keyword[None] ):
literal[string]
keyword[if] identifier[alpha] < literal[int] keyword[or] identifier[beta] < literal[int] :
keyword[raise] identifier[ValueError] (
literal[string]
+ literal[string]
)
keyword[if] identifier[src] == identifier[tar] :
keyword[return] literal[int]
keyword[elif] keyword[not] identifier[src] keyword[or] keyword[not] identifier[tar] :
keyword[return] literal[int]
identifier[q_src] , identifier[q_tar] = identifier[self] . identifier[_get_qgrams] ( identifier[src] , identifier[tar] , identifier[qval] )
identifier[q_src_mag] = identifier[sum] ( identifier[q_src] . identifier[values] ())
identifier[q_tar_mag] = identifier[sum] ( identifier[q_tar] . identifier[values] ())
identifier[q_intersection_mag] = identifier[sum] (( identifier[q_src] & identifier[q_tar] ). identifier[values] ())
keyword[if] keyword[not] identifier[q_src] keyword[or] keyword[not] identifier[q_tar] :
keyword[return] literal[int]
keyword[if] identifier[bias] keyword[is] keyword[None] :
keyword[return] identifier[q_intersection_mag] /(
identifier[q_intersection_mag]
+ identifier[alpha] *( identifier[q_src_mag] - identifier[q_intersection_mag] )
+ identifier[beta] *( identifier[q_tar_mag] - identifier[q_intersection_mag] )
)
identifier[a_val] = identifier[min] (
identifier[q_src_mag] - identifier[q_intersection_mag] , identifier[q_tar_mag] - identifier[q_intersection_mag]
)
identifier[b_val] = identifier[max] (
identifier[q_src_mag] - identifier[q_intersection_mag] , identifier[q_tar_mag] - identifier[q_intersection_mag]
)
identifier[c_val] = identifier[q_intersection_mag] + identifier[bias]
keyword[return] identifier[c_val] /( identifier[beta] *( identifier[alpha] * identifier[a_val] +( literal[int] - identifier[alpha] )* identifier[b_val] )+ identifier[c_val] ) | def sim(self, src, tar, qval=2, alpha=1, beta=1, bias=None):
"""Return the Tversky index of two strings.
Parameters
----------
src : str
Source string (or QGrams/Counter objects) for comparison
tar : str
Target string (or QGrams/Counter objects) for comparison
qval : int
The length of each q-gram; 0 for non-q-gram version
alpha : float
Tversky index parameter as described above
beta : float
Tversky index parameter as described above
bias : float
The symmetric Tversky index bias parameter
Returns
-------
float
Tversky similarity
Raises
------
ValueError
Unsupported weight assignment; alpha and beta must be greater than
or equal to 0.
Examples
--------
>>> cmp = Tversky()
>>> cmp.sim('cat', 'hat')
0.3333333333333333
>>> cmp.sim('Niall', 'Neil')
0.2222222222222222
>>> cmp.sim('aluminum', 'Catalan')
0.0625
>>> cmp.sim('ATCG', 'TAGC')
0.0
"""
if alpha < 0 or beta < 0:
raise ValueError('Unsupported weight assignment; alpha and beta ' + 'must be greater than or equal to 0.') # depends on [control=['if'], data=[]]
if src == tar:
return 1.0 # depends on [control=['if'], data=[]]
elif not src or not tar:
return 0.0 # depends on [control=['if'], data=[]]
(q_src, q_tar) = self._get_qgrams(src, tar, qval)
q_src_mag = sum(q_src.values())
q_tar_mag = sum(q_tar.values())
q_intersection_mag = sum((q_src & q_tar).values())
if not q_src or not q_tar:
return 0.0 # depends on [control=['if'], data=[]]
if bias is None:
return q_intersection_mag / (q_intersection_mag + alpha * (q_src_mag - q_intersection_mag) + beta * (q_tar_mag - q_intersection_mag)) # depends on [control=['if'], data=[]]
a_val = min(q_src_mag - q_intersection_mag, q_tar_mag - q_intersection_mag)
b_val = max(q_src_mag - q_intersection_mag, q_tar_mag - q_intersection_mag)
c_val = q_intersection_mag + bias
return c_val / (beta * (alpha * a_val + (1 - alpha) * b_val) + c_val) |
def _update_process_stats(self):
"""Updates the process stats with the information from the processes
This method is called at the end of each static parsing of the nextflow
trace file. It re-populates the :attr:`process_stats` dictionary
with the new stat metrics.
"""
good_status = ["COMPLETED", "CACHED"]
for process, vals in self.trace_info.items():
# Update submission status of tags for each process
vals = self._update_tag_status(process, vals)
# Update process resources
self._update_process_resources(process, vals)
self.process_stats[process] = {}
inst = self.process_stats[process]
# Get number of completed samples
inst["completed"] = "{}".format(
len([x for x in vals if x["status"] in good_status]))
# Get average time
try:
time_array = [self._hms(x["realtime"]) for x in vals]
mean_time = round(sum(time_array) / len(time_array), 1)
mean_time_str = strftime('%H:%M:%S', gmtime(mean_time))
inst["realtime"] = mean_time_str
# When the realtime column is not present
except KeyError:
inst["realtime"] = "-"
# Get cumulative cpu/hours
try:
cpu_hours = [self._cpu_load_parser(
x["cpus"], x["%cpu"], x["realtime"]) for x in vals]
inst["cpuhour"] = round(sum(cpu_hours), 2)
# When the realtime, cpus or %cpus column are not present
except KeyError:
inst["cpuhour"] = "-"
# Assess resource warnings
inst["cpu_warnings"], inst["mem_warnings"] = \
self._assess_resource_warnings(process, vals)
# Get maximum memory
try:
rss_values = [self._size_coverter(x["rss"]) for x in vals
if x["rss"] != "-"]
if rss_values:
max_rss = round(max(rss_values))
rss_str = self._size_compress(max_rss)
else:
rss_str = "-"
inst["maxmem"] = rss_str
except KeyError:
inst["maxmem"] = "-"
# Get read size
try:
rchar_values = [self._size_coverter(x["rchar"]) for x in vals
if x["rchar"] != "-"]
if rchar_values:
avg_rchar = round(sum(rchar_values) / len(rchar_values))
rchar_str = self._size_compress(avg_rchar)
else:
rchar_str = "-"
except KeyError:
rchar_str = "-"
inst["avgread"] = rchar_str
# Get write size
try:
wchar_values = [self._size_coverter(x["wchar"]) for x in vals
if x["wchar"] != "-"]
if wchar_values:
avg_wchar = round(sum(wchar_values) / len(wchar_values))
wchar_str = self._size_compress(avg_wchar)
else:
wchar_str = "-"
except KeyError:
wchar_str = "-"
inst["avgwrite"] = wchar_str | def function[_update_process_stats, parameter[self]]:
constant[Updates the process stats with the information from the processes
This method is called at the end of each static parsing of the nextflow
trace file. It re-populates the :attr:`process_stats` dictionary
with the new stat metrics.
]
variable[good_status] assign[=] list[[<ast.Constant object at 0x7da1b0552b60>, <ast.Constant object at 0x7da1b0553b20>]]
for taget[tuple[[<ast.Name object at 0x7da1b0553760>, <ast.Name object at 0x7da1b0550bb0>]]] in starred[call[name[self].trace_info.items, parameter[]]] begin[:]
variable[vals] assign[=] call[name[self]._update_tag_status, parameter[name[process], name[vals]]]
call[name[self]._update_process_resources, parameter[name[process], name[vals]]]
call[name[self].process_stats][name[process]] assign[=] dictionary[[], []]
variable[inst] assign[=] call[name[self].process_stats][name[process]]
call[name[inst]][constant[completed]] assign[=] call[constant[{}].format, parameter[call[name[len], parameter[<ast.ListComp object at 0x7da1b03fb370>]]]]
<ast.Try object at 0x7da1b03f93f0>
<ast.Try object at 0x7da1b02bb040>
<ast.Tuple object at 0x7da1b03f8cd0> assign[=] call[name[self]._assess_resource_warnings, parameter[name[process], name[vals]]]
<ast.Try object at 0x7da1b03f86d0>
<ast.Try object at 0x7da1b03fb100>
call[name[inst]][constant[avgread]] assign[=] name[rchar_str]
<ast.Try object at 0x7da1b03f8760>
call[name[inst]][constant[avgwrite]] assign[=] name[wchar_str] | keyword[def] identifier[_update_process_stats] ( identifier[self] ):
literal[string]
identifier[good_status] =[ literal[string] , literal[string] ]
keyword[for] identifier[process] , identifier[vals] keyword[in] identifier[self] . identifier[trace_info] . identifier[items] ():
identifier[vals] = identifier[self] . identifier[_update_tag_status] ( identifier[process] , identifier[vals] )
identifier[self] . identifier[_update_process_resources] ( identifier[process] , identifier[vals] )
identifier[self] . identifier[process_stats] [ identifier[process] ]={}
identifier[inst] = identifier[self] . identifier[process_stats] [ identifier[process] ]
identifier[inst] [ literal[string] ]= literal[string] . identifier[format] (
identifier[len] ([ identifier[x] keyword[for] identifier[x] keyword[in] identifier[vals] keyword[if] identifier[x] [ literal[string] ] keyword[in] identifier[good_status] ]))
keyword[try] :
identifier[time_array] =[ identifier[self] . identifier[_hms] ( identifier[x] [ literal[string] ]) keyword[for] identifier[x] keyword[in] identifier[vals] ]
identifier[mean_time] = identifier[round] ( identifier[sum] ( identifier[time_array] )/ identifier[len] ( identifier[time_array] ), literal[int] )
identifier[mean_time_str] = identifier[strftime] ( literal[string] , identifier[gmtime] ( identifier[mean_time] ))
identifier[inst] [ literal[string] ]= identifier[mean_time_str]
keyword[except] identifier[KeyError] :
identifier[inst] [ literal[string] ]= literal[string]
keyword[try] :
identifier[cpu_hours] =[ identifier[self] . identifier[_cpu_load_parser] (
identifier[x] [ literal[string] ], identifier[x] [ literal[string] ], identifier[x] [ literal[string] ]) keyword[for] identifier[x] keyword[in] identifier[vals] ]
identifier[inst] [ literal[string] ]= identifier[round] ( identifier[sum] ( identifier[cpu_hours] ), literal[int] )
keyword[except] identifier[KeyError] :
identifier[inst] [ literal[string] ]= literal[string]
identifier[inst] [ literal[string] ], identifier[inst] [ literal[string] ]= identifier[self] . identifier[_assess_resource_warnings] ( identifier[process] , identifier[vals] )
keyword[try] :
identifier[rss_values] =[ identifier[self] . identifier[_size_coverter] ( identifier[x] [ literal[string] ]) keyword[for] identifier[x] keyword[in] identifier[vals]
keyword[if] identifier[x] [ literal[string] ]!= literal[string] ]
keyword[if] identifier[rss_values] :
identifier[max_rss] = identifier[round] ( identifier[max] ( identifier[rss_values] ))
identifier[rss_str] = identifier[self] . identifier[_size_compress] ( identifier[max_rss] )
keyword[else] :
identifier[rss_str] = literal[string]
identifier[inst] [ literal[string] ]= identifier[rss_str]
keyword[except] identifier[KeyError] :
identifier[inst] [ literal[string] ]= literal[string]
keyword[try] :
identifier[rchar_values] =[ identifier[self] . identifier[_size_coverter] ( identifier[x] [ literal[string] ]) keyword[for] identifier[x] keyword[in] identifier[vals]
keyword[if] identifier[x] [ literal[string] ]!= literal[string] ]
keyword[if] identifier[rchar_values] :
identifier[avg_rchar] = identifier[round] ( identifier[sum] ( identifier[rchar_values] )/ identifier[len] ( identifier[rchar_values] ))
identifier[rchar_str] = identifier[self] . identifier[_size_compress] ( identifier[avg_rchar] )
keyword[else] :
identifier[rchar_str] = literal[string]
keyword[except] identifier[KeyError] :
identifier[rchar_str] = literal[string]
identifier[inst] [ literal[string] ]= identifier[rchar_str]
keyword[try] :
identifier[wchar_values] =[ identifier[self] . identifier[_size_coverter] ( identifier[x] [ literal[string] ]) keyword[for] identifier[x] keyword[in] identifier[vals]
keyword[if] identifier[x] [ literal[string] ]!= literal[string] ]
keyword[if] identifier[wchar_values] :
identifier[avg_wchar] = identifier[round] ( identifier[sum] ( identifier[wchar_values] )/ identifier[len] ( identifier[wchar_values] ))
identifier[wchar_str] = identifier[self] . identifier[_size_compress] ( identifier[avg_wchar] )
keyword[else] :
identifier[wchar_str] = literal[string]
keyword[except] identifier[KeyError] :
identifier[wchar_str] = literal[string]
identifier[inst] [ literal[string] ]= identifier[wchar_str] | def _update_process_stats(self):
"""Updates the process stats with the information from the processes
This method is called at the end of each static parsing of the nextflow
trace file. It re-populates the :attr:`process_stats` dictionary
with the new stat metrics.
"""
good_status = ['COMPLETED', 'CACHED']
for (process, vals) in self.trace_info.items():
# Update submission status of tags for each process
vals = self._update_tag_status(process, vals)
# Update process resources
self._update_process_resources(process, vals)
self.process_stats[process] = {}
inst = self.process_stats[process]
# Get number of completed samples
inst['completed'] = '{}'.format(len([x for x in vals if x['status'] in good_status]))
# Get average time
try:
time_array = [self._hms(x['realtime']) for x in vals]
mean_time = round(sum(time_array) / len(time_array), 1)
mean_time_str = strftime('%H:%M:%S', gmtime(mean_time))
inst['realtime'] = mean_time_str # depends on [control=['try'], data=[]]
# When the realtime column is not present
except KeyError:
inst['realtime'] = '-' # depends on [control=['except'], data=[]]
# Get cumulative cpu/hours
try:
cpu_hours = [self._cpu_load_parser(x['cpus'], x['%cpu'], x['realtime']) for x in vals]
inst['cpuhour'] = round(sum(cpu_hours), 2) # depends on [control=['try'], data=[]]
# When the realtime, cpus or %cpus column are not present
except KeyError:
inst['cpuhour'] = '-' # depends on [control=['except'], data=[]]
# Assess resource warnings
(inst['cpu_warnings'], inst['mem_warnings']) = self._assess_resource_warnings(process, vals)
# Get maximum memory
try:
rss_values = [self._size_coverter(x['rss']) for x in vals if x['rss'] != '-']
if rss_values:
max_rss = round(max(rss_values))
rss_str = self._size_compress(max_rss) # depends on [control=['if'], data=[]]
else:
rss_str = '-'
inst['maxmem'] = rss_str # depends on [control=['try'], data=[]]
except KeyError:
inst['maxmem'] = '-' # depends on [control=['except'], data=[]]
# Get read size
try:
rchar_values = [self._size_coverter(x['rchar']) for x in vals if x['rchar'] != '-']
if rchar_values:
avg_rchar = round(sum(rchar_values) / len(rchar_values))
rchar_str = self._size_compress(avg_rchar) # depends on [control=['if'], data=[]]
else:
rchar_str = '-' # depends on [control=['try'], data=[]]
except KeyError:
rchar_str = '-' # depends on [control=['except'], data=[]]
inst['avgread'] = rchar_str
# Get write size
try:
wchar_values = [self._size_coverter(x['wchar']) for x in vals if x['wchar'] != '-']
if wchar_values:
avg_wchar = round(sum(wchar_values) / len(wchar_values))
wchar_str = self._size_compress(avg_wchar) # depends on [control=['if'], data=[]]
else:
wchar_str = '-' # depends on [control=['try'], data=[]]
except KeyError:
wchar_str = '-' # depends on [control=['except'], data=[]]
inst['avgwrite'] = wchar_str # depends on [control=['for'], data=[]] |
def safe_uriref(text):
""" Escape a URL properly. """
url_ = url.parse(text).sanitize().deuserinfo().canonical()
return URIRef(url_.punycode().unicode()) | def function[safe_uriref, parameter[text]]:
constant[ Escape a URL properly. ]
variable[url_] assign[=] call[call[call[call[name[url].parse, parameter[name[text]]].sanitize, parameter[]].deuserinfo, parameter[]].canonical, parameter[]]
return[call[name[URIRef], parameter[call[call[name[url_].punycode, parameter[]].unicode, parameter[]]]]] | keyword[def] identifier[safe_uriref] ( identifier[text] ):
literal[string]
identifier[url_] = identifier[url] . identifier[parse] ( identifier[text] ). identifier[sanitize] (). identifier[deuserinfo] (). identifier[canonical] ()
keyword[return] identifier[URIRef] ( identifier[url_] . identifier[punycode] (). identifier[unicode] ()) | def safe_uriref(text):
""" Escape a URL properly. """
url_ = url.parse(text).sanitize().deuserinfo().canonical()
return URIRef(url_.punycode().unicode()) |
def _find_file(self, needle, candidates):
"""Find the first directory containing a given candidate file."""
for candidate in candidates:
fullpath = os.path.join(candidate, needle)
if os.path.isfile(fullpath):
return fullpath
raise PathError("Unable to locate file %s; tried %s" % (needle, candidates)) | def function[_find_file, parameter[self, needle, candidates]]:
constant[Find the first directory containing a given candidate file.]
for taget[name[candidate]] in starred[name[candidates]] begin[:]
variable[fullpath] assign[=] call[name[os].path.join, parameter[name[candidate], name[needle]]]
if call[name[os].path.isfile, parameter[name[fullpath]]] begin[:]
return[name[fullpath]]
<ast.Raise object at 0x7da1b1528dc0> | keyword[def] identifier[_find_file] ( identifier[self] , identifier[needle] , identifier[candidates] ):
literal[string]
keyword[for] identifier[candidate] keyword[in] identifier[candidates] :
identifier[fullpath] = identifier[os] . identifier[path] . identifier[join] ( identifier[candidate] , identifier[needle] )
keyword[if] identifier[os] . identifier[path] . identifier[isfile] ( identifier[fullpath] ):
keyword[return] identifier[fullpath]
keyword[raise] identifier[PathError] ( literal[string] %( identifier[needle] , identifier[candidates] )) | def _find_file(self, needle, candidates):
"""Find the first directory containing a given candidate file."""
for candidate in candidates:
fullpath = os.path.join(candidate, needle)
if os.path.isfile(fullpath):
return fullpath # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['candidate']]
raise PathError('Unable to locate file %s; tried %s' % (needle, candidates)) |
def show_activity_count(date=None):
"""
Simple filter to get activity count for a given day.
Defaults to today.
"""
if not date:
today = datetime.datetime.now() - datetime.timedelta(hours = 24)
return Activity.objects.filter(timestamp__gte=today).count()
return Activity.objects.filter(timestamp__gte=date).count() | def function[show_activity_count, parameter[date]]:
constant[
Simple filter to get activity count for a given day.
Defaults to today.
]
if <ast.UnaryOp object at 0x7da1b287dc60> begin[:]
variable[today] assign[=] binary_operation[call[name[datetime].datetime.now, parameter[]] - call[name[datetime].timedelta, parameter[]]]
return[call[call[name[Activity].objects.filter, parameter[]].count, parameter[]]]
return[call[call[name[Activity].objects.filter, parameter[]].count, parameter[]]] | keyword[def] identifier[show_activity_count] ( identifier[date] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[date] :
identifier[today] = identifier[datetime] . identifier[datetime] . identifier[now] ()- identifier[datetime] . identifier[timedelta] ( identifier[hours] = literal[int] )
keyword[return] identifier[Activity] . identifier[objects] . identifier[filter] ( identifier[timestamp__gte] = identifier[today] ). identifier[count] ()
keyword[return] identifier[Activity] . identifier[objects] . identifier[filter] ( identifier[timestamp__gte] = identifier[date] ). identifier[count] () | def show_activity_count(date=None):
"""
Simple filter to get activity count for a given day.
Defaults to today.
"""
if not date:
today = datetime.datetime.now() - datetime.timedelta(hours=24)
return Activity.objects.filter(timestamp__gte=today).count() # depends on [control=['if'], data=[]]
return Activity.objects.filter(timestamp__gte=date).count() |
def makeB(self, buses=None, branches=None, method="XB"):
""" Based on makeB.m from MATPOWER by Ray Zimmerman, developed at
PSERC Cornell. See U{http://www.pserc.cornell.edu/matpower/} for more
information.
@param method: Specify "XB" or "BX" method.
@type method: string
@rtype: tuple
@return: Two matrices, B prime and B double prime, used in the fast
decoupled power flow solver.
"""
buses = self.connected_buses if buses is None else buses
branches = self.online_branches if branches is None else branches
B_buses = copy.deepcopy(buses) # modify bus copies
Bp_branches = copy.deepcopy(branches) # modify branch copies
Bpp_branches = copy.deepcopy(branches)
for bus in B_buses:
bus.b_shunt = 0.0
for branch in Bp_branches:
branch.b = 0.0
branch.ratio = 1.0
if method == "XB":
branch.r = 0.0
Yp, _, _ = self.getYbus(B_buses, Bp_branches)
for branch in Bpp_branches:
branch.phase_shift = 0.0
if method == "BX":
branch.r = 0.0
Ypp, _, _ = self.getYbus(B_buses, Bpp_branches)
del B_buses
del Bp_branches
return -Yp.imag, -Ypp.imag | def function[makeB, parameter[self, buses, branches, method]]:
constant[ Based on makeB.m from MATPOWER by Ray Zimmerman, developed at
PSERC Cornell. See U{http://www.pserc.cornell.edu/matpower/} for more
information.
@param method: Specify "XB" or "BX" method.
@type method: string
@rtype: tuple
@return: Two matrices, B prime and B double prime, used in the fast
decoupled power flow solver.
]
variable[buses] assign[=] <ast.IfExp object at 0x7da18dc999f0>
variable[branches] assign[=] <ast.IfExp object at 0x7da18dc9a020>
variable[B_buses] assign[=] call[name[copy].deepcopy, parameter[name[buses]]]
variable[Bp_branches] assign[=] call[name[copy].deepcopy, parameter[name[branches]]]
variable[Bpp_branches] assign[=] call[name[copy].deepcopy, parameter[name[branches]]]
for taget[name[bus]] in starred[name[B_buses]] begin[:]
name[bus].b_shunt assign[=] constant[0.0]
for taget[name[branch]] in starred[name[Bp_branches]] begin[:]
name[branch].b assign[=] constant[0.0]
name[branch].ratio assign[=] constant[1.0]
if compare[name[method] equal[==] constant[XB]] begin[:]
name[branch].r assign[=] constant[0.0]
<ast.Tuple object at 0x7da18dc99000> assign[=] call[name[self].getYbus, parameter[name[B_buses], name[Bp_branches]]]
for taget[name[branch]] in starred[name[Bpp_branches]] begin[:]
name[branch].phase_shift assign[=] constant[0.0]
if compare[name[method] equal[==] constant[BX]] begin[:]
name[branch].r assign[=] constant[0.0]
<ast.Tuple object at 0x7da18dc99e70> assign[=] call[name[self].getYbus, parameter[name[B_buses], name[Bpp_branches]]]
<ast.Delete object at 0x7da18dc982e0>
<ast.Delete object at 0x7da18dc996f0>
return[tuple[[<ast.UnaryOp object at 0x7da18dc9a4a0>, <ast.UnaryOp object at 0x7da18dc9a200>]]] | keyword[def] identifier[makeB] ( identifier[self] , identifier[buses] = keyword[None] , identifier[branches] = keyword[None] , identifier[method] = literal[string] ):
literal[string]
identifier[buses] = identifier[self] . identifier[connected_buses] keyword[if] identifier[buses] keyword[is] keyword[None] keyword[else] identifier[buses]
identifier[branches] = identifier[self] . identifier[online_branches] keyword[if] identifier[branches] keyword[is] keyword[None] keyword[else] identifier[branches]
identifier[B_buses] = identifier[copy] . identifier[deepcopy] ( identifier[buses] )
identifier[Bp_branches] = identifier[copy] . identifier[deepcopy] ( identifier[branches] )
identifier[Bpp_branches] = identifier[copy] . identifier[deepcopy] ( identifier[branches] )
keyword[for] identifier[bus] keyword[in] identifier[B_buses] :
identifier[bus] . identifier[b_shunt] = literal[int]
keyword[for] identifier[branch] keyword[in] identifier[Bp_branches] :
identifier[branch] . identifier[b] = literal[int]
identifier[branch] . identifier[ratio] = literal[int]
keyword[if] identifier[method] == literal[string] :
identifier[branch] . identifier[r] = literal[int]
identifier[Yp] , identifier[_] , identifier[_] = identifier[self] . identifier[getYbus] ( identifier[B_buses] , identifier[Bp_branches] )
keyword[for] identifier[branch] keyword[in] identifier[Bpp_branches] :
identifier[branch] . identifier[phase_shift] = literal[int]
keyword[if] identifier[method] == literal[string] :
identifier[branch] . identifier[r] = literal[int]
identifier[Ypp] , identifier[_] , identifier[_] = identifier[self] . identifier[getYbus] ( identifier[B_buses] , identifier[Bpp_branches] )
keyword[del] identifier[B_buses]
keyword[del] identifier[Bp_branches]
keyword[return] - identifier[Yp] . identifier[imag] ,- identifier[Ypp] . identifier[imag] | def makeB(self, buses=None, branches=None, method='XB'):
""" Based on makeB.m from MATPOWER by Ray Zimmerman, developed at
PSERC Cornell. See U{http://www.pserc.cornell.edu/matpower/} for more
information.
@param method: Specify "XB" or "BX" method.
@type method: string
@rtype: tuple
@return: Two matrices, B prime and B double prime, used in the fast
decoupled power flow solver.
"""
buses = self.connected_buses if buses is None else buses
branches = self.online_branches if branches is None else branches
B_buses = copy.deepcopy(buses) # modify bus copies
Bp_branches = copy.deepcopy(branches) # modify branch copies
Bpp_branches = copy.deepcopy(branches)
for bus in B_buses:
bus.b_shunt = 0.0 # depends on [control=['for'], data=['bus']]
for branch in Bp_branches:
branch.b = 0.0
branch.ratio = 1.0
if method == 'XB':
branch.r = 0.0 # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['branch']]
(Yp, _, _) = self.getYbus(B_buses, Bp_branches)
for branch in Bpp_branches:
branch.phase_shift = 0.0
if method == 'BX':
branch.r = 0.0 # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['branch']]
(Ypp, _, _) = self.getYbus(B_buses, Bpp_branches)
del B_buses
del Bp_branches
return (-Yp.imag, -Ypp.imag) |
def list_tags(tags):
"""Print tags in dict so they allign with listing above."""
tags_sorted = sorted(list(tags.items()), key=operator.itemgetter(0))
tag_sec_spacer = ""
c = 1
ignored_keys = ["Name", "aws:ec2spot:fleet-request-id"]
pad_col = {1: 38, 2: 49}
for k, v in tags_sorted:
# if k != "Name":
if k not in ignored_keys:
if c < 3:
padamt = pad_col[c]
sys.stdout.write(" {2}{0}:{3} {1}".
format(k, v, C_HEAD2, C_NORM).ljust(padamt))
c += 1
tag_sec_spacer = "\n"
else:
sys.stdout.write("{2}{0}:{3} {1}\n".format(k, v, C_HEAD2,
C_NORM))
c = 1
tag_sec_spacer = ""
print(tag_sec_spacer) | def function[list_tags, parameter[tags]]:
constant[Print tags in dict so they allign with listing above.]
variable[tags_sorted] assign[=] call[name[sorted], parameter[call[name[list], parameter[call[name[tags].items, parameter[]]]]]]
variable[tag_sec_spacer] assign[=] constant[]
variable[c] assign[=] constant[1]
variable[ignored_keys] assign[=] list[[<ast.Constant object at 0x7da1b0a70400>, <ast.Constant object at 0x7da1b0a72080>]]
variable[pad_col] assign[=] dictionary[[<ast.Constant object at 0x7da1b0af1360>, <ast.Constant object at 0x7da1b0af05b0>], [<ast.Constant object at 0x7da1b0af0dc0>, <ast.Constant object at 0x7da1b0af0a90>]]
for taget[tuple[[<ast.Name object at 0x7da1b0af1060>, <ast.Name object at 0x7da1b0af1870>]]] in starred[name[tags_sorted]] begin[:]
if compare[name[k] <ast.NotIn object at 0x7da2590d7190> name[ignored_keys]] begin[:]
if compare[name[c] less[<] constant[3]] begin[:]
variable[padamt] assign[=] call[name[pad_col]][name[c]]
call[name[sys].stdout.write, parameter[call[call[constant[ {2}{0}:{3} {1}].format, parameter[name[k], name[v], name[C_HEAD2], name[C_NORM]]].ljust, parameter[name[padamt]]]]]
<ast.AugAssign object at 0x7da1b0af0b20>
variable[tag_sec_spacer] assign[=] constant[
]
call[name[print], parameter[name[tag_sec_spacer]]] | keyword[def] identifier[list_tags] ( identifier[tags] ):
literal[string]
identifier[tags_sorted] = identifier[sorted] ( identifier[list] ( identifier[tags] . identifier[items] ()), identifier[key] = identifier[operator] . identifier[itemgetter] ( literal[int] ))
identifier[tag_sec_spacer] = literal[string]
identifier[c] = literal[int]
identifier[ignored_keys] =[ literal[string] , literal[string] ]
identifier[pad_col] ={ literal[int] : literal[int] , literal[int] : literal[int] }
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[tags_sorted] :
keyword[if] identifier[k] keyword[not] keyword[in] identifier[ignored_keys] :
keyword[if] identifier[c] < literal[int] :
identifier[padamt] = identifier[pad_col] [ identifier[c] ]
identifier[sys] . identifier[stdout] . identifier[write] ( literal[string] .
identifier[format] ( identifier[k] , identifier[v] , identifier[C_HEAD2] , identifier[C_NORM] ). identifier[ljust] ( identifier[padamt] ))
identifier[c] += literal[int]
identifier[tag_sec_spacer] = literal[string]
keyword[else] :
identifier[sys] . identifier[stdout] . identifier[write] ( literal[string] . identifier[format] ( identifier[k] , identifier[v] , identifier[C_HEAD2] ,
identifier[C_NORM] ))
identifier[c] = literal[int]
identifier[tag_sec_spacer] = literal[string]
identifier[print] ( identifier[tag_sec_spacer] ) | def list_tags(tags):
"""Print tags in dict so they allign with listing above."""
tags_sorted = sorted(list(tags.items()), key=operator.itemgetter(0))
tag_sec_spacer = ''
c = 1
ignored_keys = ['Name', 'aws:ec2spot:fleet-request-id']
pad_col = {1: 38, 2: 49}
for (k, v) in tags_sorted:
# if k != "Name":
if k not in ignored_keys:
if c < 3:
padamt = pad_col[c]
sys.stdout.write(' {2}{0}:{3} {1}'.format(k, v, C_HEAD2, C_NORM).ljust(padamt))
c += 1
tag_sec_spacer = '\n' # depends on [control=['if'], data=['c']]
else:
sys.stdout.write('{2}{0}:{3} {1}\n'.format(k, v, C_HEAD2, C_NORM))
c = 1
tag_sec_spacer = '' # depends on [control=['if'], data=['k']] # depends on [control=['for'], data=[]]
print(tag_sec_spacer) |
def to_JSON(self):
"""Dumps object fields into a JSON formatted string
:returns: the JSON string
"""
last = None
if self._last_weather:
last = self._last_weather.to_JSON()
return json.dumps({'name': self._name,
'station_ID': self._station_ID,
'station_type': self._station_type,
'status': self._status,
'lat': self._lat,
'lon': self._lon,
'distance': self._distance,
'weather': json.loads(last),
}) | def function[to_JSON, parameter[self]]:
constant[Dumps object fields into a JSON formatted string
:returns: the JSON string
]
variable[last] assign[=] constant[None]
if name[self]._last_weather begin[:]
variable[last] assign[=] call[name[self]._last_weather.to_JSON, parameter[]]
return[call[name[json].dumps, parameter[dictionary[[<ast.Constant object at 0x7da18f721f60>, <ast.Constant object at 0x7da18f720340>, <ast.Constant object at 0x7da18f7228c0>, <ast.Constant object at 0x7da18f723220>, <ast.Constant object at 0x7da18f7201f0>, <ast.Constant object at 0x7da18f7230a0>, <ast.Constant object at 0x7da18f722500>, <ast.Constant object at 0x7da2054a6e90>], [<ast.Attribute object at 0x7da2054a5240>, <ast.Attribute object at 0x7da2054a42b0>, <ast.Attribute object at 0x7da2054a7be0>, <ast.Attribute object at 0x7da2054a5ed0>, <ast.Attribute object at 0x7da2054a5780>, <ast.Attribute object at 0x7da2054a4760>, <ast.Attribute object at 0x7da2054a4e80>, <ast.Call object at 0x7da2054a7a60>]]]]] | keyword[def] identifier[to_JSON] ( identifier[self] ):
literal[string]
identifier[last] = keyword[None]
keyword[if] identifier[self] . identifier[_last_weather] :
identifier[last] = identifier[self] . identifier[_last_weather] . identifier[to_JSON] ()
keyword[return] identifier[json] . identifier[dumps] ({ literal[string] : identifier[self] . identifier[_name] ,
literal[string] : identifier[self] . identifier[_station_ID] ,
literal[string] : identifier[self] . identifier[_station_type] ,
literal[string] : identifier[self] . identifier[_status] ,
literal[string] : identifier[self] . identifier[_lat] ,
literal[string] : identifier[self] . identifier[_lon] ,
literal[string] : identifier[self] . identifier[_distance] ,
literal[string] : identifier[json] . identifier[loads] ( identifier[last] ),
}) | def to_JSON(self):
"""Dumps object fields into a JSON formatted string
:returns: the JSON string
"""
last = None
if self._last_weather:
last = self._last_weather.to_JSON() # depends on [control=['if'], data=[]]
return json.dumps({'name': self._name, 'station_ID': self._station_ID, 'station_type': self._station_type, 'status': self._status, 'lat': self._lat, 'lon': self._lon, 'distance': self._distance, 'weather': json.loads(last)}) |
def update_translations(ctx, module_list):
""" Update translations from module list
:param module_list: a list of modules
"""
modules.update_translations(ctx, module_list)
ctx.log_line(u'Deprecated: use anthem.lyrics.modules.update_translations'
'instead of anthem.lyrics.loaders.update_translations') | def function[update_translations, parameter[ctx, module_list]]:
constant[ Update translations from module list
:param module_list: a list of modules
]
call[name[modules].update_translations, parameter[name[ctx], name[module_list]]]
call[name[ctx].log_line, parameter[constant[Deprecated: use anthem.lyrics.modules.update_translationsinstead of anthem.lyrics.loaders.update_translations]]] | keyword[def] identifier[update_translations] ( identifier[ctx] , identifier[module_list] ):
literal[string]
identifier[modules] . identifier[update_translations] ( identifier[ctx] , identifier[module_list] )
identifier[ctx] . identifier[log_line] ( literal[string]
literal[string] ) | def update_translations(ctx, module_list):
""" Update translations from module list
:param module_list: a list of modules
"""
modules.update_translations(ctx, module_list)
ctx.log_line(u'Deprecated: use anthem.lyrics.modules.update_translationsinstead of anthem.lyrics.loaders.update_translations') |
def _update_database_helper_table(
self):
"""*Update the sherlock catalogues database helper table with the time-stamp of when this catlogue was last updated*
**Usage:**
.. code-block:: python
self._update_database_helper_table()
"""
self.log.debug('starting the ``_update_database_helper_table`` method')
tableName = self.dbTableName
sqlQuery = u"""
update tcs_helper_catalogue_tables_info set last_updated = now() where table_name = "%(tableName)s";
""" % locals()
writequery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.cataloguesDbConn,
)
self.log.debug(
'completed the ``_update_database_helper_table`` method')
return None | def function[_update_database_helper_table, parameter[self]]:
constant[*Update the sherlock catalogues database helper table with the time-stamp of when this catlogue was last updated*
**Usage:**
.. code-block:: python
self._update_database_helper_table()
]
call[name[self].log.debug, parameter[constant[starting the ``_update_database_helper_table`` method]]]
variable[tableName] assign[=] name[self].dbTableName
variable[sqlQuery] assign[=] binary_operation[constant[
update tcs_helper_catalogue_tables_info set last_updated = now() where table_name = "%(tableName)s";
] <ast.Mod object at 0x7da2590d6920> call[name[locals], parameter[]]]
call[name[writequery], parameter[]]
call[name[self].log.debug, parameter[constant[completed the ``_update_database_helper_table`` method]]]
return[constant[None]] | keyword[def] identifier[_update_database_helper_table] (
identifier[self] ):
literal[string]
identifier[self] . identifier[log] . identifier[debug] ( literal[string] )
identifier[tableName] = identifier[self] . identifier[dbTableName]
identifier[sqlQuery] = literal[string] % identifier[locals] ()
identifier[writequery] (
identifier[log] = identifier[self] . identifier[log] ,
identifier[sqlQuery] = identifier[sqlQuery] ,
identifier[dbConn] = identifier[self] . identifier[cataloguesDbConn] ,
)
identifier[self] . identifier[log] . identifier[debug] (
literal[string] )
keyword[return] keyword[None] | def _update_database_helper_table(self):
"""*Update the sherlock catalogues database helper table with the time-stamp of when this catlogue was last updated*
**Usage:**
.. code-block:: python
self._update_database_helper_table()
"""
self.log.debug('starting the ``_update_database_helper_table`` method')
tableName = self.dbTableName
sqlQuery = u'\n update tcs_helper_catalogue_tables_info set last_updated = now() where table_name = "%(tableName)s";\n ' % locals()
writequery(log=self.log, sqlQuery=sqlQuery, dbConn=self.cataloguesDbConn)
self.log.debug('completed the ``_update_database_helper_table`` method')
return None |
def get_content_type(
self,
bucket: str,
key: str
) -> str:
"""
Retrieves the content-type for a given object in a given bucket.
:param bucket: the bucket the object resides in.
:param key: the key of the object for which content-type is being retrieved.
:return: the content-type
"""
blob_obj = self._get_blob_obj(bucket, key)
return blob_obj.content_type | def function[get_content_type, parameter[self, bucket, key]]:
constant[
Retrieves the content-type for a given object in a given bucket.
:param bucket: the bucket the object resides in.
:param key: the key of the object for which content-type is being retrieved.
:return: the content-type
]
variable[blob_obj] assign[=] call[name[self]._get_blob_obj, parameter[name[bucket], name[key]]]
return[name[blob_obj].content_type] | keyword[def] identifier[get_content_type] (
identifier[self] ,
identifier[bucket] : identifier[str] ,
identifier[key] : identifier[str]
)-> identifier[str] :
literal[string]
identifier[blob_obj] = identifier[self] . identifier[_get_blob_obj] ( identifier[bucket] , identifier[key] )
keyword[return] identifier[blob_obj] . identifier[content_type] | def get_content_type(self, bucket: str, key: str) -> str:
"""
Retrieves the content-type for a given object in a given bucket.
:param bucket: the bucket the object resides in.
:param key: the key of the object for which content-type is being retrieved.
:return: the content-type
"""
blob_obj = self._get_blob_obj(bucket, key)
return blob_obj.content_type |
def pretty_printer_factory(p_todolist, p_additional_filters=None):
""" Returns a pretty printer suitable for the ls and dep subcommands. """
p_additional_filters = p_additional_filters or []
printer = PrettyPrinter()
printer.add_filter(PrettyPrinterNumbers(p_todolist))
for ppf in p_additional_filters:
printer.add_filter(ppf)
# apply colors at the last step, the ANSI codes may confuse the
# preceding filters.
printer.add_filter(PrettyPrinterColorFilter())
return printer | def function[pretty_printer_factory, parameter[p_todolist, p_additional_filters]]:
constant[ Returns a pretty printer suitable for the ls and dep subcommands. ]
variable[p_additional_filters] assign[=] <ast.BoolOp object at 0x7da18ede4070>
variable[printer] assign[=] call[name[PrettyPrinter], parameter[]]
call[name[printer].add_filter, parameter[call[name[PrettyPrinterNumbers], parameter[name[p_todolist]]]]]
for taget[name[ppf]] in starred[name[p_additional_filters]] begin[:]
call[name[printer].add_filter, parameter[name[ppf]]]
call[name[printer].add_filter, parameter[call[name[PrettyPrinterColorFilter], parameter[]]]]
return[name[printer]] | keyword[def] identifier[pretty_printer_factory] ( identifier[p_todolist] , identifier[p_additional_filters] = keyword[None] ):
literal[string]
identifier[p_additional_filters] = identifier[p_additional_filters] keyword[or] []
identifier[printer] = identifier[PrettyPrinter] ()
identifier[printer] . identifier[add_filter] ( identifier[PrettyPrinterNumbers] ( identifier[p_todolist] ))
keyword[for] identifier[ppf] keyword[in] identifier[p_additional_filters] :
identifier[printer] . identifier[add_filter] ( identifier[ppf] )
identifier[printer] . identifier[add_filter] ( identifier[PrettyPrinterColorFilter] ())
keyword[return] identifier[printer] | def pretty_printer_factory(p_todolist, p_additional_filters=None):
""" Returns a pretty printer suitable for the ls and dep subcommands. """
p_additional_filters = p_additional_filters or []
printer = PrettyPrinter()
printer.add_filter(PrettyPrinterNumbers(p_todolist))
for ppf in p_additional_filters:
printer.add_filter(ppf) # depends on [control=['for'], data=['ppf']]
# apply colors at the last step, the ANSI codes may confuse the
# preceding filters.
printer.add_filter(PrettyPrinterColorFilter())
return printer |
def disable_vxlan_feature(self, nexus_host):
"""Disable VXLAN on the switch."""
# Removing the "feature nv overlay" configuration also
# removes the "interface nve" configuration.
starttime = time.time()
# Do CLI 'no feature nv overlay'
self.send_edit_string(nexus_host, snipp.PATH_VXLAN_STATE,
(snipp.BODY_VXLAN_STATE % "disabled"))
# Do CLI 'no feature vn-segment-vlan-based'
self.send_edit_string(nexus_host, snipp.PATH_VNSEG_STATE,
(snipp.BODY_VNSEG_STATE % "disabled"))
self.capture_and_print_timeshot(
starttime, "disable_vxlan",
switch=nexus_host) | def function[disable_vxlan_feature, parameter[self, nexus_host]]:
constant[Disable VXLAN on the switch.]
variable[starttime] assign[=] call[name[time].time, parameter[]]
call[name[self].send_edit_string, parameter[name[nexus_host], name[snipp].PATH_VXLAN_STATE, binary_operation[name[snipp].BODY_VXLAN_STATE <ast.Mod object at 0x7da2590d6920> constant[disabled]]]]
call[name[self].send_edit_string, parameter[name[nexus_host], name[snipp].PATH_VNSEG_STATE, binary_operation[name[snipp].BODY_VNSEG_STATE <ast.Mod object at 0x7da2590d6920> constant[disabled]]]]
call[name[self].capture_and_print_timeshot, parameter[name[starttime], constant[disable_vxlan]]] | keyword[def] identifier[disable_vxlan_feature] ( identifier[self] , identifier[nexus_host] ):
literal[string]
identifier[starttime] = identifier[time] . identifier[time] ()
identifier[self] . identifier[send_edit_string] ( identifier[nexus_host] , identifier[snipp] . identifier[PATH_VXLAN_STATE] ,
( identifier[snipp] . identifier[BODY_VXLAN_STATE] % literal[string] ))
identifier[self] . identifier[send_edit_string] ( identifier[nexus_host] , identifier[snipp] . identifier[PATH_VNSEG_STATE] ,
( identifier[snipp] . identifier[BODY_VNSEG_STATE] % literal[string] ))
identifier[self] . identifier[capture_and_print_timeshot] (
identifier[starttime] , literal[string] ,
identifier[switch] = identifier[nexus_host] ) | def disable_vxlan_feature(self, nexus_host):
"""Disable VXLAN on the switch."""
# Removing the "feature nv overlay" configuration also
# removes the "interface nve" configuration.
starttime = time.time()
# Do CLI 'no feature nv overlay'
self.send_edit_string(nexus_host, snipp.PATH_VXLAN_STATE, snipp.BODY_VXLAN_STATE % 'disabled')
# Do CLI 'no feature vn-segment-vlan-based'
self.send_edit_string(nexus_host, snipp.PATH_VNSEG_STATE, snipp.BODY_VNSEG_STATE % 'disabled')
self.capture_and_print_timeshot(starttime, 'disable_vxlan', switch=nexus_host) |
def generate(self):
"""
Generate a diff report from the reports specified.
:return: True/False : return status of whether the diff report generation succeeded.
"""
if (self.discover(CONSTANTS.STATS_CSV_LIST_FILE) and self.discover(CONSTANTS.PLOTS_CSV_LIST_FILE) and self.discover(CONSTANTS.CDF_PLOTS_CSV_LIST_FILE) and
self.collect() and self.collect_datasources() and self.collect_cdf_datasources()):
for stats in self.reports[0].stats:
metric_label = stats.replace('.stats.csv', '')
stats_0 = os.path.join(self.reports[0].local_location, stats)
stats_1 = os.path.join(self.reports[1].local_location, stats)
report0_stats = {}
report1_stats = {}
if naarad.utils.is_valid_file(stats_0) and naarad.utils.is_valid_file(stats_1):
report0 = csv.DictReader(open(stats_0))
for row in report0:
report0_stats[row[CONSTANTS.SUBMETRIC_HEADER]] = row
report0_stats['__headers__'] = report0._fieldnames
report1 = csv.DictReader(open(stats_1))
for row in report1:
report1_stats[row[CONSTANTS.SUBMETRIC_HEADER]] = row
report1_stats['__headers__'] = report1._fieldnames
common_stats = sorted(set(report0_stats['__headers__']) & set(report1_stats['__headers__']))
common_submetrics = sorted(set(report0_stats.keys()) & set(report1_stats.keys()))
for submetric in common_submetrics:
if submetric != '__headers__':
for stat in common_stats:
if stat != CONSTANTS.SUBMETRIC_HEADER:
diff_metric = reduce(defaultdict.__getitem__, [stats.split('.')[0], submetric, stat], self.diff_data)
diff_metric[0] = float(report0_stats[submetric][stat])
diff_metric[1] = float(report1_stats[submetric][stat])
diff_metric['absolute_diff'] = naarad.utils.normalize_float_for_display(diff_metric[1] - diff_metric[0])
if diff_metric[0] == 0:
if diff_metric['absolute_diff'] == '0.0':
diff_metric['percent_diff'] = 0.0
else:
diff_metric['percent_diff'] = 'N/A'
else:
diff_metric['percent_diff'] = naarad.utils.normalize_float_for_display((diff_metric[1] - diff_metric[0]) * 100 / diff_metric[0])
# check whether there is a SLA failure
if ((metric_label in self.sla_map.keys()) and (submetric in self.sla_map[metric_label].keys()) and
(stat in self.sla_map[metric_label][submetric].keys())):
self.check_sla(self.sla_map[metric_label][submetric][stat], diff_metric)
else:
return False
self.plot_diff()
diff_html = ''
if self.diff_data:
diff_html = self.generate_diff_html()
client_html = self.generate_client_charting_page(self.reports[0].datasource)
if diff_html != '':
with open(os.path.join(self.output_directory, CONSTANTS.DIFF_REPORT_FILE), 'w') as diff_file:
diff_file.write(diff_html)
with open(os.path.join(self.output_directory, CONSTANTS.CLIENT_CHARTING_FILE), 'w') as client_file:
client_file.write(client_html)
return True | def function[generate, parameter[self]]:
constant[
Generate a diff report from the reports specified.
:return: True/False : return status of whether the diff report generation succeeded.
]
if <ast.BoolOp object at 0x7da1b00d8ca0> begin[:]
for taget[name[stats]] in starred[call[name[self].reports][constant[0]].stats] begin[:]
variable[metric_label] assign[=] call[name[stats].replace, parameter[constant[.stats.csv], constant[]]]
variable[stats_0] assign[=] call[name[os].path.join, parameter[call[name[self].reports][constant[0]].local_location, name[stats]]]
variable[stats_1] assign[=] call[name[os].path.join, parameter[call[name[self].reports][constant[1]].local_location, name[stats]]]
variable[report0_stats] assign[=] dictionary[[], []]
variable[report1_stats] assign[=] dictionary[[], []]
if <ast.BoolOp object at 0x7da1b00f9780> begin[:]
variable[report0] assign[=] call[name[csv].DictReader, parameter[call[name[open], parameter[name[stats_0]]]]]
for taget[name[row]] in starred[name[report0]] begin[:]
call[name[report0_stats]][call[name[row]][name[CONSTANTS].SUBMETRIC_HEADER]] assign[=] name[row]
call[name[report0_stats]][constant[__headers__]] assign[=] name[report0]._fieldnames
variable[report1] assign[=] call[name[csv].DictReader, parameter[call[name[open], parameter[name[stats_1]]]]]
for taget[name[row]] in starred[name[report1]] begin[:]
call[name[report1_stats]][call[name[row]][name[CONSTANTS].SUBMETRIC_HEADER]] assign[=] name[row]
call[name[report1_stats]][constant[__headers__]] assign[=] name[report1]._fieldnames
variable[common_stats] assign[=] call[name[sorted], parameter[binary_operation[call[name[set], parameter[call[name[report0_stats]][constant[__headers__]]]] <ast.BitAnd object at 0x7da2590d6b60> call[name[set], parameter[call[name[report1_stats]][constant[__headers__]]]]]]]
variable[common_submetrics] assign[=] call[name[sorted], parameter[binary_operation[call[name[set], parameter[call[name[report0_stats].keys, parameter[]]]] <ast.BitAnd object at 0x7da2590d6b60> call[name[set], parameter[call[name[report1_stats].keys, parameter[]]]]]]]
for taget[name[submetric]] in starred[name[common_submetrics]] begin[:]
if compare[name[submetric] not_equal[!=] constant[__headers__]] begin[:]
for taget[name[stat]] in starred[name[common_stats]] begin[:]
if compare[name[stat] not_equal[!=] name[CONSTANTS].SUBMETRIC_HEADER] begin[:]
variable[diff_metric] assign[=] call[name[reduce], parameter[name[defaultdict].__getitem__, list[[<ast.Subscript object at 0x7da1b00f8a00>, <ast.Name object at 0x7da1b00dba90>, <ast.Name object at 0x7da1b00d9c30>]], name[self].diff_data]]
call[name[diff_metric]][constant[0]] assign[=] call[name[float], parameter[call[call[name[report0_stats]][name[submetric]]][name[stat]]]]
call[name[diff_metric]][constant[1]] assign[=] call[name[float], parameter[call[call[name[report1_stats]][name[submetric]]][name[stat]]]]
call[name[diff_metric]][constant[absolute_diff]] assign[=] call[name[naarad].utils.normalize_float_for_display, parameter[binary_operation[call[name[diff_metric]][constant[1]] - call[name[diff_metric]][constant[0]]]]]
if compare[call[name[diff_metric]][constant[0]] equal[==] constant[0]] begin[:]
if compare[call[name[diff_metric]][constant[absolute_diff]] equal[==] constant[0.0]] begin[:]
call[name[diff_metric]][constant[percent_diff]] assign[=] constant[0.0]
if <ast.BoolOp object at 0x7da1aff76d70> begin[:]
call[name[self].check_sla, parameter[call[call[call[name[self].sla_map][name[metric_label]]][name[submetric]]][name[stat]], name[diff_metric]]]
call[name[self].plot_diff, parameter[]]
variable[diff_html] assign[=] constant[]
if name[self].diff_data begin[:]
variable[diff_html] assign[=] call[name[self].generate_diff_html, parameter[]]
variable[client_html] assign[=] call[name[self].generate_client_charting_page, parameter[call[name[self].reports][constant[0]].datasource]]
if compare[name[diff_html] not_equal[!=] constant[]] begin[:]
with call[name[open], parameter[call[name[os].path.join, parameter[name[self].output_directory, name[CONSTANTS].DIFF_REPORT_FILE]], constant[w]]] begin[:]
call[name[diff_file].write, parameter[name[diff_html]]]
with call[name[open], parameter[call[name[os].path.join, parameter[name[self].output_directory, name[CONSTANTS].CLIENT_CHARTING_FILE]], constant[w]]] begin[:]
call[name[client_file].write, parameter[name[client_html]]]
return[constant[True]] | keyword[def] identifier[generate] ( identifier[self] ):
literal[string]
keyword[if] ( identifier[self] . identifier[discover] ( identifier[CONSTANTS] . identifier[STATS_CSV_LIST_FILE] ) keyword[and] identifier[self] . identifier[discover] ( identifier[CONSTANTS] . identifier[PLOTS_CSV_LIST_FILE] ) keyword[and] identifier[self] . identifier[discover] ( identifier[CONSTANTS] . identifier[CDF_PLOTS_CSV_LIST_FILE] ) keyword[and]
identifier[self] . identifier[collect] () keyword[and] identifier[self] . identifier[collect_datasources] () keyword[and] identifier[self] . identifier[collect_cdf_datasources] ()):
keyword[for] identifier[stats] keyword[in] identifier[self] . identifier[reports] [ literal[int] ]. identifier[stats] :
identifier[metric_label] = identifier[stats] . identifier[replace] ( literal[string] , literal[string] )
identifier[stats_0] = identifier[os] . identifier[path] . identifier[join] ( identifier[self] . identifier[reports] [ literal[int] ]. identifier[local_location] , identifier[stats] )
identifier[stats_1] = identifier[os] . identifier[path] . identifier[join] ( identifier[self] . identifier[reports] [ literal[int] ]. identifier[local_location] , identifier[stats] )
identifier[report0_stats] ={}
identifier[report1_stats] ={}
keyword[if] identifier[naarad] . identifier[utils] . identifier[is_valid_file] ( identifier[stats_0] ) keyword[and] identifier[naarad] . identifier[utils] . identifier[is_valid_file] ( identifier[stats_1] ):
identifier[report0] = identifier[csv] . identifier[DictReader] ( identifier[open] ( identifier[stats_0] ))
keyword[for] identifier[row] keyword[in] identifier[report0] :
identifier[report0_stats] [ identifier[row] [ identifier[CONSTANTS] . identifier[SUBMETRIC_HEADER] ]]= identifier[row]
identifier[report0_stats] [ literal[string] ]= identifier[report0] . identifier[_fieldnames]
identifier[report1] = identifier[csv] . identifier[DictReader] ( identifier[open] ( identifier[stats_1] ))
keyword[for] identifier[row] keyword[in] identifier[report1] :
identifier[report1_stats] [ identifier[row] [ identifier[CONSTANTS] . identifier[SUBMETRIC_HEADER] ]]= identifier[row]
identifier[report1_stats] [ literal[string] ]= identifier[report1] . identifier[_fieldnames]
identifier[common_stats] = identifier[sorted] ( identifier[set] ( identifier[report0_stats] [ literal[string] ])& identifier[set] ( identifier[report1_stats] [ literal[string] ]))
identifier[common_submetrics] = identifier[sorted] ( identifier[set] ( identifier[report0_stats] . identifier[keys] ())& identifier[set] ( identifier[report1_stats] . identifier[keys] ()))
keyword[for] identifier[submetric] keyword[in] identifier[common_submetrics] :
keyword[if] identifier[submetric] != literal[string] :
keyword[for] identifier[stat] keyword[in] identifier[common_stats] :
keyword[if] identifier[stat] != identifier[CONSTANTS] . identifier[SUBMETRIC_HEADER] :
identifier[diff_metric] = identifier[reduce] ( identifier[defaultdict] . identifier[__getitem__] ,[ identifier[stats] . identifier[split] ( literal[string] )[ literal[int] ], identifier[submetric] , identifier[stat] ], identifier[self] . identifier[diff_data] )
identifier[diff_metric] [ literal[int] ]= identifier[float] ( identifier[report0_stats] [ identifier[submetric] ][ identifier[stat] ])
identifier[diff_metric] [ literal[int] ]= identifier[float] ( identifier[report1_stats] [ identifier[submetric] ][ identifier[stat] ])
identifier[diff_metric] [ literal[string] ]= identifier[naarad] . identifier[utils] . identifier[normalize_float_for_display] ( identifier[diff_metric] [ literal[int] ]- identifier[diff_metric] [ literal[int] ])
keyword[if] identifier[diff_metric] [ literal[int] ]== literal[int] :
keyword[if] identifier[diff_metric] [ literal[string] ]== literal[string] :
identifier[diff_metric] [ literal[string] ]= literal[int]
keyword[else] :
identifier[diff_metric] [ literal[string] ]= literal[string]
keyword[else] :
identifier[diff_metric] [ literal[string] ]= identifier[naarad] . identifier[utils] . identifier[normalize_float_for_display] (( identifier[diff_metric] [ literal[int] ]- identifier[diff_metric] [ literal[int] ])* literal[int] / identifier[diff_metric] [ literal[int] ])
keyword[if] (( identifier[metric_label] keyword[in] identifier[self] . identifier[sla_map] . identifier[keys] ()) keyword[and] ( identifier[submetric] keyword[in] identifier[self] . identifier[sla_map] [ identifier[metric_label] ]. identifier[keys] ()) keyword[and]
( identifier[stat] keyword[in] identifier[self] . identifier[sla_map] [ identifier[metric_label] ][ identifier[submetric] ]. identifier[keys] ())):
identifier[self] . identifier[check_sla] ( identifier[self] . identifier[sla_map] [ identifier[metric_label] ][ identifier[submetric] ][ identifier[stat] ], identifier[diff_metric] )
keyword[else] :
keyword[return] keyword[False]
identifier[self] . identifier[plot_diff] ()
identifier[diff_html] = literal[string]
keyword[if] identifier[self] . identifier[diff_data] :
identifier[diff_html] = identifier[self] . identifier[generate_diff_html] ()
identifier[client_html] = identifier[self] . identifier[generate_client_charting_page] ( identifier[self] . identifier[reports] [ literal[int] ]. identifier[datasource] )
keyword[if] identifier[diff_html] != literal[string] :
keyword[with] identifier[open] ( identifier[os] . identifier[path] . identifier[join] ( identifier[self] . identifier[output_directory] , identifier[CONSTANTS] . identifier[DIFF_REPORT_FILE] ), literal[string] ) keyword[as] identifier[diff_file] :
identifier[diff_file] . identifier[write] ( identifier[diff_html] )
keyword[with] identifier[open] ( identifier[os] . identifier[path] . identifier[join] ( identifier[self] . identifier[output_directory] , identifier[CONSTANTS] . identifier[CLIENT_CHARTING_FILE] ), literal[string] ) keyword[as] identifier[client_file] :
identifier[client_file] . identifier[write] ( identifier[client_html] )
keyword[return] keyword[True] | def generate(self):
"""
Generate a diff report from the reports specified.
:return: True/False : return status of whether the diff report generation succeeded.
"""
if self.discover(CONSTANTS.STATS_CSV_LIST_FILE) and self.discover(CONSTANTS.PLOTS_CSV_LIST_FILE) and self.discover(CONSTANTS.CDF_PLOTS_CSV_LIST_FILE) and self.collect() and self.collect_datasources() and self.collect_cdf_datasources():
for stats in self.reports[0].stats:
metric_label = stats.replace('.stats.csv', '')
stats_0 = os.path.join(self.reports[0].local_location, stats)
stats_1 = os.path.join(self.reports[1].local_location, stats)
report0_stats = {}
report1_stats = {}
if naarad.utils.is_valid_file(stats_0) and naarad.utils.is_valid_file(stats_1):
report0 = csv.DictReader(open(stats_0))
for row in report0:
report0_stats[row[CONSTANTS.SUBMETRIC_HEADER]] = row # depends on [control=['for'], data=['row']]
report0_stats['__headers__'] = report0._fieldnames
report1 = csv.DictReader(open(stats_1))
for row in report1:
report1_stats[row[CONSTANTS.SUBMETRIC_HEADER]] = row # depends on [control=['for'], data=['row']]
report1_stats['__headers__'] = report1._fieldnames
common_stats = sorted(set(report0_stats['__headers__']) & set(report1_stats['__headers__']))
common_submetrics = sorted(set(report0_stats.keys()) & set(report1_stats.keys()))
for submetric in common_submetrics:
if submetric != '__headers__':
for stat in common_stats:
if stat != CONSTANTS.SUBMETRIC_HEADER:
diff_metric = reduce(defaultdict.__getitem__, [stats.split('.')[0], submetric, stat], self.diff_data)
diff_metric[0] = float(report0_stats[submetric][stat])
diff_metric[1] = float(report1_stats[submetric][stat])
diff_metric['absolute_diff'] = naarad.utils.normalize_float_for_display(diff_metric[1] - diff_metric[0])
if diff_metric[0] == 0:
if diff_metric['absolute_diff'] == '0.0':
diff_metric['percent_diff'] = 0.0 # depends on [control=['if'], data=[]]
else:
diff_metric['percent_diff'] = 'N/A' # depends on [control=['if'], data=[]]
else:
diff_metric['percent_diff'] = naarad.utils.normalize_float_for_display((diff_metric[1] - diff_metric[0]) * 100 / diff_metric[0])
# check whether there is a SLA failure
if metric_label in self.sla_map.keys() and submetric in self.sla_map[metric_label].keys() and (stat in self.sla_map[metric_label][submetric].keys()):
self.check_sla(self.sla_map[metric_label][submetric][stat], diff_metric) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['stat']] # depends on [control=['for'], data=['stat']] # depends on [control=['if'], data=['submetric']] # depends on [control=['for'], data=['submetric']] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['stats']] # depends on [control=['if'], data=[]]
else:
return False
self.plot_diff()
diff_html = ''
if self.diff_data:
diff_html = self.generate_diff_html()
client_html = self.generate_client_charting_page(self.reports[0].datasource) # depends on [control=['if'], data=[]]
if diff_html != '':
with open(os.path.join(self.output_directory, CONSTANTS.DIFF_REPORT_FILE), 'w') as diff_file:
diff_file.write(diff_html) # depends on [control=['with'], data=['diff_file']]
with open(os.path.join(self.output_directory, CONSTANTS.CLIENT_CHARTING_FILE), 'w') as client_file:
client_file.write(client_html) # depends on [control=['with'], data=['client_file']] # depends on [control=['if'], data=['diff_html']]
return True |
def _build_predict(self, Xnew, full_cov=False):
"""
The posterior variance of F is given by
q(f) = N(f | K alpha + mean, [K^-1 + diag(lambda**2)]^-1)
Here we project this to F*, the values of the GP at Xnew which is given
by
q(F*) = N ( F* | K_{*F} alpha + mean, K_{**} - K_{*f}[K_{ff} +
diag(lambda**-2)]^-1 K_{f*} )
"""
# compute kernel things
Kx = self.kern.K(self.X, Xnew)
K = self.kern.K(self.X)
# predictive mean
f_mean = tf.matmul(Kx, self.q_alpha, transpose_a=True) + self.mean_function(Xnew)
# predictive var
A = K + tf.matrix_diag(tf.transpose(1. / tf.square(self.q_lambda)))
L = tf.cholesky(A)
Kx_tiled = tf.tile(tf.expand_dims(Kx, 0), [self.num_latent, 1, 1])
LiKx = tf.matrix_triangular_solve(L, Kx_tiled)
if full_cov:
f_var = self.kern.K(Xnew) - tf.matmul(LiKx, LiKx, transpose_a=True)
else:
f_var = self.kern.Kdiag(Xnew) - tf.reduce_sum(tf.square(LiKx), 1)
return f_mean, tf.transpose(f_var) | def function[_build_predict, parameter[self, Xnew, full_cov]]:
constant[
The posterior variance of F is given by
q(f) = N(f | K alpha + mean, [K^-1 + diag(lambda**2)]^-1)
Here we project this to F*, the values of the GP at Xnew which is given
by
q(F*) = N ( F* | K_{*F} alpha + mean, K_{**} - K_{*f}[K_{ff} +
diag(lambda**-2)]^-1 K_{f*} )
]
variable[Kx] assign[=] call[name[self].kern.K, parameter[name[self].X, name[Xnew]]]
variable[K] assign[=] call[name[self].kern.K, parameter[name[self].X]]
variable[f_mean] assign[=] binary_operation[call[name[tf].matmul, parameter[name[Kx], name[self].q_alpha]] + call[name[self].mean_function, parameter[name[Xnew]]]]
variable[A] assign[=] binary_operation[name[K] + call[name[tf].matrix_diag, parameter[call[name[tf].transpose, parameter[binary_operation[constant[1.0] / call[name[tf].square, parameter[name[self].q_lambda]]]]]]]]
variable[L] assign[=] call[name[tf].cholesky, parameter[name[A]]]
variable[Kx_tiled] assign[=] call[name[tf].tile, parameter[call[name[tf].expand_dims, parameter[name[Kx], constant[0]]], list[[<ast.Attribute object at 0x7da1b1f48cd0>, <ast.Constant object at 0x7da1b1f493f0>, <ast.Constant object at 0x7da1b1f4ae60>]]]]
variable[LiKx] assign[=] call[name[tf].matrix_triangular_solve, parameter[name[L], name[Kx_tiled]]]
if name[full_cov] begin[:]
variable[f_var] assign[=] binary_operation[call[name[self].kern.K, parameter[name[Xnew]]] - call[name[tf].matmul, parameter[name[LiKx], name[LiKx]]]]
return[tuple[[<ast.Name object at 0x7da18dc9a320>, <ast.Call object at 0x7da18dc986d0>]]] | keyword[def] identifier[_build_predict] ( identifier[self] , identifier[Xnew] , identifier[full_cov] = keyword[False] ):
literal[string]
identifier[Kx] = identifier[self] . identifier[kern] . identifier[K] ( identifier[self] . identifier[X] , identifier[Xnew] )
identifier[K] = identifier[self] . identifier[kern] . identifier[K] ( identifier[self] . identifier[X] )
identifier[f_mean] = identifier[tf] . identifier[matmul] ( identifier[Kx] , identifier[self] . identifier[q_alpha] , identifier[transpose_a] = keyword[True] )+ identifier[self] . identifier[mean_function] ( identifier[Xnew] )
identifier[A] = identifier[K] + identifier[tf] . identifier[matrix_diag] ( identifier[tf] . identifier[transpose] ( literal[int] / identifier[tf] . identifier[square] ( identifier[self] . identifier[q_lambda] )))
identifier[L] = identifier[tf] . identifier[cholesky] ( identifier[A] )
identifier[Kx_tiled] = identifier[tf] . identifier[tile] ( identifier[tf] . identifier[expand_dims] ( identifier[Kx] , literal[int] ),[ identifier[self] . identifier[num_latent] , literal[int] , literal[int] ])
identifier[LiKx] = identifier[tf] . identifier[matrix_triangular_solve] ( identifier[L] , identifier[Kx_tiled] )
keyword[if] identifier[full_cov] :
identifier[f_var] = identifier[self] . identifier[kern] . identifier[K] ( identifier[Xnew] )- identifier[tf] . identifier[matmul] ( identifier[LiKx] , identifier[LiKx] , identifier[transpose_a] = keyword[True] )
keyword[else] :
identifier[f_var] = identifier[self] . identifier[kern] . identifier[Kdiag] ( identifier[Xnew] )- identifier[tf] . identifier[reduce_sum] ( identifier[tf] . identifier[square] ( identifier[LiKx] ), literal[int] )
keyword[return] identifier[f_mean] , identifier[tf] . identifier[transpose] ( identifier[f_var] ) | def _build_predict(self, Xnew, full_cov=False):
"""
The posterior variance of F is given by
q(f) = N(f | K alpha + mean, [K^-1 + diag(lambda**2)]^-1)
Here we project this to F*, the values of the GP at Xnew which is given
by
q(F*) = N ( F* | K_{*F} alpha + mean, K_{**} - K_{*f}[K_{ff} +
diag(lambda**-2)]^-1 K_{f*} )
"""
# compute kernel things
Kx = self.kern.K(self.X, Xnew)
K = self.kern.K(self.X)
# predictive mean
f_mean = tf.matmul(Kx, self.q_alpha, transpose_a=True) + self.mean_function(Xnew)
# predictive var
A = K + tf.matrix_diag(tf.transpose(1.0 / tf.square(self.q_lambda)))
L = tf.cholesky(A)
Kx_tiled = tf.tile(tf.expand_dims(Kx, 0), [self.num_latent, 1, 1])
LiKx = tf.matrix_triangular_solve(L, Kx_tiled)
if full_cov:
f_var = self.kern.K(Xnew) - tf.matmul(LiKx, LiKx, transpose_a=True) # depends on [control=['if'], data=[]]
else:
f_var = self.kern.Kdiag(Xnew) - tf.reduce_sum(tf.square(LiKx), 1)
return (f_mean, tf.transpose(f_var)) |
def _strip_placeholder_braces(p_matchobj):
"""
Returns string with conditional braces around placeholder stripped and
percent sign glued into placeholder character.
Returned string is composed from 'start', 'before', 'placeholder', 'after',
'whitespace', and 'end' match-groups of p_matchobj. Conditional braces are
stripped from 'before' and 'after' groups. 'whitespace', 'start', and 'end'
groups are preserved without any change.
Using this function as an 'repl' argument in re.sub it is possible to turn:
%{(}B{)}
into:
(%B)
"""
before = p_matchobj.group('before') or ''
placeholder = p_matchobj.group('placeholder')
after = p_matchobj.group('after') or ''
whitespace = p_matchobj.group('whitespace') or ''
return before + '%' + placeholder + after + whitespace | def function[_strip_placeholder_braces, parameter[p_matchobj]]:
constant[
Returns string with conditional braces around placeholder stripped and
percent sign glued into placeholder character.
Returned string is composed from 'start', 'before', 'placeholder', 'after',
'whitespace', and 'end' match-groups of p_matchobj. Conditional braces are
stripped from 'before' and 'after' groups. 'whitespace', 'start', and 'end'
groups are preserved without any change.
Using this function as an 'repl' argument in re.sub it is possible to turn:
%{(}B{)}
into:
(%B)
]
variable[before] assign[=] <ast.BoolOp object at 0x7da18eb55de0>
variable[placeholder] assign[=] call[name[p_matchobj].group, parameter[constant[placeholder]]]
variable[after] assign[=] <ast.BoolOp object at 0x7da18eb55120>
variable[whitespace] assign[=] <ast.BoolOp object at 0x7da18eb551b0>
return[binary_operation[binary_operation[binary_operation[binary_operation[name[before] + constant[%]] + name[placeholder]] + name[after]] + name[whitespace]]] | keyword[def] identifier[_strip_placeholder_braces] ( identifier[p_matchobj] ):
literal[string]
identifier[before] = identifier[p_matchobj] . identifier[group] ( literal[string] ) keyword[or] literal[string]
identifier[placeholder] = identifier[p_matchobj] . identifier[group] ( literal[string] )
identifier[after] = identifier[p_matchobj] . identifier[group] ( literal[string] ) keyword[or] literal[string]
identifier[whitespace] = identifier[p_matchobj] . identifier[group] ( literal[string] ) keyword[or] literal[string]
keyword[return] identifier[before] + literal[string] + identifier[placeholder] + identifier[after] + identifier[whitespace] | def _strip_placeholder_braces(p_matchobj):
"""
Returns string with conditional braces around placeholder stripped and
percent sign glued into placeholder character.
Returned string is composed from 'start', 'before', 'placeholder', 'after',
'whitespace', and 'end' match-groups of p_matchobj. Conditional braces are
stripped from 'before' and 'after' groups. 'whitespace', 'start', and 'end'
groups are preserved without any change.
Using this function as an 'repl' argument in re.sub it is possible to turn:
%{(}B{)}
into:
(%B)
"""
before = p_matchobj.group('before') or ''
placeholder = p_matchobj.group('placeholder')
after = p_matchobj.group('after') or ''
whitespace = p_matchobj.group('whitespace') or ''
return before + '%' + placeholder + after + whitespace |
def consent():
"""Return the consent form. Here for backwards-compatibility with 2.x."""
config = _config()
return render_template(
"consent.html",
hit_id=request.args["hit_id"],
assignment_id=request.args["assignment_id"],
worker_id=request.args["worker_id"],
mode=config.get("mode"),
) | def function[consent, parameter[]]:
constant[Return the consent form. Here for backwards-compatibility with 2.x.]
variable[config] assign[=] call[name[_config], parameter[]]
return[call[name[render_template], parameter[constant[consent.html]]]] | keyword[def] identifier[consent] ():
literal[string]
identifier[config] = identifier[_config] ()
keyword[return] identifier[render_template] (
literal[string] ,
identifier[hit_id] = identifier[request] . identifier[args] [ literal[string] ],
identifier[assignment_id] = identifier[request] . identifier[args] [ literal[string] ],
identifier[worker_id] = identifier[request] . identifier[args] [ literal[string] ],
identifier[mode] = identifier[config] . identifier[get] ( literal[string] ),
) | def consent():
"""Return the consent form. Here for backwards-compatibility with 2.x."""
config = _config()
return render_template('consent.html', hit_id=request.args['hit_id'], assignment_id=request.args['assignment_id'], worker_id=request.args['worker_id'], mode=config.get('mode')) |
def to_regex(regex, flags=0):
"""
Given a string, this function returns a new re.RegexObject.
Given a re.RegexObject, this function just returns the same object.
:type regex: string|re.RegexObject
:param regex: A regex or a re.RegexObject
:type flags: int
:param flags: See Python's re.compile().
:rtype: re.RegexObject
:return: The Python regex object.
"""
if regex is None:
raise TypeError('None can not be cast to re.RegexObject')
if hasattr(regex, 'match'):
return regex
return re.compile(regex, flags) | def function[to_regex, parameter[regex, flags]]:
constant[
Given a string, this function returns a new re.RegexObject.
Given a re.RegexObject, this function just returns the same object.
:type regex: string|re.RegexObject
:param regex: A regex or a re.RegexObject
:type flags: int
:param flags: See Python's re.compile().
:rtype: re.RegexObject
:return: The Python regex object.
]
if compare[name[regex] is constant[None]] begin[:]
<ast.Raise object at 0x7da1b072e350>
if call[name[hasattr], parameter[name[regex], constant[match]]] begin[:]
return[name[regex]]
return[call[name[re].compile, parameter[name[regex], name[flags]]]] | keyword[def] identifier[to_regex] ( identifier[regex] , identifier[flags] = literal[int] ):
literal[string]
keyword[if] identifier[regex] keyword[is] keyword[None] :
keyword[raise] identifier[TypeError] ( literal[string] )
keyword[if] identifier[hasattr] ( identifier[regex] , literal[string] ):
keyword[return] identifier[regex]
keyword[return] identifier[re] . identifier[compile] ( identifier[regex] , identifier[flags] ) | def to_regex(regex, flags=0):
"""
Given a string, this function returns a new re.RegexObject.
Given a re.RegexObject, this function just returns the same object.
:type regex: string|re.RegexObject
:param regex: A regex or a re.RegexObject
:type flags: int
:param flags: See Python's re.compile().
:rtype: re.RegexObject
:return: The Python regex object.
"""
if regex is None:
raise TypeError('None can not be cast to re.RegexObject') # depends on [control=['if'], data=[]]
if hasattr(regex, 'match'):
return regex # depends on [control=['if'], data=[]]
return re.compile(regex, flags) |
def img2ascii(img_path, ascii_path, ascii_char="*", pad=0):
"""Convert an image to ascii art text.
Suppose we have an image like that:
.. image:: images/rabbit.png
:align: left
Put some codes::
>>> from weatherlab.math.img2waveform import img2ascii
>>> img2ascii(r"testdata\img2waveform\rabbit.png",
... r"testdata\img2waveform\asciiart.txt", pad=0)
Then you will see this in asciiart.txt::
******
*** *** ****
** ** *********
** ** *** ***
** * ** **
** ** ** **
** * *** *
* ** ** **
** * ** **
** * ** *
* ** ** *
** ** * **
** * ** **
* * ** **
* ** * **
** ** ** **
** * ** **
** * * **
** * ** *
** * ** *
* ** ** *
* ** * *
* ** ** *
* ** ** *
** ** ** **
** * ** **
** * * **
** * * **
** * * **
* * ** **
* * ** *
** * ** *
** * ** *
** ** ** **
* ** ** **
* ** ** **
** ** ** *
** ** ** **
* ** ** **
** ** ** *
** ******* *
** ******* **
** **
** *
** **
*** *
**** ***
*** ***
** ****
** ***
** ***
** **
** **
* **
** **
** **
** **
** **
** **
** **
** **
* **
* **
** *
** **
* **
* **
** *
** *
** **
** **
** **
** **
** ** **
** *** *** **
* **** **** **
* *** **** **
** ** ** *
** *
** *
* **
** **
** *
* **
** **
** **
** *
** **
** **
** **
** *** ** **
** ****** ***
*** ****** **
*** * *** ***
*** ***
*** ***
**** ****
******** *******
*** ********** ******** ***
** *** ************ ********** *** * ***
** * **** *********************** *** ** ***
** * ** **** ** ******* * *** ***** ***
**** * * ***** ********** * **** * * ** **
*** * * ** * ******************************* * *** * **
** ***** * *** ********** ** ** ********** *** ** ***
** * ***** ** * ***** ** ** ***** * * ** * **
*** *** ************ ** ****** ** * * ** ** ** * ** ***
** ******* * * ** ** ** **** * ** * ** * **** **
** *** *** ******* ****** * ** * *** ***** *** ** ***** ** **
** * * ***** ************************************ * **** * **
*** ** ** *********************************************** *** ***
*** ** ****************************************** **** ** ** **
**** ** ** ******************************************** ** * **
** ****** ** ******************************************** ** * ***
** ***** *********************************************** ** ****
* *** ****************************** **************** *********
** ** *************************************** * * * ***** *
** ** ********************************************** *** *
* ** ** *********************************** ******* ** *
** ** ***************************************** *** ** *
*** ** * ********************************************** ** **
****** ************************************************ ** ***
**** *********************************************** ********
** *********************************************** ****
*** ** ******************************************* **
*** ** ***** ****** * * * * * ******** *** ** ** ***
*** * * **** **** **** * ** ** * *** ** ***
**** * * ** **** * *** ******** * *** *****
***** ** ** ** ** *** ** *** *****
******* * * ** * ** ********
*************** * *******************
****************************** ***
*** ********* **
** * **
** * **
** * **
** * **
** * **
** ** **
** ****** * ** *********
*************************************
**********
:param img_path: the image file path
:type img_path: str
:param ascii_path: the output ascii text file path
:type ascii_path: str
:param pad: how many space been filled in between two pixels
:type pad: int
"""
if len(ascii_char) != 1:
raise Exception("ascii_char has to be single character.")
image = Image.open(img_path).convert("L")
matrix = np.array(image)
# you can customize the gray scale fix behavior to fit color image
matrix[np.where(matrix >= 128)] = 255
matrix[np.where(matrix < 128)] = 0
lines = list()
for vector in matrix:
line = list()
for i in vector:
line.append(" " * pad)
if i:
line.append(" ")
else:
line.append(ascii_char)
lines.append("".join(line))
with open(ascii_path, "w") as f:
f.write("\n".join(lines)) | def function[img2ascii, parameter[img_path, ascii_path, ascii_char, pad]]:
constant[Convert an image to ascii art text.
Suppose we have an image like that:
.. image:: images/rabbit.png
:align: left
Put some codes::
>>> from weatherlab.math.img2waveform import img2ascii
>>> img2ascii(r"testdata\img2waveform
abbit.png",
... r"testdata\img2waveformsciiart.txt", pad=0)
Then you will see this in asciiart.txt::
******
*** *** ****
** ** *********
** ** *** ***
** * ** **
** ** ** **
** * *** *
* ** ** **
** * ** **
** * ** *
* ** ** *
** ** * **
** * ** **
* * ** **
* ** * **
** ** ** **
** * ** **
** * * **
** * ** *
** * ** *
* ** ** *
* ** * *
* ** ** *
* ** ** *
** ** ** **
** * ** **
** * * **
** * * **
** * * **
* * ** **
* * ** *
** * ** *
** * ** *
** ** ** **
* ** ** **
* ** ** **
** ** ** *
** ** ** **
* ** ** **
** ** ** *
** ******* *
** ******* **
** **
** *
** **
*** *
**** ***
*** ***
** ****
** ***
** ***
** **
** **
* **
** **
** **
** **
** **
** **
** **
** **
* **
* **
** *
** **
* **
* **
** *
** *
** **
** **
** **
** **
** ** **
** *** *** **
* **** **** **
* *** **** **
** ** ** *
** *
** *
* **
** **
** *
* **
** **
** **
** *
** **
** **
** **
** *** ** **
** ****** ***
*** ****** **
*** * *** ***
*** ***
*** ***
**** ****
******** *******
*** ********** ******** ***
** *** ************ ********** *** * ***
** * **** *********************** *** ** ***
** * ** **** ** ******* * *** ***** ***
**** * * ***** ********** * **** * * ** **
*** * * ** * ******************************* * *** * **
** ***** * *** ********** ** ** ********** *** ** ***
** * ***** ** * ***** ** ** ***** * * ** * **
*** *** ************ ** ****** ** * * ** ** ** * ** ***
** ******* * * ** ** ** **** * ** * ** * **** **
** *** *** ******* ****** * ** * *** ***** *** ** ***** ** **
** * * ***** ************************************ * **** * **
*** ** ** *********************************************** *** ***
*** ** ****************************************** **** ** ** **
**** ** ** ******************************************** ** * **
** ****** ** ******************************************** ** * ***
** ***** *********************************************** ** ****
* *** ****************************** **************** *********
** ** *************************************** * * * ***** *
** ** ********************************************** *** *
* ** ** *********************************** ******* ** *
** ** ***************************************** *** ** *
*** ** * ********************************************** ** **
****** ************************************************ ** ***
**** *********************************************** ********
** *********************************************** ****
*** ** ******************************************* **
*** ** ***** ****** * * * * * ******** *** ** ** ***
*** * * **** **** **** * ** ** * *** ** ***
**** * * ** **** * *** ******** * *** *****
***** ** ** ** ** *** ** *** *****
******* * * ** * ** ********
*************** * *******************
****************************** ***
*** ********* **
** * **
** * **
** * **
** * **
** * **
** ** **
** ****** * ** *********
*************************************
**********
:param img_path: the image file path
:type img_path: str
:param ascii_path: the output ascii text file path
:type ascii_path: str
:param pad: how many space been filled in between two pixels
:type pad: int
]
if compare[call[name[len], parameter[name[ascii_char]]] not_equal[!=] constant[1]] begin[:]
<ast.Raise object at 0x7da18f09f8e0>
variable[image] assign[=] call[call[name[Image].open, parameter[name[img_path]]].convert, parameter[constant[L]]]
variable[matrix] assign[=] call[name[np].array, parameter[name[image]]]
call[name[matrix]][call[name[np].where, parameter[compare[name[matrix] greater_or_equal[>=] constant[128]]]]] assign[=] constant[255]
call[name[matrix]][call[name[np].where, parameter[compare[name[matrix] less[<] constant[128]]]]] assign[=] constant[0]
variable[lines] assign[=] call[name[list], parameter[]]
for taget[name[vector]] in starred[name[matrix]] begin[:]
variable[line] assign[=] call[name[list], parameter[]]
for taget[name[i]] in starred[name[vector]] begin[:]
call[name[line].append, parameter[binary_operation[constant[ ] * name[pad]]]]
if name[i] begin[:]
call[name[line].append, parameter[constant[ ]]]
call[name[lines].append, parameter[call[constant[].join, parameter[name[line]]]]]
with call[name[open], parameter[name[ascii_path], constant[w]]] begin[:]
call[name[f].write, parameter[call[constant[
].join, parameter[name[lines]]]]] | keyword[def] identifier[img2ascii] ( identifier[img_path] , identifier[ascii_path] , identifier[ascii_char] = literal[string] , identifier[pad] = literal[int] ):
literal[string]
keyword[if] identifier[len] ( identifier[ascii_char] )!= literal[int] :
keyword[raise] identifier[Exception] ( literal[string] )
identifier[image] = identifier[Image] . identifier[open] ( identifier[img_path] ). identifier[convert] ( literal[string] )
identifier[matrix] = identifier[np] . identifier[array] ( identifier[image] )
identifier[matrix] [ identifier[np] . identifier[where] ( identifier[matrix] >= literal[int] )]= literal[int]
identifier[matrix] [ identifier[np] . identifier[where] ( identifier[matrix] < literal[int] )]= literal[int]
identifier[lines] = identifier[list] ()
keyword[for] identifier[vector] keyword[in] identifier[matrix] :
identifier[line] = identifier[list] ()
keyword[for] identifier[i] keyword[in] identifier[vector] :
identifier[line] . identifier[append] ( literal[string] * identifier[pad] )
keyword[if] identifier[i] :
identifier[line] . identifier[append] ( literal[string] )
keyword[else] :
identifier[line] . identifier[append] ( identifier[ascii_char] )
identifier[lines] . identifier[append] ( literal[string] . identifier[join] ( identifier[line] ))
keyword[with] identifier[open] ( identifier[ascii_path] , literal[string] ) keyword[as] identifier[f] :
identifier[f] . identifier[write] ( literal[string] . identifier[join] ( identifier[lines] )) | def img2ascii(img_path, ascii_path, ascii_char='*', pad=0):
"""Convert an image to ascii art text.
Suppose we have an image like that:
.. image:: images/rabbit.png
:align: left
Put some codes::
>>> from weatherlab.math.img2waveform import img2ascii
>>> img2ascii(r"testdata\\img2waveform\rabbit.png",
... r"testdata\\img2waveform\x07sciiart.txt", pad=0)
Then you will see this in asciiart.txt::
******
*** *** ****
** ** *********
** ** *** ***
** * ** **
** ** ** **
** * *** *
* ** ** **
** * ** **
** * ** *
* ** ** *
** ** * **
** * ** **
* * ** **
* ** * **
** ** ** **
** * ** **
** * * **
** * ** *
** * ** *
* ** ** *
* ** * *
* ** ** *
* ** ** *
** ** ** **
** * ** **
** * * **
** * * **
** * * **
* * ** **
* * ** *
** * ** *
** * ** *
** ** ** **
* ** ** **
* ** ** **
** ** ** *
** ** ** **
* ** ** **
** ** ** *
** ******* *
** ******* **
** **
** *
** **
*** *
**** ***
*** ***
** ****
** ***
** ***
** **
** **
* **
** **
** **
** **
** **
** **
** **
** **
* **
* **
** *
** **
* **
* **
** *
** *
** **
** **
** **
** **
** ** **
** *** *** **
* **** **** **
* *** **** **
** ** ** *
** *
** *
* **
** **
** *
* **
** **
** **
** *
** **
** **
** **
** *** ** **
** ****** ***
*** ****** **
*** * *** ***
*** ***
*** ***
**** ****
******** *******
*** ********** ******** ***
** *** ************ ********** *** * ***
** * **** *********************** *** ** ***
** * ** **** ** ******* * *** ***** ***
**** * * ***** ********** * **** * * ** **
*** * * ** * ******************************* * *** * **
** ***** * *** ********** ** ** ********** *** ** ***
** * ***** ** * ***** ** ** ***** * * ** * **
*** *** ************ ** ****** ** * * ** ** ** * ** ***
** ******* * * ** ** ** **** * ** * ** * **** **
** *** *** ******* ****** * ** * *** ***** *** ** ***** ** **
** * * ***** ************************************ * **** * **
*** ** ** *********************************************** *** ***
*** ** ****************************************** **** ** ** **
**** ** ** ******************************************** ** * **
** ****** ** ******************************************** ** * ***
** ***** *********************************************** ** ****
* *** ****************************** **************** *********
** ** *************************************** * * * ***** *
** ** ********************************************** *** *
* ** ** *********************************** ******* ** *
** ** ***************************************** *** ** *
*** ** * ********************************************** ** **
****** ************************************************ ** ***
**** *********************************************** ********
** *********************************************** ****
*** ** ******************************************* **
*** ** ***** ****** * * * * * ******** *** ** ** ***
*** * * **** **** **** * ** ** * *** ** ***
**** * * ** **** * *** ******** * *** *****
***** ** ** ** ** *** ** *** *****
******* * * ** * ** ********
*************** * *******************
****************************** ***
*** ********* **
** * **
** * **
** * **
** * **
** * **
** ** **
** ****** * ** *********
*************************************
**********
:param img_path: the image file path
:type img_path: str
:param ascii_path: the output ascii text file path
:type ascii_path: str
:param pad: how many space been filled in between two pixels
:type pad: int
"""
if len(ascii_char) != 1:
raise Exception('ascii_char has to be single character.') # depends on [control=['if'], data=[]]
image = Image.open(img_path).convert('L')
matrix = np.array(image)
# you can customize the gray scale fix behavior to fit color image
matrix[np.where(matrix >= 128)] = 255
matrix[np.where(matrix < 128)] = 0
lines = list()
for vector in matrix:
line = list()
for i in vector:
line.append(' ' * pad)
if i:
line.append(' ') # depends on [control=['if'], data=[]]
else:
line.append(ascii_char) # depends on [control=['for'], data=['i']]
lines.append(''.join(line)) # depends on [control=['for'], data=['vector']]
with open(ascii_path, 'w') as f:
f.write('\n'.join(lines)) # depends on [control=['with'], data=['f']] |
def solar_azimuth(self, dateandtime, latitude, longitude):
"""Calculate the azimuth angle of the sun.
:param dateandtime: The date and time for which to calculate
the angle.
:type dateandtime: :class:`~datetime.datetime`
:param latitude: Latitude - Northern latitudes should be positive
:type latitude: float
:param longitude: Longitude - Eastern longitudes should be positive
:type longitude: float
:return: The azimuth angle in degrees clockwise from North.
:rtype: float
If `dateandtime` is a naive Python datetime then it is assumed to be
in the UTC timezone.
"""
if latitude > 89.8:
latitude = 89.8
if latitude < -89.8:
latitude = -89.8
if dateandtime.tzinfo is None:
zone = 0
utc_datetime = dateandtime
else:
zone = -dateandtime.utcoffset().total_seconds() / 3600.0
utc_datetime = dateandtime.astimezone(pytz.utc)
timenow = (
utc_datetime.hour
+ (utc_datetime.minute / 60.0)
+ (utc_datetime.second / 3600.0)
)
JD = self._julianday(dateandtime)
t = self._jday_to_jcentury(JD + timenow / 24.0)
theta = self._sun_declination(t)
eqtime = self._eq_of_time(t)
solarDec = theta # in degrees
solarTimeFix = eqtime - (4.0 * -longitude) + (60 * zone)
trueSolarTime = (
dateandtime.hour * 60.0
+ dateandtime.minute
+ dateandtime.second / 60.0
+ solarTimeFix
)
# in minutes
while trueSolarTime > 1440:
trueSolarTime = trueSolarTime - 1440
hourangle = trueSolarTime / 4.0 - 180.0
# Thanks to Louis Schwarzmayr for the next line:
if hourangle < -180:
hourangle = hourangle + 360.0
harad = radians(hourangle)
csz = sin(radians(latitude)) * sin(radians(solarDec)) + cos(
radians(latitude)
) * cos(radians(solarDec)) * cos(harad)
if csz > 1.0:
csz = 1.0
elif csz < -1.0:
csz = -1.0
zenith = degrees(acos(csz))
azDenom = cos(radians(latitude)) * sin(radians(zenith))
if abs(azDenom) > 0.001:
azRad = (
(sin(radians(latitude)) * cos(radians(zenith))) - sin(radians(solarDec))
) / azDenom
if abs(azRad) > 1.0:
if azRad < 0:
azRad = -1.0
else:
azRad = 1.0
azimuth = 180.0 - degrees(acos(azRad))
if hourangle > 0.0:
azimuth = -azimuth
else:
if latitude > 0.0:
azimuth = 180.0
else:
azimuth = 0.0
if azimuth < 0.0:
azimuth = azimuth + 360.0
return azimuth | def function[solar_azimuth, parameter[self, dateandtime, latitude, longitude]]:
constant[Calculate the azimuth angle of the sun.
:param dateandtime: The date and time for which to calculate
the angle.
:type dateandtime: :class:`~datetime.datetime`
:param latitude: Latitude - Northern latitudes should be positive
:type latitude: float
:param longitude: Longitude - Eastern longitudes should be positive
:type longitude: float
:return: The azimuth angle in degrees clockwise from North.
:rtype: float
If `dateandtime` is a naive Python datetime then it is assumed to be
in the UTC timezone.
]
if compare[name[latitude] greater[>] constant[89.8]] begin[:]
variable[latitude] assign[=] constant[89.8]
if compare[name[latitude] less[<] <ast.UnaryOp object at 0x7da2047e80a0>] begin[:]
variable[latitude] assign[=] <ast.UnaryOp object at 0x7da2047ea560>
if compare[name[dateandtime].tzinfo is constant[None]] begin[:]
variable[zone] assign[=] constant[0]
variable[utc_datetime] assign[=] name[dateandtime]
variable[timenow] assign[=] binary_operation[binary_operation[name[utc_datetime].hour + binary_operation[name[utc_datetime].minute / constant[60.0]]] + binary_operation[name[utc_datetime].second / constant[3600.0]]]
variable[JD] assign[=] call[name[self]._julianday, parameter[name[dateandtime]]]
variable[t] assign[=] call[name[self]._jday_to_jcentury, parameter[binary_operation[name[JD] + binary_operation[name[timenow] / constant[24.0]]]]]
variable[theta] assign[=] call[name[self]._sun_declination, parameter[name[t]]]
variable[eqtime] assign[=] call[name[self]._eq_of_time, parameter[name[t]]]
variable[solarDec] assign[=] name[theta]
variable[solarTimeFix] assign[=] binary_operation[binary_operation[name[eqtime] - binary_operation[constant[4.0] * <ast.UnaryOp object at 0x7da2047eb4f0>]] + binary_operation[constant[60] * name[zone]]]
variable[trueSolarTime] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[name[dateandtime].hour * constant[60.0]] + name[dateandtime].minute] + binary_operation[name[dateandtime].second / constant[60.0]]] + name[solarTimeFix]]
while compare[name[trueSolarTime] greater[>] constant[1440]] begin[:]
variable[trueSolarTime] assign[=] binary_operation[name[trueSolarTime] - constant[1440]]
variable[hourangle] assign[=] binary_operation[binary_operation[name[trueSolarTime] / constant[4.0]] - constant[180.0]]
if compare[name[hourangle] less[<] <ast.UnaryOp object at 0x7da204345c90>] begin[:]
variable[hourangle] assign[=] binary_operation[name[hourangle] + constant[360.0]]
variable[harad] assign[=] call[name[radians], parameter[name[hourangle]]]
variable[csz] assign[=] binary_operation[binary_operation[call[name[sin], parameter[call[name[radians], parameter[name[latitude]]]]] * call[name[sin], parameter[call[name[radians], parameter[name[solarDec]]]]]] + binary_operation[binary_operation[call[name[cos], parameter[call[name[radians], parameter[name[latitude]]]]] * call[name[cos], parameter[call[name[radians], parameter[name[solarDec]]]]]] * call[name[cos], parameter[name[harad]]]]]
if compare[name[csz] greater[>] constant[1.0]] begin[:]
variable[csz] assign[=] constant[1.0]
variable[zenith] assign[=] call[name[degrees], parameter[call[name[acos], parameter[name[csz]]]]]
variable[azDenom] assign[=] binary_operation[call[name[cos], parameter[call[name[radians], parameter[name[latitude]]]]] * call[name[sin], parameter[call[name[radians], parameter[name[zenith]]]]]]
if compare[call[name[abs], parameter[name[azDenom]]] greater[>] constant[0.001]] begin[:]
variable[azRad] assign[=] binary_operation[binary_operation[binary_operation[call[name[sin], parameter[call[name[radians], parameter[name[latitude]]]]] * call[name[cos], parameter[call[name[radians], parameter[name[zenith]]]]]] - call[name[sin], parameter[call[name[radians], parameter[name[solarDec]]]]]] / name[azDenom]]
if compare[call[name[abs], parameter[name[azRad]]] greater[>] constant[1.0]] begin[:]
if compare[name[azRad] less[<] constant[0]] begin[:]
variable[azRad] assign[=] <ast.UnaryOp object at 0x7da204344d60>
variable[azimuth] assign[=] binary_operation[constant[180.0] - call[name[degrees], parameter[call[name[acos], parameter[name[azRad]]]]]]
if compare[name[hourangle] greater[>] constant[0.0]] begin[:]
variable[azimuth] assign[=] <ast.UnaryOp object at 0x7da204344a90>
if compare[name[azimuth] less[<] constant[0.0]] begin[:]
variable[azimuth] assign[=] binary_operation[name[azimuth] + constant[360.0]]
return[name[azimuth]] | keyword[def] identifier[solar_azimuth] ( identifier[self] , identifier[dateandtime] , identifier[latitude] , identifier[longitude] ):
literal[string]
keyword[if] identifier[latitude] > literal[int] :
identifier[latitude] = literal[int]
keyword[if] identifier[latitude] <- literal[int] :
identifier[latitude] =- literal[int]
keyword[if] identifier[dateandtime] . identifier[tzinfo] keyword[is] keyword[None] :
identifier[zone] = literal[int]
identifier[utc_datetime] = identifier[dateandtime]
keyword[else] :
identifier[zone] =- identifier[dateandtime] . identifier[utcoffset] (). identifier[total_seconds] ()/ literal[int]
identifier[utc_datetime] = identifier[dateandtime] . identifier[astimezone] ( identifier[pytz] . identifier[utc] )
identifier[timenow] =(
identifier[utc_datetime] . identifier[hour]
+( identifier[utc_datetime] . identifier[minute] / literal[int] )
+( identifier[utc_datetime] . identifier[second] / literal[int] )
)
identifier[JD] = identifier[self] . identifier[_julianday] ( identifier[dateandtime] )
identifier[t] = identifier[self] . identifier[_jday_to_jcentury] ( identifier[JD] + identifier[timenow] / literal[int] )
identifier[theta] = identifier[self] . identifier[_sun_declination] ( identifier[t] )
identifier[eqtime] = identifier[self] . identifier[_eq_of_time] ( identifier[t] )
identifier[solarDec] = identifier[theta]
identifier[solarTimeFix] = identifier[eqtime] -( literal[int] *- identifier[longitude] )+( literal[int] * identifier[zone] )
identifier[trueSolarTime] =(
identifier[dateandtime] . identifier[hour] * literal[int]
+ identifier[dateandtime] . identifier[minute]
+ identifier[dateandtime] . identifier[second] / literal[int]
+ identifier[solarTimeFix]
)
keyword[while] identifier[trueSolarTime] > literal[int] :
identifier[trueSolarTime] = identifier[trueSolarTime] - literal[int]
identifier[hourangle] = identifier[trueSolarTime] / literal[int] - literal[int]
keyword[if] identifier[hourangle] <- literal[int] :
identifier[hourangle] = identifier[hourangle] + literal[int]
identifier[harad] = identifier[radians] ( identifier[hourangle] )
identifier[csz] = identifier[sin] ( identifier[radians] ( identifier[latitude] ))* identifier[sin] ( identifier[radians] ( identifier[solarDec] ))+ identifier[cos] (
identifier[radians] ( identifier[latitude] )
)* identifier[cos] ( identifier[radians] ( identifier[solarDec] ))* identifier[cos] ( identifier[harad] )
keyword[if] identifier[csz] > literal[int] :
identifier[csz] = literal[int]
keyword[elif] identifier[csz] <- literal[int] :
identifier[csz] =- literal[int]
identifier[zenith] = identifier[degrees] ( identifier[acos] ( identifier[csz] ))
identifier[azDenom] = identifier[cos] ( identifier[radians] ( identifier[latitude] ))* identifier[sin] ( identifier[radians] ( identifier[zenith] ))
keyword[if] identifier[abs] ( identifier[azDenom] )> literal[int] :
identifier[azRad] =(
( identifier[sin] ( identifier[radians] ( identifier[latitude] ))* identifier[cos] ( identifier[radians] ( identifier[zenith] )))- identifier[sin] ( identifier[radians] ( identifier[solarDec] ))
)/ identifier[azDenom]
keyword[if] identifier[abs] ( identifier[azRad] )> literal[int] :
keyword[if] identifier[azRad] < literal[int] :
identifier[azRad] =- literal[int]
keyword[else] :
identifier[azRad] = literal[int]
identifier[azimuth] = literal[int] - identifier[degrees] ( identifier[acos] ( identifier[azRad] ))
keyword[if] identifier[hourangle] > literal[int] :
identifier[azimuth] =- identifier[azimuth]
keyword[else] :
keyword[if] identifier[latitude] > literal[int] :
identifier[azimuth] = literal[int]
keyword[else] :
identifier[azimuth] = literal[int]
keyword[if] identifier[azimuth] < literal[int] :
identifier[azimuth] = identifier[azimuth] + literal[int]
keyword[return] identifier[azimuth] | def solar_azimuth(self, dateandtime, latitude, longitude):
"""Calculate the azimuth angle of the sun.
:param dateandtime: The date and time for which to calculate
the angle.
:type dateandtime: :class:`~datetime.datetime`
:param latitude: Latitude - Northern latitudes should be positive
:type latitude: float
:param longitude: Longitude - Eastern longitudes should be positive
:type longitude: float
:return: The azimuth angle in degrees clockwise from North.
:rtype: float
If `dateandtime` is a naive Python datetime then it is assumed to be
in the UTC timezone.
"""
if latitude > 89.8:
latitude = 89.8 # depends on [control=['if'], data=['latitude']]
if latitude < -89.8:
latitude = -89.8 # depends on [control=['if'], data=['latitude']]
if dateandtime.tzinfo is None:
zone = 0
utc_datetime = dateandtime # depends on [control=['if'], data=[]]
else:
zone = -dateandtime.utcoffset().total_seconds() / 3600.0
utc_datetime = dateandtime.astimezone(pytz.utc)
timenow = utc_datetime.hour + utc_datetime.minute / 60.0 + utc_datetime.second / 3600.0
JD = self._julianday(dateandtime)
t = self._jday_to_jcentury(JD + timenow / 24.0)
theta = self._sun_declination(t)
eqtime = self._eq_of_time(t)
solarDec = theta # in degrees
solarTimeFix = eqtime - 4.0 * -longitude + 60 * zone
trueSolarTime = dateandtime.hour * 60.0 + dateandtime.minute + dateandtime.second / 60.0 + solarTimeFix
# in minutes
while trueSolarTime > 1440:
trueSolarTime = trueSolarTime - 1440 # depends on [control=['while'], data=['trueSolarTime']]
hourangle = trueSolarTime / 4.0 - 180.0
# Thanks to Louis Schwarzmayr for the next line:
if hourangle < -180:
hourangle = hourangle + 360.0 # depends on [control=['if'], data=['hourangle']]
harad = radians(hourangle)
csz = sin(radians(latitude)) * sin(radians(solarDec)) + cos(radians(latitude)) * cos(radians(solarDec)) * cos(harad)
if csz > 1.0:
csz = 1.0 # depends on [control=['if'], data=['csz']]
elif csz < -1.0:
csz = -1.0 # depends on [control=['if'], data=['csz']]
zenith = degrees(acos(csz))
azDenom = cos(radians(latitude)) * sin(radians(zenith))
if abs(azDenom) > 0.001:
azRad = (sin(radians(latitude)) * cos(radians(zenith)) - sin(radians(solarDec))) / azDenom
if abs(azRad) > 1.0:
if azRad < 0:
azRad = -1.0 # depends on [control=['if'], data=['azRad']]
else:
azRad = 1.0 # depends on [control=['if'], data=[]]
azimuth = 180.0 - degrees(acos(azRad))
if hourangle > 0.0:
azimuth = -azimuth # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif latitude > 0.0:
azimuth = 180.0 # depends on [control=['if'], data=[]]
else:
azimuth = 0.0
if azimuth < 0.0:
azimuth = azimuth + 360.0 # depends on [control=['if'], data=['azimuth']]
return azimuth |
def check_iterable_depth(obj, max_depth=100):
"""find the maximum depth of nesting of the iterable
Parameters
----------
obj : iterable
max_depth : int, default: 100
maximum depth beyond which we stop counting
Returns
-------
int
"""
def find_iterables(obj):
iterables = []
for item in obj:
if isiterable(item):
iterables += list(item)
return iterables
depth = 0
while (depth < max_depth) and isiterable(obj) and len(obj) > 0:
depth += 1
obj = find_iterables(obj)
return depth | def function[check_iterable_depth, parameter[obj, max_depth]]:
constant[find the maximum depth of nesting of the iterable
Parameters
----------
obj : iterable
max_depth : int, default: 100
maximum depth beyond which we stop counting
Returns
-------
int
]
def function[find_iterables, parameter[obj]]:
variable[iterables] assign[=] list[[]]
for taget[name[item]] in starred[name[obj]] begin[:]
if call[name[isiterable], parameter[name[item]]] begin[:]
<ast.AugAssign object at 0x7da18f810220>
return[name[iterables]]
variable[depth] assign[=] constant[0]
while <ast.BoolOp object at 0x7da18f811720> begin[:]
<ast.AugAssign object at 0x7da18f810cd0>
variable[obj] assign[=] call[name[find_iterables], parameter[name[obj]]]
return[name[depth]] | keyword[def] identifier[check_iterable_depth] ( identifier[obj] , identifier[max_depth] = literal[int] ):
literal[string]
keyword[def] identifier[find_iterables] ( identifier[obj] ):
identifier[iterables] =[]
keyword[for] identifier[item] keyword[in] identifier[obj] :
keyword[if] identifier[isiterable] ( identifier[item] ):
identifier[iterables] += identifier[list] ( identifier[item] )
keyword[return] identifier[iterables]
identifier[depth] = literal[int]
keyword[while] ( identifier[depth] < identifier[max_depth] ) keyword[and] identifier[isiterable] ( identifier[obj] ) keyword[and] identifier[len] ( identifier[obj] )> literal[int] :
identifier[depth] += literal[int]
identifier[obj] = identifier[find_iterables] ( identifier[obj] )
keyword[return] identifier[depth] | def check_iterable_depth(obj, max_depth=100):
"""find the maximum depth of nesting of the iterable
Parameters
----------
obj : iterable
max_depth : int, default: 100
maximum depth beyond which we stop counting
Returns
-------
int
"""
def find_iterables(obj):
iterables = []
for item in obj:
if isiterable(item):
iterables += list(item) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['item']]
return iterables
depth = 0
while depth < max_depth and isiterable(obj) and (len(obj) > 0):
depth += 1
obj = find_iterables(obj) # depends on [control=['while'], data=[]]
return depth |
def switch_training(self, flag):
"""
Switch training mode.
:param flag: switch on training mode when flag is True.
"""
if self._is_training == flag: return
self._is_training = flag
if flag:
self._training_flag.set_value(1)
else:
self._training_flag.set_value(0) | def function[switch_training, parameter[self, flag]]:
constant[
Switch training mode.
:param flag: switch on training mode when flag is True.
]
if compare[name[self]._is_training equal[==] name[flag]] begin[:]
return[None]
name[self]._is_training assign[=] name[flag]
if name[flag] begin[:]
call[name[self]._training_flag.set_value, parameter[constant[1]]] | keyword[def] identifier[switch_training] ( identifier[self] , identifier[flag] ):
literal[string]
keyword[if] identifier[self] . identifier[_is_training] == identifier[flag] : keyword[return]
identifier[self] . identifier[_is_training] = identifier[flag]
keyword[if] identifier[flag] :
identifier[self] . identifier[_training_flag] . identifier[set_value] ( literal[int] )
keyword[else] :
identifier[self] . identifier[_training_flag] . identifier[set_value] ( literal[int] ) | def switch_training(self, flag):
"""
Switch training mode.
:param flag: switch on training mode when flag is True.
"""
if self._is_training == flag:
return # depends on [control=['if'], data=[]]
self._is_training = flag
if flag:
self._training_flag.set_value(1) # depends on [control=['if'], data=[]]
else:
self._training_flag.set_value(0) |
def _build_predict(self, Xnew, full_cov=False):
"""
Xnew is a data matrix, point at which we want to predict
This method computes
p(F* | (F=LV) )
where F* are points on the GP at Xnew, F=LV are points on the GP at X.
"""
mu, var = conditional(Xnew, self.X, self.kern, self.V,
full_cov=full_cov,
q_sqrt=None, white=True)
return mu + self.mean_function(Xnew), var | def function[_build_predict, parameter[self, Xnew, full_cov]]:
constant[
Xnew is a data matrix, point at which we want to predict
This method computes
p(F* | (F=LV) )
where F* are points on the GP at Xnew, F=LV are points on the GP at X.
]
<ast.Tuple object at 0x7da1b21ec0a0> assign[=] call[name[conditional], parameter[name[Xnew], name[self].X, name[self].kern, name[self].V]]
return[tuple[[<ast.BinOp object at 0x7da204623ca0>, <ast.Name object at 0x7da204622620>]]] | keyword[def] identifier[_build_predict] ( identifier[self] , identifier[Xnew] , identifier[full_cov] = keyword[False] ):
literal[string]
identifier[mu] , identifier[var] = identifier[conditional] ( identifier[Xnew] , identifier[self] . identifier[X] , identifier[self] . identifier[kern] , identifier[self] . identifier[V] ,
identifier[full_cov] = identifier[full_cov] ,
identifier[q_sqrt] = keyword[None] , identifier[white] = keyword[True] )
keyword[return] identifier[mu] + identifier[self] . identifier[mean_function] ( identifier[Xnew] ), identifier[var] | def _build_predict(self, Xnew, full_cov=False):
"""
Xnew is a data matrix, point at which we want to predict
This method computes
p(F* | (F=LV) )
where F* are points on the GP at Xnew, F=LV are points on the GP at X.
"""
(mu, var) = conditional(Xnew, self.X, self.kern, self.V, full_cov=full_cov, q_sqrt=None, white=True)
return (mu + self.mean_function(Xnew), var) |
def pow(self, x, axis):
"""Function to power 3D View with vector or 2D array (type = numpy.ndarray or 2D Field or 2D View) or 2D View with vector (type = numpy.ndarray)
:param x: array(1D, 2D) or field (2D) or View(2D)
:param axis: specifies axis, eg. axis = (1,2) plane lies in yz-plane, axis=0 vector along x axis
:return: dict with result of operation (same form as view.d)
"""
return self.__array_op(operator.pow, x, axis) | def function[pow, parameter[self, x, axis]]:
constant[Function to power 3D View with vector or 2D array (type = numpy.ndarray or 2D Field or 2D View) or 2D View with vector (type = numpy.ndarray)
:param x: array(1D, 2D) or field (2D) or View(2D)
:param axis: specifies axis, eg. axis = (1,2) plane lies in yz-plane, axis=0 vector along x axis
:return: dict with result of operation (same form as view.d)
]
return[call[name[self].__array_op, parameter[name[operator].pow, name[x], name[axis]]]] | keyword[def] identifier[pow] ( identifier[self] , identifier[x] , identifier[axis] ):
literal[string]
keyword[return] identifier[self] . identifier[__array_op] ( identifier[operator] . identifier[pow] , identifier[x] , identifier[axis] ) | def pow(self, x, axis):
"""Function to power 3D View with vector or 2D array (type = numpy.ndarray or 2D Field or 2D View) or 2D View with vector (type = numpy.ndarray)
:param x: array(1D, 2D) or field (2D) or View(2D)
:param axis: specifies axis, eg. axis = (1,2) plane lies in yz-plane, axis=0 vector along x axis
:return: dict with result of operation (same form as view.d)
"""
return self.__array_op(operator.pow, x, axis) |
def add_execution_event(self, context_id, event):
"""Within a context, append data to the execution result.
Args:
context_id (str): the context id returned by create_context
data_type (str): type of data to append
data (bytes): data to append
Returns:
(bool): True if the operation is successful, False if
the context_id doesn't reference a known context.
"""
if context_id not in self._contexts:
LOGGER.warning("Context_id not in contexts, %s", context_id)
return False
context = self._contexts.get(context_id)
context.add_execution_event(event)
return True | def function[add_execution_event, parameter[self, context_id, event]]:
constant[Within a context, append data to the execution result.
Args:
context_id (str): the context id returned by create_context
data_type (str): type of data to append
data (bytes): data to append
Returns:
(bool): True if the operation is successful, False if
the context_id doesn't reference a known context.
]
if compare[name[context_id] <ast.NotIn object at 0x7da2590d7190> name[self]._contexts] begin[:]
call[name[LOGGER].warning, parameter[constant[Context_id not in contexts, %s], name[context_id]]]
return[constant[False]]
variable[context] assign[=] call[name[self]._contexts.get, parameter[name[context_id]]]
call[name[context].add_execution_event, parameter[name[event]]]
return[constant[True]] | keyword[def] identifier[add_execution_event] ( identifier[self] , identifier[context_id] , identifier[event] ):
literal[string]
keyword[if] identifier[context_id] keyword[not] keyword[in] identifier[self] . identifier[_contexts] :
identifier[LOGGER] . identifier[warning] ( literal[string] , identifier[context_id] )
keyword[return] keyword[False]
identifier[context] = identifier[self] . identifier[_contexts] . identifier[get] ( identifier[context_id] )
identifier[context] . identifier[add_execution_event] ( identifier[event] )
keyword[return] keyword[True] | def add_execution_event(self, context_id, event):
"""Within a context, append data to the execution result.
Args:
context_id (str): the context id returned by create_context
data_type (str): type of data to append
data (bytes): data to append
Returns:
(bool): True if the operation is successful, False if
the context_id doesn't reference a known context.
"""
if context_id not in self._contexts:
LOGGER.warning('Context_id not in contexts, %s', context_id)
return False # depends on [control=['if'], data=['context_id']]
context = self._contexts.get(context_id)
context.add_execution_event(event)
return True |
def get_call_signature(fn: FunctionType,
args: ArgsType,
kwargs: KwargsType,
debug_cache: bool = False) -> str:
"""
Takes a function and its args/kwargs, and produces a string description
of the function call (the call signature) suitable for use indirectly as a
cache key. The string is a JSON representation. See ``make_cache_key`` for
a more suitable actual cache key.
"""
# Note that the function won't have the __self__ argument (as in
# fn.__self__), at this point, even if it's a member function.
try:
call_sig = json_encode((fn.__qualname__, args, kwargs))
except TypeError:
log.critical(
"\nTo decorate using @django_cache_function without specifying "
"cache_key, the decorated function's owning class and its "
"parameters must be JSON-serializable (see jsonfunc.py, "
"django_cache_fn.py).\n")
raise
if debug_cache:
log.debug("Making call signature {!r}", call_sig)
return call_sig | def function[get_call_signature, parameter[fn, args, kwargs, debug_cache]]:
constant[
Takes a function and its args/kwargs, and produces a string description
of the function call (the call signature) suitable for use indirectly as a
cache key. The string is a JSON representation. See ``make_cache_key`` for
a more suitable actual cache key.
]
<ast.Try object at 0x7da1b18d8ee0>
if name[debug_cache] begin[:]
call[name[log].debug, parameter[constant[Making call signature {!r}], name[call_sig]]]
return[name[call_sig]] | keyword[def] identifier[get_call_signature] ( identifier[fn] : identifier[FunctionType] ,
identifier[args] : identifier[ArgsType] ,
identifier[kwargs] : identifier[KwargsType] ,
identifier[debug_cache] : identifier[bool] = keyword[False] )-> identifier[str] :
literal[string]
keyword[try] :
identifier[call_sig] = identifier[json_encode] (( identifier[fn] . identifier[__qualname__] , identifier[args] , identifier[kwargs] ))
keyword[except] identifier[TypeError] :
identifier[log] . identifier[critical] (
literal[string]
literal[string]
literal[string]
literal[string] )
keyword[raise]
keyword[if] identifier[debug_cache] :
identifier[log] . identifier[debug] ( literal[string] , identifier[call_sig] )
keyword[return] identifier[call_sig] | def get_call_signature(fn: FunctionType, args: ArgsType, kwargs: KwargsType, debug_cache: bool=False) -> str:
"""
Takes a function and its args/kwargs, and produces a string description
of the function call (the call signature) suitable for use indirectly as a
cache key. The string is a JSON representation. See ``make_cache_key`` for
a more suitable actual cache key.
"""
# Note that the function won't have the __self__ argument (as in
# fn.__self__), at this point, even if it's a member function.
try:
call_sig = json_encode((fn.__qualname__, args, kwargs)) # depends on [control=['try'], data=[]]
except TypeError:
log.critical("\nTo decorate using @django_cache_function without specifying cache_key, the decorated function's owning class and its parameters must be JSON-serializable (see jsonfunc.py, django_cache_fn.py).\n")
raise # depends on [control=['except'], data=[]]
if debug_cache:
log.debug('Making call signature {!r}', call_sig) # depends on [control=['if'], data=[]]
return call_sig |
def set_default(*params):
"""
INPUT dicts IN PRIORITY ORDER
UPDATES FIRST dict WITH THE MERGE RESULT, WHERE MERGE RESULT IS DEFINED AS:
FOR EACH LEAF, RETURN THE HIGHEST PRIORITY LEAF VALUE
"""
p0 = params[0]
agg = p0 if p0 or _get(p0, CLASS) in data_types else {}
for p in params[1:]:
p = unwrap(p)
if p is None:
continue
_all_default(agg, p, seen={})
return wrap(agg) | def function[set_default, parameter[]]:
constant[
INPUT dicts IN PRIORITY ORDER
UPDATES FIRST dict WITH THE MERGE RESULT, WHERE MERGE RESULT IS DEFINED AS:
FOR EACH LEAF, RETURN THE HIGHEST PRIORITY LEAF VALUE
]
variable[p0] assign[=] call[name[params]][constant[0]]
variable[agg] assign[=] <ast.IfExp object at 0x7da20c991a80>
for taget[name[p]] in starred[call[name[params]][<ast.Slice object at 0x7da20c9917b0>]] begin[:]
variable[p] assign[=] call[name[unwrap], parameter[name[p]]]
if compare[name[p] is constant[None]] begin[:]
continue
call[name[_all_default], parameter[name[agg], name[p]]]
return[call[name[wrap], parameter[name[agg]]]] | keyword[def] identifier[set_default] (* identifier[params] ):
literal[string]
identifier[p0] = identifier[params] [ literal[int] ]
identifier[agg] = identifier[p0] keyword[if] identifier[p0] keyword[or] identifier[_get] ( identifier[p0] , identifier[CLASS] ) keyword[in] identifier[data_types] keyword[else] {}
keyword[for] identifier[p] keyword[in] identifier[params] [ literal[int] :]:
identifier[p] = identifier[unwrap] ( identifier[p] )
keyword[if] identifier[p] keyword[is] keyword[None] :
keyword[continue]
identifier[_all_default] ( identifier[agg] , identifier[p] , identifier[seen] ={})
keyword[return] identifier[wrap] ( identifier[agg] ) | def set_default(*params):
"""
INPUT dicts IN PRIORITY ORDER
UPDATES FIRST dict WITH THE MERGE RESULT, WHERE MERGE RESULT IS DEFINED AS:
FOR EACH LEAF, RETURN THE HIGHEST PRIORITY LEAF VALUE
"""
p0 = params[0]
agg = p0 if p0 or _get(p0, CLASS) in data_types else {}
for p in params[1:]:
p = unwrap(p)
if p is None:
continue # depends on [control=['if'], data=[]]
_all_default(agg, p, seen={}) # depends on [control=['for'], data=['p']]
return wrap(agg) |
def power_level(self, val):
"""Sets the power level according to the index or string.
0 = High
1 = HighLow
2 = LowHigh
3 = Low"""
if val in [0, 1, 2, 3]:
self.pdx.PowerLevel = val
elif type(val) is str:
if val.lower() == "high":
self.pdx.PowerLevel = 0
elif val.lower() == "highlow":
self.pdx.PowerLevel = 1
elif val.lower() == "lowhigh":
self.pdx.PowerLevel = 2
elif val.lower() == "low":
self.pdx.PowerLevel = 3
else:
raise ValueError("Not a valid power level") | def function[power_level, parameter[self, val]]:
constant[Sets the power level according to the index or string.
0 = High
1 = HighLow
2 = LowHigh
3 = Low]
if compare[name[val] in list[[<ast.Constant object at 0x7da204623e80>, <ast.Constant object at 0x7da2046205b0>, <ast.Constant object at 0x7da204621120>, <ast.Constant object at 0x7da204622980>]]] begin[:]
name[self].pdx.PowerLevel assign[=] name[val] | keyword[def] identifier[power_level] ( identifier[self] , identifier[val] ):
literal[string]
keyword[if] identifier[val] keyword[in] [ literal[int] , literal[int] , literal[int] , literal[int] ]:
identifier[self] . identifier[pdx] . identifier[PowerLevel] = identifier[val]
keyword[elif] identifier[type] ( identifier[val] ) keyword[is] identifier[str] :
keyword[if] identifier[val] . identifier[lower] ()== literal[string] :
identifier[self] . identifier[pdx] . identifier[PowerLevel] = literal[int]
keyword[elif] identifier[val] . identifier[lower] ()== literal[string] :
identifier[self] . identifier[pdx] . identifier[PowerLevel] = literal[int]
keyword[elif] identifier[val] . identifier[lower] ()== literal[string] :
identifier[self] . identifier[pdx] . identifier[PowerLevel] = literal[int]
keyword[elif] identifier[val] . identifier[lower] ()== literal[string] :
identifier[self] . identifier[pdx] . identifier[PowerLevel] = literal[int]
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] ) | def power_level(self, val):
"""Sets the power level according to the index or string.
0 = High
1 = HighLow
2 = LowHigh
3 = Low"""
if val in [0, 1, 2, 3]:
self.pdx.PowerLevel = val # depends on [control=['if'], data=['val']]
elif type(val) is str:
if val.lower() == 'high':
self.pdx.PowerLevel = 0 # depends on [control=['if'], data=[]]
elif val.lower() == 'highlow':
self.pdx.PowerLevel = 1 # depends on [control=['if'], data=[]]
elif val.lower() == 'lowhigh':
self.pdx.PowerLevel = 2 # depends on [control=['if'], data=[]]
elif val.lower() == 'low':
self.pdx.PowerLevel = 3 # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
raise ValueError('Not a valid power level') |
def create_api_docs(code_path, api_docs_path, max_depth=2):
"""Function for generating .rst file for all .py file in dir_path folder.
:param code_path: Path of the source code.
:type code_path: str
:param api_docs_path: Path of the api documentation directory.
:type api_docs_path: str
:param max_depth: Maximum depth for the index.
:type max_depth: int
"""
base_path = os.path.split(code_path)[0]
for package, subpackages, candidate_files in os.walk(code_path):
# Checking __init__.py file
if '__init__.py' not in candidate_files:
continue
# Creating directory for the package
package_relative_path = package.replace(base_path + os.sep, '')
index_package_path = os.path.join(
api_docs_path, package_relative_path)
# calculate dir one up from package to store the index in
index_base_path, package_base_name = os.path.split(index_package_path)
if package_base_name in EXCLUDED_PACKAGES:
continue
full_package_name = package_relative_path.replace(os.sep, '.')
new_rst_dir = os.path.join(api_docs_path, package_relative_path)
create_dirs(new_rst_dir)
# Create index_file for the directory
modules = get_python_files_from_list(candidate_files)
index_file_text = create_package_level_rst_index_file(
package_name=full_package_name,
max_depth=max_depth,
modules=modules,
inner_packages=subpackages)
write_rst_file(
file_directory=index_base_path,
file_name=package_base_name,
content=index_file_text)
# Creating .rst file for each .py file
for module in modules:
module = module[:-3] # strip .py off the end
py_module_text = create_module_rst_file(
'%s.%s' % (full_package_name, module))
write_rst_file(
file_directory=new_rst_dir,
file_name=module,
content=py_module_text) | def function[create_api_docs, parameter[code_path, api_docs_path, max_depth]]:
constant[Function for generating .rst file for all .py file in dir_path folder.
:param code_path: Path of the source code.
:type code_path: str
:param api_docs_path: Path of the api documentation directory.
:type api_docs_path: str
:param max_depth: Maximum depth for the index.
:type max_depth: int
]
variable[base_path] assign[=] call[call[name[os].path.split, parameter[name[code_path]]]][constant[0]]
for taget[tuple[[<ast.Name object at 0x7da1b0c51930>, <ast.Name object at 0x7da1b0c53c10>, <ast.Name object at 0x7da1b0c501c0>]]] in starred[call[name[os].walk, parameter[name[code_path]]]] begin[:]
if compare[constant[__init__.py] <ast.NotIn object at 0x7da2590d7190> name[candidate_files]] begin[:]
continue
variable[package_relative_path] assign[=] call[name[package].replace, parameter[binary_operation[name[base_path] + name[os].sep], constant[]]]
variable[index_package_path] assign[=] call[name[os].path.join, parameter[name[api_docs_path], name[package_relative_path]]]
<ast.Tuple object at 0x7da1b0c51030> assign[=] call[name[os].path.split, parameter[name[index_package_path]]]
if compare[name[package_base_name] in name[EXCLUDED_PACKAGES]] begin[:]
continue
variable[full_package_name] assign[=] call[name[package_relative_path].replace, parameter[name[os].sep, constant[.]]]
variable[new_rst_dir] assign[=] call[name[os].path.join, parameter[name[api_docs_path], name[package_relative_path]]]
call[name[create_dirs], parameter[name[new_rst_dir]]]
variable[modules] assign[=] call[name[get_python_files_from_list], parameter[name[candidate_files]]]
variable[index_file_text] assign[=] call[name[create_package_level_rst_index_file], parameter[]]
call[name[write_rst_file], parameter[]]
for taget[name[module]] in starred[name[modules]] begin[:]
variable[module] assign[=] call[name[module]][<ast.Slice object at 0x7da1b0c53940>]
variable[py_module_text] assign[=] call[name[create_module_rst_file], parameter[binary_operation[constant[%s.%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b0c51bd0>, <ast.Name object at 0x7da1b0c51510>]]]]]
call[name[write_rst_file], parameter[]] | keyword[def] identifier[create_api_docs] ( identifier[code_path] , identifier[api_docs_path] , identifier[max_depth] = literal[int] ):
literal[string]
identifier[base_path] = identifier[os] . identifier[path] . identifier[split] ( identifier[code_path] )[ literal[int] ]
keyword[for] identifier[package] , identifier[subpackages] , identifier[candidate_files] keyword[in] identifier[os] . identifier[walk] ( identifier[code_path] ):
keyword[if] literal[string] keyword[not] keyword[in] identifier[candidate_files] :
keyword[continue]
identifier[package_relative_path] = identifier[package] . identifier[replace] ( identifier[base_path] + identifier[os] . identifier[sep] , literal[string] )
identifier[index_package_path] = identifier[os] . identifier[path] . identifier[join] (
identifier[api_docs_path] , identifier[package_relative_path] )
identifier[index_base_path] , identifier[package_base_name] = identifier[os] . identifier[path] . identifier[split] ( identifier[index_package_path] )
keyword[if] identifier[package_base_name] keyword[in] identifier[EXCLUDED_PACKAGES] :
keyword[continue]
identifier[full_package_name] = identifier[package_relative_path] . identifier[replace] ( identifier[os] . identifier[sep] , literal[string] )
identifier[new_rst_dir] = identifier[os] . identifier[path] . identifier[join] ( identifier[api_docs_path] , identifier[package_relative_path] )
identifier[create_dirs] ( identifier[new_rst_dir] )
identifier[modules] = identifier[get_python_files_from_list] ( identifier[candidate_files] )
identifier[index_file_text] = identifier[create_package_level_rst_index_file] (
identifier[package_name] = identifier[full_package_name] ,
identifier[max_depth] = identifier[max_depth] ,
identifier[modules] = identifier[modules] ,
identifier[inner_packages] = identifier[subpackages] )
identifier[write_rst_file] (
identifier[file_directory] = identifier[index_base_path] ,
identifier[file_name] = identifier[package_base_name] ,
identifier[content] = identifier[index_file_text] )
keyword[for] identifier[module] keyword[in] identifier[modules] :
identifier[module] = identifier[module] [:- literal[int] ]
identifier[py_module_text] = identifier[create_module_rst_file] (
literal[string] %( identifier[full_package_name] , identifier[module] ))
identifier[write_rst_file] (
identifier[file_directory] = identifier[new_rst_dir] ,
identifier[file_name] = identifier[module] ,
identifier[content] = identifier[py_module_text] ) | def create_api_docs(code_path, api_docs_path, max_depth=2):
"""Function for generating .rst file for all .py file in dir_path folder.
:param code_path: Path of the source code.
:type code_path: str
:param api_docs_path: Path of the api documentation directory.
:type api_docs_path: str
:param max_depth: Maximum depth for the index.
:type max_depth: int
"""
base_path = os.path.split(code_path)[0]
for (package, subpackages, candidate_files) in os.walk(code_path):
# Checking __init__.py file
if '__init__.py' not in candidate_files:
continue # depends on [control=['if'], data=[]]
# Creating directory for the package
package_relative_path = package.replace(base_path + os.sep, '')
index_package_path = os.path.join(api_docs_path, package_relative_path)
# calculate dir one up from package to store the index in
(index_base_path, package_base_name) = os.path.split(index_package_path)
if package_base_name in EXCLUDED_PACKAGES:
continue # depends on [control=['if'], data=[]]
full_package_name = package_relative_path.replace(os.sep, '.')
new_rst_dir = os.path.join(api_docs_path, package_relative_path)
create_dirs(new_rst_dir)
# Create index_file for the directory
modules = get_python_files_from_list(candidate_files)
index_file_text = create_package_level_rst_index_file(package_name=full_package_name, max_depth=max_depth, modules=modules, inner_packages=subpackages)
write_rst_file(file_directory=index_base_path, file_name=package_base_name, content=index_file_text)
# Creating .rst file for each .py file
for module in modules:
module = module[:-3] # strip .py off the end
py_module_text = create_module_rst_file('%s.%s' % (full_package_name, module))
write_rst_file(file_directory=new_rst_dir, file_name=module, content=py_module_text) # depends on [control=['for'], data=['module']] # depends on [control=['for'], data=[]] |
def fingerprint(self, word, n_bits=16, most_common=MOST_COMMON_LETTERS_CG):
"""Return the count fingerprint.
Parameters
----------
word : str
The word to fingerprint
n_bits : int
Number of bits in the fingerprint returned
most_common : list
The most common tokens in the target language, ordered by frequency
Returns
-------
int
The count fingerprint
Examples
--------
>>> cf = Count()
>>> bin(cf.fingerprint('hat'))
'0b1010000000001'
>>> bin(cf.fingerprint('niall'))
'0b10001010000'
>>> bin(cf.fingerprint('colin'))
'0b101010000'
>>> bin(cf.fingerprint('atcg'))
'0b1010000000000'
>>> bin(cf.fingerprint('entreatment'))
'0b1111010000100000'
"""
if n_bits % 2:
n_bits += 1
word = Counter(word)
fingerprint = 0
for letter in most_common:
if n_bits:
fingerprint <<= 2
fingerprint += word[letter] & 3
n_bits -= 2
else:
break
if n_bits:
fingerprint <<= n_bits
return fingerprint | def function[fingerprint, parameter[self, word, n_bits, most_common]]:
constant[Return the count fingerprint.
Parameters
----------
word : str
The word to fingerprint
n_bits : int
Number of bits in the fingerprint returned
most_common : list
The most common tokens in the target language, ordered by frequency
Returns
-------
int
The count fingerprint
Examples
--------
>>> cf = Count()
>>> bin(cf.fingerprint('hat'))
'0b1010000000001'
>>> bin(cf.fingerprint('niall'))
'0b10001010000'
>>> bin(cf.fingerprint('colin'))
'0b101010000'
>>> bin(cf.fingerprint('atcg'))
'0b1010000000000'
>>> bin(cf.fingerprint('entreatment'))
'0b1111010000100000'
]
if binary_operation[name[n_bits] <ast.Mod object at 0x7da2590d6920> constant[2]] begin[:]
<ast.AugAssign object at 0x7da2054a4b20>
variable[word] assign[=] call[name[Counter], parameter[name[word]]]
variable[fingerprint] assign[=] constant[0]
for taget[name[letter]] in starred[name[most_common]] begin[:]
if name[n_bits] begin[:]
<ast.AugAssign object at 0x7da2054a6dd0>
<ast.AugAssign object at 0x7da2054a5270>
<ast.AugAssign object at 0x7da2054a5180>
if name[n_bits] begin[:]
<ast.AugAssign object at 0x7da2054a5240>
return[name[fingerprint]] | keyword[def] identifier[fingerprint] ( identifier[self] , identifier[word] , identifier[n_bits] = literal[int] , identifier[most_common] = identifier[MOST_COMMON_LETTERS_CG] ):
literal[string]
keyword[if] identifier[n_bits] % literal[int] :
identifier[n_bits] += literal[int]
identifier[word] = identifier[Counter] ( identifier[word] )
identifier[fingerprint] = literal[int]
keyword[for] identifier[letter] keyword[in] identifier[most_common] :
keyword[if] identifier[n_bits] :
identifier[fingerprint] <<= literal[int]
identifier[fingerprint] += identifier[word] [ identifier[letter] ]& literal[int]
identifier[n_bits] -= literal[int]
keyword[else] :
keyword[break]
keyword[if] identifier[n_bits] :
identifier[fingerprint] <<= identifier[n_bits]
keyword[return] identifier[fingerprint] | def fingerprint(self, word, n_bits=16, most_common=MOST_COMMON_LETTERS_CG):
"""Return the count fingerprint.
Parameters
----------
word : str
The word to fingerprint
n_bits : int
Number of bits in the fingerprint returned
most_common : list
The most common tokens in the target language, ordered by frequency
Returns
-------
int
The count fingerprint
Examples
--------
>>> cf = Count()
>>> bin(cf.fingerprint('hat'))
'0b1010000000001'
>>> bin(cf.fingerprint('niall'))
'0b10001010000'
>>> bin(cf.fingerprint('colin'))
'0b101010000'
>>> bin(cf.fingerprint('atcg'))
'0b1010000000000'
>>> bin(cf.fingerprint('entreatment'))
'0b1111010000100000'
"""
if n_bits % 2:
n_bits += 1 # depends on [control=['if'], data=[]]
word = Counter(word)
fingerprint = 0
for letter in most_common:
if n_bits:
fingerprint <<= 2
fingerprint += word[letter] & 3
n_bits -= 2 # depends on [control=['if'], data=[]]
else:
break # depends on [control=['for'], data=['letter']]
if n_bits:
fingerprint <<= n_bits # depends on [control=['if'], data=[]]
return fingerprint |
def kn_to_n(kn, N_k = None, cleanup = False):
""" Convert KxN_max array to N array
Parameters
----------
u_kn: np.ndarray, float, shape=(KxN_max)
N_k (optional) : np.array
the N_k matrix from the previous formatting form
cleanup (optional) : bool
optional command to clean up, since u_kln can get very large
Outputs
-------
u_n: np.ndarray, float, shape=(N)
"""
#print "warning: KxN arrays deprecated; convering into new preferred N shape"
# rewrite into kn shape
# rewrite into kn shape
[K, N_max] = np.shape(kn)
if N_k is None:
# We assume that all N_k are N_max.
# Not really an easier way to do this without being given the answer.
N_k = N_max*np.ones([K], dtype=np.int64)
N = np.sum(N_k)
n = np.zeros([N], dtype=np.float64)
i = 0
for k in range(K): # loop through the old K; some might be zero
for ik in range(N_k[k]):
n[i] = kn[k, ik]
i += 1
if cleanup:
del(kn) # very big, let's explicitly delete
return n | def function[kn_to_n, parameter[kn, N_k, cleanup]]:
constant[ Convert KxN_max array to N array
Parameters
----------
u_kn: np.ndarray, float, shape=(KxN_max)
N_k (optional) : np.array
the N_k matrix from the previous formatting form
cleanup (optional) : bool
optional command to clean up, since u_kln can get very large
Outputs
-------
u_n: np.ndarray, float, shape=(N)
]
<ast.List object at 0x7da18bc72770> assign[=] call[name[np].shape, parameter[name[kn]]]
if compare[name[N_k] is constant[None]] begin[:]
variable[N_k] assign[=] binary_operation[name[N_max] * call[name[np].ones, parameter[list[[<ast.Name object at 0x7da18bc723e0>]]]]]
variable[N] assign[=] call[name[np].sum, parameter[name[N_k]]]
variable[n] assign[=] call[name[np].zeros, parameter[list[[<ast.Name object at 0x7da18bc704c0>]]]]
variable[i] assign[=] constant[0]
for taget[name[k]] in starred[call[name[range], parameter[name[K]]]] begin[:]
for taget[name[ik]] in starred[call[name[range], parameter[call[name[N_k]][name[k]]]]] begin[:]
call[name[n]][name[i]] assign[=] call[name[kn]][tuple[[<ast.Name object at 0x7da18bc70940>, <ast.Name object at 0x7da18bc70790>]]]
<ast.AugAssign object at 0x7da18bc722c0>
if name[cleanup] begin[:]
<ast.Delete object at 0x7da18bc73340>
return[name[n]] | keyword[def] identifier[kn_to_n] ( identifier[kn] , identifier[N_k] = keyword[None] , identifier[cleanup] = keyword[False] ):
literal[string]
[ identifier[K] , identifier[N_max] ]= identifier[np] . identifier[shape] ( identifier[kn] )
keyword[if] identifier[N_k] keyword[is] keyword[None] :
identifier[N_k] = identifier[N_max] * identifier[np] . identifier[ones] ([ identifier[K] ], identifier[dtype] = identifier[np] . identifier[int64] )
identifier[N] = identifier[np] . identifier[sum] ( identifier[N_k] )
identifier[n] = identifier[np] . identifier[zeros] ([ identifier[N] ], identifier[dtype] = identifier[np] . identifier[float64] )
identifier[i] = literal[int]
keyword[for] identifier[k] keyword[in] identifier[range] ( identifier[K] ):
keyword[for] identifier[ik] keyword[in] identifier[range] ( identifier[N_k] [ identifier[k] ]):
identifier[n] [ identifier[i] ]= identifier[kn] [ identifier[k] , identifier[ik] ]
identifier[i] += literal[int]
keyword[if] identifier[cleanup] :
keyword[del] ( identifier[kn] )
keyword[return] identifier[n] | def kn_to_n(kn, N_k=None, cleanup=False):
""" Convert KxN_max array to N array
Parameters
----------
u_kn: np.ndarray, float, shape=(KxN_max)
N_k (optional) : np.array
the N_k matrix from the previous formatting form
cleanup (optional) : bool
optional command to clean up, since u_kln can get very large
Outputs
-------
u_n: np.ndarray, float, shape=(N)
"""
#print "warning: KxN arrays deprecated; convering into new preferred N shape"
# rewrite into kn shape
# rewrite into kn shape
[K, N_max] = np.shape(kn)
if N_k is None:
# We assume that all N_k are N_max.
# Not really an easier way to do this without being given the answer.
N_k = N_max * np.ones([K], dtype=np.int64) # depends on [control=['if'], data=['N_k']]
N = np.sum(N_k)
n = np.zeros([N], dtype=np.float64)
i = 0
for k in range(K): # loop through the old K; some might be zero
for ik in range(N_k[k]):
n[i] = kn[k, ik]
i += 1 # depends on [control=['for'], data=['ik']] # depends on [control=['for'], data=['k']]
if cleanup:
del kn # very big, let's explicitly delete # depends on [control=['if'], data=[]]
return n |
def validate_values(self, values):
"""
Validate values if they are registered as expected_values and present.
* If they are not registered they shouldn't be used anywhere at all
because profile can self check (profile.check_dependencies) for
missing/undefined dependencies.
* If they are not present in values but registered as expected_values
either the expected value has a default value OR a request for that
name will raise a KeyError on runtime. We don't know if all expected
values are actually needed/used, thus this fails late.
"""
format_message = '{}: {} (value: {})'.format
messages = []
for name, value in values.items():
if name not in self.expected_values:
continue
valid, message = self.expected_values[name].validate(value)
if valid:
continue
messages.append(format_message(name, message, value))
if len(messages):
return False, '\n'.join(messages)
return True, None | def function[validate_values, parameter[self, values]]:
constant[
Validate values if they are registered as expected_values and present.
* If they are not registered they shouldn't be used anywhere at all
because profile can self check (profile.check_dependencies) for
missing/undefined dependencies.
* If they are not present in values but registered as expected_values
either the expected value has a default value OR a request for that
name will raise a KeyError on runtime. We don't know if all expected
values are actually needed/used, thus this fails late.
]
variable[format_message] assign[=] constant[{}: {} (value: {})].format
variable[messages] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da20c7cb6d0>, <ast.Name object at 0x7da20c7caad0>]]] in starred[call[name[values].items, parameter[]]] begin[:]
if compare[name[name] <ast.NotIn object at 0x7da2590d7190> name[self].expected_values] begin[:]
continue
<ast.Tuple object at 0x7da20c7c8730> assign[=] call[call[name[self].expected_values][name[name]].validate, parameter[name[value]]]
if name[valid] begin[:]
continue
call[name[messages].append, parameter[call[name[format_message], parameter[name[name], name[message], name[value]]]]]
if call[name[len], parameter[name[messages]]] begin[:]
return[tuple[[<ast.Constant object at 0x7da1b12c0a60>, <ast.Call object at 0x7da1b12c2b90>]]]
return[tuple[[<ast.Constant object at 0x7da1b12c1e10>, <ast.Constant object at 0x7da1b12c20e0>]]] | keyword[def] identifier[validate_values] ( identifier[self] , identifier[values] ):
literal[string]
identifier[format_message] = literal[string] . identifier[format]
identifier[messages] =[]
keyword[for] identifier[name] , identifier[value] keyword[in] identifier[values] . identifier[items] ():
keyword[if] identifier[name] keyword[not] keyword[in] identifier[self] . identifier[expected_values] :
keyword[continue]
identifier[valid] , identifier[message] = identifier[self] . identifier[expected_values] [ identifier[name] ]. identifier[validate] ( identifier[value] )
keyword[if] identifier[valid] :
keyword[continue]
identifier[messages] . identifier[append] ( identifier[format_message] ( identifier[name] , identifier[message] , identifier[value] ))
keyword[if] identifier[len] ( identifier[messages] ):
keyword[return] keyword[False] , literal[string] . identifier[join] ( identifier[messages] )
keyword[return] keyword[True] , keyword[None] | def validate_values(self, values):
"""
Validate values if they are registered as expected_values and present.
* If they are not registered they shouldn't be used anywhere at all
because profile can self check (profile.check_dependencies) for
missing/undefined dependencies.
* If they are not present in values but registered as expected_values
either the expected value has a default value OR a request for that
name will raise a KeyError on runtime. We don't know if all expected
values are actually needed/used, thus this fails late.
"""
format_message = '{}: {} (value: {})'.format
messages = []
for (name, value) in values.items():
if name not in self.expected_values:
continue # depends on [control=['if'], data=[]]
(valid, message) = self.expected_values[name].validate(value)
if valid:
continue # depends on [control=['if'], data=[]]
messages.append(format_message(name, message, value)) # depends on [control=['for'], data=[]]
if len(messages):
return (False, '\n'.join(messages)) # depends on [control=['if'], data=[]]
return (True, None) |
def _get_possible_adapter_modes(interface, blacklist):
'''
Return possible adapter modes for a given interface using a blacklist.
:param interface: interface name
:param blacklist: given blacklist
:return: list of possible adapter modes
'''
adapter_modes = []
protocols = _load_config('lvrt', ['AdditionalNetworkProtocols'])['AdditionalNetworkProtocols'].lower()
sys_interface_path = os.readlink('/sys/class/net/{0}'.format(interface))
with salt.utils.files.fopen('/sys/class/net/{0}/uevent'.format(interface)) as uevent_file:
uevent_lines = uevent_file.readlines()
uevent_devtype = ""
for line in uevent_lines:
if line.startswith("DEVTYPE="):
uevent_devtype = line.split('=')[1].strip()
break
for adapter_mode in blacklist:
if adapter_mode == '_':
continue
value = blacklist.get(adapter_mode, {})
if value.get('additional_protocol') and adapter_mode not in protocols:
continue
if interface not in value['name'] \
and not any((blacklist['_'][iface_type] == 'sys' and iface_type in sys_interface_path) or
(blacklist['_'][iface_type] == 'uevent' and iface_type == uevent_devtype)
for iface_type in value['type']):
adapter_modes += [adapter_mode]
return adapter_modes | def function[_get_possible_adapter_modes, parameter[interface, blacklist]]:
constant[
Return possible adapter modes for a given interface using a blacklist.
:param interface: interface name
:param blacklist: given blacklist
:return: list of possible adapter modes
]
variable[adapter_modes] assign[=] list[[]]
variable[protocols] assign[=] call[call[call[name[_load_config], parameter[constant[lvrt], list[[<ast.Constant object at 0x7da2044c3160>]]]]][constant[AdditionalNetworkProtocols]].lower, parameter[]]
variable[sys_interface_path] assign[=] call[name[os].readlink, parameter[call[constant[/sys/class/net/{0}].format, parameter[name[interface]]]]]
with call[name[salt].utils.files.fopen, parameter[call[constant[/sys/class/net/{0}/uevent].format, parameter[name[interface]]]]] begin[:]
variable[uevent_lines] assign[=] call[name[uevent_file].readlines, parameter[]]
variable[uevent_devtype] assign[=] constant[]
for taget[name[line]] in starred[name[uevent_lines]] begin[:]
if call[name[line].startswith, parameter[constant[DEVTYPE=]]] begin[:]
variable[uevent_devtype] assign[=] call[call[call[name[line].split, parameter[constant[=]]]][constant[1]].strip, parameter[]]
break
for taget[name[adapter_mode]] in starred[name[blacklist]] begin[:]
if compare[name[adapter_mode] equal[==] constant[_]] begin[:]
continue
variable[value] assign[=] call[name[blacklist].get, parameter[name[adapter_mode], dictionary[[], []]]]
if <ast.BoolOp object at 0x7da2044c17b0> begin[:]
continue
if <ast.BoolOp object at 0x7da2044c2290> begin[:]
<ast.AugAssign object at 0x7da2044c3730>
return[name[adapter_modes]] | keyword[def] identifier[_get_possible_adapter_modes] ( identifier[interface] , identifier[blacklist] ):
literal[string]
identifier[adapter_modes] =[]
identifier[protocols] = identifier[_load_config] ( literal[string] ,[ literal[string] ])[ literal[string] ]. identifier[lower] ()
identifier[sys_interface_path] = identifier[os] . identifier[readlink] ( literal[string] . identifier[format] ( identifier[interface] ))
keyword[with] identifier[salt] . identifier[utils] . identifier[files] . identifier[fopen] ( literal[string] . identifier[format] ( identifier[interface] )) keyword[as] identifier[uevent_file] :
identifier[uevent_lines] = identifier[uevent_file] . identifier[readlines] ()
identifier[uevent_devtype] = literal[string]
keyword[for] identifier[line] keyword[in] identifier[uevent_lines] :
keyword[if] identifier[line] . identifier[startswith] ( literal[string] ):
identifier[uevent_devtype] = identifier[line] . identifier[split] ( literal[string] )[ literal[int] ]. identifier[strip] ()
keyword[break]
keyword[for] identifier[adapter_mode] keyword[in] identifier[blacklist] :
keyword[if] identifier[adapter_mode] == literal[string] :
keyword[continue]
identifier[value] = identifier[blacklist] . identifier[get] ( identifier[adapter_mode] ,{})
keyword[if] identifier[value] . identifier[get] ( literal[string] ) keyword[and] identifier[adapter_mode] keyword[not] keyword[in] identifier[protocols] :
keyword[continue]
keyword[if] identifier[interface] keyword[not] keyword[in] identifier[value] [ literal[string] ] keyword[and] keyword[not] identifier[any] (( identifier[blacklist] [ literal[string] ][ identifier[iface_type] ]== literal[string] keyword[and] identifier[iface_type] keyword[in] identifier[sys_interface_path] ) keyword[or]
( identifier[blacklist] [ literal[string] ][ identifier[iface_type] ]== literal[string] keyword[and] identifier[iface_type] == identifier[uevent_devtype] )
keyword[for] identifier[iface_type] keyword[in] identifier[value] [ literal[string] ]):
identifier[adapter_modes] +=[ identifier[adapter_mode] ]
keyword[return] identifier[adapter_modes] | def _get_possible_adapter_modes(interface, blacklist):
"""
Return possible adapter modes for a given interface using a blacklist.
:param interface: interface name
:param blacklist: given blacklist
:return: list of possible adapter modes
"""
adapter_modes = []
protocols = _load_config('lvrt', ['AdditionalNetworkProtocols'])['AdditionalNetworkProtocols'].lower()
sys_interface_path = os.readlink('/sys/class/net/{0}'.format(interface))
with salt.utils.files.fopen('/sys/class/net/{0}/uevent'.format(interface)) as uevent_file:
uevent_lines = uevent_file.readlines() # depends on [control=['with'], data=['uevent_file']]
uevent_devtype = ''
for line in uevent_lines:
if line.startswith('DEVTYPE='):
uevent_devtype = line.split('=')[1].strip()
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['line']]
for adapter_mode in blacklist:
if adapter_mode == '_':
continue # depends on [control=['if'], data=[]]
value = blacklist.get(adapter_mode, {})
if value.get('additional_protocol') and adapter_mode not in protocols:
continue # depends on [control=['if'], data=[]]
if interface not in value['name'] and (not any((blacklist['_'][iface_type] == 'sys' and iface_type in sys_interface_path or (blacklist['_'][iface_type] == 'uevent' and iface_type == uevent_devtype) for iface_type in value['type']))):
adapter_modes += [adapter_mode] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['adapter_mode']]
return adapter_modes |
def contains(self, key, value):
"""
增加查询条件,限制查询结果对象指定最短的值,包含指定字符串。在数据量比较大的情况下会比较慢。
:param key: 查询条件字段名
:param value: 需要包含的字符串
:rtype: Query
"""
self._add_condition(key, '$regex', self._quote(value))
return self | def function[contains, parameter[self, key, value]]:
constant[
增加查询条件,限制查询结果对象指定最短的值,包含指定字符串。在数据量比较大的情况下会比较慢。
:param key: 查询条件字段名
:param value: 需要包含的字符串
:rtype: Query
]
call[name[self]._add_condition, parameter[name[key], constant[$regex], call[name[self]._quote, parameter[name[value]]]]]
return[name[self]] | keyword[def] identifier[contains] ( identifier[self] , identifier[key] , identifier[value] ):
literal[string]
identifier[self] . identifier[_add_condition] ( identifier[key] , literal[string] , identifier[self] . identifier[_quote] ( identifier[value] ))
keyword[return] identifier[self] | def contains(self, key, value):
"""
增加查询条件,限制查询结果对象指定最短的值,包含指定字符串。在数据量比较大的情况下会比较慢。
:param key: 查询条件字段名
:param value: 需要包含的字符串
:rtype: Query
"""
self._add_condition(key, '$regex', self._quote(value))
return self |
def cast_item(cls, item):
"""Cast list item to the appropriate tag type."""
if not isinstance(item, cls.subtype):
incompatible = isinstance(item, Base) and not any(
issubclass(cls.subtype, tag_type) and isinstance(item, tag_type)
for tag_type in cls.all_tags.values()
)
if incompatible:
raise IncompatibleItemType(item, cls.subtype)
try:
return cls.subtype(item)
except EndInstantiation:
raise ValueError('List tags without an explicit subtype must '
'either be empty or instantiated with '
'elements from which a subtype can be '
'inferred') from None
except (IncompatibleItemType, CastError):
raise
except Exception as exc:
raise CastError(item, cls.subtype) from exc
return item | def function[cast_item, parameter[cls, item]]:
constant[Cast list item to the appropriate tag type.]
if <ast.UnaryOp object at 0x7da20c76c790> begin[:]
variable[incompatible] assign[=] <ast.BoolOp object at 0x7da20c76e500>
if name[incompatible] begin[:]
<ast.Raise object at 0x7da20c76d090>
<ast.Try object at 0x7da20c76dde0>
return[name[item]] | keyword[def] identifier[cast_item] ( identifier[cls] , identifier[item] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[item] , identifier[cls] . identifier[subtype] ):
identifier[incompatible] = identifier[isinstance] ( identifier[item] , identifier[Base] ) keyword[and] keyword[not] identifier[any] (
identifier[issubclass] ( identifier[cls] . identifier[subtype] , identifier[tag_type] ) keyword[and] identifier[isinstance] ( identifier[item] , identifier[tag_type] )
keyword[for] identifier[tag_type] keyword[in] identifier[cls] . identifier[all_tags] . identifier[values] ()
)
keyword[if] identifier[incompatible] :
keyword[raise] identifier[IncompatibleItemType] ( identifier[item] , identifier[cls] . identifier[subtype] )
keyword[try] :
keyword[return] identifier[cls] . identifier[subtype] ( identifier[item] )
keyword[except] identifier[EndInstantiation] :
keyword[raise] identifier[ValueError] ( literal[string]
literal[string]
literal[string]
literal[string] ) keyword[from] keyword[None]
keyword[except] ( identifier[IncompatibleItemType] , identifier[CastError] ):
keyword[raise]
keyword[except] identifier[Exception] keyword[as] identifier[exc] :
keyword[raise] identifier[CastError] ( identifier[item] , identifier[cls] . identifier[subtype] ) keyword[from] identifier[exc]
keyword[return] identifier[item] | def cast_item(cls, item):
"""Cast list item to the appropriate tag type."""
if not isinstance(item, cls.subtype):
incompatible = isinstance(item, Base) and (not any((issubclass(cls.subtype, tag_type) and isinstance(item, tag_type) for tag_type in cls.all_tags.values())))
if incompatible:
raise IncompatibleItemType(item, cls.subtype) # depends on [control=['if'], data=[]]
try:
return cls.subtype(item) # depends on [control=['try'], data=[]]
except EndInstantiation:
raise ValueError('List tags without an explicit subtype must either be empty or instantiated with elements from which a subtype can be inferred') from None # depends on [control=['except'], data=[]]
except (IncompatibleItemType, CastError):
raise # depends on [control=['except'], data=[]]
except Exception as exc:
raise CastError(item, cls.subtype) from exc # depends on [control=['except'], data=['exc']] # depends on [control=['if'], data=[]]
return item |
def downcast(self, dtypes=None):
""" try to downcast each item to the dict of dtypes if present """
# turn it off completely
if dtypes is False:
return self
values = self.values
# single block handling
if self._is_single_block:
# try to cast all non-floats here
if dtypes is None:
dtypes = 'infer'
nv = maybe_downcast_to_dtype(values, dtypes)
return self.make_block(nv)
# ndim > 1
if dtypes is None:
return self
if not (dtypes == 'infer' or isinstance(dtypes, dict)):
raise ValueError("downcast must have a dictionary or 'infer' as "
"its argument")
# operate column-by-column
# this is expensive as it splits the blocks items-by-item
def f(m, v, i):
if dtypes == 'infer':
dtype = 'infer'
else:
raise AssertionError("dtypes as dict is not supported yet")
if dtype is not None:
v = maybe_downcast_to_dtype(v, dtype)
return v
return self.split_and_operate(None, f, False) | def function[downcast, parameter[self, dtypes]]:
constant[ try to downcast each item to the dict of dtypes if present ]
if compare[name[dtypes] is constant[False]] begin[:]
return[name[self]]
variable[values] assign[=] name[self].values
if name[self]._is_single_block begin[:]
if compare[name[dtypes] is constant[None]] begin[:]
variable[dtypes] assign[=] constant[infer]
variable[nv] assign[=] call[name[maybe_downcast_to_dtype], parameter[name[values], name[dtypes]]]
return[call[name[self].make_block, parameter[name[nv]]]]
if compare[name[dtypes] is constant[None]] begin[:]
return[name[self]]
if <ast.UnaryOp object at 0x7da20c9936d0> begin[:]
<ast.Raise object at 0x7da20c993c40>
def function[f, parameter[m, v, i]]:
if compare[name[dtypes] equal[==] constant[infer]] begin[:]
variable[dtype] assign[=] constant[infer]
if compare[name[dtype] is_not constant[None]] begin[:]
variable[v] assign[=] call[name[maybe_downcast_to_dtype], parameter[name[v], name[dtype]]]
return[name[v]]
return[call[name[self].split_and_operate, parameter[constant[None], name[f], constant[False]]]] | keyword[def] identifier[downcast] ( identifier[self] , identifier[dtypes] = keyword[None] ):
literal[string]
keyword[if] identifier[dtypes] keyword[is] keyword[False] :
keyword[return] identifier[self]
identifier[values] = identifier[self] . identifier[values]
keyword[if] identifier[self] . identifier[_is_single_block] :
keyword[if] identifier[dtypes] keyword[is] keyword[None] :
identifier[dtypes] = literal[string]
identifier[nv] = identifier[maybe_downcast_to_dtype] ( identifier[values] , identifier[dtypes] )
keyword[return] identifier[self] . identifier[make_block] ( identifier[nv] )
keyword[if] identifier[dtypes] keyword[is] keyword[None] :
keyword[return] identifier[self]
keyword[if] keyword[not] ( identifier[dtypes] == literal[string] keyword[or] identifier[isinstance] ( identifier[dtypes] , identifier[dict] )):
keyword[raise] identifier[ValueError] ( literal[string]
literal[string] )
keyword[def] identifier[f] ( identifier[m] , identifier[v] , identifier[i] ):
keyword[if] identifier[dtypes] == literal[string] :
identifier[dtype] = literal[string]
keyword[else] :
keyword[raise] identifier[AssertionError] ( literal[string] )
keyword[if] identifier[dtype] keyword[is] keyword[not] keyword[None] :
identifier[v] = identifier[maybe_downcast_to_dtype] ( identifier[v] , identifier[dtype] )
keyword[return] identifier[v]
keyword[return] identifier[self] . identifier[split_and_operate] ( keyword[None] , identifier[f] , keyword[False] ) | def downcast(self, dtypes=None):
""" try to downcast each item to the dict of dtypes if present """
# turn it off completely
if dtypes is False:
return self # depends on [control=['if'], data=[]]
values = self.values
# single block handling
if self._is_single_block:
# try to cast all non-floats here
if dtypes is None:
dtypes = 'infer' # depends on [control=['if'], data=['dtypes']]
nv = maybe_downcast_to_dtype(values, dtypes)
return self.make_block(nv) # depends on [control=['if'], data=[]]
# ndim > 1
if dtypes is None:
return self # depends on [control=['if'], data=[]]
if not (dtypes == 'infer' or isinstance(dtypes, dict)):
raise ValueError("downcast must have a dictionary or 'infer' as its argument") # depends on [control=['if'], data=[]]
# operate column-by-column
# this is expensive as it splits the blocks items-by-item
def f(m, v, i):
if dtypes == 'infer':
dtype = 'infer' # depends on [control=['if'], data=[]]
else:
raise AssertionError('dtypes as dict is not supported yet')
if dtype is not None:
v = maybe_downcast_to_dtype(v, dtype) # depends on [control=['if'], data=['dtype']]
return v
return self.split_and_operate(None, f, False) |
def ex_literal(val):
"""An int, float, long, bool, string, or None literal with the given
value.
"""
if val is None:
return ast.Name('None', ast.Load())
elif isinstance(val, int):
return ast.Num(val)
elif isinstance(val, bool):
return ast.Name(bytes(val), ast.Load())
elif isinstance(val, str):
return ast.Str(val)
raise TypeError(u'no literal for {0}'.format(type(val))) | def function[ex_literal, parameter[val]]:
constant[An int, float, long, bool, string, or None literal with the given
value.
]
if compare[name[val] is constant[None]] begin[:]
return[call[name[ast].Name, parameter[constant[None], call[name[ast].Load, parameter[]]]]]
<ast.Raise object at 0x7da1b26740d0> | keyword[def] identifier[ex_literal] ( identifier[val] ):
literal[string]
keyword[if] identifier[val] keyword[is] keyword[None] :
keyword[return] identifier[ast] . identifier[Name] ( literal[string] , identifier[ast] . identifier[Load] ())
keyword[elif] identifier[isinstance] ( identifier[val] , identifier[int] ):
keyword[return] identifier[ast] . identifier[Num] ( identifier[val] )
keyword[elif] identifier[isinstance] ( identifier[val] , identifier[bool] ):
keyword[return] identifier[ast] . identifier[Name] ( identifier[bytes] ( identifier[val] ), identifier[ast] . identifier[Load] ())
keyword[elif] identifier[isinstance] ( identifier[val] , identifier[str] ):
keyword[return] identifier[ast] . identifier[Str] ( identifier[val] )
keyword[raise] identifier[TypeError] ( literal[string] . identifier[format] ( identifier[type] ( identifier[val] ))) | def ex_literal(val):
"""An int, float, long, bool, string, or None literal with the given
value.
"""
if val is None:
return ast.Name('None', ast.Load()) # depends on [control=['if'], data=[]]
elif isinstance(val, int):
return ast.Num(val) # depends on [control=['if'], data=[]]
elif isinstance(val, bool):
return ast.Name(bytes(val), ast.Load()) # depends on [control=['if'], data=[]]
elif isinstance(val, str):
return ast.Str(val) # depends on [control=['if'], data=[]]
raise TypeError(u'no literal for {0}'.format(type(val))) |
def detect_terminal(_environ=os.environ):
"""
Detect "terminal" you are using.
First, this function checks if you are in tmux, byobu, or screen.
If not it uses $COLORTERM [#]_ if defined and fallbacks to $TERM.
.. [#] So, if you are in Gnome Terminal you have "gnome-terminal"
instead of "xterm-color"".
"""
if _environ.get('TMUX'):
return 'tmux'
elif subdict_by_key_prefix(_environ, 'BYOBU'):
return 'byobu'
elif _environ.get('TERM').startswith('screen'):
return _environ['TERM']
elif _environ.get('COLORTERM'):
return _environ['COLORTERM']
else:
return _environ.get('TERM') | def function[detect_terminal, parameter[_environ]]:
constant[
Detect "terminal" you are using.
First, this function checks if you are in tmux, byobu, or screen.
If not it uses $COLORTERM [#]_ if defined and fallbacks to $TERM.
.. [#] So, if you are in Gnome Terminal you have "gnome-terminal"
instead of "xterm-color"".
]
if call[name[_environ].get, parameter[constant[TMUX]]] begin[:]
return[constant[tmux]] | keyword[def] identifier[detect_terminal] ( identifier[_environ] = identifier[os] . identifier[environ] ):
literal[string]
keyword[if] identifier[_environ] . identifier[get] ( literal[string] ):
keyword[return] literal[string]
keyword[elif] identifier[subdict_by_key_prefix] ( identifier[_environ] , literal[string] ):
keyword[return] literal[string]
keyword[elif] identifier[_environ] . identifier[get] ( literal[string] ). identifier[startswith] ( literal[string] ):
keyword[return] identifier[_environ] [ literal[string] ]
keyword[elif] identifier[_environ] . identifier[get] ( literal[string] ):
keyword[return] identifier[_environ] [ literal[string] ]
keyword[else] :
keyword[return] identifier[_environ] . identifier[get] ( literal[string] ) | def detect_terminal(_environ=os.environ):
"""
Detect "terminal" you are using.
First, this function checks if you are in tmux, byobu, or screen.
If not it uses $COLORTERM [#]_ if defined and fallbacks to $TERM.
.. [#] So, if you are in Gnome Terminal you have "gnome-terminal"
instead of "xterm-color"".
"""
if _environ.get('TMUX'):
return 'tmux' # depends on [control=['if'], data=[]]
elif subdict_by_key_prefix(_environ, 'BYOBU'):
return 'byobu' # depends on [control=['if'], data=[]]
elif _environ.get('TERM').startswith('screen'):
return _environ['TERM'] # depends on [control=['if'], data=[]]
elif _environ.get('COLORTERM'):
return _environ['COLORTERM'] # depends on [control=['if'], data=[]]
else:
return _environ.get('TERM') |
def __find_sync_range(self, messages, preamble_end: int, search_end: int):
"""
Finding the synchronization works by finding the first difference between two messages.
This is performed for all messages and the most frequent first difference is chosen
:type messages: list of Message
:param preamble_end: End of preamble = start of search
:param search_end: End of search = start of first other label
"""
possible_sync_pos = defaultdict(int)
for i, msg in enumerate(messages):
bits_i = msg.decoded_bits[preamble_end:search_end]
for j in range(i, len(messages)):
bits_j = messages[j].decoded_bits[preamble_end:search_end]
first_diff = next((k for k, (bit_i, bit_j) in enumerate(zip(bits_i, bits_j)) if bit_i != bit_j), None)
if first_diff is not None:
first_diff = preamble_end + 4 * (first_diff // 4)
if (first_diff - preamble_end) >= 4:
possible_sync_pos[(preamble_end, first_diff)] += 1
try:
sync_interval = max(possible_sync_pos, key=possible_sync_pos.__getitem__)
return sync_interval
except ValueError:
return None | def function[__find_sync_range, parameter[self, messages, preamble_end, search_end]]:
constant[
Finding the synchronization works by finding the first difference between two messages.
This is performed for all messages and the most frequent first difference is chosen
:type messages: list of Message
:param preamble_end: End of preamble = start of search
:param search_end: End of search = start of first other label
]
variable[possible_sync_pos] assign[=] call[name[defaultdict], parameter[name[int]]]
for taget[tuple[[<ast.Name object at 0x7da2044c21a0>, <ast.Name object at 0x7da2044c04f0>]]] in starred[call[name[enumerate], parameter[name[messages]]]] begin[:]
variable[bits_i] assign[=] call[name[msg].decoded_bits][<ast.Slice object at 0x7da2044c0280>]
for taget[name[j]] in starred[call[name[range], parameter[name[i], call[name[len], parameter[name[messages]]]]]] begin[:]
variable[bits_j] assign[=] call[call[name[messages]][name[j]].decoded_bits][<ast.Slice object at 0x7da2044c0400>]
variable[first_diff] assign[=] call[name[next], parameter[<ast.GeneratorExp object at 0x7da2044c3e80>, constant[None]]]
if compare[name[first_diff] is_not constant[None]] begin[:]
variable[first_diff] assign[=] binary_operation[name[preamble_end] + binary_operation[constant[4] * binary_operation[name[first_diff] <ast.FloorDiv object at 0x7da2590d6bc0> constant[4]]]]
if compare[binary_operation[name[first_diff] - name[preamble_end]] greater_or_equal[>=] constant[4]] begin[:]
<ast.AugAssign object at 0x7da1b21e3fd0>
<ast.Try object at 0x7da1b21e10c0> | keyword[def] identifier[__find_sync_range] ( identifier[self] , identifier[messages] , identifier[preamble_end] : identifier[int] , identifier[search_end] : identifier[int] ):
literal[string]
identifier[possible_sync_pos] = identifier[defaultdict] ( identifier[int] )
keyword[for] identifier[i] , identifier[msg] keyword[in] identifier[enumerate] ( identifier[messages] ):
identifier[bits_i] = identifier[msg] . identifier[decoded_bits] [ identifier[preamble_end] : identifier[search_end] ]
keyword[for] identifier[j] keyword[in] identifier[range] ( identifier[i] , identifier[len] ( identifier[messages] )):
identifier[bits_j] = identifier[messages] [ identifier[j] ]. identifier[decoded_bits] [ identifier[preamble_end] : identifier[search_end] ]
identifier[first_diff] = identifier[next] (( identifier[k] keyword[for] identifier[k] ,( identifier[bit_i] , identifier[bit_j] ) keyword[in] identifier[enumerate] ( identifier[zip] ( identifier[bits_i] , identifier[bits_j] )) keyword[if] identifier[bit_i] != identifier[bit_j] ), keyword[None] )
keyword[if] identifier[first_diff] keyword[is] keyword[not] keyword[None] :
identifier[first_diff] = identifier[preamble_end] + literal[int] *( identifier[first_diff] // literal[int] )
keyword[if] ( identifier[first_diff] - identifier[preamble_end] )>= literal[int] :
identifier[possible_sync_pos] [( identifier[preamble_end] , identifier[first_diff] )]+= literal[int]
keyword[try] :
identifier[sync_interval] = identifier[max] ( identifier[possible_sync_pos] , identifier[key] = identifier[possible_sync_pos] . identifier[__getitem__] )
keyword[return] identifier[sync_interval]
keyword[except] identifier[ValueError] :
keyword[return] keyword[None] | def __find_sync_range(self, messages, preamble_end: int, search_end: int):
"""
Finding the synchronization works by finding the first difference between two messages.
This is performed for all messages and the most frequent first difference is chosen
:type messages: list of Message
:param preamble_end: End of preamble = start of search
:param search_end: End of search = start of first other label
"""
possible_sync_pos = defaultdict(int)
for (i, msg) in enumerate(messages):
bits_i = msg.decoded_bits[preamble_end:search_end]
for j in range(i, len(messages)):
bits_j = messages[j].decoded_bits[preamble_end:search_end]
first_diff = next((k for (k, (bit_i, bit_j)) in enumerate(zip(bits_i, bits_j)) if bit_i != bit_j), None)
if first_diff is not None:
first_diff = preamble_end + 4 * (first_diff // 4)
if first_diff - preamble_end >= 4:
possible_sync_pos[preamble_end, first_diff] += 1 # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['first_diff']] # depends on [control=['for'], data=['j']] # depends on [control=['for'], data=[]]
try:
sync_interval = max(possible_sync_pos, key=possible_sync_pos.__getitem__)
return sync_interval # depends on [control=['try'], data=[]]
except ValueError:
return None # depends on [control=['except'], data=[]] |
def set_vm_status(self, device='FLOPPY',
boot_option='BOOT_ONCE', write_protect='YES'):
"""Sets the Virtual Media drive status and allows the
boot options for booting from the virtual media.
"""
return self._call_method('set_vm_status', device, boot_option,
write_protect) | def function[set_vm_status, parameter[self, device, boot_option, write_protect]]:
constant[Sets the Virtual Media drive status and allows the
boot options for booting from the virtual media.
]
return[call[name[self]._call_method, parameter[constant[set_vm_status], name[device], name[boot_option], name[write_protect]]]] | keyword[def] identifier[set_vm_status] ( identifier[self] , identifier[device] = literal[string] ,
identifier[boot_option] = literal[string] , identifier[write_protect] = literal[string] ):
literal[string]
keyword[return] identifier[self] . identifier[_call_method] ( literal[string] , identifier[device] , identifier[boot_option] ,
identifier[write_protect] ) | def set_vm_status(self, device='FLOPPY', boot_option='BOOT_ONCE', write_protect='YES'):
"""Sets the Virtual Media drive status and allows the
boot options for booting from the virtual media.
"""
return self._call_method('set_vm_status', device, boot_option, write_protect) |
def prepare_weighted_spans(targets, # type: List[TargetExplanation]
preserve_density=None, # type: Optional[bool]
):
# type: (...) -> List[Optional[List[PreparedWeightedSpans]]]
""" Return weighted spans prepared for rendering.
Calculate a separate weight range for each different weighted
span (for each different index): each target has the same number
of weighted spans.
"""
targets_char_weights = [
[get_char_weights(ws, preserve_density=preserve_density)
for ws in t.weighted_spans.docs_weighted_spans]
if t.weighted_spans else None
for t in targets] # type: List[Optional[List[np.ndarray]]]
max_idx = max_or_0(len(ch_w or []) for ch_w in targets_char_weights)
targets_char_weights_not_None = [
cw for cw in targets_char_weights
if cw is not None] # type: List[List[np.ndarray]]
spans_weight_ranges = [
max_or_0(
abs(x) for char_weights in targets_char_weights_not_None
for x in char_weights[idx])
for idx in range(max_idx)]
return [
[PreparedWeightedSpans(ws, char_weights, weight_range)
for ws, char_weights, weight_range in zip(
t.weighted_spans.docs_weighted_spans, # type: ignore
t_char_weights,
spans_weight_ranges)]
if t_char_weights is not None else None
for t, t_char_weights in zip(targets, targets_char_weights)] | def function[prepare_weighted_spans, parameter[targets, preserve_density]]:
constant[ Return weighted spans prepared for rendering.
Calculate a separate weight range for each different weighted
span (for each different index): each target has the same number
of weighted spans.
]
variable[targets_char_weights] assign[=] <ast.ListComp object at 0x7da18ede4be0>
variable[max_idx] assign[=] call[name[max_or_0], parameter[<ast.GeneratorExp object at 0x7da18ede4d00>]]
variable[targets_char_weights_not_None] assign[=] <ast.ListComp object at 0x7da18ede7280>
variable[spans_weight_ranges] assign[=] <ast.ListComp object at 0x7da18ede7fd0>
return[<ast.ListComp object at 0x7da207f01930>] | keyword[def] identifier[prepare_weighted_spans] ( identifier[targets] ,
identifier[preserve_density] = keyword[None] ,
):
literal[string]
identifier[targets_char_weights] =[
[ identifier[get_char_weights] ( identifier[ws] , identifier[preserve_density] = identifier[preserve_density] )
keyword[for] identifier[ws] keyword[in] identifier[t] . identifier[weighted_spans] . identifier[docs_weighted_spans] ]
keyword[if] identifier[t] . identifier[weighted_spans] keyword[else] keyword[None]
keyword[for] identifier[t] keyword[in] identifier[targets] ]
identifier[max_idx] = identifier[max_or_0] ( identifier[len] ( identifier[ch_w] keyword[or] []) keyword[for] identifier[ch_w] keyword[in] identifier[targets_char_weights] )
identifier[targets_char_weights_not_None] =[
identifier[cw] keyword[for] identifier[cw] keyword[in] identifier[targets_char_weights]
keyword[if] identifier[cw] keyword[is] keyword[not] keyword[None] ]
identifier[spans_weight_ranges] =[
identifier[max_or_0] (
identifier[abs] ( identifier[x] ) keyword[for] identifier[char_weights] keyword[in] identifier[targets_char_weights_not_None]
keyword[for] identifier[x] keyword[in] identifier[char_weights] [ identifier[idx] ])
keyword[for] identifier[idx] keyword[in] identifier[range] ( identifier[max_idx] )]
keyword[return] [
[ identifier[PreparedWeightedSpans] ( identifier[ws] , identifier[char_weights] , identifier[weight_range] )
keyword[for] identifier[ws] , identifier[char_weights] , identifier[weight_range] keyword[in] identifier[zip] (
identifier[t] . identifier[weighted_spans] . identifier[docs_weighted_spans] ,
identifier[t_char_weights] ,
identifier[spans_weight_ranges] )]
keyword[if] identifier[t_char_weights] keyword[is] keyword[not] keyword[None] keyword[else] keyword[None]
keyword[for] identifier[t] , identifier[t_char_weights] keyword[in] identifier[zip] ( identifier[targets] , identifier[targets_char_weights] )] | def prepare_weighted_spans(targets, preserve_density=None): # type: List[TargetExplanation]
# type: Optional[bool]
# type: (...) -> List[Optional[List[PreparedWeightedSpans]]]
' Return weighted spans prepared for rendering.\n Calculate a separate weight range for each different weighted\n span (for each different index): each target has the same number\n of weighted spans.\n '
targets_char_weights = [[get_char_weights(ws, preserve_density=preserve_density) for ws in t.weighted_spans.docs_weighted_spans] if t.weighted_spans else None for t in targets] # type: List[Optional[List[np.ndarray]]]
max_idx = max_or_0((len(ch_w or []) for ch_w in targets_char_weights))
targets_char_weights_not_None = [cw for cw in targets_char_weights if cw is not None] # type: List[List[np.ndarray]]
spans_weight_ranges = [max_or_0((abs(x) for char_weights in targets_char_weights_not_None for x in char_weights[idx])) for idx in range(max_idx)] # type: ignore
return [[PreparedWeightedSpans(ws, char_weights, weight_range) for (ws, char_weights, weight_range) in zip(t.weighted_spans.docs_weighted_spans, t_char_weights, spans_weight_ranges)] if t_char_weights is not None else None for (t, t_char_weights) in zip(targets, targets_char_weights)] |
def follow_topic_from_config():
"""Read kafka config, then dispatch to `follow_topic`."""
config = get_config()['ResultTopicBolt']
kafka_class = import_name(config['kafka_class'])
return follow_topic(kafka_class, config['topic'], **config['kafka_init']) | def function[follow_topic_from_config, parameter[]]:
constant[Read kafka config, then dispatch to `follow_topic`.]
variable[config] assign[=] call[call[name[get_config], parameter[]]][constant[ResultTopicBolt]]
variable[kafka_class] assign[=] call[name[import_name], parameter[call[name[config]][constant[kafka_class]]]]
return[call[name[follow_topic], parameter[name[kafka_class], call[name[config]][constant[topic]]]]] | keyword[def] identifier[follow_topic_from_config] ():
literal[string]
identifier[config] = identifier[get_config] ()[ literal[string] ]
identifier[kafka_class] = identifier[import_name] ( identifier[config] [ literal[string] ])
keyword[return] identifier[follow_topic] ( identifier[kafka_class] , identifier[config] [ literal[string] ],** identifier[config] [ literal[string] ]) | def follow_topic_from_config():
"""Read kafka config, then dispatch to `follow_topic`."""
config = get_config()['ResultTopicBolt']
kafka_class = import_name(config['kafka_class'])
return follow_topic(kafka_class, config['topic'], **config['kafka_init']) |
def _get_result(self, resource):
"""
Converts the given resource to a result to be returned from the view.
Unless a custom renderer is employed, this will involve creating
a representer and using it to convert the resource to a string.
:param resource: Resource to convert.
:type resource: Object implementing
:class:`evererst.interfaces.IResource`.
:returns: :class:`pyramid.reposnse.Response` object or a dictionary
with a single key "context" mapped to the given resource (to be
passed on to a custom renderer).
"""
if self._convert_response:
self._update_response_body(resource)
result = self.request.response
else:
result = dict(context=resource)
return result | def function[_get_result, parameter[self, resource]]:
constant[
Converts the given resource to a result to be returned from the view.
Unless a custom renderer is employed, this will involve creating
a representer and using it to convert the resource to a string.
:param resource: Resource to convert.
:type resource: Object implementing
:class:`evererst.interfaces.IResource`.
:returns: :class:`pyramid.reposnse.Response` object or a dictionary
with a single key "context" mapped to the given resource (to be
passed on to a custom renderer).
]
if name[self]._convert_response begin[:]
call[name[self]._update_response_body, parameter[name[resource]]]
variable[result] assign[=] name[self].request.response
return[name[result]] | keyword[def] identifier[_get_result] ( identifier[self] , identifier[resource] ):
literal[string]
keyword[if] identifier[self] . identifier[_convert_response] :
identifier[self] . identifier[_update_response_body] ( identifier[resource] )
identifier[result] = identifier[self] . identifier[request] . identifier[response]
keyword[else] :
identifier[result] = identifier[dict] ( identifier[context] = identifier[resource] )
keyword[return] identifier[result] | def _get_result(self, resource):
"""
Converts the given resource to a result to be returned from the view.
Unless a custom renderer is employed, this will involve creating
a representer and using it to convert the resource to a string.
:param resource: Resource to convert.
:type resource: Object implementing
:class:`evererst.interfaces.IResource`.
:returns: :class:`pyramid.reposnse.Response` object or a dictionary
with a single key "context" mapped to the given resource (to be
passed on to a custom renderer).
"""
if self._convert_response:
self._update_response_body(resource)
result = self.request.response # depends on [control=['if'], data=[]]
else:
result = dict(context=resource)
return result |
def overwrite_color(self, string, color, prefix=False, reset=False):
"""
:param string: input
:param color: new color
:param prefix: if it also should start the color to at the beginning.
:param reset: if it also should end the color at the ending.
:type reset: bool | int | str
:return:
"""
if isinstance(color, int):
color = self.prepare_color(color)
# end if
prefix = color if prefix else ""
if isinstance(reset, int):
reset = self.prepare_color(reset)
elif isinstance(reset, bool):
reset = self.formatter.color_off if reset else ""
# end if
return (
prefix +
string.replace(self.formatter.color_off, self.formatter.color_off+color).replace(self.formatter.all_off, self.formatter.all_off + color) +
reset
) | def function[overwrite_color, parameter[self, string, color, prefix, reset]]:
constant[
:param string: input
:param color: new color
:param prefix: if it also should start the color to at the beginning.
:param reset: if it also should end the color at the ending.
:type reset: bool | int | str
:return:
]
if call[name[isinstance], parameter[name[color], name[int]]] begin[:]
variable[color] assign[=] call[name[self].prepare_color, parameter[name[color]]]
variable[prefix] assign[=] <ast.IfExp object at 0x7da1b06c9060>
if call[name[isinstance], parameter[name[reset], name[int]]] begin[:]
variable[reset] assign[=] call[name[self].prepare_color, parameter[name[reset]]]
return[binary_operation[binary_operation[name[prefix] + call[call[name[string].replace, parameter[name[self].formatter.color_off, binary_operation[name[self].formatter.color_off + name[color]]]].replace, parameter[name[self].formatter.all_off, binary_operation[name[self].formatter.all_off + name[color]]]]] + name[reset]]] | keyword[def] identifier[overwrite_color] ( identifier[self] , identifier[string] , identifier[color] , identifier[prefix] = keyword[False] , identifier[reset] = keyword[False] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[color] , identifier[int] ):
identifier[color] = identifier[self] . identifier[prepare_color] ( identifier[color] )
identifier[prefix] = identifier[color] keyword[if] identifier[prefix] keyword[else] literal[string]
keyword[if] identifier[isinstance] ( identifier[reset] , identifier[int] ):
identifier[reset] = identifier[self] . identifier[prepare_color] ( identifier[reset] )
keyword[elif] identifier[isinstance] ( identifier[reset] , identifier[bool] ):
identifier[reset] = identifier[self] . identifier[formatter] . identifier[color_off] keyword[if] identifier[reset] keyword[else] literal[string]
keyword[return] (
identifier[prefix] +
identifier[string] . identifier[replace] ( identifier[self] . identifier[formatter] . identifier[color_off] , identifier[self] . identifier[formatter] . identifier[color_off] + identifier[color] ). identifier[replace] ( identifier[self] . identifier[formatter] . identifier[all_off] , identifier[self] . identifier[formatter] . identifier[all_off] + identifier[color] )+
identifier[reset]
) | def overwrite_color(self, string, color, prefix=False, reset=False):
"""
:param string: input
:param color: new color
:param prefix: if it also should start the color to at the beginning.
:param reset: if it also should end the color at the ending.
:type reset: bool | int | str
:return:
"""
if isinstance(color, int):
color = self.prepare_color(color) # depends on [control=['if'], data=[]]
# end if
prefix = color if prefix else ''
if isinstance(reset, int):
reset = self.prepare_color(reset) # depends on [control=['if'], data=[]]
elif isinstance(reset, bool):
reset = self.formatter.color_off if reset else '' # depends on [control=['if'], data=[]]
# end if
return prefix + string.replace(self.formatter.color_off, self.formatter.color_off + color).replace(self.formatter.all_off, self.formatter.all_off + color) + reset |
def export_pac(self, xpac, fpha, famp, desc):
"""Write PAC analysis data to CSV."""
filename = splitext(self.filename)[0] + '_pac.csv'
heading_row_1 = ['Segment index',
'Start time',
'End time',
'Duration',
'Stitch',
'Stage',
'Cycle',
'Event type',
'Channel',
]
spacer = [''] * (len(heading_row_1) - 1)
heading_row_2 = []
for fp in fpha:
fp_str = str(fp[0]) + '-' + str(fp[1])
for fa in famp:
fa_str = str(fa[0]) + '-' + str(fa[1])
heading_row_2.append(fp_str + '_' + fa_str + '_pac')
if 'pval' in xpac[list(xpac.keys())[0]].keys():
heading_row_3 = [x[:-4] + '_pval' for x in heading_row_2]
heading_row_2.extend(heading_row_3)
with open(filename, 'w', newline='') as f:
lg.info('Writing to ' + str(filename))
csv_file = writer(f)
csv_file.writerow(['Wonambi v{}'.format(__version__)])
csv_file.writerow(heading_row_1 + heading_row_2)
csv_file.writerow(['Mean'] + spacer + list(desc['mean']))
csv_file.writerow(['SD'] + spacer + list(desc['sd']))
csv_file.writerow(['Mean of ln'] + spacer + list(desc['mean_log']))
csv_file.writerow(['SD of ln'] + spacer + list(desc['sd_log']))
idx = 0
for chan in xpac.keys():
for i, j in enumerate(xpac[chan]['times']):
idx += 1
cyc = None
if xpac[chan]['cycle'][i] is not None:
cyc = xpac[chan]['cycle'][i][2]
data_row = list(ravel(xpac[chan]['data'][i, :, :]))
pval_row = []
if 'pval' in xpac[chan]:
pval_row = list(ravel(xpac[chan]['pval'][i, :, :]))
csv_file.writerow([idx,
j[0],
j[1],
xpac[chan]['duration'][i],
xpac[chan]['n_stitch'][i],
xpac[chan]['stage'][i],
cyc,
xpac[chan]['name'][i],
chan,
] + data_row + pval_row) | def function[export_pac, parameter[self, xpac, fpha, famp, desc]]:
constant[Write PAC analysis data to CSV.]
variable[filename] assign[=] binary_operation[call[call[name[splitext], parameter[name[self].filename]]][constant[0]] + constant[_pac.csv]]
variable[heading_row_1] assign[=] list[[<ast.Constant object at 0x7da1b0ef0f40>, <ast.Constant object at 0x7da1b0ef03d0>, <ast.Constant object at 0x7da1b0ef01f0>, <ast.Constant object at 0x7da1b0ef02e0>, <ast.Constant object at 0x7da1b0ef00d0>, <ast.Constant object at 0x7da1b0ef0130>, <ast.Constant object at 0x7da1b0ef0160>, <ast.Constant object at 0x7da1b0ef02b0>, <ast.Constant object at 0x7da1b0ef05b0>]]
variable[spacer] assign[=] binary_operation[list[[<ast.Constant object at 0x7da1b0ef0340>]] * binary_operation[call[name[len], parameter[name[heading_row_1]]] - constant[1]]]
variable[heading_row_2] assign[=] list[[]]
for taget[name[fp]] in starred[name[fpha]] begin[:]
variable[fp_str] assign[=] binary_operation[binary_operation[call[name[str], parameter[call[name[fp]][constant[0]]]] + constant[-]] + call[name[str], parameter[call[name[fp]][constant[1]]]]]
for taget[name[fa]] in starred[name[famp]] begin[:]
variable[fa_str] assign[=] binary_operation[binary_operation[call[name[str], parameter[call[name[fa]][constant[0]]]] + constant[-]] + call[name[str], parameter[call[name[fa]][constant[1]]]]]
call[name[heading_row_2].append, parameter[binary_operation[binary_operation[binary_operation[name[fp_str] + constant[_]] + name[fa_str]] + constant[_pac]]]]
if compare[constant[pval] in call[call[name[xpac]][call[call[name[list], parameter[call[name[xpac].keys, parameter[]]]]][constant[0]]].keys, parameter[]]] begin[:]
variable[heading_row_3] assign[=] <ast.ListComp object at 0x7da1b0ddc7c0>
call[name[heading_row_2].extend, parameter[name[heading_row_3]]]
with call[name[open], parameter[name[filename], constant[w]]] begin[:]
call[name[lg].info, parameter[binary_operation[constant[Writing to ] + call[name[str], parameter[name[filename]]]]]]
variable[csv_file] assign[=] call[name[writer], parameter[name[f]]]
call[name[csv_file].writerow, parameter[list[[<ast.Call object at 0x7da1b0ddd270>]]]]
call[name[csv_file].writerow, parameter[binary_operation[name[heading_row_1] + name[heading_row_2]]]]
call[name[csv_file].writerow, parameter[binary_operation[binary_operation[list[[<ast.Constant object at 0x7da1b0ddc2b0>]] + name[spacer]] + call[name[list], parameter[call[name[desc]][constant[mean]]]]]]]
call[name[csv_file].writerow, parameter[binary_operation[binary_operation[list[[<ast.Constant object at 0x7da1b0ddc370>]] + name[spacer]] + call[name[list], parameter[call[name[desc]][constant[sd]]]]]]]
call[name[csv_file].writerow, parameter[binary_operation[binary_operation[list[[<ast.Constant object at 0x7da1b0ec28f0>]] + name[spacer]] + call[name[list], parameter[call[name[desc]][constant[mean_log]]]]]]]
call[name[csv_file].writerow, parameter[binary_operation[binary_operation[list[[<ast.Constant object at 0x7da1b0ec2440>]] + name[spacer]] + call[name[list], parameter[call[name[desc]][constant[sd_log]]]]]]]
variable[idx] assign[=] constant[0]
for taget[name[chan]] in starred[call[name[xpac].keys, parameter[]]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da1b0ec1c00>, <ast.Name object at 0x7da1b0ec0d00>]]] in starred[call[name[enumerate], parameter[call[call[name[xpac]][name[chan]]][constant[times]]]]] begin[:]
<ast.AugAssign object at 0x7da1b0ec2e30>
variable[cyc] assign[=] constant[None]
if compare[call[call[call[name[xpac]][name[chan]]][constant[cycle]]][name[i]] is_not constant[None]] begin[:]
variable[cyc] assign[=] call[call[call[call[name[xpac]][name[chan]]][constant[cycle]]][name[i]]][constant[2]]
variable[data_row] assign[=] call[name[list], parameter[call[name[ravel], parameter[call[call[call[name[xpac]][name[chan]]][constant[data]]][tuple[[<ast.Name object at 0x7da1b0ec0850>, <ast.Slice object at 0x7da1b0ec3550>, <ast.Slice object at 0x7da1b0ec2350>]]]]]]]
variable[pval_row] assign[=] list[[]]
if compare[constant[pval] in call[name[xpac]][name[chan]]] begin[:]
variable[pval_row] assign[=] call[name[list], parameter[call[name[ravel], parameter[call[call[call[name[xpac]][name[chan]]][constant[pval]]][tuple[[<ast.Name object at 0x7da1b0ec3dc0>, <ast.Slice object at 0x7da1b0ec26e0>, <ast.Slice object at 0x7da1b0ec1de0>]]]]]]]
call[name[csv_file].writerow, parameter[binary_operation[binary_operation[list[[<ast.Name object at 0x7da1b0ec1900>, <ast.Subscript object at 0x7da1b0ec2b60>, <ast.Subscript object at 0x7da1b0ec3910>, <ast.Subscript object at 0x7da1b0ec2920>, <ast.Subscript object at 0x7da1b0ec1330>, <ast.Subscript object at 0x7da1b0ec2c20>, <ast.Name object at 0x7da1b0ec0580>, <ast.Subscript object at 0x7da1b0ec34f0>, <ast.Name object at 0x7da1b0ec12a0>]] + name[data_row]] + name[pval_row]]]] | keyword[def] identifier[export_pac] ( identifier[self] , identifier[xpac] , identifier[fpha] , identifier[famp] , identifier[desc] ):
literal[string]
identifier[filename] = identifier[splitext] ( identifier[self] . identifier[filename] )[ literal[int] ]+ literal[string]
identifier[heading_row_1] =[ literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
]
identifier[spacer] =[ literal[string] ]*( identifier[len] ( identifier[heading_row_1] )- literal[int] )
identifier[heading_row_2] =[]
keyword[for] identifier[fp] keyword[in] identifier[fpha] :
identifier[fp_str] = identifier[str] ( identifier[fp] [ literal[int] ])+ literal[string] + identifier[str] ( identifier[fp] [ literal[int] ])
keyword[for] identifier[fa] keyword[in] identifier[famp] :
identifier[fa_str] = identifier[str] ( identifier[fa] [ literal[int] ])+ literal[string] + identifier[str] ( identifier[fa] [ literal[int] ])
identifier[heading_row_2] . identifier[append] ( identifier[fp_str] + literal[string] + identifier[fa_str] + literal[string] )
keyword[if] literal[string] keyword[in] identifier[xpac] [ identifier[list] ( identifier[xpac] . identifier[keys] ())[ literal[int] ]]. identifier[keys] ():
identifier[heading_row_3] =[ identifier[x] [:- literal[int] ]+ literal[string] keyword[for] identifier[x] keyword[in] identifier[heading_row_2] ]
identifier[heading_row_2] . identifier[extend] ( identifier[heading_row_3] )
keyword[with] identifier[open] ( identifier[filename] , literal[string] , identifier[newline] = literal[string] ) keyword[as] identifier[f] :
identifier[lg] . identifier[info] ( literal[string] + identifier[str] ( identifier[filename] ))
identifier[csv_file] = identifier[writer] ( identifier[f] )
identifier[csv_file] . identifier[writerow] ([ literal[string] . identifier[format] ( identifier[__version__] )])
identifier[csv_file] . identifier[writerow] ( identifier[heading_row_1] + identifier[heading_row_2] )
identifier[csv_file] . identifier[writerow] ([ literal[string] ]+ identifier[spacer] + identifier[list] ( identifier[desc] [ literal[string] ]))
identifier[csv_file] . identifier[writerow] ([ literal[string] ]+ identifier[spacer] + identifier[list] ( identifier[desc] [ literal[string] ]))
identifier[csv_file] . identifier[writerow] ([ literal[string] ]+ identifier[spacer] + identifier[list] ( identifier[desc] [ literal[string] ]))
identifier[csv_file] . identifier[writerow] ([ literal[string] ]+ identifier[spacer] + identifier[list] ( identifier[desc] [ literal[string] ]))
identifier[idx] = literal[int]
keyword[for] identifier[chan] keyword[in] identifier[xpac] . identifier[keys] ():
keyword[for] identifier[i] , identifier[j] keyword[in] identifier[enumerate] ( identifier[xpac] [ identifier[chan] ][ literal[string] ]):
identifier[idx] += literal[int]
identifier[cyc] = keyword[None]
keyword[if] identifier[xpac] [ identifier[chan] ][ literal[string] ][ identifier[i] ] keyword[is] keyword[not] keyword[None] :
identifier[cyc] = identifier[xpac] [ identifier[chan] ][ literal[string] ][ identifier[i] ][ literal[int] ]
identifier[data_row] = identifier[list] ( identifier[ravel] ( identifier[xpac] [ identifier[chan] ][ literal[string] ][ identifier[i] ,:,:]))
identifier[pval_row] =[]
keyword[if] literal[string] keyword[in] identifier[xpac] [ identifier[chan] ]:
identifier[pval_row] = identifier[list] ( identifier[ravel] ( identifier[xpac] [ identifier[chan] ][ literal[string] ][ identifier[i] ,:,:]))
identifier[csv_file] . identifier[writerow] ([ identifier[idx] ,
identifier[j] [ literal[int] ],
identifier[j] [ literal[int] ],
identifier[xpac] [ identifier[chan] ][ literal[string] ][ identifier[i] ],
identifier[xpac] [ identifier[chan] ][ literal[string] ][ identifier[i] ],
identifier[xpac] [ identifier[chan] ][ literal[string] ][ identifier[i] ],
identifier[cyc] ,
identifier[xpac] [ identifier[chan] ][ literal[string] ][ identifier[i] ],
identifier[chan] ,
]+ identifier[data_row] + identifier[pval_row] ) | def export_pac(self, xpac, fpha, famp, desc):
"""Write PAC analysis data to CSV."""
filename = splitext(self.filename)[0] + '_pac.csv'
heading_row_1 = ['Segment index', 'Start time', 'End time', 'Duration', 'Stitch', 'Stage', 'Cycle', 'Event type', 'Channel']
spacer = [''] * (len(heading_row_1) - 1)
heading_row_2 = []
for fp in fpha:
fp_str = str(fp[0]) + '-' + str(fp[1])
for fa in famp:
fa_str = str(fa[0]) + '-' + str(fa[1])
heading_row_2.append(fp_str + '_' + fa_str + '_pac') # depends on [control=['for'], data=['fa']] # depends on [control=['for'], data=['fp']]
if 'pval' in xpac[list(xpac.keys())[0]].keys():
heading_row_3 = [x[:-4] + '_pval' for x in heading_row_2]
heading_row_2.extend(heading_row_3) # depends on [control=['if'], data=[]]
with open(filename, 'w', newline='') as f:
lg.info('Writing to ' + str(filename))
csv_file = writer(f)
csv_file.writerow(['Wonambi v{}'.format(__version__)])
csv_file.writerow(heading_row_1 + heading_row_2)
csv_file.writerow(['Mean'] + spacer + list(desc['mean']))
csv_file.writerow(['SD'] + spacer + list(desc['sd']))
csv_file.writerow(['Mean of ln'] + spacer + list(desc['mean_log']))
csv_file.writerow(['SD of ln'] + spacer + list(desc['sd_log']))
idx = 0
for chan in xpac.keys():
for (i, j) in enumerate(xpac[chan]['times']):
idx += 1
cyc = None
if xpac[chan]['cycle'][i] is not None:
cyc = xpac[chan]['cycle'][i][2] # depends on [control=['if'], data=[]]
data_row = list(ravel(xpac[chan]['data'][i, :, :]))
pval_row = []
if 'pval' in xpac[chan]:
pval_row = list(ravel(xpac[chan]['pval'][i, :, :])) # depends on [control=['if'], data=[]]
csv_file.writerow([idx, j[0], j[1], xpac[chan]['duration'][i], xpac[chan]['n_stitch'][i], xpac[chan]['stage'][i], cyc, xpac[chan]['name'][i], chan] + data_row + pval_row) # depends on [control=['for'], data=[]] # depends on [control=['for'], data=['chan']] # depends on [control=['with'], data=['f']] |
def _clean_kwargs(self, kwargs, fn):
'''
Remove unexpected keyword arguments from the
set of received keyword arguments.
'''
# Do not do the cleaning if server config
# doesnt ask to ignore
if not self.server.IGNORE_UNEXPECTED_KWARGS:
return kwargs
expected_kwargs = set(inspect.getargspec(fn).args)
got_kwargs = set(kwargs.keys())
unexpected_kwargs = got_kwargs - expected_kwargs
for k in unexpected_kwargs:
del kwargs[k]
return kwargs | def function[_clean_kwargs, parameter[self, kwargs, fn]]:
constant[
Remove unexpected keyword arguments from the
set of received keyword arguments.
]
if <ast.UnaryOp object at 0x7da1b1b0dea0> begin[:]
return[name[kwargs]]
variable[expected_kwargs] assign[=] call[name[set], parameter[call[name[inspect].getargspec, parameter[name[fn]]].args]]
variable[got_kwargs] assign[=] call[name[set], parameter[call[name[kwargs].keys, parameter[]]]]
variable[unexpected_kwargs] assign[=] binary_operation[name[got_kwargs] - name[expected_kwargs]]
for taget[name[k]] in starred[name[unexpected_kwargs]] begin[:]
<ast.Delete object at 0x7da1b1803b50>
return[name[kwargs]] | keyword[def] identifier[_clean_kwargs] ( identifier[self] , identifier[kwargs] , identifier[fn] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[server] . identifier[IGNORE_UNEXPECTED_KWARGS] :
keyword[return] identifier[kwargs]
identifier[expected_kwargs] = identifier[set] ( identifier[inspect] . identifier[getargspec] ( identifier[fn] ). identifier[args] )
identifier[got_kwargs] = identifier[set] ( identifier[kwargs] . identifier[keys] ())
identifier[unexpected_kwargs] = identifier[got_kwargs] - identifier[expected_kwargs]
keyword[for] identifier[k] keyword[in] identifier[unexpected_kwargs] :
keyword[del] identifier[kwargs] [ identifier[k] ]
keyword[return] identifier[kwargs] | def _clean_kwargs(self, kwargs, fn):
"""
Remove unexpected keyword arguments from the
set of received keyword arguments.
"""
# Do not do the cleaning if server config
# doesnt ask to ignore
if not self.server.IGNORE_UNEXPECTED_KWARGS:
return kwargs # depends on [control=['if'], data=[]]
expected_kwargs = set(inspect.getargspec(fn).args)
got_kwargs = set(kwargs.keys())
unexpected_kwargs = got_kwargs - expected_kwargs
for k in unexpected_kwargs:
del kwargs[k] # depends on [control=['for'], data=['k']]
return kwargs |
def marvcli_develop_server(port, public):
"""Run development webserver.
ATTENTION: By default it is only served on localhost. To run it
within a container and access it from the outside, you need to
forward the port and tell it to listen on all IPs instead of only
localhost.
"""
from flask_cors import CORS
app = create_app(push=False)
app.site.load_for_web()
CORS(app)
class IPDBMiddleware(object):
def __init__(self, app):
self.app = app
def __call__(self, environ, start_response):
from ipdb import launch_ipdb_on_exception
with launch_ipdb_on_exception():
appiter = self.app(environ, start_response)
for item in appiter:
yield item
app.debug = True
if IPDB:
app.wsgi_app = IPDBMiddleware(app.wsgi_app)
app.run(use_debugger=False,
use_reloader=False,
host=('0.0.0.0' if public else '127.0.0.1'),
port=port,
threaded=False)
else:
app.run(host=('0.0.0.0' if public else '127.0.0.1'),
port=port,
reloader_type='watchdog',
threaded=False) | def function[marvcli_develop_server, parameter[port, public]]:
constant[Run development webserver.
ATTENTION: By default it is only served on localhost. To run it
within a container and access it from the outside, you need to
forward the port and tell it to listen on all IPs instead of only
localhost.
]
from relative_module[flask_cors] import module[CORS]
variable[app] assign[=] call[name[create_app], parameter[]]
call[name[app].site.load_for_web, parameter[]]
call[name[CORS], parameter[name[app]]]
class class[IPDBMiddleware, parameter[]] begin[:]
def function[__init__, parameter[self, app]]:
name[self].app assign[=] name[app]
def function[__call__, parameter[self, environ, start_response]]:
from relative_module[ipdb] import module[launch_ipdb_on_exception]
with call[name[launch_ipdb_on_exception], parameter[]] begin[:]
variable[appiter] assign[=] call[name[self].app, parameter[name[environ], name[start_response]]]
for taget[name[item]] in starred[name[appiter]] begin[:]
<ast.Yield object at 0x7da1b2633490>
name[app].debug assign[=] constant[True]
if name[IPDB] begin[:]
name[app].wsgi_app assign[=] call[name[IPDBMiddleware], parameter[name[app].wsgi_app]]
call[name[app].run, parameter[]] | keyword[def] identifier[marvcli_develop_server] ( identifier[port] , identifier[public] ):
literal[string]
keyword[from] identifier[flask_cors] keyword[import] identifier[CORS]
identifier[app] = identifier[create_app] ( identifier[push] = keyword[False] )
identifier[app] . identifier[site] . identifier[load_for_web] ()
identifier[CORS] ( identifier[app] )
keyword[class] identifier[IPDBMiddleware] ( identifier[object] ):
keyword[def] identifier[__init__] ( identifier[self] , identifier[app] ):
identifier[self] . identifier[app] = identifier[app]
keyword[def] identifier[__call__] ( identifier[self] , identifier[environ] , identifier[start_response] ):
keyword[from] identifier[ipdb] keyword[import] identifier[launch_ipdb_on_exception]
keyword[with] identifier[launch_ipdb_on_exception] ():
identifier[appiter] = identifier[self] . identifier[app] ( identifier[environ] , identifier[start_response] )
keyword[for] identifier[item] keyword[in] identifier[appiter] :
keyword[yield] identifier[item]
identifier[app] . identifier[debug] = keyword[True]
keyword[if] identifier[IPDB] :
identifier[app] . identifier[wsgi_app] = identifier[IPDBMiddleware] ( identifier[app] . identifier[wsgi_app] )
identifier[app] . identifier[run] ( identifier[use_debugger] = keyword[False] ,
identifier[use_reloader] = keyword[False] ,
identifier[host] =( literal[string] keyword[if] identifier[public] keyword[else] literal[string] ),
identifier[port] = identifier[port] ,
identifier[threaded] = keyword[False] )
keyword[else] :
identifier[app] . identifier[run] ( identifier[host] =( literal[string] keyword[if] identifier[public] keyword[else] literal[string] ),
identifier[port] = identifier[port] ,
identifier[reloader_type] = literal[string] ,
identifier[threaded] = keyword[False] ) | def marvcli_develop_server(port, public):
"""Run development webserver.
ATTENTION: By default it is only served on localhost. To run it
within a container and access it from the outside, you need to
forward the port and tell it to listen on all IPs instead of only
localhost.
"""
from flask_cors import CORS
app = create_app(push=False)
app.site.load_for_web()
CORS(app)
class IPDBMiddleware(object):
def __init__(self, app):
self.app = app
def __call__(self, environ, start_response):
from ipdb import launch_ipdb_on_exception
with launch_ipdb_on_exception():
appiter = self.app(environ, start_response)
for item in appiter:
yield item # depends on [control=['for'], data=['item']] # depends on [control=['with'], data=[]]
app.debug = True
if IPDB:
app.wsgi_app = IPDBMiddleware(app.wsgi_app)
app.run(use_debugger=False, use_reloader=False, host='0.0.0.0' if public else '127.0.0.1', port=port, threaded=False) # depends on [control=['if'], data=[]]
else:
app.run(host='0.0.0.0' if public else '127.0.0.1', port=port, reloader_type='watchdog', threaded=False) |
async def get_scene(self, scene_id, from_cache=True) -> Scene:
"""Get a scene resource instance.
:raises a ResourceNotFoundException when no scene found.
:raises a PvApiError when something is wrong with the hub.
"""
if not from_cache:
await self.get_scenes()
for _scene in self.scenes:
if _scene.id == scene_id:
return _scene
raise ResourceNotFoundException("Scene not found scene_id: {}".format(scene_id)) | <ast.AsyncFunctionDef object at 0x7da1b0a808b0> | keyword[async] keyword[def] identifier[get_scene] ( identifier[self] , identifier[scene_id] , identifier[from_cache] = keyword[True] )-> identifier[Scene] :
literal[string]
keyword[if] keyword[not] identifier[from_cache] :
keyword[await] identifier[self] . identifier[get_scenes] ()
keyword[for] identifier[_scene] keyword[in] identifier[self] . identifier[scenes] :
keyword[if] identifier[_scene] . identifier[id] == identifier[scene_id] :
keyword[return] identifier[_scene]
keyword[raise] identifier[ResourceNotFoundException] ( literal[string] . identifier[format] ( identifier[scene_id] )) | async def get_scene(self, scene_id, from_cache=True) -> Scene:
"""Get a scene resource instance.
:raises a ResourceNotFoundException when no scene found.
:raises a PvApiError when something is wrong with the hub.
"""
if not from_cache:
await self.get_scenes() # depends on [control=['if'], data=[]]
for _scene in self.scenes:
if _scene.id == scene_id:
return _scene # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['_scene']]
raise ResourceNotFoundException('Scene not found scene_id: {}'.format(scene_id)) |
def validate_payload_len(self, payload):
"""Validate payload len."""
if not hasattr(self, "PAYLOAD_LEN"):
# No fixed payload len, e.g. within FrameGetSceneListNotification
return
# pylint: disable=no-member
if len(payload) != self.PAYLOAD_LEN:
raise PyVLXException("Invalid payload len", expected_len=self.PAYLOAD_LEN, current_len=len(payload), frame_type=type(self).__name__) | def function[validate_payload_len, parameter[self, payload]]:
constant[Validate payload len.]
if <ast.UnaryOp object at 0x7da207f00c40> begin[:]
return[None]
if compare[call[name[len], parameter[name[payload]]] not_equal[!=] name[self].PAYLOAD_LEN] begin[:]
<ast.Raise object at 0x7da207f02e00> | keyword[def] identifier[validate_payload_len] ( identifier[self] , identifier[payload] ):
literal[string]
keyword[if] keyword[not] identifier[hasattr] ( identifier[self] , literal[string] ):
keyword[return]
keyword[if] identifier[len] ( identifier[payload] )!= identifier[self] . identifier[PAYLOAD_LEN] :
keyword[raise] identifier[PyVLXException] ( literal[string] , identifier[expected_len] = identifier[self] . identifier[PAYLOAD_LEN] , identifier[current_len] = identifier[len] ( identifier[payload] ), identifier[frame_type] = identifier[type] ( identifier[self] ). identifier[__name__] ) | def validate_payload_len(self, payload):
"""Validate payload len."""
if not hasattr(self, 'PAYLOAD_LEN'):
# No fixed payload len, e.g. within FrameGetSceneListNotification
return # depends on [control=['if'], data=[]]
# pylint: disable=no-member
if len(payload) != self.PAYLOAD_LEN:
raise PyVLXException('Invalid payload len', expected_len=self.PAYLOAD_LEN, current_len=len(payload), frame_type=type(self).__name__) # depends on [control=['if'], data=[]] |
def name(self):
"""
Name of document as seen on ProvStore
"""
if self._name:
return self._name
elif not self.abstract:
return self.read_meta()._name
raise EmptyDocumentException() | def function[name, parameter[self]]:
constant[
Name of document as seen on ProvStore
]
if name[self]._name begin[:]
return[name[self]._name]
<ast.Raise object at 0x7da204961fc0> | keyword[def] identifier[name] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[_name] :
keyword[return] identifier[self] . identifier[_name]
keyword[elif] keyword[not] identifier[self] . identifier[abstract] :
keyword[return] identifier[self] . identifier[read_meta] (). identifier[_name]
keyword[raise] identifier[EmptyDocumentException] () | def name(self):
"""
Name of document as seen on ProvStore
"""
if self._name:
return self._name # depends on [control=['if'], data=[]]
elif not self.abstract:
return self.read_meta()._name # depends on [control=['if'], data=[]]
raise EmptyDocumentException() |
def iter_chunks(l, size):
"""
Returns a generator containing chunks of *size* of a list, integer or generator *l*. A *size*
smaller than 1 results in no chunking at all.
"""
if isinstance(l, six.integer_types):
l = six.moves.range(l)
if is_lazy_iterable(l):
if size < 1:
yield list(l)
else:
chunk = []
for elem in l:
if len(chunk) < size:
chunk.append(elem)
else:
yield chunk
chunk = [elem]
else:
if chunk:
yield chunk
else:
if size < 1:
yield l
else:
for i in six.moves.range(0, len(l), size):
yield l[i:i + size] | def function[iter_chunks, parameter[l, size]]:
constant[
Returns a generator containing chunks of *size* of a list, integer or generator *l*. A *size*
smaller than 1 results in no chunking at all.
]
if call[name[isinstance], parameter[name[l], name[six].integer_types]] begin[:]
variable[l] assign[=] call[name[six].moves.range, parameter[name[l]]]
if call[name[is_lazy_iterable], parameter[name[l]]] begin[:]
if compare[name[size] less[<] constant[1]] begin[:]
<ast.Yield object at 0x7da1b05541c0> | keyword[def] identifier[iter_chunks] ( identifier[l] , identifier[size] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[l] , identifier[six] . identifier[integer_types] ):
identifier[l] = identifier[six] . identifier[moves] . identifier[range] ( identifier[l] )
keyword[if] identifier[is_lazy_iterable] ( identifier[l] ):
keyword[if] identifier[size] < literal[int] :
keyword[yield] identifier[list] ( identifier[l] )
keyword[else] :
identifier[chunk] =[]
keyword[for] identifier[elem] keyword[in] identifier[l] :
keyword[if] identifier[len] ( identifier[chunk] )< identifier[size] :
identifier[chunk] . identifier[append] ( identifier[elem] )
keyword[else] :
keyword[yield] identifier[chunk]
identifier[chunk] =[ identifier[elem] ]
keyword[else] :
keyword[if] identifier[chunk] :
keyword[yield] identifier[chunk]
keyword[else] :
keyword[if] identifier[size] < literal[int] :
keyword[yield] identifier[l]
keyword[else] :
keyword[for] identifier[i] keyword[in] identifier[six] . identifier[moves] . identifier[range] ( literal[int] , identifier[len] ( identifier[l] ), identifier[size] ):
keyword[yield] identifier[l] [ identifier[i] : identifier[i] + identifier[size] ] | def iter_chunks(l, size):
"""
Returns a generator containing chunks of *size* of a list, integer or generator *l*. A *size*
smaller than 1 results in no chunking at all.
"""
if isinstance(l, six.integer_types):
l = six.moves.range(l) # depends on [control=['if'], data=[]]
if is_lazy_iterable(l):
if size < 1:
yield list(l) # depends on [control=['if'], data=[]]
else:
chunk = []
for elem in l:
if len(chunk) < size:
chunk.append(elem) # depends on [control=['if'], data=[]]
else:
yield chunk
chunk = [elem] # depends on [control=['for'], data=['elem']]
else:
if chunk:
yield chunk # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif size < 1:
yield l # depends on [control=['if'], data=[]]
else:
for i in six.moves.range(0, len(l), size):
yield l[i:i + size] # depends on [control=['for'], data=['i']] |
def likelihood_data_given_model(self, kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps, source_marg=False):
"""
computes the likelihood of the data given a model
This is specified with the non-linear parameters and a linear inversion and prior marginalisation.
:param kwargs_lens:
:param kwargs_source:
:param kwargs_lens_light:
:param kwargs_ps:
:return: log likelihood (natural logarithm) (sum of the log likelihoods of the individual images)
"""
# generate image
im_sim_list, model_error_list, cov_matrix, param = self.image_linear_solve(kwargs_lens, kwargs_source,
kwargs_lens_light, kwargs_ps,
inv_bool=source_marg)
# compute X^2
logL = 0
index = 0
for i in range(self._num_bands):
if self._compute_bool[i] is True:
logL += self._imageModel_list[i].Data.log_likelihood(im_sim_list[index], self._imageModel_list[i].ImageNumerics.mask, model_error_list[index])
index += 1
if cov_matrix is not None and source_marg:
marg_const = de_lens.marginalisation_const(cov_matrix)
logL += marg_const
return logL | def function[likelihood_data_given_model, parameter[self, kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps, source_marg]]:
constant[
computes the likelihood of the data given a model
This is specified with the non-linear parameters and a linear inversion and prior marginalisation.
:param kwargs_lens:
:param kwargs_source:
:param kwargs_lens_light:
:param kwargs_ps:
:return: log likelihood (natural logarithm) (sum of the log likelihoods of the individual images)
]
<ast.Tuple object at 0x7da1b04a5390> assign[=] call[name[self].image_linear_solve, parameter[name[kwargs_lens], name[kwargs_source], name[kwargs_lens_light], name[kwargs_ps]]]
variable[logL] assign[=] constant[0]
variable[index] assign[=] constant[0]
for taget[name[i]] in starred[call[name[range], parameter[name[self]._num_bands]]] begin[:]
if compare[call[name[self]._compute_bool][name[i]] is constant[True]] begin[:]
<ast.AugAssign object at 0x7da1b04a68c0>
<ast.AugAssign object at 0x7da1b04a52a0>
if <ast.BoolOp object at 0x7da1b04a5330> begin[:]
variable[marg_const] assign[=] call[name[de_lens].marginalisation_const, parameter[name[cov_matrix]]]
<ast.AugAssign object at 0x7da1b04a7be0>
return[name[logL]] | keyword[def] identifier[likelihood_data_given_model] ( identifier[self] , identifier[kwargs_lens] , identifier[kwargs_source] , identifier[kwargs_lens_light] , identifier[kwargs_ps] , identifier[source_marg] = keyword[False] ):
literal[string]
identifier[im_sim_list] , identifier[model_error_list] , identifier[cov_matrix] , identifier[param] = identifier[self] . identifier[image_linear_solve] ( identifier[kwargs_lens] , identifier[kwargs_source] ,
identifier[kwargs_lens_light] , identifier[kwargs_ps] ,
identifier[inv_bool] = identifier[source_marg] )
identifier[logL] = literal[int]
identifier[index] = literal[int]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[self] . identifier[_num_bands] ):
keyword[if] identifier[self] . identifier[_compute_bool] [ identifier[i] ] keyword[is] keyword[True] :
identifier[logL] += identifier[self] . identifier[_imageModel_list] [ identifier[i] ]. identifier[Data] . identifier[log_likelihood] ( identifier[im_sim_list] [ identifier[index] ], identifier[self] . identifier[_imageModel_list] [ identifier[i] ]. identifier[ImageNumerics] . identifier[mask] , identifier[model_error_list] [ identifier[index] ])
identifier[index] += literal[int]
keyword[if] identifier[cov_matrix] keyword[is] keyword[not] keyword[None] keyword[and] identifier[source_marg] :
identifier[marg_const] = identifier[de_lens] . identifier[marginalisation_const] ( identifier[cov_matrix] )
identifier[logL] += identifier[marg_const]
keyword[return] identifier[logL] | def likelihood_data_given_model(self, kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps, source_marg=False):
"""
computes the likelihood of the data given a model
This is specified with the non-linear parameters and a linear inversion and prior marginalisation.
:param kwargs_lens:
:param kwargs_source:
:param kwargs_lens_light:
:param kwargs_ps:
:return: log likelihood (natural logarithm) (sum of the log likelihoods of the individual images)
"""
# generate image
(im_sim_list, model_error_list, cov_matrix, param) = self.image_linear_solve(kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps, inv_bool=source_marg)
# compute X^2
logL = 0
index = 0
for i in range(self._num_bands):
if self._compute_bool[i] is True:
logL += self._imageModel_list[i].Data.log_likelihood(im_sim_list[index], self._imageModel_list[i].ImageNumerics.mask, model_error_list[index])
index += 1 # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']]
if cov_matrix is not None and source_marg:
marg_const = de_lens.marginalisation_const(cov_matrix)
logL += marg_const # depends on [control=['if'], data=[]]
return logL |
def unwrap(self):
"""
Returns a GLFWimage object.
"""
pixels = [[[int(c) for c in p] for p in l] for l in self.pixels_array]
return self.GLFWimage(self.width, self.height, pixels) | def function[unwrap, parameter[self]]:
constant[
Returns a GLFWimage object.
]
variable[pixels] assign[=] <ast.ListComp object at 0x7da2045664a0>
return[call[name[self].GLFWimage, parameter[name[self].width, name[self].height, name[pixels]]]] | keyword[def] identifier[unwrap] ( identifier[self] ):
literal[string]
identifier[pixels] =[[[ identifier[int] ( identifier[c] ) keyword[for] identifier[c] keyword[in] identifier[p] ] keyword[for] identifier[p] keyword[in] identifier[l] ] keyword[for] identifier[l] keyword[in] identifier[self] . identifier[pixels_array] ]
keyword[return] identifier[self] . identifier[GLFWimage] ( identifier[self] . identifier[width] , identifier[self] . identifier[height] , identifier[pixels] ) | def unwrap(self):
"""
Returns a GLFWimage object.
"""
pixels = [[[int(c) for c in p] for p in l] for l in self.pixels_array]
return self.GLFWimage(self.width, self.height, pixels) |
def processHierarchical(self):
"""Main process for hierarchical segmentation.
Returns
-------
est_idxs : list
List containing estimated times for each layer in the hierarchy
as np.arrays
est_labels : list
List containing estimated labels for each layer in the hierarchy
as np.arrays
"""
# Preprocess to obtain features, times, and input boundary indeces
F, dur = features(self.file_struct, self.annot_beats, self.framesync)
try:
# Load and apply transform
W = load_transform(self.config["transform"])
F = W.dot(F)
# Get Segments
kmin, kmax = get_num_segs(dur)
# Run algorithm layer by layer
est_idxs = []
est_labels = []
for k in range(kmin, kmax):
S, cost = get_k_segments(F, k)
est_idxs.append(S)
est_labels.append(np.ones(len(S) - 1) * -1)
# Make sure that the first and last boundaries are included
assert est_idxs[-1][0] == 0 and \
est_idxs[-1][-1] == F.shape[1] - 1, "Layer %d does not " \
"start or end in the right frame(s)." % k
# Post process layer
est_idxs[-1], est_labels[-1] = \
self._postprocess(est_idxs[-1], est_labels[-1])
except:
# The audio file is too short, only beginning and end
logging.warning("Audio file too short! "
"Only start and end boundaries.")
est_idxs = [np.array([0, F.shape[1] - 1])]
est_labels = [np.ones(1) * -1]
return est_idxs, est_labels | def function[processHierarchical, parameter[self]]:
constant[Main process for hierarchical segmentation.
Returns
-------
est_idxs : list
List containing estimated times for each layer in the hierarchy
as np.arrays
est_labels : list
List containing estimated labels for each layer in the hierarchy
as np.arrays
]
<ast.Tuple object at 0x7da1b02d9b40> assign[=] call[name[features], parameter[name[self].file_struct, name[self].annot_beats, name[self].framesync]]
<ast.Try object at 0x7da1b02d8bb0>
return[tuple[[<ast.Name object at 0x7da1b0214bb0>, <ast.Name object at 0x7da1b02167a0>]]] | keyword[def] identifier[processHierarchical] ( identifier[self] ):
literal[string]
identifier[F] , identifier[dur] = identifier[features] ( identifier[self] . identifier[file_struct] , identifier[self] . identifier[annot_beats] , identifier[self] . identifier[framesync] )
keyword[try] :
identifier[W] = identifier[load_transform] ( identifier[self] . identifier[config] [ literal[string] ])
identifier[F] = identifier[W] . identifier[dot] ( identifier[F] )
identifier[kmin] , identifier[kmax] = identifier[get_num_segs] ( identifier[dur] )
identifier[est_idxs] =[]
identifier[est_labels] =[]
keyword[for] identifier[k] keyword[in] identifier[range] ( identifier[kmin] , identifier[kmax] ):
identifier[S] , identifier[cost] = identifier[get_k_segments] ( identifier[F] , identifier[k] )
identifier[est_idxs] . identifier[append] ( identifier[S] )
identifier[est_labels] . identifier[append] ( identifier[np] . identifier[ones] ( identifier[len] ( identifier[S] )- literal[int] )*- literal[int] )
keyword[assert] identifier[est_idxs] [- literal[int] ][ literal[int] ]== literal[int] keyword[and] identifier[est_idxs] [- literal[int] ][- literal[int] ]== identifier[F] . identifier[shape] [ literal[int] ]- literal[int] , literal[string] literal[string] % identifier[k]
identifier[est_idxs] [- literal[int] ], identifier[est_labels] [- literal[int] ]= identifier[self] . identifier[_postprocess] ( identifier[est_idxs] [- literal[int] ], identifier[est_labels] [- literal[int] ])
keyword[except] :
identifier[logging] . identifier[warning] ( literal[string]
literal[string] )
identifier[est_idxs] =[ identifier[np] . identifier[array] ([ literal[int] , identifier[F] . identifier[shape] [ literal[int] ]- literal[int] ])]
identifier[est_labels] =[ identifier[np] . identifier[ones] ( literal[int] )*- literal[int] ]
keyword[return] identifier[est_idxs] , identifier[est_labels] | def processHierarchical(self):
"""Main process for hierarchical segmentation.
Returns
-------
est_idxs : list
List containing estimated times for each layer in the hierarchy
as np.arrays
est_labels : list
List containing estimated labels for each layer in the hierarchy
as np.arrays
"""
# Preprocess to obtain features, times, and input boundary indeces
(F, dur) = features(self.file_struct, self.annot_beats, self.framesync)
try:
# Load and apply transform
W = load_transform(self.config['transform'])
F = W.dot(F)
# Get Segments
(kmin, kmax) = get_num_segs(dur)
# Run algorithm layer by layer
est_idxs = []
est_labels = []
for k in range(kmin, kmax):
(S, cost) = get_k_segments(F, k)
est_idxs.append(S)
est_labels.append(np.ones(len(S) - 1) * -1)
# Make sure that the first and last boundaries are included
assert est_idxs[-1][0] == 0 and est_idxs[-1][-1] == F.shape[1] - 1, 'Layer %d does not start or end in the right frame(s).' % k
# Post process layer
(est_idxs[-1], est_labels[-1]) = self._postprocess(est_idxs[-1], est_labels[-1]) # depends on [control=['for'], data=['k']] # depends on [control=['try'], data=[]]
except:
# The audio file is too short, only beginning and end
logging.warning('Audio file too short! Only start and end boundaries.')
est_idxs = [np.array([0, F.shape[1] - 1])]
est_labels = [np.ones(1) * -1] # depends on [control=['except'], data=[]]
return (est_idxs, est_labels) |
def savings_score(y_true, y_pred, cost_mat):
#TODO: update description
"""Savings score.
This function calculates the savings cost of using y_pred on y_true with
cost-matrix cost-mat, as the difference of y_pred and the cost_loss of a naive
classification model.
Parameters
----------
y_true : array-like or label indicator matrix
Ground truth (correct) labels.
y_pred : array-like or label indicator matrix
Predicted labels, as returned by a classifier.
cost_mat : array-like of shape = [n_samples, 4]
Cost matrix of the classification problem
Where the columns represents the costs of: false positives, false negatives,
true positives and true negatives, for each example.
Returns
-------
score : float
Savings of a using y_pred on y_true with cost-matrix cost-mat
The best performance is 1.
References
----------
.. [1] A. Correa Bahnsen, A. Stojanovic, D.Aouada, B, Ottersten,
`"Improving Credit Card Fraud Detection with Calibrated Probabilities" <http://albahnsen.com/files/%20Improving%20Credit%20Card%20Fraud%20Detection%20by%20using%20Calibrated%20Probabilities%20-%20Publish.pdf>`__, in Proceedings of the fourteenth SIAM International Conference on Data Mining,
677-685, 2014.
See also
--------
cost_loss
Examples
--------
>>> import numpy as np
>>> from costcla.metrics import savings_score, cost_loss
>>> y_pred = [0, 1, 0, 0]
>>> y_true = [0, 1, 1, 0]
>>> cost_mat = np.array([[4, 1, 0, 0], [1, 3, 0, 0], [2, 3, 0, 0], [2, 1, 0, 0]])
>>> savings_score(y_true, y_pred, cost_mat)
0.5
"""
#TODO: Check consistency of cost_mat
y_true = column_or_1d(y_true)
y_pred = column_or_1d(y_pred)
n_samples = len(y_true)
# Calculate the cost of naive prediction
cost_base = min(cost_loss(y_true, np.zeros(n_samples), cost_mat),
cost_loss(y_true, np.ones(n_samples), cost_mat))
cost = cost_loss(y_true, y_pred, cost_mat)
return 1.0 - cost / cost_base | def function[savings_score, parameter[y_true, y_pred, cost_mat]]:
constant[Savings score.
This function calculates the savings cost of using y_pred on y_true with
cost-matrix cost-mat, as the difference of y_pred and the cost_loss of a naive
classification model.
Parameters
----------
y_true : array-like or label indicator matrix
Ground truth (correct) labels.
y_pred : array-like or label indicator matrix
Predicted labels, as returned by a classifier.
cost_mat : array-like of shape = [n_samples, 4]
Cost matrix of the classification problem
Where the columns represents the costs of: false positives, false negatives,
true positives and true negatives, for each example.
Returns
-------
score : float
Savings of a using y_pred on y_true with cost-matrix cost-mat
The best performance is 1.
References
----------
.. [1] A. Correa Bahnsen, A. Stojanovic, D.Aouada, B, Ottersten,
`"Improving Credit Card Fraud Detection with Calibrated Probabilities" <http://albahnsen.com/files/%20Improving%20Credit%20Card%20Fraud%20Detection%20by%20using%20Calibrated%20Probabilities%20-%20Publish.pdf>`__, in Proceedings of the fourteenth SIAM International Conference on Data Mining,
677-685, 2014.
See also
--------
cost_loss
Examples
--------
>>> import numpy as np
>>> from costcla.metrics import savings_score, cost_loss
>>> y_pred = [0, 1, 0, 0]
>>> y_true = [0, 1, 1, 0]
>>> cost_mat = np.array([[4, 1, 0, 0], [1, 3, 0, 0], [2, 3, 0, 0], [2, 1, 0, 0]])
>>> savings_score(y_true, y_pred, cost_mat)
0.5
]
variable[y_true] assign[=] call[name[column_or_1d], parameter[name[y_true]]]
variable[y_pred] assign[=] call[name[column_or_1d], parameter[name[y_pred]]]
variable[n_samples] assign[=] call[name[len], parameter[name[y_true]]]
variable[cost_base] assign[=] call[name[min], parameter[call[name[cost_loss], parameter[name[y_true], call[name[np].zeros, parameter[name[n_samples]]], name[cost_mat]]], call[name[cost_loss], parameter[name[y_true], call[name[np].ones, parameter[name[n_samples]]], name[cost_mat]]]]]
variable[cost] assign[=] call[name[cost_loss], parameter[name[y_true], name[y_pred], name[cost_mat]]]
return[binary_operation[constant[1.0] - binary_operation[name[cost] / name[cost_base]]]] | keyword[def] identifier[savings_score] ( identifier[y_true] , identifier[y_pred] , identifier[cost_mat] ):
literal[string]
identifier[y_true] = identifier[column_or_1d] ( identifier[y_true] )
identifier[y_pred] = identifier[column_or_1d] ( identifier[y_pred] )
identifier[n_samples] = identifier[len] ( identifier[y_true] )
identifier[cost_base] = identifier[min] ( identifier[cost_loss] ( identifier[y_true] , identifier[np] . identifier[zeros] ( identifier[n_samples] ), identifier[cost_mat] ),
identifier[cost_loss] ( identifier[y_true] , identifier[np] . identifier[ones] ( identifier[n_samples] ), identifier[cost_mat] ))
identifier[cost] = identifier[cost_loss] ( identifier[y_true] , identifier[y_pred] , identifier[cost_mat] )
keyword[return] literal[int] - identifier[cost] / identifier[cost_base] | def savings_score(y_true, y_pred, cost_mat):
#TODO: update description
'Savings score.\n\n This function calculates the savings cost of using y_pred on y_true with\n cost-matrix cost-mat, as the difference of y_pred and the cost_loss of a naive\n classification model.\n\n Parameters\n ----------\n y_true : array-like or label indicator matrix\n Ground truth (correct) labels.\n\n y_pred : array-like or label indicator matrix\n Predicted labels, as returned by a classifier.\n\n cost_mat : array-like of shape = [n_samples, 4]\n Cost matrix of the classification problem\n Where the columns represents the costs of: false positives, false negatives,\n true positives and true negatives, for each example.\n\n Returns\n -------\n score : float\n Savings of a using y_pred on y_true with cost-matrix cost-mat\n\n The best performance is 1.\n\n References\n ----------\n .. [1] A. Correa Bahnsen, A. Stojanovic, D.Aouada, B, Ottersten,\n `"Improving Credit Card Fraud Detection with Calibrated Probabilities" <http://albahnsen.com/files/%20Improving%20Credit%20Card%20Fraud%20Detection%20by%20using%20Calibrated%20Probabilities%20-%20Publish.pdf>`__, in Proceedings of the fourteenth SIAM International Conference on Data Mining,\n 677-685, 2014.\n\n See also\n --------\n cost_loss\n\n Examples\n --------\n >>> import numpy as np\n >>> from costcla.metrics import savings_score, cost_loss\n >>> y_pred = [0, 1, 0, 0]\n >>> y_true = [0, 1, 1, 0]\n >>> cost_mat = np.array([[4, 1, 0, 0], [1, 3, 0, 0], [2, 3, 0, 0], [2, 1, 0, 0]])\n >>> savings_score(y_true, y_pred, cost_mat)\n 0.5\n '
#TODO: Check consistency of cost_mat
y_true = column_or_1d(y_true)
y_pred = column_or_1d(y_pred)
n_samples = len(y_true)
# Calculate the cost of naive prediction
cost_base = min(cost_loss(y_true, np.zeros(n_samples), cost_mat), cost_loss(y_true, np.ones(n_samples), cost_mat))
cost = cost_loss(y_true, y_pred, cost_mat)
return 1.0 - cost / cost_base |
def rebin(a, *args):
"""See http://scipy-cookbook.readthedocs.io/items/Rebinning.html
Note: integer division in the computation of 'factor' has been
included to avoid the following runtime message:
VisibleDeprecationWarning: using a non-integer number instead of
an integer will result in an error in the future
from __future__ import division
"""
shape = a.shape
len_shape = len(shape)
factor = np.asarray(shape) // np.asarray(args)
ev_list = ['a.reshape('] + \
['args[%d], factor[%d], ' % (i, i) for i in range(len_shape)] + \
[')'] + ['.mean(%d)' % (i+1) for i in range(len_shape)]
# print(''.join(ev_list))
return eval(''.join(ev_list)) | def function[rebin, parameter[a]]:
constant[See http://scipy-cookbook.readthedocs.io/items/Rebinning.html
Note: integer division in the computation of 'factor' has been
included to avoid the following runtime message:
VisibleDeprecationWarning: using a non-integer number instead of
an integer will result in an error in the future
from __future__ import division
]
variable[shape] assign[=] name[a].shape
variable[len_shape] assign[=] call[name[len], parameter[name[shape]]]
variable[factor] assign[=] binary_operation[call[name[np].asarray, parameter[name[shape]]] <ast.FloorDiv object at 0x7da2590d6bc0> call[name[np].asarray, parameter[name[args]]]]
variable[ev_list] assign[=] binary_operation[binary_operation[binary_operation[list[[<ast.Constant object at 0x7da1b2428850>]] + <ast.ListComp object at 0x7da1b242b910>] + list[[<ast.Constant object at 0x7da1b242b700>]]] + <ast.ListComp object at 0x7da1b24282e0>]
return[call[name[eval], parameter[call[constant[].join, parameter[name[ev_list]]]]]] | keyword[def] identifier[rebin] ( identifier[a] ,* identifier[args] ):
literal[string]
identifier[shape] = identifier[a] . identifier[shape]
identifier[len_shape] = identifier[len] ( identifier[shape] )
identifier[factor] = identifier[np] . identifier[asarray] ( identifier[shape] )// identifier[np] . identifier[asarray] ( identifier[args] )
identifier[ev_list] =[ literal[string] ]+[ literal[string] %( identifier[i] , identifier[i] ) keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len_shape] )]+[ literal[string] ]+[ literal[string] %( identifier[i] + literal[int] ) keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len_shape] )]
keyword[return] identifier[eval] ( literal[string] . identifier[join] ( identifier[ev_list] )) | def rebin(a, *args):
"""See http://scipy-cookbook.readthedocs.io/items/Rebinning.html
Note: integer division in the computation of 'factor' has been
included to avoid the following runtime message:
VisibleDeprecationWarning: using a non-integer number instead of
an integer will result in an error in the future
from __future__ import division
"""
shape = a.shape
len_shape = len(shape)
factor = np.asarray(shape) // np.asarray(args)
ev_list = ['a.reshape('] + ['args[%d], factor[%d], ' % (i, i) for i in range(len_shape)] + [')'] + ['.mean(%d)' % (i + 1) for i in range(len_shape)]
# print(''.join(ev_list))
return eval(''.join(ev_list)) |
def parse_helpfull_output(help_output, regex=FLAG_HELP_RE_PY):
"""Parses the output of --helpfull.
Args:
help_output: str, the full output of --helpfull.
Returns:
A set of flags that are valid flags.
"""
valid_flags = set()
for _, no_prefix, flag_name in regex.findall(help_output):
valid_flags.add('--' + flag_name)
if no_prefix:
valid_flags.add('--no' + flag_name)
return valid_flags | def function[parse_helpfull_output, parameter[help_output, regex]]:
constant[Parses the output of --helpfull.
Args:
help_output: str, the full output of --helpfull.
Returns:
A set of flags that are valid flags.
]
variable[valid_flags] assign[=] call[name[set], parameter[]]
for taget[tuple[[<ast.Name object at 0x7da20e9568f0>, <ast.Name object at 0x7da20e954d60>, <ast.Name object at 0x7da20e955870>]]] in starred[call[name[regex].findall, parameter[name[help_output]]]] begin[:]
call[name[valid_flags].add, parameter[binary_operation[constant[--] + name[flag_name]]]]
if name[no_prefix] begin[:]
call[name[valid_flags].add, parameter[binary_operation[constant[--no] + name[flag_name]]]]
return[name[valid_flags]] | keyword[def] identifier[parse_helpfull_output] ( identifier[help_output] , identifier[regex] = identifier[FLAG_HELP_RE_PY] ):
literal[string]
identifier[valid_flags] = identifier[set] ()
keyword[for] identifier[_] , identifier[no_prefix] , identifier[flag_name] keyword[in] identifier[regex] . identifier[findall] ( identifier[help_output] ):
identifier[valid_flags] . identifier[add] ( literal[string] + identifier[flag_name] )
keyword[if] identifier[no_prefix] :
identifier[valid_flags] . identifier[add] ( literal[string] + identifier[flag_name] )
keyword[return] identifier[valid_flags] | def parse_helpfull_output(help_output, regex=FLAG_HELP_RE_PY):
"""Parses the output of --helpfull.
Args:
help_output: str, the full output of --helpfull.
Returns:
A set of flags that are valid flags.
"""
valid_flags = set()
for (_, no_prefix, flag_name) in regex.findall(help_output):
valid_flags.add('--' + flag_name)
if no_prefix:
valid_flags.add('--no' + flag_name) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
return valid_flags |
def open(self, verbose):
"""
open the serial port using the configuration data
returns a reference to this instance
"""
# open a serial port
if verbose:
print('\nOpening Arduino Serial port %s ' % self.port_id)
try:
# in case the port is already open, let's close it and then
# reopen it
self.arduino.close()
time.sleep(1)
self.arduino.open()
time.sleep(1)
return self.arduino
except Exception:
# opened failed - will report back to caller
raise | def function[open, parameter[self, verbose]]:
constant[
open the serial port using the configuration data
returns a reference to this instance
]
if name[verbose] begin[:]
call[name[print], parameter[binary_operation[constant[
Opening Arduino Serial port %s ] <ast.Mod object at 0x7da2590d6920> name[self].port_id]]]
<ast.Try object at 0x7da20e954160> | keyword[def] identifier[open] ( identifier[self] , identifier[verbose] ):
literal[string]
keyword[if] identifier[verbose] :
identifier[print] ( literal[string] % identifier[self] . identifier[port_id] )
keyword[try] :
identifier[self] . identifier[arduino] . identifier[close] ()
identifier[time] . identifier[sleep] ( literal[int] )
identifier[self] . identifier[arduino] . identifier[open] ()
identifier[time] . identifier[sleep] ( literal[int] )
keyword[return] identifier[self] . identifier[arduino]
keyword[except] identifier[Exception] :
keyword[raise] | def open(self, verbose):
"""
open the serial port using the configuration data
returns a reference to this instance
"""
# open a serial port
if verbose:
print('\nOpening Arduino Serial port %s ' % self.port_id) # depends on [control=['if'], data=[]]
try:
# in case the port is already open, let's close it and then
# reopen it
self.arduino.close()
time.sleep(1)
self.arduino.open()
time.sleep(1)
return self.arduino # depends on [control=['try'], data=[]]
except Exception:
# opened failed - will report back to caller
raise # depends on [control=['except'], data=[]] |
def websocket_session(func: typing.Callable) -> ASGIApp:
"""
Takes a coroutine `func(session)`, and returns an ASGI application.
"""
# assert asyncio.iscoroutinefunction(func), "WebSocket endpoints must be async"
async def app(scope: Scope, receive: Receive, send: Send) -> None:
session = WebSocket(scope, receive=receive, send=send)
await func(session)
return app | def function[websocket_session, parameter[func]]:
constant[
Takes a coroutine `func(session)`, and returns an ASGI application.
]
<ast.AsyncFunctionDef object at 0x7da1b000c310>
return[name[app]] | keyword[def] identifier[websocket_session] ( identifier[func] : identifier[typing] . identifier[Callable] )-> identifier[ASGIApp] :
literal[string]
keyword[async] keyword[def] identifier[app] ( identifier[scope] : identifier[Scope] , identifier[receive] : identifier[Receive] , identifier[send] : identifier[Send] )-> keyword[None] :
identifier[session] = identifier[WebSocket] ( identifier[scope] , identifier[receive] = identifier[receive] , identifier[send] = identifier[send] )
keyword[await] identifier[func] ( identifier[session] )
keyword[return] identifier[app] | def websocket_session(func: typing.Callable) -> ASGIApp:
"""
Takes a coroutine `func(session)`, and returns an ASGI application.
"""
# assert asyncio.iscoroutinefunction(func), "WebSocket endpoints must be async"
async def app(scope: Scope, receive: Receive, send: Send) -> None:
session = WebSocket(scope, receive=receive, send=send)
await func(session)
return app |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.