code stringlengths 75 104k | code_sememe stringlengths 47 309k | token_type stringlengths 215 214k | code_dependency stringlengths 75 155k |
|---|---|---|---|
def DeleteAddress(self, script_hash):
"""
Deletes an address from the wallet (includes watch-only addresses).
Args:
script_hash (UInt160): a bytearray (len 20) representing the public key.
Returns:
tuple:
bool: True if address removed, False otherwise.
list: a list of any ``neo.Wallet.Coin`` objects to be removed from the wallet.
"""
coin_keys_toremove = []
coins_to_remove = []
for key, coinref in self._coins.items():
if coinref.Output.ScriptHash.ToBytes() == script_hash.ToBytes():
coin_keys_toremove.append(key)
coins_to_remove.append(coinref)
for k in coin_keys_toremove:
del self._coins[k]
ok = False
if script_hash.ToBytes() in self._contracts.keys():
ok = True
del self._contracts[script_hash.ToBytes()]
elif script_hash in self._watch_only:
ok = True
self._watch_only.remove(script_hash)
return ok, coins_to_remove | def function[DeleteAddress, parameter[self, script_hash]]:
constant[
Deletes an address from the wallet (includes watch-only addresses).
Args:
script_hash (UInt160): a bytearray (len 20) representing the public key.
Returns:
tuple:
bool: True if address removed, False otherwise.
list: a list of any ``neo.Wallet.Coin`` objects to be removed from the wallet.
]
variable[coin_keys_toremove] assign[=] list[[]]
variable[coins_to_remove] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da204623a90>, <ast.Name object at 0x7da204622b00>]]] in starred[call[name[self]._coins.items, parameter[]]] begin[:]
if compare[call[name[coinref].Output.ScriptHash.ToBytes, parameter[]] equal[==] call[name[script_hash].ToBytes, parameter[]]] begin[:]
call[name[coin_keys_toremove].append, parameter[name[key]]]
call[name[coins_to_remove].append, parameter[name[coinref]]]
for taget[name[k]] in starred[name[coin_keys_toremove]] begin[:]
<ast.Delete object at 0x7da18dc9beb0>
variable[ok] assign[=] constant[False]
if compare[call[name[script_hash].ToBytes, parameter[]] in call[name[self]._contracts.keys, parameter[]]] begin[:]
variable[ok] assign[=] constant[True]
<ast.Delete object at 0x7da18dc9a560>
return[tuple[[<ast.Name object at 0x7da18dc98610>, <ast.Name object at 0x7da18dc99db0>]]] | keyword[def] identifier[DeleteAddress] ( identifier[self] , identifier[script_hash] ):
literal[string]
identifier[coin_keys_toremove] =[]
identifier[coins_to_remove] =[]
keyword[for] identifier[key] , identifier[coinref] keyword[in] identifier[self] . identifier[_coins] . identifier[items] ():
keyword[if] identifier[coinref] . identifier[Output] . identifier[ScriptHash] . identifier[ToBytes] ()== identifier[script_hash] . identifier[ToBytes] ():
identifier[coin_keys_toremove] . identifier[append] ( identifier[key] )
identifier[coins_to_remove] . identifier[append] ( identifier[coinref] )
keyword[for] identifier[k] keyword[in] identifier[coin_keys_toremove] :
keyword[del] identifier[self] . identifier[_coins] [ identifier[k] ]
identifier[ok] = keyword[False]
keyword[if] identifier[script_hash] . identifier[ToBytes] () keyword[in] identifier[self] . identifier[_contracts] . identifier[keys] ():
identifier[ok] = keyword[True]
keyword[del] identifier[self] . identifier[_contracts] [ identifier[script_hash] . identifier[ToBytes] ()]
keyword[elif] identifier[script_hash] keyword[in] identifier[self] . identifier[_watch_only] :
identifier[ok] = keyword[True]
identifier[self] . identifier[_watch_only] . identifier[remove] ( identifier[script_hash] )
keyword[return] identifier[ok] , identifier[coins_to_remove] | def DeleteAddress(self, script_hash):
"""
Deletes an address from the wallet (includes watch-only addresses).
Args:
script_hash (UInt160): a bytearray (len 20) representing the public key.
Returns:
tuple:
bool: True if address removed, False otherwise.
list: a list of any ``neo.Wallet.Coin`` objects to be removed from the wallet.
"""
coin_keys_toremove = []
coins_to_remove = []
for (key, coinref) in self._coins.items():
if coinref.Output.ScriptHash.ToBytes() == script_hash.ToBytes():
coin_keys_toremove.append(key)
coins_to_remove.append(coinref) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
for k in coin_keys_toremove:
del self._coins[k] # depends on [control=['for'], data=['k']]
ok = False
if script_hash.ToBytes() in self._contracts.keys():
ok = True
del self._contracts[script_hash.ToBytes()] # depends on [control=['if'], data=[]]
elif script_hash in self._watch_only:
ok = True
self._watch_only.remove(script_hash) # depends on [control=['if'], data=['script_hash']]
return (ok, coins_to_remove) |
def empty(self, duration):
'''Create an empty jams.Annotation for this task.
This method should be overridden by derived classes.
Parameters
----------
duration : int >= 0
Duration of the annotation
'''
return jams.Annotation(namespace=self.namespace, time=0, duration=0) | def function[empty, parameter[self, duration]]:
constant[Create an empty jams.Annotation for this task.
This method should be overridden by derived classes.
Parameters
----------
duration : int >= 0
Duration of the annotation
]
return[call[name[jams].Annotation, parameter[]]] | keyword[def] identifier[empty] ( identifier[self] , identifier[duration] ):
literal[string]
keyword[return] identifier[jams] . identifier[Annotation] ( identifier[namespace] = identifier[self] . identifier[namespace] , identifier[time] = literal[int] , identifier[duration] = literal[int] ) | def empty(self, duration):
"""Create an empty jams.Annotation for this task.
This method should be overridden by derived classes.
Parameters
----------
duration : int >= 0
Duration of the annotation
"""
return jams.Annotation(namespace=self.namespace, time=0, duration=0) |
def extend(self, other):
"""
Appends the segmentlists from other to the corresponding
segmentlists in self, adding new segmentslists to self as
needed.
"""
for key, value in other.iteritems():
if key not in self:
self[key] = _shallowcopy(value)
else:
self[key].extend(value) | def function[extend, parameter[self, other]]:
constant[
Appends the segmentlists from other to the corresponding
segmentlists in self, adding new segmentslists to self as
needed.
]
for taget[tuple[[<ast.Name object at 0x7da18bccaa40>, <ast.Name object at 0x7da18bcc83d0>]]] in starred[call[name[other].iteritems, parameter[]]] begin[:]
if compare[name[key] <ast.NotIn object at 0x7da2590d7190> name[self]] begin[:]
call[name[self]][name[key]] assign[=] call[name[_shallowcopy], parameter[name[value]]] | keyword[def] identifier[extend] ( identifier[self] , identifier[other] ):
literal[string]
keyword[for] identifier[key] , identifier[value] keyword[in] identifier[other] . identifier[iteritems] ():
keyword[if] identifier[key] keyword[not] keyword[in] identifier[self] :
identifier[self] [ identifier[key] ]= identifier[_shallowcopy] ( identifier[value] )
keyword[else] :
identifier[self] [ identifier[key] ]. identifier[extend] ( identifier[value] ) | def extend(self, other):
"""
Appends the segmentlists from other to the corresponding
segmentlists in self, adding new segmentslists to self as
needed.
"""
for (key, value) in other.iteritems():
if key not in self:
self[key] = _shallowcopy(value) # depends on [control=['if'], data=['key', 'self']]
else:
self[key].extend(value) # depends on [control=['for'], data=[]] |
def get_obj_module(qualname):
"""Get a module/class/attribute and its original module by qualname"""
modname = qualname
classname = None
attrname = None
while modname not in sys.modules:
attrname = classname
modname, classname = modname.rsplit('.', 1)
# retrieve object and find original module name
if classname:
cls = getattr(sys.modules[modname], classname)
modname = cls.__module__
obj = getattr(cls, attrname) if attrname else cls
else:
obj = None
return obj, sys.modules[modname] | def function[get_obj_module, parameter[qualname]]:
constant[Get a module/class/attribute and its original module by qualname]
variable[modname] assign[=] name[qualname]
variable[classname] assign[=] constant[None]
variable[attrname] assign[=] constant[None]
while compare[name[modname] <ast.NotIn object at 0x7da2590d7190> name[sys].modules] begin[:]
variable[attrname] assign[=] name[classname]
<ast.Tuple object at 0x7da20e9b0760> assign[=] call[name[modname].rsplit, parameter[constant[.], constant[1]]]
if name[classname] begin[:]
variable[cls] assign[=] call[name[getattr], parameter[call[name[sys].modules][name[modname]], name[classname]]]
variable[modname] assign[=] name[cls].__module__
variable[obj] assign[=] <ast.IfExp object at 0x7da1b0e2d780>
return[tuple[[<ast.Name object at 0x7da1b0e2ded0>, <ast.Subscript object at 0x7da1b0e2e890>]]] | keyword[def] identifier[get_obj_module] ( identifier[qualname] ):
literal[string]
identifier[modname] = identifier[qualname]
identifier[classname] = keyword[None]
identifier[attrname] = keyword[None]
keyword[while] identifier[modname] keyword[not] keyword[in] identifier[sys] . identifier[modules] :
identifier[attrname] = identifier[classname]
identifier[modname] , identifier[classname] = identifier[modname] . identifier[rsplit] ( literal[string] , literal[int] )
keyword[if] identifier[classname] :
identifier[cls] = identifier[getattr] ( identifier[sys] . identifier[modules] [ identifier[modname] ], identifier[classname] )
identifier[modname] = identifier[cls] . identifier[__module__]
identifier[obj] = identifier[getattr] ( identifier[cls] , identifier[attrname] ) keyword[if] identifier[attrname] keyword[else] identifier[cls]
keyword[else] :
identifier[obj] = keyword[None]
keyword[return] identifier[obj] , identifier[sys] . identifier[modules] [ identifier[modname] ] | def get_obj_module(qualname):
"""Get a module/class/attribute and its original module by qualname"""
modname = qualname
classname = None
attrname = None
while modname not in sys.modules:
attrname = classname
(modname, classname) = modname.rsplit('.', 1) # depends on [control=['while'], data=['modname']]
# retrieve object and find original module name
if classname:
cls = getattr(sys.modules[modname], classname)
modname = cls.__module__
obj = getattr(cls, attrname) if attrname else cls # depends on [control=['if'], data=[]]
else:
obj = None
return (obj, sys.modules[modname]) |
def cli(ctx, group_id, new_name):
"""Update the name of a group
Output:
a dictionary containing group information
"""
return ctx.gi.groups.update_group(group_id, new_name) | def function[cli, parameter[ctx, group_id, new_name]]:
constant[Update the name of a group
Output:
a dictionary containing group information
]
return[call[name[ctx].gi.groups.update_group, parameter[name[group_id], name[new_name]]]] | keyword[def] identifier[cli] ( identifier[ctx] , identifier[group_id] , identifier[new_name] ):
literal[string]
keyword[return] identifier[ctx] . identifier[gi] . identifier[groups] . identifier[update_group] ( identifier[group_id] , identifier[new_name] ) | def cli(ctx, group_id, new_name):
"""Update the name of a group
Output:
a dictionary containing group information
"""
return ctx.gi.groups.update_group(group_id, new_name) |
def dump(obj, fp):
'''Serialize an object representing the ARFF document to a given file-like
object.
:param obj: a dictionary.
:param fp: a file-like object.
'''
encoder = ArffEncoder()
generator = encoder.iter_encode(obj)
last_row = next(generator)
for row in generator:
fp.write(last_row + u'\n')
last_row = row
fp.write(last_row)
return fp | def function[dump, parameter[obj, fp]]:
constant[Serialize an object representing the ARFF document to a given file-like
object.
:param obj: a dictionary.
:param fp: a file-like object.
]
variable[encoder] assign[=] call[name[ArffEncoder], parameter[]]
variable[generator] assign[=] call[name[encoder].iter_encode, parameter[name[obj]]]
variable[last_row] assign[=] call[name[next], parameter[name[generator]]]
for taget[name[row]] in starred[name[generator]] begin[:]
call[name[fp].write, parameter[binary_operation[name[last_row] + constant[
]]]]
variable[last_row] assign[=] name[row]
call[name[fp].write, parameter[name[last_row]]]
return[name[fp]] | keyword[def] identifier[dump] ( identifier[obj] , identifier[fp] ):
literal[string]
identifier[encoder] = identifier[ArffEncoder] ()
identifier[generator] = identifier[encoder] . identifier[iter_encode] ( identifier[obj] )
identifier[last_row] = identifier[next] ( identifier[generator] )
keyword[for] identifier[row] keyword[in] identifier[generator] :
identifier[fp] . identifier[write] ( identifier[last_row] + literal[string] )
identifier[last_row] = identifier[row]
identifier[fp] . identifier[write] ( identifier[last_row] )
keyword[return] identifier[fp] | def dump(obj, fp):
"""Serialize an object representing the ARFF document to a given file-like
object.
:param obj: a dictionary.
:param fp: a file-like object.
"""
encoder = ArffEncoder()
generator = encoder.iter_encode(obj)
last_row = next(generator)
for row in generator:
fp.write(last_row + u'\n')
last_row = row # depends on [control=['for'], data=['row']]
fp.write(last_row)
return fp |
def _init_metadata(self, **kwargs):
"""Initialize form metadata"""
osid_objects.OsidObjectForm._init_metadata(self, **kwargs)
self._cognitive_process_default = self._mdata['cognitive_process']['default_id_values'][0]
self._assessment_default = self._mdata['assessment']['default_id_values'][0]
self._knowledge_category_default = self._mdata['knowledge_category']['default_id_values'][0] | def function[_init_metadata, parameter[self]]:
constant[Initialize form metadata]
call[name[osid_objects].OsidObjectForm._init_metadata, parameter[name[self]]]
name[self]._cognitive_process_default assign[=] call[call[call[name[self]._mdata][constant[cognitive_process]]][constant[default_id_values]]][constant[0]]
name[self]._assessment_default assign[=] call[call[call[name[self]._mdata][constant[assessment]]][constant[default_id_values]]][constant[0]]
name[self]._knowledge_category_default assign[=] call[call[call[name[self]._mdata][constant[knowledge_category]]][constant[default_id_values]]][constant[0]] | keyword[def] identifier[_init_metadata] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[osid_objects] . identifier[OsidObjectForm] . identifier[_init_metadata] ( identifier[self] ,** identifier[kwargs] )
identifier[self] . identifier[_cognitive_process_default] = identifier[self] . identifier[_mdata] [ literal[string] ][ literal[string] ][ literal[int] ]
identifier[self] . identifier[_assessment_default] = identifier[self] . identifier[_mdata] [ literal[string] ][ literal[string] ][ literal[int] ]
identifier[self] . identifier[_knowledge_category_default] = identifier[self] . identifier[_mdata] [ literal[string] ][ literal[string] ][ literal[int] ] | def _init_metadata(self, **kwargs):
"""Initialize form metadata"""
osid_objects.OsidObjectForm._init_metadata(self, **kwargs)
self._cognitive_process_default = self._mdata['cognitive_process']['default_id_values'][0]
self._assessment_default = self._mdata['assessment']['default_id_values'][0]
self._knowledge_category_default = self._mdata['knowledge_category']['default_id_values'][0] |
def datetime_to_millis(x):
"""Convert a `datetime.datetime` to milliseconds since the epoch"""
if x is None:
return None
if hasattr(x, 'timestamp'):
# Python >= 3.3
secs = x.timestamp()
elif x.tzinfo is None:
# Timezone naive
secs = (time.mktime((x.year, x.month, x.day,
x.hour, x.minute, x.second,
-1, -1, -1)) + x.microsecond / 1e6)
else:
# Timezone aware
secs = (x - _EPOCH).total_seconds()
return int(secs * 1000) | def function[datetime_to_millis, parameter[x]]:
constant[Convert a `datetime.datetime` to milliseconds since the epoch]
if compare[name[x] is constant[None]] begin[:]
return[constant[None]]
if call[name[hasattr], parameter[name[x], constant[timestamp]]] begin[:]
variable[secs] assign[=] call[name[x].timestamp, parameter[]]
return[call[name[int], parameter[binary_operation[name[secs] * constant[1000]]]]] | keyword[def] identifier[datetime_to_millis] ( identifier[x] ):
literal[string]
keyword[if] identifier[x] keyword[is] keyword[None] :
keyword[return] keyword[None]
keyword[if] identifier[hasattr] ( identifier[x] , literal[string] ):
identifier[secs] = identifier[x] . identifier[timestamp] ()
keyword[elif] identifier[x] . identifier[tzinfo] keyword[is] keyword[None] :
identifier[secs] =( identifier[time] . identifier[mktime] (( identifier[x] . identifier[year] , identifier[x] . identifier[month] , identifier[x] . identifier[day] ,
identifier[x] . identifier[hour] , identifier[x] . identifier[minute] , identifier[x] . identifier[second] ,
- literal[int] ,- literal[int] ,- literal[int] ))+ identifier[x] . identifier[microsecond] / literal[int] )
keyword[else] :
identifier[secs] =( identifier[x] - identifier[_EPOCH] ). identifier[total_seconds] ()
keyword[return] identifier[int] ( identifier[secs] * literal[int] ) | def datetime_to_millis(x):
"""Convert a `datetime.datetime` to milliseconds since the epoch"""
if x is None:
return None # depends on [control=['if'], data=[]]
if hasattr(x, 'timestamp'):
# Python >= 3.3
secs = x.timestamp() # depends on [control=['if'], data=[]]
elif x.tzinfo is None:
# Timezone naive
secs = time.mktime((x.year, x.month, x.day, x.hour, x.minute, x.second, -1, -1, -1)) + x.microsecond / 1000000.0 # depends on [control=['if'], data=[]]
else:
# Timezone aware
secs = (x - _EPOCH).total_seconds()
return int(secs * 1000) |
def disk_free(path):
"""Return free bytes on partition holding `path`."""
stats = os.statvfs(path)
return stats.f_bavail * stats.f_frsize | def function[disk_free, parameter[path]]:
constant[Return free bytes on partition holding `path`.]
variable[stats] assign[=] call[name[os].statvfs, parameter[name[path]]]
return[binary_operation[name[stats].f_bavail * name[stats].f_frsize]] | keyword[def] identifier[disk_free] ( identifier[path] ):
literal[string]
identifier[stats] = identifier[os] . identifier[statvfs] ( identifier[path] )
keyword[return] identifier[stats] . identifier[f_bavail] * identifier[stats] . identifier[f_frsize] | def disk_free(path):
"""Return free bytes on partition holding `path`."""
stats = os.statvfs(path)
return stats.f_bavail * stats.f_frsize |
def _match_replace_binary(cls, ops: list) -> list:
"""Reduce list of `ops`"""
n = len(ops)
if n <= 1:
return ops
ops_left = ops[:n // 2]
ops_right = ops[n // 2:]
return _match_replace_binary_combine(
cls,
_match_replace_binary(cls, ops_left),
_match_replace_binary(cls, ops_right)) | def function[_match_replace_binary, parameter[cls, ops]]:
constant[Reduce list of `ops`]
variable[n] assign[=] call[name[len], parameter[name[ops]]]
if compare[name[n] less_or_equal[<=] constant[1]] begin[:]
return[name[ops]]
variable[ops_left] assign[=] call[name[ops]][<ast.Slice object at 0x7da20c795e10>]
variable[ops_right] assign[=] call[name[ops]][<ast.Slice object at 0x7da20c796200>]
return[call[name[_match_replace_binary_combine], parameter[name[cls], call[name[_match_replace_binary], parameter[name[cls], name[ops_left]]], call[name[_match_replace_binary], parameter[name[cls], name[ops_right]]]]]] | keyword[def] identifier[_match_replace_binary] ( identifier[cls] , identifier[ops] : identifier[list] )-> identifier[list] :
literal[string]
identifier[n] = identifier[len] ( identifier[ops] )
keyword[if] identifier[n] <= literal[int] :
keyword[return] identifier[ops]
identifier[ops_left] = identifier[ops] [: identifier[n] // literal[int] ]
identifier[ops_right] = identifier[ops] [ identifier[n] // literal[int] :]
keyword[return] identifier[_match_replace_binary_combine] (
identifier[cls] ,
identifier[_match_replace_binary] ( identifier[cls] , identifier[ops_left] ),
identifier[_match_replace_binary] ( identifier[cls] , identifier[ops_right] )) | def _match_replace_binary(cls, ops: list) -> list:
"""Reduce list of `ops`"""
n = len(ops)
if n <= 1:
return ops # depends on [control=['if'], data=[]]
ops_left = ops[:n // 2]
ops_right = ops[n // 2:]
return _match_replace_binary_combine(cls, _match_replace_binary(cls, ops_left), _match_replace_binary(cls, ops_right)) |
def node_has_namespace(node: BaseEntity, namespace: str) -> bool:
"""Pass for nodes that have the given namespace."""
ns = node.get(NAMESPACE)
return ns is not None and ns == namespace | def function[node_has_namespace, parameter[node, namespace]]:
constant[Pass for nodes that have the given namespace.]
variable[ns] assign[=] call[name[node].get, parameter[name[NAMESPACE]]]
return[<ast.BoolOp object at 0x7da18dc04970>] | keyword[def] identifier[node_has_namespace] ( identifier[node] : identifier[BaseEntity] , identifier[namespace] : identifier[str] )-> identifier[bool] :
literal[string]
identifier[ns] = identifier[node] . identifier[get] ( identifier[NAMESPACE] )
keyword[return] identifier[ns] keyword[is] keyword[not] keyword[None] keyword[and] identifier[ns] == identifier[namespace] | def node_has_namespace(node: BaseEntity, namespace: str) -> bool:
"""Pass for nodes that have the given namespace."""
ns = node.get(NAMESPACE)
return ns is not None and ns == namespace |
def _script_load(script):
'''
Borrowed/modified from my book, Redis in Action:
https://github.com/josiahcarlson/redis-in-action/blob/master/python/ch11_listing_source.py
Used for Lua scripting support when writing against Redis 2.6+ to allow
for multiple unique columns per model.
'''
script = script.encode('utf-8') if isinstance(script, six.text_type) else script
sha = [None, sha1(script).hexdigest()]
def call(conn, keys=[], args=[], force_eval=False):
keys = tuple(keys)
args = tuple(args)
if not force_eval:
if not sha[0]:
try:
# executing the script implicitly loads it
return conn.execute_command(
'EVAL', script, len(keys), *(keys + args))
finally:
# thread safe by re-using the GIL ;)
del sha[:-1]
try:
return conn.execute_command(
"EVALSHA", sha[0], len(keys), *(keys+args))
except redis.exceptions.ResponseError as msg:
if not any(msg.args[0].startswith(nsm) for nsm in NO_SCRIPT_MESSAGES):
raise
return conn.execute_command(
"EVAL", script, len(keys), *(keys+args))
return call | def function[_script_load, parameter[script]]:
constant[
Borrowed/modified from my book, Redis in Action:
https://github.com/josiahcarlson/redis-in-action/blob/master/python/ch11_listing_source.py
Used for Lua scripting support when writing against Redis 2.6+ to allow
for multiple unique columns per model.
]
variable[script] assign[=] <ast.IfExp object at 0x7da2041db310>
variable[sha] assign[=] list[[<ast.Constant object at 0x7da2041d8670>, <ast.Call object at 0x7da2041d8310>]]
def function[call, parameter[conn, keys, args, force_eval]]:
variable[keys] assign[=] call[name[tuple], parameter[name[keys]]]
variable[args] assign[=] call[name[tuple], parameter[name[args]]]
if <ast.UnaryOp object at 0x7da2041dbf40> begin[:]
if <ast.UnaryOp object at 0x7da2041d80a0> begin[:]
<ast.Try object at 0x7da2041d9ed0>
<ast.Try object at 0x7da2041dbe20>
return[call[name[conn].execute_command, parameter[constant[EVAL], name[script], call[name[len], parameter[name[keys]]], <ast.Starred object at 0x7da1b26ad2a0>]]]
return[name[call]] | keyword[def] identifier[_script_load] ( identifier[script] ):
literal[string]
identifier[script] = identifier[script] . identifier[encode] ( literal[string] ) keyword[if] identifier[isinstance] ( identifier[script] , identifier[six] . identifier[text_type] ) keyword[else] identifier[script]
identifier[sha] =[ keyword[None] , identifier[sha1] ( identifier[script] ). identifier[hexdigest] ()]
keyword[def] identifier[call] ( identifier[conn] , identifier[keys] =[], identifier[args] =[], identifier[force_eval] = keyword[False] ):
identifier[keys] = identifier[tuple] ( identifier[keys] )
identifier[args] = identifier[tuple] ( identifier[args] )
keyword[if] keyword[not] identifier[force_eval] :
keyword[if] keyword[not] identifier[sha] [ literal[int] ]:
keyword[try] :
keyword[return] identifier[conn] . identifier[execute_command] (
literal[string] , identifier[script] , identifier[len] ( identifier[keys] ),*( identifier[keys] + identifier[args] ))
keyword[finally] :
keyword[del] identifier[sha] [:- literal[int] ]
keyword[try] :
keyword[return] identifier[conn] . identifier[execute_command] (
literal[string] , identifier[sha] [ literal[int] ], identifier[len] ( identifier[keys] ),*( identifier[keys] + identifier[args] ))
keyword[except] identifier[redis] . identifier[exceptions] . identifier[ResponseError] keyword[as] identifier[msg] :
keyword[if] keyword[not] identifier[any] ( identifier[msg] . identifier[args] [ literal[int] ]. identifier[startswith] ( identifier[nsm] ) keyword[for] identifier[nsm] keyword[in] identifier[NO_SCRIPT_MESSAGES] ):
keyword[raise]
keyword[return] identifier[conn] . identifier[execute_command] (
literal[string] , identifier[script] , identifier[len] ( identifier[keys] ),*( identifier[keys] + identifier[args] ))
keyword[return] identifier[call] | def _script_load(script):
"""
Borrowed/modified from my book, Redis in Action:
https://github.com/josiahcarlson/redis-in-action/blob/master/python/ch11_listing_source.py
Used for Lua scripting support when writing against Redis 2.6+ to allow
for multiple unique columns per model.
"""
script = script.encode('utf-8') if isinstance(script, six.text_type) else script
sha = [None, sha1(script).hexdigest()]
def call(conn, keys=[], args=[], force_eval=False):
keys = tuple(keys)
args = tuple(args)
if not force_eval:
if not sha[0]:
try:
# executing the script implicitly loads it
return conn.execute_command('EVAL', script, len(keys), *keys + args) # depends on [control=['try'], data=[]]
finally:
# thread safe by re-using the GIL ;)
del sha[:-1] # depends on [control=['if'], data=[]]
try:
return conn.execute_command('EVALSHA', sha[0], len(keys), *keys + args) # depends on [control=['try'], data=[]]
except redis.exceptions.ResponseError as msg:
if not any((msg.args[0].startswith(nsm) for nsm in NO_SCRIPT_MESSAGES)):
raise # depends on [control=['if'], data=[]] # depends on [control=['except'], data=['msg']] # depends on [control=['if'], data=[]]
return conn.execute_command('EVAL', script, len(keys), *keys + args)
return call |
def add_alignment_patterns(matrix, version):
"""\
Adds the adjustment patterns to the matrix. For versions < 2 this is a
no-op.
ISO/IEC 18004:2015(E) -- 6.3.6 Alignment patterns (page 17)
ISO/IEC 18004:2015(E) -- Annex E Position of alignment patterns (page 83)
"""
if version < 2:
return
matrix_size = len(matrix)
positions = consts.ALIGNMENT_POS[version - 2]
for pos_x in positions:
for pos_y in positions:
# Finder pattern?
if pos_x - 6 == 0 and (pos_y - 6 == 0 or pos_y + 7 == matrix_size) \
or pos_y - 6 == 0 and pos_x + 7 == matrix_size:
continue
row, col = pos_x - 2, pos_y - 2
for r in range(5):
matrix[row + r][col:col+5] = _ALIGNMENT_PATTERN[r] | def function[add_alignment_patterns, parameter[matrix, version]]:
constant[ Adds the adjustment patterns to the matrix. For versions < 2 this is a
no-op.
ISO/IEC 18004:2015(E) -- 6.3.6 Alignment patterns (page 17)
ISO/IEC 18004:2015(E) -- Annex E Position of alignment patterns (page 83)
]
if compare[name[version] less[<] constant[2]] begin[:]
return[None]
variable[matrix_size] assign[=] call[name[len], parameter[name[matrix]]]
variable[positions] assign[=] call[name[consts].ALIGNMENT_POS][binary_operation[name[version] - constant[2]]]
for taget[name[pos_x]] in starred[name[positions]] begin[:]
for taget[name[pos_y]] in starred[name[positions]] begin[:]
if <ast.BoolOp object at 0x7da1b0cb1780> begin[:]
continue
<ast.Tuple object at 0x7da1b0cb3e50> assign[=] tuple[[<ast.BinOp object at 0x7da1b0cb3cd0>, <ast.BinOp object at 0x7da1b0cb34c0>]]
for taget[name[r]] in starred[call[name[range], parameter[constant[5]]]] begin[:]
call[call[name[matrix]][binary_operation[name[row] + name[r]]]][<ast.Slice object at 0x7da1b0cb37f0>] assign[=] call[name[_ALIGNMENT_PATTERN]][name[r]] | keyword[def] identifier[add_alignment_patterns] ( identifier[matrix] , identifier[version] ):
literal[string]
keyword[if] identifier[version] < literal[int] :
keyword[return]
identifier[matrix_size] = identifier[len] ( identifier[matrix] )
identifier[positions] = identifier[consts] . identifier[ALIGNMENT_POS] [ identifier[version] - literal[int] ]
keyword[for] identifier[pos_x] keyword[in] identifier[positions] :
keyword[for] identifier[pos_y] keyword[in] identifier[positions] :
keyword[if] identifier[pos_x] - literal[int] == literal[int] keyword[and] ( identifier[pos_y] - literal[int] == literal[int] keyword[or] identifier[pos_y] + literal[int] == identifier[matrix_size] ) keyword[or] identifier[pos_y] - literal[int] == literal[int] keyword[and] identifier[pos_x] + literal[int] == identifier[matrix_size] :
keyword[continue]
identifier[row] , identifier[col] = identifier[pos_x] - literal[int] , identifier[pos_y] - literal[int]
keyword[for] identifier[r] keyword[in] identifier[range] ( literal[int] ):
identifier[matrix] [ identifier[row] + identifier[r] ][ identifier[col] : identifier[col] + literal[int] ]= identifier[_ALIGNMENT_PATTERN] [ identifier[r] ] | def add_alignment_patterns(matrix, version):
""" Adds the adjustment patterns to the matrix. For versions < 2 this is a
no-op.
ISO/IEC 18004:2015(E) -- 6.3.6 Alignment patterns (page 17)
ISO/IEC 18004:2015(E) -- Annex E Position of alignment patterns (page 83)
"""
if version < 2:
return # depends on [control=['if'], data=[]]
matrix_size = len(matrix)
positions = consts.ALIGNMENT_POS[version - 2]
for pos_x in positions:
for pos_y in positions:
# Finder pattern?
if pos_x - 6 == 0 and (pos_y - 6 == 0 or pos_y + 7 == matrix_size) or (pos_y - 6 == 0 and pos_x + 7 == matrix_size):
continue # depends on [control=['if'], data=[]]
(row, col) = (pos_x - 2, pos_y - 2)
for r in range(5):
matrix[row + r][col:col + 5] = _ALIGNMENT_PATTERN[r] # depends on [control=['for'], data=['r']] # depends on [control=['for'], data=['pos_y']] # depends on [control=['for'], data=['pos_x']] |
def save_channels(self, checked=False, test_name=None):
"""Save channel groups to file."""
self.read_group_info()
if self.filename is not None:
filename = self.filename
elif self.parent.info.filename is not None:
filename = (splitext(self.parent.info.filename)[0] +
'_channels.json')
else:
filename = None
if test_name is None:
filename, _ = QFileDialog.getSaveFileName(self,
'Save Channels Montage',
filename,
'Channels File (*.json)')
else:
filename = test_name
if filename == '':
return
self.filename = filename
groups = deepcopy(self.groups)
for one_grp in groups:
one_grp['color'] = one_grp['color'].rgba()
with open(filename, 'w') as outfile:
dump(groups, outfile, indent=' ') | def function[save_channels, parameter[self, checked, test_name]]:
constant[Save channel groups to file.]
call[name[self].read_group_info, parameter[]]
if compare[name[self].filename is_not constant[None]] begin[:]
variable[filename] assign[=] name[self].filename
if compare[name[test_name] is constant[None]] begin[:]
<ast.Tuple object at 0x7da1b0e77fd0> assign[=] call[name[QFileDialog].getSaveFileName, parameter[name[self], constant[Save Channels Montage], name[filename], constant[Channels File (*.json)]]]
if compare[name[filename] equal[==] constant[]] begin[:]
return[None]
name[self].filename assign[=] name[filename]
variable[groups] assign[=] call[name[deepcopy], parameter[name[self].groups]]
for taget[name[one_grp]] in starred[name[groups]] begin[:]
call[name[one_grp]][constant[color]] assign[=] call[call[name[one_grp]][constant[color]].rgba, parameter[]]
with call[name[open], parameter[name[filename], constant[w]]] begin[:]
call[name[dump], parameter[name[groups], name[outfile]]] | keyword[def] identifier[save_channels] ( identifier[self] , identifier[checked] = keyword[False] , identifier[test_name] = keyword[None] ):
literal[string]
identifier[self] . identifier[read_group_info] ()
keyword[if] identifier[self] . identifier[filename] keyword[is] keyword[not] keyword[None] :
identifier[filename] = identifier[self] . identifier[filename]
keyword[elif] identifier[self] . identifier[parent] . identifier[info] . identifier[filename] keyword[is] keyword[not] keyword[None] :
identifier[filename] =( identifier[splitext] ( identifier[self] . identifier[parent] . identifier[info] . identifier[filename] )[ literal[int] ]+
literal[string] )
keyword[else] :
identifier[filename] = keyword[None]
keyword[if] identifier[test_name] keyword[is] keyword[None] :
identifier[filename] , identifier[_] = identifier[QFileDialog] . identifier[getSaveFileName] ( identifier[self] ,
literal[string] ,
identifier[filename] ,
literal[string] )
keyword[else] :
identifier[filename] = identifier[test_name]
keyword[if] identifier[filename] == literal[string] :
keyword[return]
identifier[self] . identifier[filename] = identifier[filename]
identifier[groups] = identifier[deepcopy] ( identifier[self] . identifier[groups] )
keyword[for] identifier[one_grp] keyword[in] identifier[groups] :
identifier[one_grp] [ literal[string] ]= identifier[one_grp] [ literal[string] ]. identifier[rgba] ()
keyword[with] identifier[open] ( identifier[filename] , literal[string] ) keyword[as] identifier[outfile] :
identifier[dump] ( identifier[groups] , identifier[outfile] , identifier[indent] = literal[string] ) | def save_channels(self, checked=False, test_name=None):
"""Save channel groups to file."""
self.read_group_info()
if self.filename is not None:
filename = self.filename # depends on [control=['if'], data=[]]
elif self.parent.info.filename is not None:
filename = splitext(self.parent.info.filename)[0] + '_channels.json' # depends on [control=['if'], data=[]]
else:
filename = None
if test_name is None:
(filename, _) = QFileDialog.getSaveFileName(self, 'Save Channels Montage', filename, 'Channels File (*.json)') # depends on [control=['if'], data=[]]
else:
filename = test_name
if filename == '':
return # depends on [control=['if'], data=[]]
self.filename = filename
groups = deepcopy(self.groups)
for one_grp in groups:
one_grp['color'] = one_grp['color'].rgba() # depends on [control=['for'], data=['one_grp']]
with open(filename, 'w') as outfile:
dump(groups, outfile, indent=' ') # depends on [control=['with'], data=['outfile']] |
def process_op(self, ns, raw):
    """Process a single operation read from the oplog.

    Dispatches on ``raw['op']``:
        "i"   insert
        "u"   update
        "d"   delete
        "c"   db command
        "db"  declares presence of a database
        "n"   no op

    Unknown op codes are logged and otherwise ignored.

    :param ns: namespace (``"<db>.<collection>"``) the entry applies to.
    :param raw: raw oplog document; must contain the ``'op'`` and ``'ts'`` keys.
    """
    # Compute the id of the document that will be altered
    # (meaningful for insert, update and delete entries).
    docid = self.__get_id(raw)
    op = raw['op']
    # The three document-level operations share the (ns, docid, raw)
    # signature, so dispatch them through a single table.
    doc_handlers = {'i': self.insert, 'u': self.update, 'd': self.delete}
    if op in doc_handlers:
        doc_handlers[op](ns=ns, docid=docid, raw=raw)
    elif op == 'c':
        self.command(ns=ns, raw=raw)
    elif op == 'db':
        self.db_declare(ns=ns, raw=raw)
    elif op == 'n':
        self.noop()
    else:
        # Lazy %-style args: the message is only formatted if actually emitted.
        logging.error("Unknown op: %r", op)
    # Save timestamp of the last processed oplog entry so replication
    # can resume from this point.
    self.ts = raw['ts']
constant[ Processes a single operation from the oplog.
Performs a switch by raw['op']:
"i" insert
"u" update
"d" delete
"c" db cmd
"db" declares presence of a database
"n" no op
]
variable[docid] assign[=] call[name[self].__get_id, parameter[name[raw]]]
variable[op] assign[=] call[name[raw]][constant[op]]
if compare[name[op] equal[==] constant[i]] begin[:]
call[name[self].insert, parameter[]]
name[self].ts assign[=] call[name[raw]][constant[ts]] | keyword[def] identifier[process_op] ( identifier[self] , identifier[ns] , identifier[raw] ):
literal[string]
identifier[docid] = identifier[self] . identifier[__get_id] ( identifier[raw] )
identifier[op] = identifier[raw] [ literal[string] ]
keyword[if] identifier[op] == literal[string] :
identifier[self] . identifier[insert] ( identifier[ns] = identifier[ns] , identifier[docid] = identifier[docid] , identifier[raw] = identifier[raw] )
keyword[elif] identifier[op] == literal[string] :
identifier[self] . identifier[update] ( identifier[ns] = identifier[ns] , identifier[docid] = identifier[docid] , identifier[raw] = identifier[raw] )
keyword[elif] identifier[op] == literal[string] :
identifier[self] . identifier[delete] ( identifier[ns] = identifier[ns] , identifier[docid] = identifier[docid] , identifier[raw] = identifier[raw] )
keyword[elif] identifier[op] == literal[string] :
identifier[self] . identifier[command] ( identifier[ns] = identifier[ns] , identifier[raw] = identifier[raw] )
keyword[elif] identifier[op] == literal[string] :
identifier[self] . identifier[db_declare] ( identifier[ns] = identifier[ns] , identifier[raw] = identifier[raw] )
keyword[elif] identifier[op] == literal[string] :
identifier[self] . identifier[noop] ()
keyword[else] :
identifier[logging] . identifier[error] ( literal[string] % identifier[op] )
identifier[self] . identifier[ts] = identifier[raw] [ literal[string] ] | def process_op(self, ns, raw):
""" Processes a single operation from the oplog.
Performs a switch by raw['op']:
"i" insert
"u" update
"d" delete
"c" db cmd
"db" declares presence of a database
"n" no op
"""
# Compute the document id of the document that will be altered
# (in case of insert, update or delete).
docid = self.__get_id(raw)
op = raw['op']
if op == 'i':
self.insert(ns=ns, docid=docid, raw=raw) # depends on [control=['if'], data=[]]
elif op == 'u':
self.update(ns=ns, docid=docid, raw=raw) # depends on [control=['if'], data=[]]
elif op == 'd':
self.delete(ns=ns, docid=docid, raw=raw) # depends on [control=['if'], data=[]]
elif op == 'c':
self.command(ns=ns, raw=raw) # depends on [control=['if'], data=[]]
elif op == 'db':
self.db_declare(ns=ns, raw=raw) # depends on [control=['if'], data=[]]
elif op == 'n':
self.noop() # depends on [control=['if'], data=[]]
else:
logging.error('Unknown op: %r' % op)
# Save timestamp of last processed oplog.
self.ts = raw['ts'] |
def get_error_details(self):
    # type: () -> Optional[Dict[str, Any]]
    """Return a dict describing the latest X server error (empty if none)."""
    details = {}  # type: Dict[str, Any]
    if ERROR.details:
        # Consume the error captured by the installed X error handler.
        details = {"xerror_details": ERROR.details}
        ERROR.details = None
        # Ask the X server for a human-readable message for the error code.
        text_buffer = ctypes.create_string_buffer(1024)
        error_code = details.get("xerror_details", {}).get("error_code", 0)
        self.xlib.XGetErrorText(
            MSS.display, error_code, text_buffer, len(text_buffer))
        message = text_buffer.value.decode("utf-8")
        if message != "0":
            details["xerror"] = message
    return details
return details | def function[get_error_details, parameter[self]]:
constant[ Get more information about the latest X server error. ]
variable[details] assign[=] dictionary[[], []]
if name[ERROR].details begin[:]
variable[details] assign[=] dictionary[[<ast.Constant object at 0x7da1b07951e0>], [<ast.Attribute object at 0x7da1b07968f0>]]
name[ERROR].details assign[=] constant[None]
variable[xserver_error] assign[=] call[name[ctypes].create_string_buffer, parameter[constant[1024]]]
call[name[self].xlib.XGetErrorText, parameter[name[MSS].display, call[call[name[details].get, parameter[constant[xerror_details], dictionary[[], []]]].get, parameter[constant[error_code], constant[0]]], name[xserver_error], call[name[len], parameter[name[xserver_error]]]]]
variable[xerror] assign[=] call[name[xserver_error].value.decode, parameter[constant[utf-8]]]
if compare[name[xerror] not_equal[!=] constant[0]] begin[:]
call[name[details]][constant[xerror]] assign[=] name[xerror]
return[name[details]] | keyword[def] identifier[get_error_details] ( identifier[self] ):
literal[string]
identifier[details] ={}
keyword[if] identifier[ERROR] . identifier[details] :
identifier[details] ={ literal[string] : identifier[ERROR] . identifier[details] }
identifier[ERROR] . identifier[details] = keyword[None]
identifier[xserver_error] = identifier[ctypes] . identifier[create_string_buffer] ( literal[int] )
identifier[self] . identifier[xlib] . identifier[XGetErrorText] (
identifier[MSS] . identifier[display] ,
identifier[details] . identifier[get] ( literal[string] ,{}). identifier[get] ( literal[string] , literal[int] ),
identifier[xserver_error] ,
identifier[len] ( identifier[xserver_error] ),
)
identifier[xerror] = identifier[xserver_error] . identifier[value] . identifier[decode] ( literal[string] )
keyword[if] identifier[xerror] != literal[string] :
identifier[details] [ literal[string] ]= identifier[xerror]
keyword[return] identifier[details] | def get_error_details(self):
# type: () -> Optional[Dict[str, Any]]
' Get more information about the latest X server error. '
details = {} # type: Dict[str, Any]
if ERROR.details:
details = {'xerror_details': ERROR.details}
ERROR.details = None
xserver_error = ctypes.create_string_buffer(1024)
self.xlib.XGetErrorText(MSS.display, details.get('xerror_details', {}).get('error_code', 0), xserver_error, len(xserver_error))
xerror = xserver_error.value.decode('utf-8')
if xerror != '0':
details['xerror'] = xerror # depends on [control=['if'], data=['xerror']] # depends on [control=['if'], data=[]]
return details |
def calculate_tile_shape_for_max_bytes(
        array_shape,
        array_itemsize,
        max_tile_bytes,
        max_tile_shape=None,
        sub_tile_shape=None,
        halo=None
):
    """
    Returns a tile shape :samp:`tile_shape`
    such that :samp:`numpy.product(tile_shape)*numpy.sum({array_itemsize}) <= {max_tile_bytes}`.
    Also, if :samp:`{max_tile_shape} is not None`
    then :samp:`numpy.all(tile_shape <= {max_tile_shape}) is True` and
    if :samp:`{sub_tile_shape} is not None`
    the :samp:`numpy.all((tile_shape % {sub_tile_shape}) == 0) is True`.

    :type array_shape: sequence of :obj:`int`
    :param array_shape: Shape of the array which is to be split into tiles.
    :type array_itemsize: :obj:`int`
    :param array_itemsize: The number of bytes per element of the array to be tiled.
    :type max_tile_bytes: :obj:`int`
    :param max_tile_bytes: The maximum number of bytes for the returned :samp:`tile_shape`.
    :type max_tile_shape: sequence of :obj:`int`
    :param max_tile_shape: Per axis maximum shapes for the returned :samp:`tile_shape`.
    :type sub_tile_shape: sequence of :obj:`int`
    :param sub_tile_shape: The returned :samp:`tile_shape` will be an even multiple
       of this sub-tile shape.
    :type halo: :obj:`int`, sequence of :obj:`int`, or :samp:`(len({array_shape}), 2)`
       shaped :obj:`numpy.ndarray`
    :param halo: How tiles are extended in each axis direction with *halo*
       elements. See :ref:`the-halo-parameter-examples` for meaning of :samp:`{halo}` values.
    :rtype: :obj:`numpy.ndarray`
    :return: A 1D array of shape :samp:`(len(array_shape),)` indicating a *tile shape*
       which will (approximately) uniformly divide the given :samp:`{array_shape}` into
       tiles (sub-arrays).

    Examples::

        >>> from array_split.split import calculate_tile_shape_for_max_bytes
        >>> calculate_tile_shape_for_max_bytes(
        ... array_shape=[512,],
        ... array_itemsize=1,
        ... max_tile_bytes=512
        ... )
        array([512])
        >>> calculate_tile_shape_for_max_bytes(
        ... array_shape=[512,],
        ... array_itemsize=2,  # Doubling the itemsize halves the tile size.
        ... max_tile_bytes=512
        ... )
        array([256])
        >>> calculate_tile_shape_for_max_bytes(
        ... array_shape=[512,],
        ... array_itemsize=1,
        ... max_tile_bytes=512-1  # tile shape will now be halved
        ... )
        array([256])
    """
    logger = _logging.getLogger(__name__ + ".calculate_tile_shape_for_max_bytes")
    logger.debug("calculate_tile_shape_for_max_bytes: enter:")
    logger.debug("array_shape=%s", array_shape)
    logger.debug("array_itemsize=%s", array_itemsize)
    logger.debug("max_tile_bytes=%s", max_tile_bytes)
    logger.debug("max_tile_shape=%s", max_tile_shape)
    logger.debug("sub_tile_shape=%s", sub_tile_shape)
    logger.debug("halo=%s", halo)
    # Normalise arguments: shapes become int64 arrays, the (possibly
    # per-element) itemsize collapses to a single byte count.
    array_shape = _np.array(array_shape, dtype="int64")
    array_itemsize = _np.sum(array_itemsize, dtype="int64")
    if max_tile_shape is None:
        max_tile_shape = _np.array(array_shape, copy=True)
    # A tile can never exceed the array itself, so clamp per axis.
    max_tile_shape = \
        _np.array(_np.minimum(max_tile_shape, array_shape), copy=True, dtype=array_shape.dtype)
    if sub_tile_shape is None:
        # Default sub-tile is a single element per axis (no multiple constraint).
        sub_tile_shape = _np.ones((len(array_shape),), dtype="int64")
    sub_tile_shape = _np.array(sub_tile_shape, dtype="int64")
    # Canonicalise halo to the (ndim, 2) lower/upper-extent form.
    halo = convert_halo_to_array_form(halo=halo, ndim=len(array_shape))
    if _np.any(array_shape < sub_tile_shape):
        raise ValueError(
            "Got array_shape=%s element less than corresponding sub_tile_shape=%s element."
            %
            (
                array_shape,
                sub_tile_shape
            )
        )
    logger.debug("max_tile_shape=%s", max_tile_shape)
    logger.debug("sub_tile_shape=%s", sub_tile_shape)
    logger.debug("halo=%s", halo)
    # Number of sub-tiles (per axis) needed to cover the array: ceil division.
    array_sub_tile_split_shape = ((array_shape - 1) // sub_tile_shape) + 1
    # Initial tile size, measured in whole sub-tiles per axis.
    tile_sub_tile_split_shape = array_shape // sub_tile_shape
    if len(tile_sub_tile_split_shape) <= 1:
        # 1D case: solve directly for the number of sub-tiles that fits
        # the byte budget after accounting for the halo elements.
        tile_sub_tile_split_shape[0] = \
            int(_np.floor(
                (
                    (max_tile_bytes / float(array_itemsize))
                    -
                    _np.sum(halo)
                )
                /
                float(sub_tile_shape[0])
            ))
    # Respect the per-axis maximum tile shape (in sub-tile units).
    tile_sub_tile_split_shape = \
        _np.minimum(
            tile_sub_tile_split_shape,
            max_tile_shape // sub_tile_shape
        )
    logger.debug("Pre loop: tile_sub_tile_split_shape=%s", tile_sub_tile_split_shape)
    # Shrink the tile one axis at a time (starting at axis 0) while the
    # halo-extended tile still exceeds the byte budget.
    # NOTE(review): _np.product is deprecated in modern NumPy (use _np.prod).
    current_axis = 0
    while (
        (current_axis < len(tile_sub_tile_split_shape))
        and
        (
            (
                _np.product(tile_sub_tile_split_shape * sub_tile_shape + _np.sum(halo, axis=1))
                *
                array_itemsize
            )
            >
            max_tile_bytes
        )
    ):
        if current_axis < (len(tile_sub_tile_split_shape) - 1):
            # Not the last axis: first collapse this axis to one sub-tile,
            # then grow it back as far as the budget allows (at least 1).
            tile_sub_tile_split_shape[current_axis] = 1
            tile_sub_tile_split_shape[current_axis] = \
                (
                    max_tile_bytes
                    //
                    (
                        _np.product(
                            tile_sub_tile_split_shape *
                            sub_tile_shape +
                            _np.sum(
                                halo,
                                axis=1))
                        *
                        array_itemsize
                    )
                )
            tile_sub_tile_split_shape[current_axis] = \
                max([1, tile_sub_tile_split_shape[current_axis]])
        else:
            # Last axis: all earlier axes are already minimal, so solve for
            # this axis exactly, folding earlier axes' halo into their extents.
            sub_tile_shape_h = sub_tile_shape.copy()
            sub_tile_shape_h[0:current_axis] += _np.sum(halo[0:current_axis, :], axis=1)
            tile_sub_tile_split_shape[current_axis] = \
                int(_np.floor(
                    (
                        (max_tile_bytes / float(array_itemsize))
                        -
                        _np.sum(halo[current_axis]) * _np.product(sub_tile_shape_h[0:current_axis])
                    )
                    /
                    float(_np.product(sub_tile_shape_h))
                ))
        current_axis += 1
    logger.debug("Post loop: tile_sub_tile_split_shape=%s", tile_sub_tile_split_shape)
    tile_shape = _np.minimum(array_shape, tile_sub_tile_split_shape * sub_tile_shape)
    logger.debug("pre cannonicalise tile_shape=%s", tile_shape)
    # Canonicalise: recompute the tile shape so the array is divided as
    # uniformly as possible while staying a multiple of sub_tile_shape.
    tile_split_shape = ((array_shape - 1) // tile_shape) + 1
    logger.debug("tile_split_shape=%s", tile_split_shape)
    tile_shape = (((array_sub_tile_split_shape - 1) // tile_split_shape) + 1) * sub_tile_shape
    logger.debug("post cannonicalise tile_shape=%s", tile_shape)
    return tile_shape
constant[
Returns a tile shape :samp:`tile_shape`
such that :samp:`numpy.product(tile_shape)*numpy.sum({array_itemsize}) <= {max_tile_bytes}`.
Also, if :samp:`{max_tile_shape} is not None`
then :samp:`numpy.all(tile_shape <= {max_tile_shape}) is True` and
if :samp:`{sub_tile_shape} is not None`
the :samp:`numpy.all((tile_shape % {sub_tile_shape}) == 0) is True`.
:type array_shape: sequence of :obj:`int`
:param array_shape: Shape of the array which is to be split into tiles.
:type array_itemsize: :obj:`int`
:param array_itemsize: The number of bytes per element of the array to be tiled.
:type max_tile_bytes: :obj:`int`
:param max_tile_bytes: The maximum number of bytes for the returned :samp:`tile_shape`.
:type max_tile_shape: sequence of :obj:`int`
:param max_tile_shape: Per axis maximum shapes for the returned :samp:`tile_shape`.
:type sub_tile_shape: sequence of :obj:`int`
:param sub_tile_shape: The returned :samp:`tile_shape` will be an even multiple
of this sub-tile shape.
:type halo: :obj:`int`, sequence of :obj:`int`, or :samp:`(len({array_shape}), 2)`
shaped :obj:`numpy.ndarray`
:param halo: How tiles are extended in each axis direction with *halo*
elements. See :ref:`the-halo-parameter-examples` for meaning of :samp:`{halo}` values.
:rtype: :obj:`numpy.ndarray`
:return: A 1D array of shape :samp:`(len(array_shape),)` indicating a *tile shape*
which will (approximately) uniformly divide the given :samp:`{array_shape}` into
tiles (sub-arrays).
Examples::
>>> from array_split.split import calculate_tile_shape_for_max_bytes
>>> calculate_tile_shape_for_max_bytes(
... array_shape=[512,],
... array_itemsize=1,
... max_tile_bytes=512
... )
array([512])
>>> calculate_tile_shape_for_max_bytes(
... array_shape=[512,],
... array_itemsize=2, # Doubling the itemsize halves the tile size.
... max_tile_bytes=512
... )
array([256])
>>> calculate_tile_shape_for_max_bytes(
... array_shape=[512,],
... array_itemsize=1,
... max_tile_bytes=512-1 # tile shape will now be halved
... )
array([256])
]
variable[logger] assign[=] call[name[_logging].getLogger, parameter[binary_operation[name[__name__] + constant[.calculate_tile_shape_for_max_bytes]]]]
call[name[logger].debug, parameter[constant[calculate_tile_shape_for_max_bytes: enter:]]]
call[name[logger].debug, parameter[constant[array_shape=%s], name[array_shape]]]
call[name[logger].debug, parameter[constant[array_itemsize=%s], name[array_itemsize]]]
call[name[logger].debug, parameter[constant[max_tile_bytes=%s], name[max_tile_bytes]]]
call[name[logger].debug, parameter[constant[max_tile_shape=%s], name[max_tile_shape]]]
call[name[logger].debug, parameter[constant[sub_tile_shape=%s], name[sub_tile_shape]]]
call[name[logger].debug, parameter[constant[halo=%s], name[halo]]]
variable[array_shape] assign[=] call[name[_np].array, parameter[name[array_shape]]]
variable[array_itemsize] assign[=] call[name[_np].sum, parameter[name[array_itemsize]]]
if compare[name[max_tile_shape] is constant[None]] begin[:]
variable[max_tile_shape] assign[=] call[name[_np].array, parameter[name[array_shape]]]
variable[max_tile_shape] assign[=] call[name[_np].array, parameter[call[name[_np].minimum, parameter[name[max_tile_shape], name[array_shape]]]]]
if compare[name[sub_tile_shape] is constant[None]] begin[:]
variable[sub_tile_shape] assign[=] call[name[_np].ones, parameter[tuple[[<ast.Call object at 0x7da1b0a36050>]]]]
variable[sub_tile_shape] assign[=] call[name[_np].array, parameter[name[sub_tile_shape]]]
variable[halo] assign[=] call[name[convert_halo_to_array_form], parameter[]]
if call[name[_np].any, parameter[compare[name[array_shape] less[<] name[sub_tile_shape]]]] begin[:]
<ast.Raise object at 0x7da1b0a36710>
call[name[logger].debug, parameter[constant[max_tile_shape=%s], name[max_tile_shape]]]
call[name[logger].debug, parameter[constant[sub_tile_shape=%s], name[sub_tile_shape]]]
call[name[logger].debug, parameter[constant[halo=%s], name[halo]]]
variable[array_sub_tile_split_shape] assign[=] binary_operation[binary_operation[binary_operation[name[array_shape] - constant[1]] <ast.FloorDiv object at 0x7da2590d6bc0> name[sub_tile_shape]] + constant[1]]
variable[tile_sub_tile_split_shape] assign[=] binary_operation[name[array_shape] <ast.FloorDiv object at 0x7da2590d6bc0> name[sub_tile_shape]]
if compare[call[name[len], parameter[name[tile_sub_tile_split_shape]]] less_or_equal[<=] constant[1]] begin[:]
call[name[tile_sub_tile_split_shape]][constant[0]] assign[=] call[name[int], parameter[call[name[_np].floor, parameter[binary_operation[binary_operation[binary_operation[name[max_tile_bytes] / call[name[float], parameter[name[array_itemsize]]]] - call[name[_np].sum, parameter[name[halo]]]] / call[name[float], parameter[call[name[sub_tile_shape]][constant[0]]]]]]]]]
variable[tile_sub_tile_split_shape] assign[=] call[name[_np].minimum, parameter[name[tile_sub_tile_split_shape], binary_operation[name[max_tile_shape] <ast.FloorDiv object at 0x7da2590d6bc0> name[sub_tile_shape]]]]
call[name[logger].debug, parameter[constant[Pre loop: tile_sub_tile_split_shape=%s], name[tile_sub_tile_split_shape]]]
variable[current_axis] assign[=] constant[0]
while <ast.BoolOp object at 0x7da1b0a357b0> begin[:]
if compare[name[current_axis] less[<] binary_operation[call[name[len], parameter[name[tile_sub_tile_split_shape]]] - constant[1]]] begin[:]
call[name[tile_sub_tile_split_shape]][name[current_axis]] assign[=] constant[1]
call[name[tile_sub_tile_split_shape]][name[current_axis]] assign[=] binary_operation[name[max_tile_bytes] <ast.FloorDiv object at 0x7da2590d6bc0> binary_operation[call[name[_np].product, parameter[binary_operation[binary_operation[name[tile_sub_tile_split_shape] * name[sub_tile_shape]] + call[name[_np].sum, parameter[name[halo]]]]]] * name[array_itemsize]]]
call[name[tile_sub_tile_split_shape]][name[current_axis]] assign[=] call[name[max], parameter[list[[<ast.Constant object at 0x7da1b0a37fa0>, <ast.Subscript object at 0x7da1b0a37ee0>]]]]
<ast.AugAssign object at 0x7da1b0a35db0>
call[name[logger].debug, parameter[constant[Post loop: tile_sub_tile_split_shape=%s], name[tile_sub_tile_split_shape]]]
variable[tile_shape] assign[=] call[name[_np].minimum, parameter[name[array_shape], binary_operation[name[tile_sub_tile_split_shape] * name[sub_tile_shape]]]]
call[name[logger].debug, parameter[constant[pre cannonicalise tile_shape=%s], name[tile_shape]]]
variable[tile_split_shape] assign[=] binary_operation[binary_operation[binary_operation[name[array_shape] - constant[1]] <ast.FloorDiv object at 0x7da2590d6bc0> name[tile_shape]] + constant[1]]
call[name[logger].debug, parameter[constant[tile_split_shape=%s], name[tile_split_shape]]]
variable[tile_shape] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[name[array_sub_tile_split_shape] - constant[1]] <ast.FloorDiv object at 0x7da2590d6bc0> name[tile_split_shape]] + constant[1]] * name[sub_tile_shape]]
call[name[logger].debug, parameter[constant[post cannonicalise tile_shape=%s], name[tile_shape]]]
return[name[tile_shape]] | keyword[def] identifier[calculate_tile_shape_for_max_bytes] (
identifier[array_shape] ,
identifier[array_itemsize] ,
identifier[max_tile_bytes] ,
identifier[max_tile_shape] = keyword[None] ,
identifier[sub_tile_shape] = keyword[None] ,
identifier[halo] = keyword[None]
):
literal[string]
identifier[logger] = identifier[_logging] . identifier[getLogger] ( identifier[__name__] + literal[string] )
identifier[logger] . identifier[debug] ( literal[string] )
identifier[logger] . identifier[debug] ( literal[string] , identifier[array_shape] )
identifier[logger] . identifier[debug] ( literal[string] , identifier[array_itemsize] )
identifier[logger] . identifier[debug] ( literal[string] , identifier[max_tile_bytes] )
identifier[logger] . identifier[debug] ( literal[string] , identifier[max_tile_shape] )
identifier[logger] . identifier[debug] ( literal[string] , identifier[sub_tile_shape] )
identifier[logger] . identifier[debug] ( literal[string] , identifier[halo] )
identifier[array_shape] = identifier[_np] . identifier[array] ( identifier[array_shape] , identifier[dtype] = literal[string] )
identifier[array_itemsize] = identifier[_np] . identifier[sum] ( identifier[array_itemsize] , identifier[dtype] = literal[string] )
keyword[if] identifier[max_tile_shape] keyword[is] keyword[None] :
identifier[max_tile_shape] = identifier[_np] . identifier[array] ( identifier[array_shape] , identifier[copy] = keyword[True] )
identifier[max_tile_shape] = identifier[_np] . identifier[array] ( identifier[_np] . identifier[minimum] ( identifier[max_tile_shape] , identifier[array_shape] ), identifier[copy] = keyword[True] , identifier[dtype] = identifier[array_shape] . identifier[dtype] )
keyword[if] identifier[sub_tile_shape] keyword[is] keyword[None] :
identifier[sub_tile_shape] = identifier[_np] . identifier[ones] (( identifier[len] ( identifier[array_shape] ),), identifier[dtype] = literal[string] )
identifier[sub_tile_shape] = identifier[_np] . identifier[array] ( identifier[sub_tile_shape] , identifier[dtype] = literal[string] )
identifier[halo] = identifier[convert_halo_to_array_form] ( identifier[halo] = identifier[halo] , identifier[ndim] = identifier[len] ( identifier[array_shape] ))
keyword[if] identifier[_np] . identifier[any] ( identifier[array_shape] < identifier[sub_tile_shape] ):
keyword[raise] identifier[ValueError] (
literal[string]
%
(
identifier[array_shape] ,
identifier[sub_tile_shape]
)
)
identifier[logger] . identifier[debug] ( literal[string] , identifier[max_tile_shape] )
identifier[logger] . identifier[debug] ( literal[string] , identifier[sub_tile_shape] )
identifier[logger] . identifier[debug] ( literal[string] , identifier[halo] )
identifier[array_sub_tile_split_shape] =(( identifier[array_shape] - literal[int] )// identifier[sub_tile_shape] )+ literal[int]
identifier[tile_sub_tile_split_shape] = identifier[array_shape] // identifier[sub_tile_shape]
keyword[if] identifier[len] ( identifier[tile_sub_tile_split_shape] )<= literal[int] :
identifier[tile_sub_tile_split_shape] [ literal[int] ]= identifier[int] ( identifier[_np] . identifier[floor] (
(
( identifier[max_tile_bytes] / identifier[float] ( identifier[array_itemsize] ))
-
identifier[_np] . identifier[sum] ( identifier[halo] )
)
/
identifier[float] ( identifier[sub_tile_shape] [ literal[int] ])
))
identifier[tile_sub_tile_split_shape] = identifier[_np] . identifier[minimum] (
identifier[tile_sub_tile_split_shape] ,
identifier[max_tile_shape] // identifier[sub_tile_shape]
)
identifier[logger] . identifier[debug] ( literal[string] , identifier[tile_sub_tile_split_shape] )
identifier[current_axis] = literal[int]
keyword[while] (
( identifier[current_axis] < identifier[len] ( identifier[tile_sub_tile_split_shape] ))
keyword[and]
(
(
identifier[_np] . identifier[product] ( identifier[tile_sub_tile_split_shape] * identifier[sub_tile_shape] + identifier[_np] . identifier[sum] ( identifier[halo] , identifier[axis] = literal[int] ))
*
identifier[array_itemsize]
)
>
identifier[max_tile_bytes]
)
):
keyword[if] identifier[current_axis] <( identifier[len] ( identifier[tile_sub_tile_split_shape] )- literal[int] ):
identifier[tile_sub_tile_split_shape] [ identifier[current_axis] ]= literal[int]
identifier[tile_sub_tile_split_shape] [ identifier[current_axis] ]=(
identifier[max_tile_bytes]
//
(
identifier[_np] . identifier[product] (
identifier[tile_sub_tile_split_shape] *
identifier[sub_tile_shape] +
identifier[_np] . identifier[sum] (
identifier[halo] ,
identifier[axis] = literal[int] ))
*
identifier[array_itemsize]
)
)
identifier[tile_sub_tile_split_shape] [ identifier[current_axis] ]= identifier[max] ([ literal[int] , identifier[tile_sub_tile_split_shape] [ identifier[current_axis] ]])
keyword[else] :
identifier[sub_tile_shape_h] = identifier[sub_tile_shape] . identifier[copy] ()
identifier[sub_tile_shape_h] [ literal[int] : identifier[current_axis] ]+= identifier[_np] . identifier[sum] ( identifier[halo] [ literal[int] : identifier[current_axis] ,:], identifier[axis] = literal[int] )
identifier[tile_sub_tile_split_shape] [ identifier[current_axis] ]= identifier[int] ( identifier[_np] . identifier[floor] (
(
( identifier[max_tile_bytes] / identifier[float] ( identifier[array_itemsize] ))
-
identifier[_np] . identifier[sum] ( identifier[halo] [ identifier[current_axis] ])* identifier[_np] . identifier[product] ( identifier[sub_tile_shape_h] [ literal[int] : identifier[current_axis] ])
)
/
identifier[float] ( identifier[_np] . identifier[product] ( identifier[sub_tile_shape_h] ))
))
identifier[current_axis] += literal[int]
identifier[logger] . identifier[debug] ( literal[string] , identifier[tile_sub_tile_split_shape] )
identifier[tile_shape] = identifier[_np] . identifier[minimum] ( identifier[array_shape] , identifier[tile_sub_tile_split_shape] * identifier[sub_tile_shape] )
identifier[logger] . identifier[debug] ( literal[string] , identifier[tile_shape] )
identifier[tile_split_shape] =(( identifier[array_shape] - literal[int] )// identifier[tile_shape] )+ literal[int]
identifier[logger] . identifier[debug] ( literal[string] , identifier[tile_split_shape] )
identifier[tile_shape] =((( identifier[array_sub_tile_split_shape] - literal[int] )// identifier[tile_split_shape] )+ literal[int] )* identifier[sub_tile_shape]
identifier[logger] . identifier[debug] ( literal[string] , identifier[tile_shape] )
keyword[return] identifier[tile_shape] | def calculate_tile_shape_for_max_bytes(array_shape, array_itemsize, max_tile_bytes, max_tile_shape=None, sub_tile_shape=None, halo=None):
"""
Returns a tile shape :samp:`tile_shape`
such that :samp:`numpy.product(tile_shape)*numpy.sum({array_itemsize}) <= {max_tile_bytes}`.
Also, if :samp:`{max_tile_shape} is not None`
then :samp:`numpy.all(tile_shape <= {max_tile_shape}) is True` and
if :samp:`{sub_tile_shape} is not None`
the :samp:`numpy.all((tile_shape % {sub_tile_shape}) == 0) is True`.
:type array_shape: sequence of :obj:`int`
:param array_shape: Shape of the array which is to be split into tiles.
:type array_itemsize: :obj:`int`
:param array_itemsize: The number of bytes per element of the array to be tiled.
:type max_tile_bytes: :obj:`int`
:param max_tile_bytes: The maximum number of bytes for the returned :samp:`tile_shape`.
:type max_tile_shape: sequence of :obj:`int`
:param max_tile_shape: Per axis maximum shapes for the returned :samp:`tile_shape`.
:type sub_tile_shape: sequence of :obj:`int`
:param sub_tile_shape: The returned :samp:`tile_shape` will be an even multiple
of this sub-tile shape.
:type halo: :obj:`int`, sequence of :obj:`int`, or :samp:`(len({array_shape}), 2)`
shaped :obj:`numpy.ndarray`
:param halo: How tiles are extended in each axis direction with *halo*
elements. See :ref:`the-halo-parameter-examples` for meaning of :samp:`{halo}` values.
:rtype: :obj:`numpy.ndarray`
:return: A 1D array of shape :samp:`(len(array_shape),)` indicating a *tile shape*
which will (approximately) uniformly divide the given :samp:`{array_shape}` into
tiles (sub-arrays).
Examples::
>>> from array_split.split import calculate_tile_shape_for_max_bytes
>>> calculate_tile_shape_for_max_bytes(
... array_shape=[512,],
... array_itemsize=1,
... max_tile_bytes=512
... )
array([512])
>>> calculate_tile_shape_for_max_bytes(
... array_shape=[512,],
... array_itemsize=2, # Doubling the itemsize halves the tile size.
... max_tile_bytes=512
... )
array([256])
>>> calculate_tile_shape_for_max_bytes(
... array_shape=[512,],
... array_itemsize=1,
... max_tile_bytes=512-1 # tile shape will now be halved
... )
array([256])
"""
logger = _logging.getLogger(__name__ + '.calculate_tile_shape_for_max_bytes')
logger.debug('calculate_tile_shape_for_max_bytes: enter:')
logger.debug('array_shape=%s', array_shape)
logger.debug('array_itemsize=%s', array_itemsize)
logger.debug('max_tile_bytes=%s', max_tile_bytes)
logger.debug('max_tile_shape=%s', max_tile_shape)
logger.debug('sub_tile_shape=%s', sub_tile_shape)
logger.debug('halo=%s', halo)
array_shape = _np.array(array_shape, dtype='int64')
array_itemsize = _np.sum(array_itemsize, dtype='int64')
if max_tile_shape is None:
max_tile_shape = _np.array(array_shape, copy=True) # depends on [control=['if'], data=['max_tile_shape']]
max_tile_shape = _np.array(_np.minimum(max_tile_shape, array_shape), copy=True, dtype=array_shape.dtype)
if sub_tile_shape is None:
sub_tile_shape = _np.ones((len(array_shape),), dtype='int64') # depends on [control=['if'], data=['sub_tile_shape']]
sub_tile_shape = _np.array(sub_tile_shape, dtype='int64')
halo = convert_halo_to_array_form(halo=halo, ndim=len(array_shape))
if _np.any(array_shape < sub_tile_shape):
raise ValueError('Got array_shape=%s element less than corresponding sub_tile_shape=%s element.' % (array_shape, sub_tile_shape)) # depends on [control=['if'], data=[]]
logger.debug('max_tile_shape=%s', max_tile_shape)
logger.debug('sub_tile_shape=%s', sub_tile_shape)
logger.debug('halo=%s', halo)
array_sub_tile_split_shape = (array_shape - 1) // sub_tile_shape + 1
tile_sub_tile_split_shape = array_shape // sub_tile_shape
if len(tile_sub_tile_split_shape) <= 1:
tile_sub_tile_split_shape[0] = int(_np.floor((max_tile_bytes / float(array_itemsize) - _np.sum(halo)) / float(sub_tile_shape[0]))) # depends on [control=['if'], data=[]]
tile_sub_tile_split_shape = _np.minimum(tile_sub_tile_split_shape, max_tile_shape // sub_tile_shape)
logger.debug('Pre loop: tile_sub_tile_split_shape=%s', tile_sub_tile_split_shape)
current_axis = 0
while current_axis < len(tile_sub_tile_split_shape) and _np.product(tile_sub_tile_split_shape * sub_tile_shape + _np.sum(halo, axis=1)) * array_itemsize > max_tile_bytes:
if current_axis < len(tile_sub_tile_split_shape) - 1:
tile_sub_tile_split_shape[current_axis] = 1
tile_sub_tile_split_shape[current_axis] = max_tile_bytes // (_np.product(tile_sub_tile_split_shape * sub_tile_shape + _np.sum(halo, axis=1)) * array_itemsize)
tile_sub_tile_split_shape[current_axis] = max([1, tile_sub_tile_split_shape[current_axis]]) # depends on [control=['if'], data=['current_axis']]
else:
sub_tile_shape_h = sub_tile_shape.copy()
sub_tile_shape_h[0:current_axis] += _np.sum(halo[0:current_axis, :], axis=1)
tile_sub_tile_split_shape[current_axis] = int(_np.floor((max_tile_bytes / float(array_itemsize) - _np.sum(halo[current_axis]) * _np.product(sub_tile_shape_h[0:current_axis])) / float(_np.product(sub_tile_shape_h))))
current_axis += 1 # depends on [control=['while'], data=[]]
logger.debug('Post loop: tile_sub_tile_split_shape=%s', tile_sub_tile_split_shape)
tile_shape = _np.minimum(array_shape, tile_sub_tile_split_shape * sub_tile_shape)
logger.debug('pre cannonicalise tile_shape=%s', tile_shape)
tile_split_shape = (array_shape - 1) // tile_shape + 1
logger.debug('tile_split_shape=%s', tile_split_shape)
tile_shape = ((array_sub_tile_split_shape - 1) // tile_split_shape + 1) * sub_tile_shape
logger.debug('post cannonicalise tile_shape=%s', tile_shape)
return tile_shape |
def arnoldi_res(A, V, H, ip_B=None):
    """Measure Arnoldi residual.

    :param A: a linear operator that can be used with scipy's aslinearoperator
      with ``shape==(N,N)``.
    :param V: Arnoldi basis matrix with ``shape==(N,n)``.
    :param H: Hessenberg matrix: either :math:`\\underline{H}_{n-1}` with
      ``shape==(n,n-1)`` or :math:`H_n` with ``shape==(n,n)`` (if the Arnoldi
      basis spans an A-invariant subspace).
    :param ip_B: (optional) the inner product to use, see :py:meth:`inner`.

    :returns: either :math:`\\|AV_{n-1} - V_n \\underline{H}_{n-1}\\|` or
      :math:`\\|A V_n - V_n H_n\\|` (in the invariant case).
    """
    n_rows = V.shape[0]
    A = get_linearoperator((n_rows, n_rows), A)
    # A square H signals an A-invariant subspace: A is applied to the full
    # basis.  Otherwise the last basis vector only appears on the right-hand
    # side of the residual.
    is_invariant = H.shape[0] == H.shape[1]
    applied_to = V if is_invariant else V[:, :-1]
    residual = A * applied_to - numpy.dot(V, H)
    return norm(residual, ip_B=ip_B)
constant[Measure Arnoldi residual.
:param A: a linear operator that can be used with scipy's aslinearoperator
with ``shape==(N,N)``.
:param V: Arnoldi basis matrix with ``shape==(N,n)``.
:param H: Hessenberg matrix: either :math:`\underline{H}_{n-1}` with
``shape==(n,n-1)`` or :math:`H_n` with ``shape==(n,n)`` (if the Arnoldi
basis spans an A-invariant subspace).
:param ip_B: (optional) the inner product to use, see :py:meth:`inner`.
:returns: either :math:`\|AV_{n-1} - V_n \underline{H}_{n-1}\|` or
:math:`\|A V_n - V_n H_n\|` (in the invariant case).
]
variable[N] assign[=] call[name[V].shape][constant[0]]
variable[invariant] assign[=] compare[call[name[H].shape][constant[0]] equal[==] call[name[H].shape][constant[1]]]
variable[A] assign[=] call[name[get_linearoperator], parameter[tuple[[<ast.Name object at 0x7da1b26c67d0>, <ast.Name object at 0x7da1b26c71c0>]], name[A]]]
if name[invariant] begin[:]
variable[res] assign[=] binary_operation[binary_operation[name[A] * name[V]] - call[name[numpy].dot, parameter[name[V], name[H]]]]
return[call[name[norm], parameter[name[res]]]] | keyword[def] identifier[arnoldi_res] ( identifier[A] , identifier[V] , identifier[H] , identifier[ip_B] = keyword[None] ):
literal[string]
identifier[N] = identifier[V] . identifier[shape] [ literal[int] ]
identifier[invariant] = identifier[H] . identifier[shape] [ literal[int] ]== identifier[H] . identifier[shape] [ literal[int] ]
identifier[A] = identifier[get_linearoperator] (( identifier[N] , identifier[N] ), identifier[A] )
keyword[if] identifier[invariant] :
identifier[res] = identifier[A] * identifier[V] - identifier[numpy] . identifier[dot] ( identifier[V] , identifier[H] )
keyword[else] :
identifier[res] = identifier[A] * identifier[V] [:,:- literal[int] ]- identifier[numpy] . identifier[dot] ( identifier[V] , identifier[H] )
keyword[return] identifier[norm] ( identifier[res] , identifier[ip_B] = identifier[ip_B] ) | def arnoldi_res(A, V, H, ip_B=None):
"""Measure Arnoldi residual.
:param A: a linear operator that can be used with scipy's aslinearoperator
with ``shape==(N,N)``.
:param V: Arnoldi basis matrix with ``shape==(N,n)``.
:param H: Hessenberg matrix: either :math:`\\underline{H}_{n-1}` with
``shape==(n,n-1)`` or :math:`H_n` with ``shape==(n,n)`` (if the Arnoldi
basis spans an A-invariant subspace).
:param ip_B: (optional) the inner product to use, see :py:meth:`inner`.
:returns: either :math:`\\|AV_{n-1} - V_n \\underline{H}_{n-1}\\|` or
:math:`\\|A V_n - V_n H_n\\|` (in the invariant case).
"""
N = V.shape[0]
invariant = H.shape[0] == H.shape[1]
A = get_linearoperator((N, N), A)
if invariant:
res = A * V - numpy.dot(V, H) # depends on [control=['if'], data=[]]
else:
res = A * V[:, :-1] - numpy.dot(V, H)
return norm(res, ip_B=ip_B) |
def extract(self, obj, bypass_ref=False):
    """
    Extract subelement from obj, according to current token.

    :param obj: the object source
    :param bypass_ref: disable JSON Reference errors
    :returns: the subelement extracted from *obj*
    :raises RefError: if a ``$ref`` member is present and ``bypass_ref`` is False
    :raises WrongType: if *obj* is neither a Mapping nor a non-string Sequence
    :raises ExtractError: if extraction fails for any other reason
    """
    try:
        if isinstance(obj, Mapping):
            # Refuse to traverse unresolved JSON References unless bypassed.
            if not bypass_ref and '$ref' in obj:
                raise RefError(obj, 'presence of a $ref member')
            obj = self.extract_mapping(obj)
        elif isinstance(obj, Sequence) and not isinstance(obj, string_types):
            # Strings are Sequences too, but token indexing never applies to them.
            obj = self.extract_sequence(obj)
        else:
            raise WrongType(obj, '{!r} does not apply '
                            'for {!r}'.format(str(self), obj))
        # The extracted value may itself be an unresolved reference; check again.
        if isinstance(obj, Mapping):
            if not bypass_ref and '$ref' in obj:
                raise RefError(obj, 'presence of a $ref member')
        return obj
    except ExtractError as error:
        # Domain errors are logged and propagated unchanged.
        logger.exception(error)
        raise
    except Exception as error:
        # Wrap anything unexpected in ExtractError, dropping args that are the
        # token itself or the (possibly rebound) obj from the original error.
        logger.exception(error)
        args = [arg for arg in error.args if arg not in (self, obj)]
        raise ExtractError(obj, *args)
constant[
Extract subelement from obj, according to current token.
:param obj: the object source
:param bypass_ref: disable JSON Reference errors
]
<ast.Try object at 0x7da1b235bd30> | keyword[def] identifier[extract] ( identifier[self] , identifier[obj] , identifier[bypass_ref] = keyword[False] ):
literal[string]
keyword[try] :
keyword[if] identifier[isinstance] ( identifier[obj] , identifier[Mapping] ):
keyword[if] keyword[not] identifier[bypass_ref] keyword[and] literal[string] keyword[in] identifier[obj] :
keyword[raise] identifier[RefError] ( identifier[obj] , literal[string] )
identifier[obj] = identifier[self] . identifier[extract_mapping] ( identifier[obj] )
keyword[elif] identifier[isinstance] ( identifier[obj] , identifier[Sequence] ) keyword[and] keyword[not] identifier[isinstance] ( identifier[obj] , identifier[string_types] ):
identifier[obj] = identifier[self] . identifier[extract_sequence] ( identifier[obj] )
keyword[else] :
keyword[raise] identifier[WrongType] ( identifier[obj] , literal[string]
literal[string] . identifier[format] ( identifier[str] ( identifier[self] ), identifier[obj] ))
keyword[if] identifier[isinstance] ( identifier[obj] , identifier[Mapping] ):
keyword[if] keyword[not] identifier[bypass_ref] keyword[and] literal[string] keyword[in] identifier[obj] :
keyword[raise] identifier[RefError] ( identifier[obj] , literal[string] )
keyword[return] identifier[obj]
keyword[except] identifier[ExtractError] keyword[as] identifier[error] :
identifier[logger] . identifier[exception] ( identifier[error] )
keyword[raise]
keyword[except] identifier[Exception] keyword[as] identifier[error] :
identifier[logger] . identifier[exception] ( identifier[error] )
identifier[args] =[ identifier[arg] keyword[for] identifier[arg] keyword[in] identifier[error] . identifier[args] keyword[if] identifier[arg] keyword[not] keyword[in] ( identifier[self] , identifier[obj] )]
keyword[raise] identifier[ExtractError] ( identifier[obj] ,* identifier[args] ) | def extract(self, obj, bypass_ref=False):
"""
Extract subelement from obj, according to current token.
:param obj: the object source
:param bypass_ref: disable JSON Reference errors
"""
try:
if isinstance(obj, Mapping):
if not bypass_ref and '$ref' in obj:
raise RefError(obj, 'presence of a $ref member') # depends on [control=['if'], data=[]]
obj = self.extract_mapping(obj) # depends on [control=['if'], data=[]]
elif isinstance(obj, Sequence) and (not isinstance(obj, string_types)):
obj = self.extract_sequence(obj) # depends on [control=['if'], data=[]]
else:
raise WrongType(obj, '{!r} does not apply for {!r}'.format(str(self), obj))
if isinstance(obj, Mapping):
if not bypass_ref and '$ref' in obj:
raise RefError(obj, 'presence of a $ref member') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return obj # depends on [control=['try'], data=[]]
except ExtractError as error:
logger.exception(error)
raise # depends on [control=['except'], data=['error']]
except Exception as error:
logger.exception(error)
args = [arg for arg in error.args if arg not in (self, obj)]
raise ExtractError(obj, *args) # depends on [control=['except'], data=['error']] |
def _set_bucket(self, type, name):
"""
prepares bucket, sets index name
:param str type: bucket type
:param str name: bucket name
:return:
"""
if type:
self._cfg['bucket_type'] = type
if name:
self._cfg['bucket_name'] = name
self.bucket = self._client.bucket_type(self._cfg['bucket_type']
).bucket(self._cfg['bucket_name'])
self.index_name = "%s_%s" % (self._cfg['bucket_type'], self._cfg['bucket_name'])
return self | def function[_set_bucket, parameter[self, type, name]]:
constant[
prepares bucket, sets index name
:param str type: bucket type
:param str name: bucket name
:return:
]
if name[type] begin[:]
call[name[self]._cfg][constant[bucket_type]] assign[=] name[type]
if name[name] begin[:]
call[name[self]._cfg][constant[bucket_name]] assign[=] name[name]
name[self].bucket assign[=] call[call[name[self]._client.bucket_type, parameter[call[name[self]._cfg][constant[bucket_type]]]].bucket, parameter[call[name[self]._cfg][constant[bucket_name]]]]
name[self].index_name assign[=] binary_operation[constant[%s_%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Subscript object at 0x7da18dc99660>, <ast.Subscript object at 0x7da18dc99ff0>]]]
return[name[self]] | keyword[def] identifier[_set_bucket] ( identifier[self] , identifier[type] , identifier[name] ):
literal[string]
keyword[if] identifier[type] :
identifier[self] . identifier[_cfg] [ literal[string] ]= identifier[type]
keyword[if] identifier[name] :
identifier[self] . identifier[_cfg] [ literal[string] ]= identifier[name]
identifier[self] . identifier[bucket] = identifier[self] . identifier[_client] . identifier[bucket_type] ( identifier[self] . identifier[_cfg] [ literal[string] ]
). identifier[bucket] ( identifier[self] . identifier[_cfg] [ literal[string] ])
identifier[self] . identifier[index_name] = literal[string] %( identifier[self] . identifier[_cfg] [ literal[string] ], identifier[self] . identifier[_cfg] [ literal[string] ])
keyword[return] identifier[self] | def _set_bucket(self, type, name):
"""
prepares bucket, sets index name
:param str type: bucket type
:param str name: bucket name
:return:
"""
if type:
self._cfg['bucket_type'] = type # depends on [control=['if'], data=[]]
if name:
self._cfg['bucket_name'] = name # depends on [control=['if'], data=[]]
self.bucket = self._client.bucket_type(self._cfg['bucket_type']).bucket(self._cfg['bucket_name'])
self.index_name = '%s_%s' % (self._cfg['bucket_type'], self._cfg['bucket_name'])
return self |
def add_deployment_group(self, deployment_group, project):
    """AddDeploymentGroup.

    [Preview API] Create a deployment group.

    :param :class:`<DeploymentGroupCreateParameter> <azure.devops.v5_1.task_agent.models.DeploymentGroupCreateParameter>` deployment_group: Deployment group to create.
    :param str project: Project ID or project name
    :rtype: :class:`<DeploymentGroup> <azure.devops.v5_1.task_agent.models.DeploymentGroup>`
    """
    route_values = {}
    if project is not None:
        route_values['project'] = self._serialize.url('project', project, 'str')
    content = self._serialize.body(deployment_group, 'DeploymentGroupCreateParameter')
    # location_id is the fixed REST resource id of the Deployment Groups endpoint.
    response = self._send(http_method='POST',
                          location_id='083c4d89-ab35-45af-aa11-7cf66895c53e',
                          version='5.1-preview.1',
                          route_values=route_values,
                          content=content)
    return self._deserialize('DeploymentGroup', response)
constant[AddDeploymentGroup.
[Preview API] Create a deployment group.
:param :class:`<DeploymentGroupCreateParameter> <azure.devops.v5_1.task_agent.models.DeploymentGroupCreateParameter>` deployment_group: Deployment group to create.
:param str project: Project ID or project name
:rtype: :class:`<DeploymentGroup> <azure.devops.v5_1.task-agent.models.DeploymentGroup>`
]
variable[route_values] assign[=] dictionary[[], []]
if compare[name[project] is_not constant[None]] begin[:]
call[name[route_values]][constant[project]] assign[=] call[name[self]._serialize.url, parameter[constant[project], name[project], constant[str]]]
variable[content] assign[=] call[name[self]._serialize.body, parameter[name[deployment_group], constant[DeploymentGroupCreateParameter]]]
variable[response] assign[=] call[name[self]._send, parameter[]]
return[call[name[self]._deserialize, parameter[constant[DeploymentGroup], name[response]]]] | keyword[def] identifier[add_deployment_group] ( identifier[self] , identifier[deployment_group] , identifier[project] ):
literal[string]
identifier[route_values] ={}
keyword[if] identifier[project] keyword[is] keyword[not] keyword[None] :
identifier[route_values] [ literal[string] ]= identifier[self] . identifier[_serialize] . identifier[url] ( literal[string] , identifier[project] , literal[string] )
identifier[content] = identifier[self] . identifier[_serialize] . identifier[body] ( identifier[deployment_group] , literal[string] )
identifier[response] = identifier[self] . identifier[_send] ( identifier[http_method] = literal[string] ,
identifier[location_id] = literal[string] ,
identifier[version] = literal[string] ,
identifier[route_values] = identifier[route_values] ,
identifier[content] = identifier[content] )
keyword[return] identifier[self] . identifier[_deserialize] ( literal[string] , identifier[response] ) | def add_deployment_group(self, deployment_group, project):
"""AddDeploymentGroup.
[Preview API] Create a deployment group.
:param :class:`<DeploymentGroupCreateParameter> <azure.devops.v5_1.task_agent.models.DeploymentGroupCreateParameter>` deployment_group: Deployment group to create.
:param str project: Project ID or project name
:rtype: :class:`<DeploymentGroup> <azure.devops.v5_1.task-agent.models.DeploymentGroup>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str') # depends on [control=['if'], data=['project']]
content = self._serialize.body(deployment_group, 'DeploymentGroupCreateParameter')
response = self._send(http_method='POST', location_id='083c4d89-ab35-45af-aa11-7cf66895c53e', version='5.1-preview.1', route_values=route_values, content=content)
return self._deserialize('DeploymentGroup', response) |
def dropna(self, subset=None):
    """Remove missing values according to Baloo's convention.

    Parameters
    ----------
    subset : list of str, optional
        Which columns to check for missing values in.

    Returns
    -------
    DataFrame
        DataFrame with no null values in columns.
    """
    subset = check_and_obtain_subset_columns(subset, self)
    # One boolean mask per checked column; keep rows non-null in all of them.
    not_null_masks = [column.notna() for column in self[subset]._iter()]

    def _conjoin(left, right):
        return left & right

    return self[reduce(_conjoin, not_null_masks)]
constant[Remove missing values according to Baloo's convention.
Parameters
----------
subset : list of str, optional
Which columns to check for missing values in.
Returns
-------
DataFrame
DataFrame with no null values in columns.
]
variable[subset] assign[=] call[name[check_and_obtain_subset_columns], parameter[name[subset], name[self]]]
variable[not_nas] assign[=] <ast.ListComp object at 0x7da1b09db4f0>
variable[and_filter] assign[=] call[name[reduce], parameter[<ast.Lambda object at 0x7da1b09db760>, name[not_nas]]]
return[call[name[self]][name[and_filter]]] | keyword[def] identifier[dropna] ( identifier[self] , identifier[subset] = keyword[None] ):
literal[string]
identifier[subset] = identifier[check_and_obtain_subset_columns] ( identifier[subset] , identifier[self] )
identifier[not_nas] =[ identifier[v] . identifier[notna] () keyword[for] identifier[v] keyword[in] identifier[self] [ identifier[subset] ]. identifier[_iter] ()]
identifier[and_filter] = identifier[reduce] ( keyword[lambda] identifier[x] , identifier[y] : identifier[x] & identifier[y] , identifier[not_nas] )
keyword[return] identifier[self] [ identifier[and_filter] ] | def dropna(self, subset=None):
"""Remove missing values according to Baloo's convention.
Parameters
----------
subset : list of str, optional
Which columns to check for missing values in.
Returns
-------
DataFrame
DataFrame with no null values in columns.
"""
subset = check_and_obtain_subset_columns(subset, self)
not_nas = [v.notna() for v in self[subset]._iter()]
and_filter = reduce(lambda x, y: x & y, not_nas)
return self[and_filter] |
def _create_cipher(self, password, salt, nonce = None):
    """
    Create the cipher object to encrypt or decrypt a payload.
    """
    # Imported lazily so the heavy crypto dependencies are only required
    # when a cipher is actually constructed.
    from argon2.low_level import hash_secret_raw, Type
    from Crypto.Cipher import AES

    mode = self._get_mode(self.aesmode)
    if mode is None:  # pragma: no cover
        raise ValueError('invalid AES mode: %s' % self.aesmode)

    # Derive a 128-bit AES key from the password with Argon2id.
    derived_key = hash_secret_raw(
        secret = password.encode(self.password_encoding),
        salt = salt,
        time_cost = self.time_cost,
        memory_cost = self.memory_cost,
        parallelism = self.parallelism,
        hash_len = 16,
        type = Type.ID)
    return AES.new(derived_key, mode, nonce)
return AES.new(key, aesmode, nonce) | def function[_create_cipher, parameter[self, password, salt, nonce]]:
constant[
Create the cipher object to encrypt or decrypt a payload.
]
from relative_module[argon2.low_level] import module[hash_secret_raw], module[Type]
from relative_module[Crypto.Cipher] import module[AES]
variable[aesmode] assign[=] call[name[self]._get_mode, parameter[name[self].aesmode]]
if compare[name[aesmode] is constant[None]] begin[:]
<ast.Raise object at 0x7da1b026e020>
variable[key] assign[=] call[name[hash_secret_raw], parameter[]]
return[call[name[AES].new, parameter[name[key], name[aesmode], name[nonce]]]] | keyword[def] identifier[_create_cipher] ( identifier[self] , identifier[password] , identifier[salt] , identifier[nonce] = keyword[None] ):
literal[string]
keyword[from] identifier[argon2] . identifier[low_level] keyword[import] identifier[hash_secret_raw] , identifier[Type]
keyword[from] identifier[Crypto] . identifier[Cipher] keyword[import] identifier[AES]
identifier[aesmode] = identifier[self] . identifier[_get_mode] ( identifier[self] . identifier[aesmode] )
keyword[if] identifier[aesmode] keyword[is] keyword[None] :
keyword[raise] identifier[ValueError] ( literal[string] % identifier[self] . identifier[aesmode] )
identifier[key] = identifier[hash_secret_raw] (
identifier[secret] = identifier[password] . identifier[encode] ( identifier[self] . identifier[password_encoding] ),
identifier[salt] = identifier[salt] ,
identifier[time_cost] = identifier[self] . identifier[time_cost] ,
identifier[memory_cost] = identifier[self] . identifier[memory_cost] ,
identifier[parallelism] = identifier[self] . identifier[parallelism] ,
identifier[hash_len] = literal[int] ,
identifier[type] = identifier[Type] . identifier[ID] )
keyword[return] identifier[AES] . identifier[new] ( identifier[key] , identifier[aesmode] , identifier[nonce] ) | def _create_cipher(self, password, salt, nonce=None):
"""
Create the cipher object to encrypt or decrypt a payload.
"""
from argon2.low_level import hash_secret_raw, Type
from Crypto.Cipher import AES
aesmode = self._get_mode(self.aesmode)
if aesmode is None: # pragma: no cover
raise ValueError('invalid AES mode: %s' % self.aesmode) # depends on [control=['if'], data=[]]
key = hash_secret_raw(secret=password.encode(self.password_encoding), salt=salt, time_cost=self.time_cost, memory_cost=self.memory_cost, parallelism=self.parallelism, hash_len=16, type=Type.ID)
return AES.new(key, aesmode, nonce) |
def get_list_display(self, request):
    """
    Return a sequence containing the fields to be displayed on the
    changelist.
    """
    def _resolve(field_name):
        # Model BooleanFields are swapped for the clickable switch widget;
        # non-model entries (callables, annotations, ...) pass through as-is.
        try:
            db_field = self.model._meta.get_field(field_name)
            if isinstance(db_field, BooleanField):
                return boolean_switch_field(db_field)
        except FieldDoesNotExist:
            pass
        return field_name

    return [_resolve(field_name) for field_name in self.list_display]
constant[
Return a sequence containing the fields to be displayed on the
changelist.
]
variable[list_display] assign[=] list[[]]
for taget[name[field_name]] in starred[name[self].list_display] begin[:]
<ast.Try object at 0x7da1b03ba2c0>
call[name[list_display].append, parameter[name[field_name]]]
return[name[list_display]] | keyword[def] identifier[get_list_display] ( identifier[self] , identifier[request] ):
literal[string]
identifier[list_display] =[]
keyword[for] identifier[field_name] keyword[in] identifier[self] . identifier[list_display] :
keyword[try] :
identifier[db_field] = identifier[self] . identifier[model] . identifier[_meta] . identifier[get_field] ( identifier[field_name] )
keyword[if] identifier[isinstance] ( identifier[db_field] , identifier[BooleanField] ):
identifier[field_name] = identifier[boolean_switch_field] ( identifier[db_field] )
keyword[except] identifier[FieldDoesNotExist] :
keyword[pass]
identifier[list_display] . identifier[append] ( identifier[field_name] )
keyword[return] identifier[list_display] | def get_list_display(self, request):
"""
Return a sequence containing the fields to be displayed on the
changelist.
"""
list_display = []
for field_name in self.list_display:
try:
db_field = self.model._meta.get_field(field_name)
if isinstance(db_field, BooleanField):
field_name = boolean_switch_field(db_field) # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except FieldDoesNotExist:
pass # depends on [control=['except'], data=[]]
list_display.append(field_name) # depends on [control=['for'], data=['field_name']]
return list_display |
def linewidth(self, linewidth=None):
    """Returns or sets (if a value is provided) the width of the series'
    line.

    :param Number linewidth: If given, the series' linewidth will be set to\
    this.
    :rtype: ``Number``"""
    # Getter mode: no argument given.
    if linewidth is None:
        return self._linewidth
    # Setter mode: validate before assigning.
    if not is_numeric(linewidth):
        raise TypeError("linewidth must be number, not '%s'" % str(linewidth))
    self._linewidth = linewidth
constant[Returns or sets (if a value is provided) the width of the series'
line.
:param Number linewidth: If given, the series' linewidth will be set to this.
:rtype: ``Number``]
if compare[name[linewidth] is constant[None]] begin[:]
return[name[self]._linewidth] | keyword[def] identifier[linewidth] ( identifier[self] , identifier[linewidth] = keyword[None] ):
literal[string]
keyword[if] identifier[linewidth] keyword[is] keyword[None] :
keyword[return] identifier[self] . identifier[_linewidth]
keyword[else] :
keyword[if] keyword[not] identifier[is_numeric] ( identifier[linewidth] ):
keyword[raise] identifier[TypeError] (
literal[string] % identifier[str] ( identifier[linewidth] )
)
identifier[self] . identifier[_linewidth] = identifier[linewidth] | def linewidth(self, linewidth=None):
"""Returns or sets (if a value is provided) the width of the series'
line.
:param Number linewidth: If given, the series' linewidth will be set to this.
:rtype: ``Number``"""
if linewidth is None:
return self._linewidth # depends on [control=['if'], data=[]]
else:
if not is_numeric(linewidth):
raise TypeError("linewidth must be number, not '%s'" % str(linewidth)) # depends on [control=['if'], data=[]]
self._linewidth = linewidth |
def repeat(self, repeat):
    """Set the queue's repeat option"""
    # The play mode encodes (shuffle, repeat) jointly, so the current
    # shuffle state is preserved while only the repeat flag changes.
    self.play_mode = PLAY_MODE_BY_MEANING[(self.shuffle, repeat)]
constant[Set the queue's repeat option]
variable[shuffle] assign[=] name[self].shuffle
name[self].play_mode assign[=] call[name[PLAY_MODE_BY_MEANING]][tuple[[<ast.Name object at 0x7da18eb573a0>, <ast.Name object at 0x7da18eb547f0>]]] | keyword[def] identifier[repeat] ( identifier[self] , identifier[repeat] ):
literal[string]
identifier[shuffle] = identifier[self] . identifier[shuffle]
identifier[self] . identifier[play_mode] = identifier[PLAY_MODE_BY_MEANING] [( identifier[shuffle] , identifier[repeat] )] | def repeat(self, repeat):
"""Set the queue's repeat option"""
shuffle = self.shuffle
self.play_mode = PLAY_MODE_BY_MEANING[shuffle, repeat] |
def get_call_repr(func, *args, **kwargs):
    """Return the string representation of the function call.

    :param func: A callable (e.g. function, method).
    :type func: callable
    :param args: Positional arguments for the callable.
    :param kwargs: Keyword arguments for the callable.
    :return: String representation of the function call.
    :rtype: str
    """
    # Functions, builtins and methods carry __qualname__ directly.
    if ismethod(func) or isfunction(func) or isbuiltin(func):
        func_repr = '{}.{}'.format(func.__module__, func.__qualname__)
    # A callable class instance: fall back to its class name.
    # (callable() is the idiomatic spelling of hasattr(func, '__call__').)
    elif not isclass(func) and callable(func):
        func_repr = '{}.{}'.format(func.__module__, func.__class__.__name__)
    else:
        func_repr = repr(func)

    args_reprs = [repr(arg) for arg in args]
    # Keyword arguments are sorted so the representation is deterministic.
    kwargs_reprs = [k + '=' + repr(v) for k, v in sorted(kwargs.items())]
    return '{}({})'.format(func_repr, ', '.join(args_reprs + kwargs_reprs))
constant[Return the string representation of the function call.
:param func: A callable (e.g. function, method).
:type func: callable
:param args: Positional arguments for the callable.
:param kwargs: Keyword arguments for the callable.
:return: String representation of the function call.
:rtype: str
]
if <ast.BoolOp object at 0x7da18ede7970> begin[:]
variable[func_repr] assign[=] call[constant[{}.{}].format, parameter[name[func].__module__, name[func].__qualname__]]
variable[args_reprs] assign[=] <ast.ListComp object at 0x7da18ede4d60>
variable[kwargs_reprs] assign[=] <ast.ListComp object at 0x7da18ede53f0>
return[call[constant[{}({})].format, parameter[name[func_repr], call[constant[, ].join, parameter[binary_operation[name[args_reprs] + name[kwargs_reprs]]]]]]] | keyword[def] identifier[get_call_repr] ( identifier[func] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
keyword[if] identifier[ismethod] ( identifier[func] ) keyword[or] identifier[isfunction] ( identifier[func] ) keyword[or] identifier[isbuiltin] ( identifier[func] ):
identifier[func_repr] = literal[string] . identifier[format] ( identifier[func] . identifier[__module__] , identifier[func] . identifier[__qualname__] )
keyword[elif] keyword[not] identifier[isclass] ( identifier[func] ) keyword[and] identifier[hasattr] ( identifier[func] , literal[string] ):
identifier[func_repr] = literal[string] . identifier[format] ( identifier[func] . identifier[__module__] , identifier[func] . identifier[__class__] . identifier[__name__] )
keyword[else] :
identifier[func_repr] = identifier[repr] ( identifier[func] )
identifier[args_reprs] =[ identifier[repr] ( identifier[arg] ) keyword[for] identifier[arg] keyword[in] identifier[args] ]
identifier[kwargs_reprs] =[ identifier[k] + literal[string] + identifier[repr] ( identifier[v] ) keyword[for] identifier[k] , identifier[v] keyword[in] identifier[sorted] ( identifier[kwargs] . identifier[items] ())]
keyword[return] literal[string] . identifier[format] ( identifier[func_repr] , literal[string] . identifier[join] ( identifier[args_reprs] + identifier[kwargs_reprs] )) | def get_call_repr(func, *args, **kwargs):
"""Return the string representation of the function call.
:param func: A callable (e.g. function, method).
:type func: callable
:param args: Positional arguments for the callable.
:param kwargs: Keyword arguments for the callable.
:return: String representation of the function call.
:rtype: str
"""
# Functions, builtins and methods
if ismethod(func) or isfunction(func) or isbuiltin(func):
func_repr = '{}.{}'.format(func.__module__, func.__qualname__) # depends on [control=['if'], data=[]]
# A callable class instance
elif not isclass(func) and hasattr(func, '__call__'):
func_repr = '{}.{}'.format(func.__module__, func.__class__.__name__) # depends on [control=['if'], data=[]]
else:
func_repr = repr(func)
args_reprs = [repr(arg) for arg in args]
kwargs_reprs = [k + '=' + repr(v) for (k, v) in sorted(kwargs.items())]
return '{}({})'.format(func_repr, ', '.join(args_reprs + kwargs_reprs)) |
def get_mim_phenotypes(genemap_lines):
    """Get a dictionary with phenotypes

    Use the mim numbers for phenotypes as keys and phenotype information as
    values.

    Args:
        genemap_lines(iterable(str))

    Returns:
        phenotypes_found(dict): A dictionary with mim_numbers as keys and
        dictionaries with phenotype information as values.

        {
            'description': str, # Description of the phenotype
            'hgnc_symbols': set(), # Associated hgnc symbols
            'inheritance': set(), # Associated inheritance patterns
            'mim_number': int, # mim number of phenotype
        }
    """
    phenotypes_found = {}

    # Genemap2 has one entry per gene, and each entry lists the phenotypes
    # that gene is associated with.  Aggregate, per phenotype, the union of
    # inheritance patterns and the hgnc symbols of every associated gene.
    for entry in parse_genemap2(genemap_lines):
        hgnc_symbol = entry['hgnc_symbol']

        for phenotype in entry['phenotypes']:
            mim_nr = phenotype['mim_number']
            if mim_nr in phenotypes_found:
                phenotype_entry = phenotypes_found[mim_nr]
                phenotype_entry['inheritance'] = phenotype_entry['inheritance'].union(
                    phenotype['inheritance'])
                phenotype_entry['hgnc_symbols'].add(hgnc_symbol)
            else:
                phenotype['hgnc_symbols'] = {hgnc_symbol}
                phenotypes_found[mim_nr] = phenotype

    return phenotypes_found
constant[Get a dictionary with phenotypes
Use the mim numbers for phenotypes as keys and phenotype information as
values.
Args:
genemap_lines(iterable(str))
Returns:
phenotypes_found(dict): A dictionary with mim_numbers as keys and
dictionaries with phenotype information as values.
{
'description': str, # Description of the phenotype
'hgnc_symbols': set(), # Associated hgnc symbols
'inheritance': set(), # Associated phenotypes
'mim_number': int, # mim number of phenotype
}
]
variable[phenotype_mims] assign[=] call[name[set], parameter[]]
variable[phenotypes_found] assign[=] dictionary[[], []]
for taget[name[entry]] in starred[call[name[parse_genemap2], parameter[name[genemap_lines]]]] begin[:]
variable[hgnc_symbol] assign[=] call[name[entry]][constant[hgnc_symbol]]
for taget[name[phenotype]] in starred[call[name[entry]][constant[phenotypes]]] begin[:]
variable[mim_nr] assign[=] call[name[phenotype]][constant[mim_number]]
if compare[name[mim_nr] in name[phenotypes_found]] begin[:]
variable[phenotype_entry] assign[=] call[name[phenotypes_found]][name[mim_nr]]
call[name[phenotype_entry]][constant[inheritance]] assign[=] call[call[name[phenotype_entry]][constant[inheritance]].union, parameter[call[name[phenotype]][constant[inheritance]]]]
call[call[name[phenotype_entry]][constant[hgnc_symbols]].add, parameter[name[hgnc_symbol]]]
return[name[phenotypes_found]] | keyword[def] identifier[get_mim_phenotypes] ( identifier[genemap_lines] ):
literal[string]
identifier[phenotype_mims] = identifier[set] ()
identifier[phenotypes_found] ={}
keyword[for] identifier[entry] keyword[in] identifier[parse_genemap2] ( identifier[genemap_lines] ):
identifier[hgnc_symbol] = identifier[entry] [ literal[string] ]
keyword[for] identifier[phenotype] keyword[in] identifier[entry] [ literal[string] ]:
identifier[mim_nr] = identifier[phenotype] [ literal[string] ]
keyword[if] identifier[mim_nr] keyword[in] identifier[phenotypes_found] :
identifier[phenotype_entry] = identifier[phenotypes_found] [ identifier[mim_nr] ]
identifier[phenotype_entry] [ literal[string] ]= identifier[phenotype_entry] [ literal[string] ]. identifier[union] ( identifier[phenotype] [ literal[string] ])
identifier[phenotype_entry] [ literal[string] ]. identifier[add] ( identifier[hgnc_symbol] )
keyword[else] :
identifier[phenotype] [ literal[string] ]= identifier[set] ([ identifier[hgnc_symbol] ])
identifier[phenotypes_found] [ identifier[mim_nr] ]= identifier[phenotype]
keyword[return] identifier[phenotypes_found] | def get_mim_phenotypes(genemap_lines):
"""Get a dictionary with phenotypes
Use the mim numbers for phenotypes as keys and phenotype information as
values.
Args:
genemap_lines(iterable(str))
Returns:
phenotypes_found(dict): A dictionary with mim_numbers as keys and
dictionaries with phenotype information as values.
{
'description': str, # Description of the phenotype
'hgnc_symbols': set(), # Associated hgnc symbols
'inheritance': set(), # Associated phenotypes
'mim_number': int, # mim number of phenotype
}
"""
# Set with all omim numbers that are phenotypes
# Parsed from mim2gene.txt
phenotype_mims = set()
phenotypes_found = {}
# Genemap is a file with one entry per gene.
# Each line hold a lot of information and in specific it
# has information about the phenotypes that a gene is associated with
# From this source we collect inheritane patterns and what hgnc symbols
# a phenotype is associated with
for entry in parse_genemap2(genemap_lines):
hgnc_symbol = entry['hgnc_symbol']
for phenotype in entry['phenotypes']:
mim_nr = phenotype['mim_number']
if mim_nr in phenotypes_found:
phenotype_entry = phenotypes_found[mim_nr]
phenotype_entry['inheritance'] = phenotype_entry['inheritance'].union(phenotype['inheritance'])
phenotype_entry['hgnc_symbols'].add(hgnc_symbol) # depends on [control=['if'], data=['mim_nr', 'phenotypes_found']]
else:
phenotype['hgnc_symbols'] = set([hgnc_symbol])
phenotypes_found[mim_nr] = phenotype # depends on [control=['for'], data=['phenotype']] # depends on [control=['for'], data=['entry']]
return phenotypes_found |
def create_tag(self, tag, message, sha, obj_type, tagger,
lightweight=False):
"""Create a tag in this repository.
:param str tag: (required), name of the tag
:param str message: (required), tag message
:param str sha: (required), SHA of the git object this is tagging
:param str obj_type: (required), type of object being tagged, e.g.,
'commit', 'tree', 'blob'
:param dict tagger: (required), containing the name, email of the
tagger and the date it was tagged
:param bool lightweight: (optional), if False, create an annotated
tag, otherwise create a lightweight tag (a Reference).
:returns: If lightweight == False: :class:`Tag <github3.git.Tag>` if
successful, else None. If lightweight == True: :class:`Reference
<github3.git.Reference>`
"""
if lightweight and tag and sha:
return self.create_ref('refs/tags/' + tag, sha)
json = None
if tag and message and sha and obj_type and len(tagger) == 3:
data = {'tag': tag, 'message': message, 'object': sha,
'type': obj_type, 'tagger': tagger}
url = self._build_url('git', 'tags', base_url=self._api)
json = self._json(self._post(url, data=data), 201)
if json:
self.create_ref('refs/tags/' + tag, sha)
return Tag(json) if json else None | def function[create_tag, parameter[self, tag, message, sha, obj_type, tagger, lightweight]]:
constant[Create a tag in this repository.
:param str tag: (required), name of the tag
:param str message: (required), tag message
:param str sha: (required), SHA of the git object this is tagging
:param str obj_type: (required), type of object being tagged, e.g.,
'commit', 'tree', 'blob'
:param dict tagger: (required), containing the name, email of the
tagger and the date it was tagged
:param bool lightweight: (optional), if False, create an annotated
tag, otherwise create a lightweight tag (a Reference).
:returns: If lightweight == False: :class:`Tag <github3.git.Tag>` if
successful, else None. If lightweight == True: :class:`Reference
<github3.git.Reference>`
]
if <ast.BoolOp object at 0x7da1b0feeaa0> begin[:]
return[call[name[self].create_ref, parameter[binary_operation[constant[refs/tags/] + name[tag]], name[sha]]]]
variable[json] assign[=] constant[None]
if <ast.BoolOp object at 0x7da1b0fec790> begin[:]
variable[data] assign[=] dictionary[[<ast.Constant object at 0x7da1b0fec8e0>, <ast.Constant object at 0x7da1b0fefa30>, <ast.Constant object at 0x7da1b0feeb00>, <ast.Constant object at 0x7da1b0feec50>, <ast.Constant object at 0x7da1b0fecc10>], [<ast.Name object at 0x7da1b0f2a4d0>, <ast.Name object at 0x7da1b0f28970>, <ast.Name object at 0x7da1b0f2b940>, <ast.Name object at 0x7da1b0f2a770>, <ast.Name object at 0x7da1b0f2b730>]]
variable[url] assign[=] call[name[self]._build_url, parameter[constant[git], constant[tags]]]
variable[json] assign[=] call[name[self]._json, parameter[call[name[self]._post, parameter[name[url]]], constant[201]]]
if name[json] begin[:]
call[name[self].create_ref, parameter[binary_operation[constant[refs/tags/] + name[tag]], name[sha]]]
return[<ast.IfExp object at 0x7da1b0fba4d0>] | keyword[def] identifier[create_tag] ( identifier[self] , identifier[tag] , identifier[message] , identifier[sha] , identifier[obj_type] , identifier[tagger] ,
identifier[lightweight] = keyword[False] ):
literal[string]
keyword[if] identifier[lightweight] keyword[and] identifier[tag] keyword[and] identifier[sha] :
keyword[return] identifier[self] . identifier[create_ref] ( literal[string] + identifier[tag] , identifier[sha] )
identifier[json] = keyword[None]
keyword[if] identifier[tag] keyword[and] identifier[message] keyword[and] identifier[sha] keyword[and] identifier[obj_type] keyword[and] identifier[len] ( identifier[tagger] )== literal[int] :
identifier[data] ={ literal[string] : identifier[tag] , literal[string] : identifier[message] , literal[string] : identifier[sha] ,
literal[string] : identifier[obj_type] , literal[string] : identifier[tagger] }
identifier[url] = identifier[self] . identifier[_build_url] ( literal[string] , literal[string] , identifier[base_url] = identifier[self] . identifier[_api] )
identifier[json] = identifier[self] . identifier[_json] ( identifier[self] . identifier[_post] ( identifier[url] , identifier[data] = identifier[data] ), literal[int] )
keyword[if] identifier[json] :
identifier[self] . identifier[create_ref] ( literal[string] + identifier[tag] , identifier[sha] )
keyword[return] identifier[Tag] ( identifier[json] ) keyword[if] identifier[json] keyword[else] keyword[None] | def create_tag(self, tag, message, sha, obj_type, tagger, lightweight=False):
"""Create a tag in this repository.
:param str tag: (required), name of the tag
:param str message: (required), tag message
:param str sha: (required), SHA of the git object this is tagging
:param str obj_type: (required), type of object being tagged, e.g.,
'commit', 'tree', 'blob'
:param dict tagger: (required), containing the name, email of the
tagger and the date it was tagged
:param bool lightweight: (optional), if False, create an annotated
tag, otherwise create a lightweight tag (a Reference).
:returns: If lightweight == False: :class:`Tag <github3.git.Tag>` if
successful, else None. If lightweight == True: :class:`Reference
<github3.git.Reference>`
"""
if lightweight and tag and sha:
return self.create_ref('refs/tags/' + tag, sha) # depends on [control=['if'], data=[]]
json = None
if tag and message and sha and obj_type and (len(tagger) == 3):
data = {'tag': tag, 'message': message, 'object': sha, 'type': obj_type, 'tagger': tagger}
url = self._build_url('git', 'tags', base_url=self._api)
json = self._json(self._post(url, data=data), 201)
if json:
self.create_ref('refs/tags/' + tag, sha) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return Tag(json) if json else None |
def display(self, highlight = None, basename = 'graph', format = 'png',
pause = True):
'''
API:
display(self, highlight = None, basename = 'graph', format = 'png',
pause = True)
Description:
Displays graph according to the arguments provided.
Current display modes: 'off', 'file', 'pygame', 'PIL', 'xdot',
'svg'
Current layout modes: Layouts provided by graphviz ('dot', 'fdp',
'circo', etc.) and 'dot2tex'.
Current formats: Formats provided by graphviz ('ps', 'pdf', 'png',
etc.)
Input:
highlight: List of nodes to be highlighted.
basename: File name. It will be used if display mode is 'file'.
format: Image format, all format supported by Dot are wellcome.
pause: If display is 'pygame' and pause is True pygame will pause
and wait for user input before closing the display. It will close
display window straightaway otherwise.
Post:
A display window will pop up or a file will be written depending
on display mode.
'''
if self.attr['display'] == 'off':
return
if highlight != None:
for n in highlight:
if not isinstance(n, Node):
n = self.get_node(n)
n.set_attr('color', 'red')
if self.get_layout() == 'dot2tex':
if self.attr['display'] != 'file':
self.attr['display'] = 'file'
print("Warning: Dot2tex layout can only be used with display mode 'file'")
print(" Automatically changing setting")
if self.attr['display'] == 'file':
if self.get_layout() == 'dot2tex':
try:
if DOT2TEX_INSTALLED:
if format != 'pdf' or format != 'ps':
print("Dot2tex only supports pdf and ps formats, falling back to pdf")
format = 'pdf'
self.set_layout('dot')
tex = dot2tex.dot2tex(self.to_string(), autosize=True, texmode = 'math', template = DOT2TEX_TEMPLATE)
else:
print("Error: Dot2tex not installed.")
except:
try:
self.set_layout('dot')
self.write(basename+'.dot', self.get_layout(), 'dot')
sp = subprocess.Popen('dot2tex -t math ' + basename + '.dot', stdout = subprocess.PIPE,
stderr = subprocess.STDOUT)
tex = sp.communicate()[0]
except:
print("There was an error running dot2tex.")
f = open(basename+'.tex', 'w')
f.write(tex)
f.close()
try:
subprocess.call(['latex', basename])
if format == 'ps':
subprocess.call(['dvips', basename])
elif format == 'pdf':
subprocess.call(['pdflatex', basename])
self.set_layout('dot2tex')
except:
print("There was an error runing latex. Is it installed?")
else:
self.write(basename+'.'+format, self.get_layout(), format)
return
elif self.attr['display'] == 'pygame':
output = self.create(self.get_layout(), format)
if output is not None:
im = io.StringIO(self.create(self.get_layout(), format))
picture = pygame.image.load(im, format)
screen = pygame.display.set_mode(picture.get_size())
screen.blit(picture, picture.get_rect())
pygame.display.flip()
while pause:
e = pygame.event.poll()
if e.type == pygame.KEYDOWN:
break
if e.type == pygame.QUIT:
pause = False
pygame.display.quit()
# sys.exit() exits the whole program and I (aykut) guess it is
# not appropriate here.
#sys.exit()
else:
print('Error in displaying graph. Display disabled')
self.set_display_mode('off')
elif self.attr['display'] == 'PIL':
im = io.StringIO(self.create(self.get_layout(), format))
if PIL_INSTALLED:
im2 = PIL_Image.open(im)
im2.show()
else:
print('Error: PIL not installed. Display disabled.')
self.attr['display'] = 'off'
elif self.attr['display'] == 'xdot':
if XDOT_INSTALLED:
window = xdot.DotWindow()
window.set_dotcode(self.to_string())
window.connect('destroy', gtk.main_quit)
gtk.main()
else:
print('Error: xdot not installed. Display disabled.')
self.attr['display'] = 'off'
elif self.attr['display'] == 'svg':
if not ETREE_INSTALLED:
print('Error: etree not installed (display mode: svg). Display disabled.')
self.attr['display'] = 'off'
else:
print("Unknown display mode: ", end=' ')
print(self.attr['display'])
if highlight != None:
for n in highlight:
if not isinstance(n, Node):
n = self.get_node(n)
n.set_attr('color', 'black') | def function[display, parameter[self, highlight, basename, format, pause]]:
constant[
API:
display(self, highlight = None, basename = 'graph', format = 'png',
pause = True)
Description:
Displays graph according to the arguments provided.
Current display modes: 'off', 'file', 'pygame', 'PIL', 'xdot',
'svg'
Current layout modes: Layouts provided by graphviz ('dot', 'fdp',
'circo', etc.) and 'dot2tex'.
Current formats: Formats provided by graphviz ('ps', 'pdf', 'png',
etc.)
Input:
highlight: List of nodes to be highlighted.
basename: File name. It will be used if display mode is 'file'.
format: Image format, all format supported by Dot are wellcome.
pause: If display is 'pygame' and pause is True pygame will pause
and wait for user input before closing the display. It will close
display window straightaway otherwise.
Post:
A display window will pop up or a file will be written depending
on display mode.
]
if compare[call[name[self].attr][constant[display]] equal[==] constant[off]] begin[:]
return[None]
if compare[name[highlight] not_equal[!=] constant[None]] begin[:]
for taget[name[n]] in starred[name[highlight]] begin[:]
if <ast.UnaryOp object at 0x7da1b05590c0> begin[:]
variable[n] assign[=] call[name[self].get_node, parameter[name[n]]]
call[name[n].set_attr, parameter[constant[color], constant[red]]]
if compare[call[name[self].get_layout, parameter[]] equal[==] constant[dot2tex]] begin[:]
if compare[call[name[self].attr][constant[display]] not_equal[!=] constant[file]] begin[:]
call[name[self].attr][constant[display]] assign[=] constant[file]
call[name[print], parameter[constant[Warning: Dot2tex layout can only be used with display mode 'file']]]
call[name[print], parameter[constant[ Automatically changing setting]]]
if compare[call[name[self].attr][constant[display]] equal[==] constant[file]] begin[:]
if compare[call[name[self].get_layout, parameter[]] equal[==] constant[dot2tex]] begin[:]
<ast.Try object at 0x7da1b055b3d0>
variable[f] assign[=] call[name[open], parameter[binary_operation[name[basename] + constant[.tex]], constant[w]]]
call[name[f].write, parameter[name[tex]]]
call[name[f].close, parameter[]]
<ast.Try object at 0x7da1b05c41c0>
return[None]
if compare[name[highlight] not_equal[!=] constant[None]] begin[:]
for taget[name[n]] in starred[name[highlight]] begin[:]
if <ast.UnaryOp object at 0x7da1b0431d50> begin[:]
variable[n] assign[=] call[name[self].get_node, parameter[name[n]]]
call[name[n].set_attr, parameter[constant[color], constant[black]]] | keyword[def] identifier[display] ( identifier[self] , identifier[highlight] = keyword[None] , identifier[basename] = literal[string] , identifier[format] = literal[string] ,
identifier[pause] = keyword[True] ):
literal[string]
keyword[if] identifier[self] . identifier[attr] [ literal[string] ]== literal[string] :
keyword[return]
keyword[if] identifier[highlight] != keyword[None] :
keyword[for] identifier[n] keyword[in] identifier[highlight] :
keyword[if] keyword[not] identifier[isinstance] ( identifier[n] , identifier[Node] ):
identifier[n] = identifier[self] . identifier[get_node] ( identifier[n] )
identifier[n] . identifier[set_attr] ( literal[string] , literal[string] )
keyword[if] identifier[self] . identifier[get_layout] ()== literal[string] :
keyword[if] identifier[self] . identifier[attr] [ literal[string] ]!= literal[string] :
identifier[self] . identifier[attr] [ literal[string] ]= literal[string]
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
keyword[if] identifier[self] . identifier[attr] [ literal[string] ]== literal[string] :
keyword[if] identifier[self] . identifier[get_layout] ()== literal[string] :
keyword[try] :
keyword[if] identifier[DOT2TEX_INSTALLED] :
keyword[if] identifier[format] != literal[string] keyword[or] identifier[format] != literal[string] :
identifier[print] ( literal[string] )
identifier[format] = literal[string]
identifier[self] . identifier[set_layout] ( literal[string] )
identifier[tex] = identifier[dot2tex] . identifier[dot2tex] ( identifier[self] . identifier[to_string] (), identifier[autosize] = keyword[True] , identifier[texmode] = literal[string] , identifier[template] = identifier[DOT2TEX_TEMPLATE] )
keyword[else] :
identifier[print] ( literal[string] )
keyword[except] :
keyword[try] :
identifier[self] . identifier[set_layout] ( literal[string] )
identifier[self] . identifier[write] ( identifier[basename] + literal[string] , identifier[self] . identifier[get_layout] (), literal[string] )
identifier[sp] = identifier[subprocess] . identifier[Popen] ( literal[string] + identifier[basename] + literal[string] , identifier[stdout] = identifier[subprocess] . identifier[PIPE] ,
identifier[stderr] = identifier[subprocess] . identifier[STDOUT] )
identifier[tex] = identifier[sp] . identifier[communicate] ()[ literal[int] ]
keyword[except] :
identifier[print] ( literal[string] )
identifier[f] = identifier[open] ( identifier[basename] + literal[string] , literal[string] )
identifier[f] . identifier[write] ( identifier[tex] )
identifier[f] . identifier[close] ()
keyword[try] :
identifier[subprocess] . identifier[call] ([ literal[string] , identifier[basename] ])
keyword[if] identifier[format] == literal[string] :
identifier[subprocess] . identifier[call] ([ literal[string] , identifier[basename] ])
keyword[elif] identifier[format] == literal[string] :
identifier[subprocess] . identifier[call] ([ literal[string] , identifier[basename] ])
identifier[self] . identifier[set_layout] ( literal[string] )
keyword[except] :
identifier[print] ( literal[string] )
keyword[else] :
identifier[self] . identifier[write] ( identifier[basename] + literal[string] + identifier[format] , identifier[self] . identifier[get_layout] (), identifier[format] )
keyword[return]
keyword[elif] identifier[self] . identifier[attr] [ literal[string] ]== literal[string] :
identifier[output] = identifier[self] . identifier[create] ( identifier[self] . identifier[get_layout] (), identifier[format] )
keyword[if] identifier[output] keyword[is] keyword[not] keyword[None] :
identifier[im] = identifier[io] . identifier[StringIO] ( identifier[self] . identifier[create] ( identifier[self] . identifier[get_layout] (), identifier[format] ))
identifier[picture] = identifier[pygame] . identifier[image] . identifier[load] ( identifier[im] , identifier[format] )
identifier[screen] = identifier[pygame] . identifier[display] . identifier[set_mode] ( identifier[picture] . identifier[get_size] ())
identifier[screen] . identifier[blit] ( identifier[picture] , identifier[picture] . identifier[get_rect] ())
identifier[pygame] . identifier[display] . identifier[flip] ()
keyword[while] identifier[pause] :
identifier[e] = identifier[pygame] . identifier[event] . identifier[poll] ()
keyword[if] identifier[e] . identifier[type] == identifier[pygame] . identifier[KEYDOWN] :
keyword[break]
keyword[if] identifier[e] . identifier[type] == identifier[pygame] . identifier[QUIT] :
identifier[pause] = keyword[False]
identifier[pygame] . identifier[display] . identifier[quit] ()
keyword[else] :
identifier[print] ( literal[string] )
identifier[self] . identifier[set_display_mode] ( literal[string] )
keyword[elif] identifier[self] . identifier[attr] [ literal[string] ]== literal[string] :
identifier[im] = identifier[io] . identifier[StringIO] ( identifier[self] . identifier[create] ( identifier[self] . identifier[get_layout] (), identifier[format] ))
keyword[if] identifier[PIL_INSTALLED] :
identifier[im2] = identifier[PIL_Image] . identifier[open] ( identifier[im] )
identifier[im2] . identifier[show] ()
keyword[else] :
identifier[print] ( literal[string] )
identifier[self] . identifier[attr] [ literal[string] ]= literal[string]
keyword[elif] identifier[self] . identifier[attr] [ literal[string] ]== literal[string] :
keyword[if] identifier[XDOT_INSTALLED] :
identifier[window] = identifier[xdot] . identifier[DotWindow] ()
identifier[window] . identifier[set_dotcode] ( identifier[self] . identifier[to_string] ())
identifier[window] . identifier[connect] ( literal[string] , identifier[gtk] . identifier[main_quit] )
identifier[gtk] . identifier[main] ()
keyword[else] :
identifier[print] ( literal[string] )
identifier[self] . identifier[attr] [ literal[string] ]= literal[string]
keyword[elif] identifier[self] . identifier[attr] [ literal[string] ]== literal[string] :
keyword[if] keyword[not] identifier[ETREE_INSTALLED] :
identifier[print] ( literal[string] )
identifier[self] . identifier[attr] [ literal[string] ]= literal[string]
keyword[else] :
identifier[print] ( literal[string] , identifier[end] = literal[string] )
identifier[print] ( identifier[self] . identifier[attr] [ literal[string] ])
keyword[if] identifier[highlight] != keyword[None] :
keyword[for] identifier[n] keyword[in] identifier[highlight] :
keyword[if] keyword[not] identifier[isinstance] ( identifier[n] , identifier[Node] ):
identifier[n] = identifier[self] . identifier[get_node] ( identifier[n] )
identifier[n] . identifier[set_attr] ( literal[string] , literal[string] ) | def display(self, highlight=None, basename='graph', format='png', pause=True):
"""
API:
display(self, highlight = None, basename = 'graph', format = 'png',
pause = True)
Description:
Displays graph according to the arguments provided.
Current display modes: 'off', 'file', 'pygame', 'PIL', 'xdot',
'svg'
Current layout modes: Layouts provided by graphviz ('dot', 'fdp',
'circo', etc.) and 'dot2tex'.
Current formats: Formats provided by graphviz ('ps', 'pdf', 'png',
etc.)
Input:
highlight: List of nodes to be highlighted.
basename: File name. It will be used if display mode is 'file'.
format: Image format, all format supported by Dot are wellcome.
pause: If display is 'pygame' and pause is True pygame will pause
and wait for user input before closing the display. It will close
display window straightaway otherwise.
Post:
A display window will pop up or a file will be written depending
on display mode.
"""
if self.attr['display'] == 'off':
return # depends on [control=['if'], data=[]]
if highlight != None:
for n in highlight:
if not isinstance(n, Node):
n = self.get_node(n) # depends on [control=['if'], data=[]]
n.set_attr('color', 'red') # depends on [control=['for'], data=['n']] # depends on [control=['if'], data=['highlight']]
if self.get_layout() == 'dot2tex':
if self.attr['display'] != 'file':
self.attr['display'] = 'file'
print("Warning: Dot2tex layout can only be used with display mode 'file'")
print(' Automatically changing setting') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if self.attr['display'] == 'file':
if self.get_layout() == 'dot2tex':
try:
if DOT2TEX_INSTALLED:
if format != 'pdf' or format != 'ps':
print('Dot2tex only supports pdf and ps formats, falling back to pdf')
format = 'pdf' # depends on [control=['if'], data=[]]
self.set_layout('dot')
tex = dot2tex.dot2tex(self.to_string(), autosize=True, texmode='math', template=DOT2TEX_TEMPLATE) # depends on [control=['if'], data=[]]
else:
print('Error: Dot2tex not installed.') # depends on [control=['try'], data=[]]
except:
try:
self.set_layout('dot')
self.write(basename + '.dot', self.get_layout(), 'dot')
sp = subprocess.Popen('dot2tex -t math ' + basename + '.dot', stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
tex = sp.communicate()[0] # depends on [control=['try'], data=[]]
except:
print('There was an error running dot2tex.') # depends on [control=['except'], data=[]] # depends on [control=['except'], data=[]]
f = open(basename + '.tex', 'w')
f.write(tex)
f.close()
try:
subprocess.call(['latex', basename])
if format == 'ps':
subprocess.call(['dvips', basename]) # depends on [control=['if'], data=[]]
elif format == 'pdf':
subprocess.call(['pdflatex', basename]) # depends on [control=['if'], data=[]]
self.set_layout('dot2tex') # depends on [control=['try'], data=[]]
except:
print('There was an error runing latex. Is it installed?') # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
else:
self.write(basename + '.' + format, self.get_layout(), format)
return # depends on [control=['if'], data=[]]
elif self.attr['display'] == 'pygame':
output = self.create(self.get_layout(), format)
if output is not None:
im = io.StringIO(self.create(self.get_layout(), format))
picture = pygame.image.load(im, format)
screen = pygame.display.set_mode(picture.get_size())
screen.blit(picture, picture.get_rect())
pygame.display.flip()
while pause:
e = pygame.event.poll()
if e.type == pygame.KEYDOWN:
break # depends on [control=['if'], data=[]]
if e.type == pygame.QUIT:
pause = False
pygame.display.quit() # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]] # depends on [control=['if'], data=[]]
else:
# sys.exit() exits the whole program and I (aykut) guess it is
# not appropriate here.
#sys.exit()
print('Error in displaying graph. Display disabled')
self.set_display_mode('off') # depends on [control=['if'], data=[]]
elif self.attr['display'] == 'PIL':
im = io.StringIO(self.create(self.get_layout(), format))
if PIL_INSTALLED:
im2 = PIL_Image.open(im)
im2.show() # depends on [control=['if'], data=[]]
else:
print('Error: PIL not installed. Display disabled.')
self.attr['display'] = 'off' # depends on [control=['if'], data=[]]
elif self.attr['display'] == 'xdot':
if XDOT_INSTALLED:
window = xdot.DotWindow()
window.set_dotcode(self.to_string())
window.connect('destroy', gtk.main_quit)
gtk.main() # depends on [control=['if'], data=[]]
else:
print('Error: xdot not installed. Display disabled.')
self.attr['display'] = 'off' # depends on [control=['if'], data=[]]
elif self.attr['display'] == 'svg':
if not ETREE_INSTALLED:
print('Error: etree not installed (display mode: svg). Display disabled.')
self.attr['display'] = 'off' # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
print('Unknown display mode: ', end=' ')
print(self.attr['display'])
if highlight != None:
for n in highlight:
if not isinstance(n, Node):
n = self.get_node(n) # depends on [control=['if'], data=[]]
n.set_attr('color', 'black') # depends on [control=['for'], data=['n']] # depends on [control=['if'], data=['highlight']] |
def investment_term(self, investment_term):
""" This investment term is in months. This might change to a relative delta."""
try:
if isinstance(investment_term, (str, int)):
self._investment_term = int(investment_term)
except Exception:
raise ValueError('invalid input of investment type %s, cannot be converted to an int' %
investment_term) | def function[investment_term, parameter[self, investment_term]]:
constant[ This investment term is in months. This might change to a relative delta.]
<ast.Try object at 0x7da1b083ace0> | keyword[def] identifier[investment_term] ( identifier[self] , identifier[investment_term] ):
literal[string]
keyword[try] :
keyword[if] identifier[isinstance] ( identifier[investment_term] ,( identifier[str] , identifier[int] )):
identifier[self] . identifier[_investment_term] = identifier[int] ( identifier[investment_term] )
keyword[except] identifier[Exception] :
keyword[raise] identifier[ValueError] ( literal[string] %
identifier[investment_term] ) | def investment_term(self, investment_term):
""" This investment term is in months. This might change to a relative delta."""
try:
if isinstance(investment_term, (str, int)):
self._investment_term = int(investment_term) # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except Exception:
raise ValueError('invalid input of investment type %s, cannot be converted to an int' % investment_term) # depends on [control=['except'], data=[]] |
def get_absolute_url_link(self, text=None, cls=None, icon_class=None,
**attrs):
"""Gets the html link for the object."""
if text is None:
text = self.get_link_text()
return build_link(href=self.get_absolute_url(),
text=text,
cls=cls,
icon_class=icon_class,
**attrs) | def function[get_absolute_url_link, parameter[self, text, cls, icon_class]]:
constant[Gets the html link for the object.]
if compare[name[text] is constant[None]] begin[:]
variable[text] assign[=] call[name[self].get_link_text, parameter[]]
return[call[name[build_link], parameter[]]] | keyword[def] identifier[get_absolute_url_link] ( identifier[self] , identifier[text] = keyword[None] , identifier[cls] = keyword[None] , identifier[icon_class] = keyword[None] ,
** identifier[attrs] ):
literal[string]
keyword[if] identifier[text] keyword[is] keyword[None] :
identifier[text] = identifier[self] . identifier[get_link_text] ()
keyword[return] identifier[build_link] ( identifier[href] = identifier[self] . identifier[get_absolute_url] (),
identifier[text] = identifier[text] ,
identifier[cls] = identifier[cls] ,
identifier[icon_class] = identifier[icon_class] ,
** identifier[attrs] ) | def get_absolute_url_link(self, text=None, cls=None, icon_class=None, **attrs):
"""Gets the html link for the object."""
if text is None:
text = self.get_link_text() # depends on [control=['if'], data=['text']]
return build_link(href=self.get_absolute_url(), text=text, cls=cls, icon_class=icon_class, **attrs) |
def update_account_password_policy(allow_users_to_change_password=None,
hard_expiry=None, max_password_age=None,
minimum_password_length=None,
password_reuse_prevention=None,
require_lowercase_characters=None,
require_numbers=None, require_symbols=None,
require_uppercase_characters=None,
region=None, key=None, keyid=None,
profile=None):
'''
Update the password policy for the AWS account.
.. versionadded:: 2015.8.0
CLI Example:
.. code-block:: bash
salt myminion boto_iam.update_account_password_policy True
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
conn.update_account_password_policy(allow_users_to_change_password,
hard_expiry, max_password_age,
minimum_password_length,
password_reuse_prevention,
require_lowercase_characters,
require_numbers, require_symbols,
require_uppercase_characters)
log.info('The password policy has been updated.')
return True
except boto.exception.BotoServerError as e:
log.debug(e)
msg = 'Failed to update the password policy'
log.error(msg)
return False | def function[update_account_password_policy, parameter[allow_users_to_change_password, hard_expiry, max_password_age, minimum_password_length, password_reuse_prevention, require_lowercase_characters, require_numbers, require_symbols, require_uppercase_characters, region, key, keyid, profile]]:
constant[
Update the password policy for the AWS account.
.. versionadded:: 2015.8.0
CLI Example:
.. code-block:: bash
salt myminion boto_iam.update_account_password_policy True
]
variable[conn] assign[=] call[name[_get_conn], parameter[]]
<ast.Try object at 0x7da1b21ea020> | keyword[def] identifier[update_account_password_policy] ( identifier[allow_users_to_change_password] = keyword[None] ,
identifier[hard_expiry] = keyword[None] , identifier[max_password_age] = keyword[None] ,
identifier[minimum_password_length] = keyword[None] ,
identifier[password_reuse_prevention] = keyword[None] ,
identifier[require_lowercase_characters] = keyword[None] ,
identifier[require_numbers] = keyword[None] , identifier[require_symbols] = keyword[None] ,
identifier[require_uppercase_characters] = keyword[None] ,
identifier[region] = keyword[None] , identifier[key] = keyword[None] , identifier[keyid] = keyword[None] ,
identifier[profile] = keyword[None] ):
literal[string]
identifier[conn] = identifier[_get_conn] ( identifier[region] = identifier[region] , identifier[key] = identifier[key] , identifier[keyid] = identifier[keyid] , identifier[profile] = identifier[profile] )
keyword[try] :
identifier[conn] . identifier[update_account_password_policy] ( identifier[allow_users_to_change_password] ,
identifier[hard_expiry] , identifier[max_password_age] ,
identifier[minimum_password_length] ,
identifier[password_reuse_prevention] ,
identifier[require_lowercase_characters] ,
identifier[require_numbers] , identifier[require_symbols] ,
identifier[require_uppercase_characters] )
identifier[log] . identifier[info] ( literal[string] )
keyword[return] keyword[True]
keyword[except] identifier[boto] . identifier[exception] . identifier[BotoServerError] keyword[as] identifier[e] :
identifier[log] . identifier[debug] ( identifier[e] )
identifier[msg] = literal[string]
identifier[log] . identifier[error] ( identifier[msg] )
keyword[return] keyword[False] | def update_account_password_policy(allow_users_to_change_password=None, hard_expiry=None, max_password_age=None, minimum_password_length=None, password_reuse_prevention=None, require_lowercase_characters=None, require_numbers=None, require_symbols=None, require_uppercase_characters=None, region=None, key=None, keyid=None, profile=None):
"""
Update the password policy for the AWS account.
.. versionadded:: 2015.8.0
CLI Example:
.. code-block:: bash
salt myminion boto_iam.update_account_password_policy True
"""
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
conn.update_account_password_policy(allow_users_to_change_password, hard_expiry, max_password_age, minimum_password_length, password_reuse_prevention, require_lowercase_characters, require_numbers, require_symbols, require_uppercase_characters)
log.info('The password policy has been updated.')
return True # depends on [control=['try'], data=[]]
except boto.exception.BotoServerError as e:
log.debug(e)
msg = 'Failed to update the password policy'
log.error(msg)
return False # depends on [control=['except'], data=['e']] |
def file_upload(f):
"""
Return list of `werkzeug.datastructures.FileStorage` objects - files to be
uploaded
"""
@wraps(f)
def file_upload_decorator(*args, **kwargs):
# If the data is already transformed, we do not transform it any
# further.
task_data = _get_data_from_args(args)
if task_data is None:
logging.error("Task data is empty during FilesUploadDecorator.")
task_data.transform_payload(lambda _: request.files.getlist('file'))
return f(*args, **kwargs)
return file_upload_decorator | def function[file_upload, parameter[f]]:
constant[
Return list of `werkzeug.datastructures.FileStorage` objects - files to be
uploaded
]
def function[file_upload_decorator, parameter[]]:
variable[task_data] assign[=] call[name[_get_data_from_args], parameter[name[args]]]
if compare[name[task_data] is constant[None]] begin[:]
call[name[logging].error, parameter[constant[Task data is empty during FilesUploadDecorator.]]]
call[name[task_data].transform_payload, parameter[<ast.Lambda object at 0x7da207f00400>]]
return[call[name[f], parameter[<ast.Starred object at 0x7da207f02740>]]]
return[name[file_upload_decorator]] | keyword[def] identifier[file_upload] ( identifier[f] ):
literal[string]
@ identifier[wraps] ( identifier[f] )
keyword[def] identifier[file_upload_decorator] (* identifier[args] ,** identifier[kwargs] ):
identifier[task_data] = identifier[_get_data_from_args] ( identifier[args] )
keyword[if] identifier[task_data] keyword[is] keyword[None] :
identifier[logging] . identifier[error] ( literal[string] )
identifier[task_data] . identifier[transform_payload] ( keyword[lambda] identifier[_] : identifier[request] . identifier[files] . identifier[getlist] ( literal[string] ))
keyword[return] identifier[f] (* identifier[args] ,** identifier[kwargs] )
keyword[return] identifier[file_upload_decorator] | def file_upload(f):
"""
Return list of `werkzeug.datastructures.FileStorage` objects - files to be
uploaded
"""
@wraps(f)
def file_upload_decorator(*args, **kwargs):
# If the data is already transformed, we do not transform it any
# further.
task_data = _get_data_from_args(args)
if task_data is None:
logging.error('Task data is empty during FilesUploadDecorator.') # depends on [control=['if'], data=[]]
task_data.transform_payload(lambda _: request.files.getlist('file'))
return f(*args, **kwargs)
return file_upload_decorator |
def _erase_buffer(self, output_buffer):
    """Evict old readings from one buffer to make room for new ones.

    Pops ``buffer_erase_size`` readings from the streaming buffer when
    ``output_buffer`` is truthy, otherwise from the storage buffer, and
    notifies every queue walker attached to that buffer of each
    rolled-over stream so their availability counts stay in sync.
    """
    count = self._model.get(u'buffer_erase_size')
    kind = u'streaming' if output_buffer else u'storage'
    evicted = self._engine.popn(kind, count)
    for old in evicted:
        rolled = DataStream.FromEncoded(old.stream)
        # Only walkers selecting this same buffer observe the rollover.
        for watcher in self._queue_walkers:
            if watcher.selector.output == output_buffer:
                watcher.notify_rollover(rolled)
constant[Erase readings in the specified buffer to make space.]
variable[erase_size] assign[=] call[name[self]._model.get, parameter[constant[buffer_erase_size]]]
variable[buffer_type] assign[=] constant[storage]
if name[output_buffer] begin[:]
variable[buffer_type] assign[=] constant[streaming]
variable[old_readings] assign[=] call[name[self]._engine.popn, parameter[name[buffer_type], name[erase_size]]]
for taget[name[reading]] in starred[name[old_readings]] begin[:]
variable[stream] assign[=] call[name[DataStream].FromEncoded, parameter[name[reading].stream]]
for taget[name[walker]] in starred[name[self]._queue_walkers] begin[:]
if compare[name[walker].selector.output equal[==] name[output_buffer]] begin[:]
call[name[walker].notify_rollover, parameter[name[stream]]] | keyword[def] identifier[_erase_buffer] ( identifier[self] , identifier[output_buffer] ):
literal[string]
identifier[erase_size] = identifier[self] . identifier[_model] . identifier[get] ( literal[string] )
identifier[buffer_type] = literal[string]
keyword[if] identifier[output_buffer] :
identifier[buffer_type] = literal[string]
identifier[old_readings] = identifier[self] . identifier[_engine] . identifier[popn] ( identifier[buffer_type] , identifier[erase_size] )
keyword[for] identifier[reading] keyword[in] identifier[old_readings] :
identifier[stream] = identifier[DataStream] . identifier[FromEncoded] ( identifier[reading] . identifier[stream] )
keyword[for] identifier[walker] keyword[in] identifier[self] . identifier[_queue_walkers] :
keyword[if] identifier[walker] . identifier[selector] . identifier[output] == identifier[output_buffer] :
identifier[walker] . identifier[notify_rollover] ( identifier[stream] ) | def _erase_buffer(self, output_buffer):
"""Erase readings in the specified buffer to make space."""
erase_size = self._model.get(u'buffer_erase_size')
buffer_type = u'storage'
if output_buffer:
buffer_type = u'streaming' # depends on [control=['if'], data=[]]
old_readings = self._engine.popn(buffer_type, erase_size)
# Now go through all of our walkers that could match and
# update their availability counts and data buffer pointers
for reading in old_readings:
stream = DataStream.FromEncoded(reading.stream)
for walker in self._queue_walkers:
# Only notify the walkers that are on this queue
if walker.selector.output == output_buffer:
walker.notify_rollover(stream) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['walker']] # depends on [control=['for'], data=['reading']] |
def _add_rid_to_vrf_list(self, ri):
    """Record *ri*'s router id under its VRF, creating the VRF if unseen.

    VRF usage on the ASR must be tracked explicitly: the VRF name is only
    derivable once the router's gateway port is present, and one VRF can
    span multiple routers (and even OpenStack tenants). Router ids are
    therefore grouped per VRF name in ``self._router_ids_by_vrf``.
    """
    # Nothing to track until the router has a gateway port.
    if not (ri.ex_gw_port or ri.router.get('gw_port')):
        return
    drv = self.driver_manager.get_driver(ri.id)
    vrf = drv._get_vrf_name(ri)
    if not vrf:
        return
    if not self._router_ids_by_vrf.get(vrf):
        # First router seen for this VRF -- materialize it on the device.
        LOG.debug("++ CREATING VRF %s" % vrf)
        drv._do_create_vrf(vrf)
    self._router_ids_by_vrf.setdefault(vrf, set()).add(ri.router['id'])
constant[Add router ID to a VRF list.
In order to properly manage VRFs in the ASR, their
usage has to be tracked. VRFs are provided with neutron
router objects in their hosting_info fields of the gateway ports.
This means that the VRF is only available when the gateway port
of the router is set. VRFs can span routers, and even OpenStack
tenants, so lists of routers that belong to the same VRF are
kept in a dictionary, with the VRF name as the key.
]
if <ast.BoolOp object at 0x7da18dc98e20> begin[:]
variable[driver] assign[=] call[name[self].driver_manager.get_driver, parameter[name[ri].id]]
variable[vrf_name] assign[=] call[name[driver]._get_vrf_name, parameter[name[ri]]]
if <ast.UnaryOp object at 0x7da1b1be4100> begin[:]
return[None]
if <ast.UnaryOp object at 0x7da1b1be5db0> begin[:]
call[name[LOG].debug, parameter[binary_operation[constant[++ CREATING VRF %s] <ast.Mod object at 0x7da2590d6920> name[vrf_name]]]]
call[name[driver]._do_create_vrf, parameter[name[vrf_name]]]
call[call[name[self]._router_ids_by_vrf.setdefault, parameter[name[vrf_name], call[name[set], parameter[]]]].add, parameter[call[name[ri].router][constant[id]]]] | keyword[def] identifier[_add_rid_to_vrf_list] ( identifier[self] , identifier[ri] ):
literal[string]
keyword[if] identifier[ri] . identifier[ex_gw_port] keyword[or] identifier[ri] . identifier[router] . identifier[get] ( literal[string] ):
identifier[driver] = identifier[self] . identifier[driver_manager] . identifier[get_driver] ( identifier[ri] . identifier[id] )
identifier[vrf_name] = identifier[driver] . identifier[_get_vrf_name] ( identifier[ri] )
keyword[if] keyword[not] identifier[vrf_name] :
keyword[return]
keyword[if] keyword[not] identifier[self] . identifier[_router_ids_by_vrf] . identifier[get] ( identifier[vrf_name] ):
identifier[LOG] . identifier[debug] ( literal[string] % identifier[vrf_name] )
identifier[driver] . identifier[_do_create_vrf] ( identifier[vrf_name] )
identifier[self] . identifier[_router_ids_by_vrf] . identifier[setdefault] ( identifier[vrf_name] , identifier[set] ()). identifier[add] (
identifier[ri] . identifier[router] [ literal[string] ]) | def _add_rid_to_vrf_list(self, ri):
"""Add router ID to a VRF list.
In order to properly manage VRFs in the ASR, their
usage has to be tracked. VRFs are provided with neutron
router objects in their hosting_info fields of the gateway ports.
This means that the VRF is only available when the gateway port
of the router is set. VRFs can span routers, and even OpenStack
tenants, so lists of routers that belong to the same VRF are
kept in a dictionary, with the VRF name as the key.
"""
if ri.ex_gw_port or ri.router.get('gw_port'):
driver = self.driver_manager.get_driver(ri.id)
vrf_name = driver._get_vrf_name(ri)
if not vrf_name:
return # depends on [control=['if'], data=[]]
if not self._router_ids_by_vrf.get(vrf_name):
LOG.debug('++ CREATING VRF %s' % vrf_name)
driver._do_create_vrf(vrf_name) # depends on [control=['if'], data=[]]
self._router_ids_by_vrf.setdefault(vrf_name, set()).add(ri.router['id']) # depends on [control=['if'], data=[]] |
def parse_sort_key(identity: str, sort_key_string: str) -> 'Key':
    """Rebuild a Key from its flattened sort-key string.

    The flat form is ``group PARTITION dimensions PARTITION timestamp``.
    A non-empty timestamp segment makes a TIMESTAMP key; otherwise the
    key is a DIMENSION key with no timestamp.
    """
    parts = sort_key_string.split(Key.PARTITION)
    stamp = parts[2]
    kind = KeyType.TIMESTAMP if stamp else KeyType.DIMENSION
    dims = parts[1].split(Key.DIMENSION_PARTITION) if parts[1] else []
    when = parser.parse(stamp) if stamp else None
    return Key(kind, identity, parts[0], dims, when)
constant[ Parses a flat key string and returns a key ]
variable[parts] assign[=] call[name[sort_key_string].split, parameter[name[Key].PARTITION]]
variable[key_type] assign[=] name[KeyType].DIMENSION
if call[name[parts]][constant[2]] begin[:]
variable[key_type] assign[=] name[KeyType].TIMESTAMP
return[call[name[Key], parameter[name[key_type], name[identity], call[name[parts]][constant[0]], <ast.IfExp object at 0x7da204347820>, <ast.IfExp object at 0x7da204345630>]]] | keyword[def] identifier[parse_sort_key] ( identifier[identity] : identifier[str] , identifier[sort_key_string] : identifier[str] )-> literal[string] :
literal[string]
identifier[parts] = identifier[sort_key_string] . identifier[split] ( identifier[Key] . identifier[PARTITION] )
identifier[key_type] = identifier[KeyType] . identifier[DIMENSION]
keyword[if] identifier[parts] [ literal[int] ]:
identifier[key_type] = identifier[KeyType] . identifier[TIMESTAMP]
keyword[return] identifier[Key] ( identifier[key_type] , identifier[identity] , identifier[parts] [ literal[int] ], identifier[parts] [ literal[int] ]. identifier[split] ( identifier[Key] . identifier[DIMENSION_PARTITION] )
keyword[if] identifier[parts] [ literal[int] ] keyword[else] [],
identifier[parser] . identifier[parse] ( identifier[parts] [ literal[int] ]) keyword[if] identifier[parts] [ literal[int] ] keyword[else] keyword[None] ) | def parse_sort_key(identity: str, sort_key_string: str) -> 'Key':
""" Parses a flat key string and returns a key """
parts = sort_key_string.split(Key.PARTITION)
key_type = KeyType.DIMENSION
if parts[2]:
key_type = KeyType.TIMESTAMP # depends on [control=['if'], data=[]]
return Key(key_type, identity, parts[0], parts[1].split(Key.DIMENSION_PARTITION) if parts[1] else [], parser.parse(parts[2]) if parts[2] else None) |
def simplify(geoids):
    """
    Reduce a collection of geoids to a simpler, smaller set.

    Geoids are grouped by their ``allval()`` form. Any group holding five
    or more geoids is collapsed to that single allval geoid plus its
    promotion to the next higher summary level; smaller groups are kept
    as-is. Falsy entries are skipped.

    Fix: removed the dead local dict ``d`` that was populated on every
    iteration but never read.

    :param geoids: iterable of geoid objects exposing ``allval()``; the
        allval result must be hashable and expose ``promote()``.
    :return: set of simplified geoids.
    """
    from collections import defaultdict

    aggregated = defaultdict(set)
    for g in geoids:
        if not bool(g):
            continue
        aggregated[g.allval()].add(g)

    compiled = set()
    for av, group in aggregated.items():
        if len(group) >= 5:
            # Collapse the group into the summary geoid and its parent.
            compiled.add(av)
            compiled.add(av.promote())
        else:
            compiled |= group
    return compiled
constant[
Given a list of geoids, reduce it to a simpler set. If there are five or more geoids at one summary level
convert them to a single geoid at the higher level.
:param geoids:
:return:
]
from relative_module[collections] import module[defaultdict]
variable[aggregated] assign[=] call[name[defaultdict], parameter[name[set]]]
variable[d] assign[=] dictionary[[], []]
for taget[name[g]] in starred[name[geoids]] begin[:]
if <ast.UnaryOp object at 0x7da18dc07580> begin[:]
continue
variable[av] assign[=] call[name[g].allval, parameter[]]
call[name[d]][name[av]] assign[=] constant[None]
call[call[name[aggregated]][name[av]].add, parameter[name[g]]]
variable[compiled] assign[=] call[name[set], parameter[]]
for taget[tuple[[<ast.Name object at 0x7da18dc078e0>, <ast.Name object at 0x7da18dc04040>]]] in starred[call[name[aggregated].items, parameter[]]] begin[:]
if compare[call[name[len], parameter[name[v]]] greater_or_equal[>=] constant[5]] begin[:]
call[name[compiled].add, parameter[name[k]]]
call[name[compiled].add, parameter[call[name[k].promote, parameter[]]]]
return[name[compiled]] | keyword[def] identifier[simplify] ( identifier[geoids] ):
literal[string]
keyword[from] identifier[collections] keyword[import] identifier[defaultdict]
identifier[aggregated] = identifier[defaultdict] ( identifier[set] )
identifier[d] ={}
keyword[for] identifier[g] keyword[in] identifier[geoids] :
keyword[if] keyword[not] identifier[bool] ( identifier[g] ):
keyword[continue]
identifier[av] = identifier[g] . identifier[allval] ()
identifier[d] [ identifier[av] ]= keyword[None]
identifier[aggregated] [ identifier[av] ]. identifier[add] ( identifier[g] )
identifier[compiled] = identifier[set] ()
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[aggregated] . identifier[items] ():
keyword[if] identifier[len] ( identifier[v] )>= literal[int] :
identifier[compiled] . identifier[add] ( identifier[k] )
identifier[compiled] . identifier[add] ( identifier[k] . identifier[promote] ())
keyword[else] :
identifier[compiled] |= identifier[v]
keyword[return] identifier[compiled] | def simplify(geoids):
"""
Given a list of geoids, reduce it to a simpler set. If there are five or more geoids at one summary level
convert them to a single geoid at the higher level.
:param geoids:
:return:
"""
from collections import defaultdict
aggregated = defaultdict(set)
d = {}
for g in geoids:
if not bool(g):
continue # depends on [control=['if'], data=[]]
av = g.allval()
d[av] = None
aggregated[av].add(g) # depends on [control=['for'], data=['g']]
compiled = set()
for (k, v) in aggregated.items():
if len(v) >= 5:
compiled.add(k)
compiled.add(k.promote()) # depends on [control=['if'], data=[]]
else:
compiled |= v # depends on [control=['for'], data=[]]
return compiled |
def _update_heap(self, peer):
"""Recalculate the peer's rank and update itself in the peer heap."""
rank = self.rank_calculator.get_rank(peer)
if rank == peer.rank:
return
peer.rank = rank
self.peer_heap.update_peer(peer) | def function[_update_heap, parameter[self, peer]]:
constant[Recalculate the peer's rank and update itself in the peer heap.]
variable[rank] assign[=] call[name[self].rank_calculator.get_rank, parameter[name[peer]]]
if compare[name[rank] equal[==] name[peer].rank] begin[:]
return[None]
name[peer].rank assign[=] name[rank]
call[name[self].peer_heap.update_peer, parameter[name[peer]]] | keyword[def] identifier[_update_heap] ( identifier[self] , identifier[peer] ):
literal[string]
identifier[rank] = identifier[self] . identifier[rank_calculator] . identifier[get_rank] ( identifier[peer] )
keyword[if] identifier[rank] == identifier[peer] . identifier[rank] :
keyword[return]
identifier[peer] . identifier[rank] = identifier[rank]
identifier[self] . identifier[peer_heap] . identifier[update_peer] ( identifier[peer] ) | def _update_heap(self, peer):
"""Recalculate the peer's rank and update itself in the peer heap."""
rank = self.rank_calculator.get_rank(peer)
if rank == peer.rank:
return # depends on [control=['if'], data=[]]
peer.rank = rank
self.peer_heap.update_peer(peer) |
def restore(self):
    """Restore the pipeline to its state when DisabledPipes was created.

    If any previously-disabled component is unexpectedly missing from the
    restored pipeline, the swap is rolled back and a ValueError is raised,
    leaving the pipeline exactly as it was before the call.
    """
    replaced = self.nlp.pipeline
    self.nlp.pipeline = self.original_pipeline
    missing = [name for name, _ in replaced if not self.nlp.has_pipe(name)]
    if missing:
        # Roll back so a failed restore does not change the pipeline.
        self.nlp.pipeline = replaced
        raise ValueError(Errors.E008.format(names=missing))
    self[:] = []
constant[Restore the pipeline to its state when DisabledPipes was created.]
<ast.Tuple object at 0x7da20cabf280> assign[=] tuple[[<ast.Attribute object at 0x7da20cabc7c0>, <ast.Attribute object at 0x7da20cabe5f0>]]
variable[unexpected] assign[=] <ast.ListComp object at 0x7da20cabe080>
if name[unexpected] begin[:]
name[self].nlp.pipeline assign[=] name[current]
<ast.Raise object at 0x7da20cabff70>
call[name[self]][<ast.Slice object at 0x7da20cabf790>] assign[=] list[[]] | keyword[def] identifier[restore] ( identifier[self] ):
literal[string]
identifier[current] , identifier[self] . identifier[nlp] . identifier[pipeline] = identifier[self] . identifier[nlp] . identifier[pipeline] , identifier[self] . identifier[original_pipeline]
identifier[unexpected] =[ identifier[name] keyword[for] identifier[name] , identifier[pipe] keyword[in] identifier[current] keyword[if] keyword[not] identifier[self] . identifier[nlp] . identifier[has_pipe] ( identifier[name] )]
keyword[if] identifier[unexpected] :
identifier[self] . identifier[nlp] . identifier[pipeline] = identifier[current]
keyword[raise] identifier[ValueError] ( identifier[Errors] . identifier[E008] . identifier[format] ( identifier[names] = identifier[unexpected] ))
identifier[self] [:]=[] | def restore(self):
"""Restore the pipeline to its state when DisabledPipes was created."""
(current, self.nlp.pipeline) = (self.nlp.pipeline, self.original_pipeline)
unexpected = [name for (name, pipe) in current if not self.nlp.has_pipe(name)]
if unexpected:
# Don't change the pipeline if we're raising an error.
self.nlp.pipeline = current
raise ValueError(Errors.E008.format(names=unexpected)) # depends on [control=['if'], data=[]]
self[:] = [] |
def do_handshake(self):
    """Perform a TLS/SSL handshake.

    Loops until the underlying SSL object completes the handshake,
    cooperatively waiting on ``self._io`` whenever OpenSSL reports it
    needs the socket to become readable or writable. With a zero timeout
    (non-blocking mode) the WANT_READ/WANT_WRITE error is propagated to
    the caller instead; waits that exceed ``self.timeout`` raise a
    handshake-specific timeout error.
    """
    while True:
        try:
            return self._sslobj.do_handshake()
        except SSLError:
            # Python 2-style retrieval of the in-flight exception object.
            ex = sys.exc_info()[1]
            if ex.args[0] == SSL_ERROR_WANT_READ:
                if self.timeout == 0.0:
                    # Non-blocking: let the caller retry on readability.
                    raise
                six.exc_clear()
                self._io.wait_read(timeout=self.timeout, timeout_exc=_SSLErrorHandshakeTimeout)
            elif ex.args[0] == SSL_ERROR_WANT_WRITE:
                if self.timeout == 0.0:
                    # Non-blocking: let the caller retry on writability.
                    raise
                six.exc_clear()
                self._io.wait_write(timeout=self.timeout, timeout_exc=_SSLErrorHandshakeTimeout)
            else:
                # Any other SSL error is fatal for the handshake.
                raise
constant[Perform a TLS/SSL handshake.]
while constant[True] begin[:]
<ast.Try object at 0x7da1b0a2aec0> | keyword[def] identifier[do_handshake] ( identifier[self] ):
literal[string]
keyword[while] keyword[True] :
keyword[try] :
keyword[return] identifier[self] . identifier[_sslobj] . identifier[do_handshake] ()
keyword[except] identifier[SSLError] :
identifier[ex] = identifier[sys] . identifier[exc_info] ()[ literal[int] ]
keyword[if] identifier[ex] . identifier[args] [ literal[int] ]== identifier[SSL_ERROR_WANT_READ] :
keyword[if] identifier[self] . identifier[timeout] == literal[int] :
keyword[raise]
identifier[six] . identifier[exc_clear] ()
identifier[self] . identifier[_io] . identifier[wait_read] ( identifier[timeout] = identifier[self] . identifier[timeout] , identifier[timeout_exc] = identifier[_SSLErrorHandshakeTimeout] )
keyword[elif] identifier[ex] . identifier[args] [ literal[int] ]== identifier[SSL_ERROR_WANT_WRITE] :
keyword[if] identifier[self] . identifier[timeout] == literal[int] :
keyword[raise]
identifier[six] . identifier[exc_clear] ()
identifier[self] . identifier[_io] . identifier[wait_write] ( identifier[timeout] = identifier[self] . identifier[timeout] , identifier[timeout_exc] = identifier[_SSLErrorHandshakeTimeout] )
keyword[else] :
keyword[raise] | def do_handshake(self):
"""Perform a TLS/SSL handshake."""
while True:
try:
return self._sslobj.do_handshake() # depends on [control=['try'], data=[]]
except SSLError:
ex = sys.exc_info()[1]
if ex.args[0] == SSL_ERROR_WANT_READ:
if self.timeout == 0.0:
raise # depends on [control=['if'], data=[]]
six.exc_clear()
self._io.wait_read(timeout=self.timeout, timeout_exc=_SSLErrorHandshakeTimeout) # depends on [control=['if'], data=[]]
elif ex.args[0] == SSL_ERROR_WANT_WRITE:
if self.timeout == 0.0:
raise # depends on [control=['if'], data=[]]
six.exc_clear()
self._io.wait_write(timeout=self.timeout, timeout_exc=_SSLErrorHandshakeTimeout) # depends on [control=['if'], data=[]]
else:
raise # depends on [control=['except'], data=[]] # depends on [control=['while'], data=[]] |
def scheme(self, value=None):
    """
    Get or replace the URL scheme.

    :param string value: the new scheme to use
    :returns: the current scheme string when *value* is omitted, or a
        new :class:`URL` instance carrying *value*
    """
    if value is None:
        return self._tuple.scheme
    return URL._mutate(self, scheme=value)
constant[
Return or set the scheme.
:param string value: the new scheme to use
:returns: string or new :class:`URL` instance
]
if compare[name[value] is_not constant[None]] begin[:]
return[call[name[URL]._mutate, parameter[name[self]]]]
return[name[self]._tuple.scheme] | keyword[def] identifier[scheme] ( identifier[self] , identifier[value] = keyword[None] ):
literal[string]
keyword[if] identifier[value] keyword[is] keyword[not] keyword[None] :
keyword[return] identifier[URL] . identifier[_mutate] ( identifier[self] , identifier[scheme] = identifier[value] )
keyword[return] identifier[self] . identifier[_tuple] . identifier[scheme] | def scheme(self, value=None):
"""
Return or set the scheme.
:param string value: the new scheme to use
:returns: string or new :class:`URL` instance
"""
if value is not None:
return URL._mutate(self, scheme=value) # depends on [control=['if'], data=['value']]
return self._tuple.scheme |
def grok_for_node(element, default_vars):
    """Collect default-variable names introduced by a Jinja2 For node.

    When the loop iterates over ``name|default``, the filtered variable
    name is appended (once) to *default_vars*; variables discovered in
    the node via ``grok_vars`` are added to the returned list as well.

    NOTE(review): the incoming list is mutated by the append, yet the
    combined result is returned as a new list -- callers appear to use
    the return value.
    """
    iter_node = element.iter
    if isinstance(iter_node, jinja2.nodes.Filter):
        if iter_node.name == 'default' and iter_node.node.name not in default_vars:
            default_vars.append(iter_node.node.name)
    return default_vars + grok_vars(element)
constant[Properly parses a For loop element]
if call[name[isinstance], parameter[name[element].iter, name[jinja2].nodes.Filter]] begin[:]
if <ast.BoolOp object at 0x7da1b1b6b160> begin[:]
call[name[default_vars].append, parameter[name[element].iter.node.name]]
variable[default_vars] assign[=] binary_operation[name[default_vars] + call[name[grok_vars], parameter[name[element]]]]
return[name[default_vars]] | keyword[def] identifier[grok_for_node] ( identifier[element] , identifier[default_vars] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[element] . identifier[iter] , identifier[jinja2] . identifier[nodes] . identifier[Filter] ):
keyword[if] identifier[element] . identifier[iter] . identifier[name] == literal[string] keyword[and] identifier[element] . identifier[iter] . identifier[node] . identifier[name] keyword[not] keyword[in] identifier[default_vars] :
identifier[default_vars] . identifier[append] ( identifier[element] . identifier[iter] . identifier[node] . identifier[name] )
identifier[default_vars] = identifier[default_vars] + identifier[grok_vars] ( identifier[element] )
keyword[return] identifier[default_vars] | def grok_for_node(element, default_vars):
"""Properly parses a For loop element"""
if isinstance(element.iter, jinja2.nodes.Filter):
if element.iter.name == 'default' and element.iter.node.name not in default_vars:
default_vars.append(element.iter.node.name) # depends on [control=['if'], data=[]]
default_vars = default_vars + grok_vars(element) # depends on [control=['if'], data=[]]
return default_vars |
def is_readable_dir(path):
    """Return True if *path* is an existing directory we can list and read files from."""
    if not os.path.isdir(path):
        return False
    # Need both read (list entries) and execute (traverse) permission.
    return all(os.access(path, mode) for mode in (os.R_OK, os.X_OK))
constant[Returns whether a path names an existing directory we can list and read files from.]
return[<ast.BoolOp object at 0x7da1b2249210>] | keyword[def] identifier[is_readable_dir] ( identifier[path] ):
literal[string]
keyword[return] identifier[os] . identifier[path] . identifier[isdir] ( identifier[path] ) keyword[and] identifier[os] . identifier[access] ( identifier[path] , identifier[os] . identifier[R_OK] ) keyword[and] identifier[os] . identifier[access] ( identifier[path] , identifier[os] . identifier[X_OK] ) | def is_readable_dir(path):
"""Returns whether a path names an existing directory we can list and read files from."""
return os.path.isdir(path) and os.access(path, os.R_OK) and os.access(path, os.X_OK) |
def generate(self, x, **kwargs):
    """
    Return a tensor that constructs adversarial examples for the given
    input. Generate uses tf.py_func in order to operate over tensors.
    :param x: (required) A tensor with the inputs.
    :param kwargs: See `parse_params`
    """
    assert self.sess is not None, \
        'Cannot use `generate` when no `sess` was provided'
    self.parse_params(**kwargs)
    if self.y_target is None:
        # No explicit target supplied: attack the model's own (true or
        # guessed) labels, i.e. run an untargeted attack.
        self.y_target, nb_classes = self.get_or_guess_labels(x, kwargs)
        self.targeted_attack = False
    else:
        # Target labels given: only the class count is needed here.
        _, nb_classes = self.get_or_guess_labels(x, kwargs)
        self.targeted_attack = True
    attack = LBFGS_impl(
        self.sess, x, self.model.get_logits(x),
        self.y_target, self.targeted_attack,
        self.binary_search_steps, self.max_iterations, self.initial_const,
        self.clip_min, self.clip_max, nb_classes, self.batch_size)
    def lbfgs_wrap(x_val, y_val):
        """
        Wrapper creating TensorFlow interface for use with py_func
        """
        # Runs the numpy-level attack; cast the result to the numpy dtype.
        return np.array(attack.attack(x_val, y_val), dtype=self.np_dtype)
    wrap = tf.py_func(lbfgs_wrap, [x, self.y_target], self.tf_dtype)
    # tf.py_func drops static shape info; restore it from the input tensor.
    wrap.set_shape(x.get_shape())
    return wrap
constant[
Return a tensor that constructs adversarial examples for the given
input. Generate uses tf.py_func in order to operate over tensors.
:param x: (required) A tensor with the inputs.
:param kwargs: See `parse_params`
]
assert[compare[name[self].sess is_not constant[None]]]
call[name[self].parse_params, parameter[]]
if compare[name[self].y_target is constant[None]] begin[:]
<ast.Tuple object at 0x7da20c6e5000> assign[=] call[name[self].get_or_guess_labels, parameter[name[x], name[kwargs]]]
name[self].targeted_attack assign[=] constant[False]
variable[attack] assign[=] call[name[LBFGS_impl], parameter[name[self].sess, name[x], call[name[self].model.get_logits, parameter[name[x]]], name[self].y_target, name[self].targeted_attack, name[self].binary_search_steps, name[self].max_iterations, name[self].initial_const, name[self].clip_min, name[self].clip_max, name[nb_classes], name[self].batch_size]]
def function[lbfgs_wrap, parameter[x_val, y_val]]:
constant[
Wrapper creating TensorFlow interface for use with py_func
]
return[call[name[np].array, parameter[call[name[attack].attack, parameter[name[x_val], name[y_val]]]]]]
variable[wrap] assign[=] call[name[tf].py_func, parameter[name[lbfgs_wrap], list[[<ast.Name object at 0x7da2044c3b50>, <ast.Attribute object at 0x7da2044c2f20>]], name[self].tf_dtype]]
call[name[wrap].set_shape, parameter[call[name[x].get_shape, parameter[]]]]
return[name[wrap]] | keyword[def] identifier[generate] ( identifier[self] , identifier[x] ,** identifier[kwargs] ):
literal[string]
keyword[assert] identifier[self] . identifier[sess] keyword[is] keyword[not] keyword[None] , literal[string]
identifier[self] . identifier[parse_params] (** identifier[kwargs] )
keyword[if] identifier[self] . identifier[y_target] keyword[is] keyword[None] :
identifier[self] . identifier[y_target] , identifier[nb_classes] = identifier[self] . identifier[get_or_guess_labels] ( identifier[x] , identifier[kwargs] )
identifier[self] . identifier[targeted_attack] = keyword[False]
keyword[else] :
identifier[_] , identifier[nb_classes] = identifier[self] . identifier[get_or_guess_labels] ( identifier[x] , identifier[kwargs] )
identifier[self] . identifier[targeted_attack] = keyword[True]
identifier[attack] = identifier[LBFGS_impl] (
identifier[self] . identifier[sess] , identifier[x] , identifier[self] . identifier[model] . identifier[get_logits] ( identifier[x] ),
identifier[self] . identifier[y_target] , identifier[self] . identifier[targeted_attack] ,
identifier[self] . identifier[binary_search_steps] , identifier[self] . identifier[max_iterations] , identifier[self] . identifier[initial_const] ,
identifier[self] . identifier[clip_min] , identifier[self] . identifier[clip_max] , identifier[nb_classes] , identifier[self] . identifier[batch_size] )
keyword[def] identifier[lbfgs_wrap] ( identifier[x_val] , identifier[y_val] ):
literal[string]
keyword[return] identifier[np] . identifier[array] ( identifier[attack] . identifier[attack] ( identifier[x_val] , identifier[y_val] ), identifier[dtype] = identifier[self] . identifier[np_dtype] )
identifier[wrap] = identifier[tf] . identifier[py_func] ( identifier[lbfgs_wrap] ,[ identifier[x] , identifier[self] . identifier[y_target] ], identifier[self] . identifier[tf_dtype] )
identifier[wrap] . identifier[set_shape] ( identifier[x] . identifier[get_shape] ())
keyword[return] identifier[wrap] | def generate(self, x, **kwargs):
"""
Return a tensor that constructs adversarial examples for the given
input. Generate uses tf.py_func in order to operate over tensors.
:param x: (required) A tensor with the inputs.
:param kwargs: See `parse_params`
"""
assert self.sess is not None, 'Cannot use `generate` when no `sess` was provided'
self.parse_params(**kwargs)
if self.y_target is None:
(self.y_target, nb_classes) = self.get_or_guess_labels(x, kwargs)
self.targeted_attack = False # depends on [control=['if'], data=[]]
else:
(_, nb_classes) = self.get_or_guess_labels(x, kwargs)
self.targeted_attack = True
attack = LBFGS_impl(self.sess, x, self.model.get_logits(x), self.y_target, self.targeted_attack, self.binary_search_steps, self.max_iterations, self.initial_const, self.clip_min, self.clip_max, nb_classes, self.batch_size)
def lbfgs_wrap(x_val, y_val):
"""
Wrapper creating TensorFlow interface for use with py_func
"""
return np.array(attack.attack(x_val, y_val), dtype=self.np_dtype)
wrap = tf.py_func(lbfgs_wrap, [x, self.y_target], self.tf_dtype)
wrap.set_shape(x.get_shape())
return wrap |
def highlightBlock(self, text, prevContextStack):
"""Parse block and return ParseBlockFullResult
return (lineData, highlightedSegments)
where lineData is (contextStack, textTypeMap)
where textTypeMap is a string of textType characters
"""
if prevContextStack is not None:
contextStack = prevContextStack
else:
contextStack = self._defaultContextStack
highlightedSegments = []
lineContinue = False
currentColumnIndex = 0
textTypeMap = []
if len(text) > 0:
while currentColumnIndex < len(text):
_logger.debug('In context %s', contextStack.currentContext().name)
length, newContextStack, segments, textTypeMapPart, lineContinue = \
contextStack.currentContext().parseBlock(contextStack, currentColumnIndex, text)
highlightedSegments += segments
contextStack = newContextStack
textTypeMap += textTypeMapPart
currentColumnIndex += length
if not lineContinue:
while contextStack.currentContext().lineEndContext is not None:
oldStack = contextStack
contextStack = contextStack.currentContext().lineEndContext.getNextContextStack(contextStack)
if oldStack == contextStack: # avoid infinite while loop if nothing to switch
break
# this code is not tested, because lineBeginContext is not defined by any xml file
if contextStack.currentContext().lineBeginContext is not None:
contextStack = contextStack.currentContext().lineBeginContext.getNextContextStack(contextStack)
elif contextStack.currentContext().lineEmptyContext is not None:
contextStack = contextStack.currentContext().lineEmptyContext.getNextContextStack(contextStack)
lineData = (contextStack, textTypeMap)
return lineData, highlightedSegments | def function[highlightBlock, parameter[self, text, prevContextStack]]:
constant[Parse block and return ParseBlockFullResult
return (lineData, highlightedSegments)
where lineData is (contextStack, textTypeMap)
where textTypeMap is a string of textType characters
]
if compare[name[prevContextStack] is_not constant[None]] begin[:]
variable[contextStack] assign[=] name[prevContextStack]
variable[highlightedSegments] assign[=] list[[]]
variable[lineContinue] assign[=] constant[False]
variable[currentColumnIndex] assign[=] constant[0]
variable[textTypeMap] assign[=] list[[]]
if compare[call[name[len], parameter[name[text]]] greater[>] constant[0]] begin[:]
while compare[name[currentColumnIndex] less[<] call[name[len], parameter[name[text]]]] begin[:]
call[name[_logger].debug, parameter[constant[In context %s], call[name[contextStack].currentContext, parameter[]].name]]
<ast.Tuple object at 0x7da18eb57c40> assign[=] call[call[name[contextStack].currentContext, parameter[]].parseBlock, parameter[name[contextStack], name[currentColumnIndex], name[text]]]
<ast.AugAssign object at 0x7da18eb55bd0>
variable[contextStack] assign[=] name[newContextStack]
<ast.AugAssign object at 0x7da18eb57310>
<ast.AugAssign object at 0x7da18eb55ba0>
if <ast.UnaryOp object at 0x7da18eb55a20> begin[:]
while compare[call[name[contextStack].currentContext, parameter[]].lineEndContext is_not constant[None]] begin[:]
variable[oldStack] assign[=] name[contextStack]
variable[contextStack] assign[=] call[call[name[contextStack].currentContext, parameter[]].lineEndContext.getNextContextStack, parameter[name[contextStack]]]
if compare[name[oldStack] equal[==] name[contextStack]] begin[:]
break
if compare[call[name[contextStack].currentContext, parameter[]].lineBeginContext is_not constant[None]] begin[:]
variable[contextStack] assign[=] call[call[name[contextStack].currentContext, parameter[]].lineBeginContext.getNextContextStack, parameter[name[contextStack]]]
variable[lineData] assign[=] tuple[[<ast.Name object at 0x7da20e957730>, <ast.Name object at 0x7da20e9568c0>]]
return[tuple[[<ast.Name object at 0x7da20e9564a0>, <ast.Name object at 0x7da20e9576a0>]]] | keyword[def] identifier[highlightBlock] ( identifier[self] , identifier[text] , identifier[prevContextStack] ):
literal[string]
keyword[if] identifier[prevContextStack] keyword[is] keyword[not] keyword[None] :
identifier[contextStack] = identifier[prevContextStack]
keyword[else] :
identifier[contextStack] = identifier[self] . identifier[_defaultContextStack]
identifier[highlightedSegments] =[]
identifier[lineContinue] = keyword[False]
identifier[currentColumnIndex] = literal[int]
identifier[textTypeMap] =[]
keyword[if] identifier[len] ( identifier[text] )> literal[int] :
keyword[while] identifier[currentColumnIndex] < identifier[len] ( identifier[text] ):
identifier[_logger] . identifier[debug] ( literal[string] , identifier[contextStack] . identifier[currentContext] (). identifier[name] )
identifier[length] , identifier[newContextStack] , identifier[segments] , identifier[textTypeMapPart] , identifier[lineContinue] = identifier[contextStack] . identifier[currentContext] (). identifier[parseBlock] ( identifier[contextStack] , identifier[currentColumnIndex] , identifier[text] )
identifier[highlightedSegments] += identifier[segments]
identifier[contextStack] = identifier[newContextStack]
identifier[textTypeMap] += identifier[textTypeMapPart]
identifier[currentColumnIndex] += identifier[length]
keyword[if] keyword[not] identifier[lineContinue] :
keyword[while] identifier[contextStack] . identifier[currentContext] (). identifier[lineEndContext] keyword[is] keyword[not] keyword[None] :
identifier[oldStack] = identifier[contextStack]
identifier[contextStack] = identifier[contextStack] . identifier[currentContext] (). identifier[lineEndContext] . identifier[getNextContextStack] ( identifier[contextStack] )
keyword[if] identifier[oldStack] == identifier[contextStack] :
keyword[break]
keyword[if] identifier[contextStack] . identifier[currentContext] (). identifier[lineBeginContext] keyword[is] keyword[not] keyword[None] :
identifier[contextStack] = identifier[contextStack] . identifier[currentContext] (). identifier[lineBeginContext] . identifier[getNextContextStack] ( identifier[contextStack] )
keyword[elif] identifier[contextStack] . identifier[currentContext] (). identifier[lineEmptyContext] keyword[is] keyword[not] keyword[None] :
identifier[contextStack] = identifier[contextStack] . identifier[currentContext] (). identifier[lineEmptyContext] . identifier[getNextContextStack] ( identifier[contextStack] )
identifier[lineData] =( identifier[contextStack] , identifier[textTypeMap] )
keyword[return] identifier[lineData] , identifier[highlightedSegments] | def highlightBlock(self, text, prevContextStack):
"""Parse block and return ParseBlockFullResult
return (lineData, highlightedSegments)
where lineData is (contextStack, textTypeMap)
where textTypeMap is a string of textType characters
"""
if prevContextStack is not None:
contextStack = prevContextStack # depends on [control=['if'], data=['prevContextStack']]
else:
contextStack = self._defaultContextStack
highlightedSegments = []
lineContinue = False
currentColumnIndex = 0
textTypeMap = []
if len(text) > 0:
while currentColumnIndex < len(text):
_logger.debug('In context %s', contextStack.currentContext().name)
(length, newContextStack, segments, textTypeMapPart, lineContinue) = contextStack.currentContext().parseBlock(contextStack, currentColumnIndex, text)
highlightedSegments += segments
contextStack = newContextStack
textTypeMap += textTypeMapPart
currentColumnIndex += length # depends on [control=['while'], data=['currentColumnIndex']]
if not lineContinue:
while contextStack.currentContext().lineEndContext is not None:
oldStack = contextStack
contextStack = contextStack.currentContext().lineEndContext.getNextContextStack(contextStack)
if oldStack == contextStack: # avoid infinite while loop if nothing to switch
break # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]]
# this code is not tested, because lineBeginContext is not defined by any xml file
if contextStack.currentContext().lineBeginContext is not None:
contextStack = contextStack.currentContext().lineBeginContext.getNextContextStack(contextStack) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif contextStack.currentContext().lineEmptyContext is not None:
contextStack = contextStack.currentContext().lineEmptyContext.getNextContextStack(contextStack) # depends on [control=['if'], data=[]]
lineData = (contextStack, textTypeMap)
return (lineData, highlightedSegments) |
def process_response(self, req, resp, resource):
""" Post-processing of the response (after routing).
Some fundamental errors can't be intercepted any other
way in Falcon. These include 404 when the route isn't found
& 405 when the method is bunk. Falcon will try to send its
own errors.
In these cases, intercept them & replace them with our
JSON API compliant version.
TIP: If no route could be determined then the resource
will be None.
"""
if not resource and resp.status == falcon.HTTP_404:
abort(exceptions.RouteNotFound)
elif resp.status == falcon.HTTP_405:
abort(exceptions.MethodNotAllowed) | def function[process_response, parameter[self, req, resp, resource]]:
constant[ Post-processing of the response (after routing).
Some fundamental errors can't be intercepted any other
way in Falcon. These include 404 when the route isn't found
& 405 when the method is bunk. Falcon will try to send its
own errors.
In these cases, intercept them & replace them with our
JSON API compliant version.
TIP: If no route could be determined then the resource
will be None.
]
if <ast.BoolOp object at 0x7da2054a6e00> begin[:]
call[name[abort], parameter[name[exceptions].RouteNotFound]] | keyword[def] identifier[process_response] ( identifier[self] , identifier[req] , identifier[resp] , identifier[resource] ):
literal[string]
keyword[if] keyword[not] identifier[resource] keyword[and] identifier[resp] . identifier[status] == identifier[falcon] . identifier[HTTP_404] :
identifier[abort] ( identifier[exceptions] . identifier[RouteNotFound] )
keyword[elif] identifier[resp] . identifier[status] == identifier[falcon] . identifier[HTTP_405] :
identifier[abort] ( identifier[exceptions] . identifier[MethodNotAllowed] ) | def process_response(self, req, resp, resource):
""" Post-processing of the response (after routing).
Some fundamental errors can't be intercepted any other
way in Falcon. These include 404 when the route isn't found
& 405 when the method is bunk. Falcon will try to send its
own errors.
In these cases, intercept them & replace them with our
JSON API compliant version.
TIP: If no route could be determined then the resource
will be None.
"""
if not resource and resp.status == falcon.HTTP_404:
abort(exceptions.RouteNotFound) # depends on [control=['if'], data=[]]
elif resp.status == falcon.HTTP_405:
abort(exceptions.MethodNotAllowed) # depends on [control=['if'], data=[]] |
def dump_travis_configuration(config, path):
"""Dump the travis configuration settings to the travis.yml file.
The configuration settings from the travis.yml will be dumped with
ordering preserved. Thus, when a password is added to the travis.yml
file, a diff will show that only the password was added.
Parameters
----------
config: collections.OrderedDict
The configuration settings to dump into the travis.yml file
path: str
The file path to the .travis.yml file
Returns
-------
None
"""
with open(path, 'w') as config_file:
ordered_dump(config, config_file, default_flow_style=False) | def function[dump_travis_configuration, parameter[config, path]]:
constant[Dump the travis configuration settings to the travis.yml file.
The configuration settings from the travis.yml will be dumped with
ordering preserved. Thus, when a password is added to the travis.yml
file, a diff will show that only the password was added.
Parameters
----------
config: collections.OrderedDict
The configuration settings to dump into the travis.yml file
path: str
The file path to the .travis.yml file
Returns
-------
None
]
with call[name[open], parameter[name[path], constant[w]]] begin[:]
call[name[ordered_dump], parameter[name[config], name[config_file]]] | keyword[def] identifier[dump_travis_configuration] ( identifier[config] , identifier[path] ):
literal[string]
keyword[with] identifier[open] ( identifier[path] , literal[string] ) keyword[as] identifier[config_file] :
identifier[ordered_dump] ( identifier[config] , identifier[config_file] , identifier[default_flow_style] = keyword[False] ) | def dump_travis_configuration(config, path):
"""Dump the travis configuration settings to the travis.yml file.
The configuration settings from the travis.yml will be dumped with
ordering preserved. Thus, when a password is added to the travis.yml
file, a diff will show that only the password was added.
Parameters
----------
config: collections.OrderedDict
The configuration settings to dump into the travis.yml file
path: str
The file path to the .travis.yml file
Returns
-------
None
"""
with open(path, 'w') as config_file:
ordered_dump(config, config_file, default_flow_style=False) # depends on [control=['with'], data=['config_file']] |
def exists(name, region=None, key=None, keyid=None, profile=None):
'''
Check to see if an ELB exists.
CLI example:
.. code-block:: bash
salt myminion boto_elb.exists myelb region=us-east-1
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
elb = conn.get_all_load_balancers(load_balancer_names=[name])
if elb:
return True
else:
log.debug('The load balancer does not exist in region %s', region)
return False
except boto.exception.BotoServerError as error:
log.warning(error)
return False | def function[exists, parameter[name, region, key, keyid, profile]]:
constant[
Check to see if an ELB exists.
CLI example:
.. code-block:: bash
salt myminion boto_elb.exists myelb region=us-east-1
]
variable[conn] assign[=] call[name[_get_conn], parameter[]]
<ast.Try object at 0x7da18f58e500> | keyword[def] identifier[exists] ( identifier[name] , identifier[region] = keyword[None] , identifier[key] = keyword[None] , identifier[keyid] = keyword[None] , identifier[profile] = keyword[None] ):
literal[string]
identifier[conn] = identifier[_get_conn] ( identifier[region] = identifier[region] , identifier[key] = identifier[key] , identifier[keyid] = identifier[keyid] , identifier[profile] = identifier[profile] )
keyword[try] :
identifier[elb] = identifier[conn] . identifier[get_all_load_balancers] ( identifier[load_balancer_names] =[ identifier[name] ])
keyword[if] identifier[elb] :
keyword[return] keyword[True]
keyword[else] :
identifier[log] . identifier[debug] ( literal[string] , identifier[region] )
keyword[return] keyword[False]
keyword[except] identifier[boto] . identifier[exception] . identifier[BotoServerError] keyword[as] identifier[error] :
identifier[log] . identifier[warning] ( identifier[error] )
keyword[return] keyword[False] | def exists(name, region=None, key=None, keyid=None, profile=None):
"""
Check to see if an ELB exists.
CLI example:
.. code-block:: bash
salt myminion boto_elb.exists myelb region=us-east-1
"""
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
elb = conn.get_all_load_balancers(load_balancer_names=[name])
if elb:
return True # depends on [control=['if'], data=[]]
else:
log.debug('The load balancer does not exist in region %s', region)
return False # depends on [control=['try'], data=[]]
except boto.exception.BotoServerError as error:
log.warning(error)
return False # depends on [control=['except'], data=['error']] |
def registerAugmentation(self, *names):
"""Register table extension.
SNMP SMI provides a way to extend already existing SMI table with
another table. This method registers dependent (extending) table
(or type :py:class:`MibTableRow`) to already existing table.
Whenever a row of the parent table is created or destroyed, the
same mass columnar operation is applied on the extending table
row.
Parameters
----------
names: :py:class:`tuple`
One or more `tuple`'s of `str` referring to the extending table by
MIB module name (first `str`) and `:py:class:`MibTableRow` object
name (second `str`).
"""
for name in names:
if name in self._augmentingRows:
raise error.SmiError(
'Row %s already augmented by %s::%s' % (self.name, name[0], name[1])
)
self._augmentingRows.add(name)
return self | def function[registerAugmentation, parameter[self]]:
constant[Register table extension.
SNMP SMI provides a way to extend already existing SMI table with
another table. This method registers dependent (extending) table
(or type :py:class:`MibTableRow`) to already existing table.
Whenever a row of the parent table is created or destroyed, the
same mass columnar operation is applied on the extending table
row.
Parameters
----------
names: :py:class:`tuple`
One or more `tuple`'s of `str` referring to the extending table by
MIB module name (first `str`) and `:py:class:`MibTableRow` object
name (second `str`).
]
for taget[name[name]] in starred[name[names]] begin[:]
if compare[name[name] in name[self]._augmentingRows] begin[:]
<ast.Raise object at 0x7da1b1575600>
call[name[self]._augmentingRows.add, parameter[name[name]]]
return[name[self]] | keyword[def] identifier[registerAugmentation] ( identifier[self] ,* identifier[names] ):
literal[string]
keyword[for] identifier[name] keyword[in] identifier[names] :
keyword[if] identifier[name] keyword[in] identifier[self] . identifier[_augmentingRows] :
keyword[raise] identifier[error] . identifier[SmiError] (
literal[string] %( identifier[self] . identifier[name] , identifier[name] [ literal[int] ], identifier[name] [ literal[int] ])
)
identifier[self] . identifier[_augmentingRows] . identifier[add] ( identifier[name] )
keyword[return] identifier[self] | def registerAugmentation(self, *names):
"""Register table extension.
SNMP SMI provides a way to extend already existing SMI table with
another table. This method registers dependent (extending) table
(or type :py:class:`MibTableRow`) to already existing table.
Whenever a row of the parent table is created or destroyed, the
same mass columnar operation is applied on the extending table
row.
Parameters
----------
names: :py:class:`tuple`
One or more `tuple`'s of `str` referring to the extending table by
MIB module name (first `str`) and `:py:class:`MibTableRow` object
name (second `str`).
"""
for name in names:
if name in self._augmentingRows:
raise error.SmiError('Row %s already augmented by %s::%s' % (self.name, name[0], name[1])) # depends on [control=['if'], data=['name']]
self._augmentingRows.add(name) # depends on [control=['for'], data=['name']]
return self |
def filter(self, table, mappings, filter_string):
"""Naive case-insensitive search."""
q = filter_string.lower()
return [mapping for mapping in mappings
if q in mapping.ud.lower()] | def function[filter, parameter[self, table, mappings, filter_string]]:
constant[Naive case-insensitive search.]
variable[q] assign[=] call[name[filter_string].lower, parameter[]]
return[<ast.ListComp object at 0x7da1b1950610>] | keyword[def] identifier[filter] ( identifier[self] , identifier[table] , identifier[mappings] , identifier[filter_string] ):
literal[string]
identifier[q] = identifier[filter_string] . identifier[lower] ()
keyword[return] [ identifier[mapping] keyword[for] identifier[mapping] keyword[in] identifier[mappings]
keyword[if] identifier[q] keyword[in] identifier[mapping] . identifier[ud] . identifier[lower] ()] | def filter(self, table, mappings, filter_string):
"""Naive case-insensitive search."""
q = filter_string.lower()
return [mapping for mapping in mappings if q in mapping.ud.lower()] |
def is_pattern_valid(pattern):
"""Returns True if pattern is valid.
:param pattern: Normalized pattern.
is_pattern_valid() assumes pattern to be normalized.
see: globbing.normalize_pattern
"""
result = True
translator = Globster.pattern_info[Globster.identify(pattern)]["translator"]
tpattern = '(%s)' % translator(pattern)
try:
re_obj = lazy_regex.lazy_compile(tpattern, re.UNICODE)
re_obj.search("") # force compile
except Exception as e:
result = False
return result | def function[is_pattern_valid, parameter[pattern]]:
constant[Returns True if pattern is valid.
:param pattern: Normalized pattern.
is_pattern_valid() assumes pattern to be normalized.
see: globbing.normalize_pattern
]
variable[result] assign[=] constant[True]
variable[translator] assign[=] call[call[name[Globster].pattern_info][call[name[Globster].identify, parameter[name[pattern]]]]][constant[translator]]
variable[tpattern] assign[=] binary_operation[constant[(%s)] <ast.Mod object at 0x7da2590d6920> call[name[translator], parameter[name[pattern]]]]
<ast.Try object at 0x7da18ede7ca0>
return[name[result]] | keyword[def] identifier[is_pattern_valid] ( identifier[pattern] ):
literal[string]
identifier[result] = keyword[True]
identifier[translator] = identifier[Globster] . identifier[pattern_info] [ identifier[Globster] . identifier[identify] ( identifier[pattern] )][ literal[string] ]
identifier[tpattern] = literal[string] % identifier[translator] ( identifier[pattern] )
keyword[try] :
identifier[re_obj] = identifier[lazy_regex] . identifier[lazy_compile] ( identifier[tpattern] , identifier[re] . identifier[UNICODE] )
identifier[re_obj] . identifier[search] ( literal[string] )
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[result] = keyword[False]
keyword[return] identifier[result] | def is_pattern_valid(pattern):
"""Returns True if pattern is valid.
:param pattern: Normalized pattern.
is_pattern_valid() assumes pattern to be normalized.
see: globbing.normalize_pattern
"""
result = True
translator = Globster.pattern_info[Globster.identify(pattern)]['translator']
tpattern = '(%s)' % translator(pattern)
try:
re_obj = lazy_regex.lazy_compile(tpattern, re.UNICODE)
re_obj.search('') # force compile # depends on [control=['try'], data=[]]
except Exception as e:
result = False # depends on [control=['except'], data=[]]
return result |
def debug(dbg_opts=None, start_opts=None, post_mortem=True,
step_ignore=1, level=0):
"""
Enter the debugger.
Parameters
----------
level : how many stack frames go back. Usually it will be
the default 0. But sometimes though there may be calls in setup to the debugger
that you may want to skip.
step_ignore : how many line events to ignore after the
debug() call. 0 means don't even wait for the debug() call to finish.
param dbg_opts : is an optional "options" dictionary that gets fed
trepan.Debugger(); `start_opts' are the optional "options"
dictionary that gets fed to trepan.Debugger.core.start().
Use like this:
.. code-block:: python
... # Possibly some Python code
import trepan.api # Needed only once
... # Possibly some more Python code
trepan.api.debug() # You can wrap inside conditional logic too
pass # Stop will be here.
# Below is code you want to use the debugger to do things.
.... # more Python code
# If you get to a place in the program where you aren't going
# want to debug any more, but want to remove debugger trace overhead:
trepan.api.stop()
Parameter "level" specifies how many stack frames go back. Usually it will be
the default 0. But sometimes though there may be calls in setup to the debugger
that you may want to skip.
Parameter "step_ignore" specifies how many line events to ignore after the
debug() call. 0 means don't even wait for the debug() call to finish.
In situations where you want an immediate stop in the "debug" call
rather than the statement following it ("pass" above), add parameter
step_ignore=0 to debug() like this::
import trepan.api # Needed only once
# ... as before
trepan.api.debug(step_ignore=0)
# ... as before
Module variable _debugger_obj_ from module trepan.debugger is used as
the debugger instance variable; it can be subsequently used to change
settings or alter behavior. It should be of type Debugger (found in
module trepan). If not, it will get changed to that type::
$ python
>>> from trepan.debugger import debugger_obj
>>> type(debugger_obj)
<type 'NoneType'>
>>> import trepan.api
>>> trepan.api.debug()
...
(Trepan) c
>>> from trepan.debugger import debugger_obj
>>> debugger_obj
<trepan.debugger.Debugger instance at 0x7fbcacd514d0>
>>>
If however you want your own separate debugger instance, you can
create it from the debugger _class Debugger()_ from module
trepan.debugger::
$ python
>>> from trepan.debugger import Debugger
>>> dbgr = Debugger() # Add options as desired
>>> dbgr
<trepan.debugger.Debugger instance at 0x2e25320>
`dbg_opts' is an optional "options" dictionary that gets fed
trepan.Debugger(); `start_opts' are the optional "options"
dictionary that gets fed to trepan.Debugger.core.start().
"""
if not isinstance(Mdebugger.debugger_obj, Mdebugger.Trepan):
Mdebugger.debugger_obj = Mdebugger.Trepan(dbg_opts)
Mdebugger.debugger_obj.core.add_ignore(debug, stop)
pass
core = Mdebugger.debugger_obj.core
frame = sys._getframe(0+level)
core.set_next(frame)
if start_opts and 'startup-profile' in start_opts and start_opts['startup-profile']:
dbg_initfiles = start_opts['startup-profile']
from trepan import options
options.add_startup_file(dbg_initfiles)
for init_cmdfile in dbg_initfiles:
core.processor.queue_startfile(init_cmdfile)
if not core.is_started():
core.start(start_opts)
pass
if post_mortem:
debugger_on_post_mortem()
pass
if 0 == step_ignore:
frame = sys._getframe(1+level)
core.stop_reason = 'at a debug() call'
old_trace_hook_suspend = core.trace_hook_suspend
core.trace_hook_suspend = True
core.processor.event_processor(frame, 'line', None)
core.trace_hook_suspend = old_trace_hook_suspend
else:
core.step_ignore = step_ignore-1
pass
return | def function[debug, parameter[dbg_opts, start_opts, post_mortem, step_ignore, level]]:
constant[
Enter the debugger.
Parameters
----------
level : how many stack frames go back. Usually it will be
the default 0. But sometimes though there may be calls in setup to the debugger
that you may want to skip.
step_ignore : how many line events to ignore after the
debug() call. 0 means don't even wait for the debug() call to finish.
param dbg_opts : is an optional "options" dictionary that gets fed
trepan.Debugger(); `start_opts' are the optional "options"
dictionary that gets fed to trepan.Debugger.core.start().
Use like this:
.. code-block:: python
... # Possibly some Python code
import trepan.api # Needed only once
... # Possibly some more Python code
trepan.api.debug() # You can wrap inside conditional logic too
pass # Stop will be here.
# Below is code you want to use the debugger to do things.
.... # more Python code
# If you get to a place in the program where you aren't going
# want to debug any more, but want to remove debugger trace overhead:
trepan.api.stop()
Parameter "level" specifies how many stack frames go back. Usually it will be
the default 0. But sometimes though there may be calls in setup to the debugger
that you may want to skip.
Parameter "step_ignore" specifies how many line events to ignore after the
debug() call. 0 means don't even wait for the debug() call to finish.
In situations where you want an immediate stop in the "debug" call
rather than the statement following it ("pass" above), add parameter
step_ignore=0 to debug() like this::
import trepan.api # Needed only once
# ... as before
trepan.api.debug(step_ignore=0)
# ... as before
Module variable _debugger_obj_ from module trepan.debugger is used as
the debugger instance variable; it can be subsequently used to change
settings or alter behavior. It should be of type Debugger (found in
module trepan). If not, it will get changed to that type::
$ python
>>> from trepan.debugger import debugger_obj
>>> type(debugger_obj)
<type 'NoneType'>
>>> import trepan.api
>>> trepan.api.debug()
...
(Trepan) c
>>> from trepan.debugger import debugger_obj
>>> debugger_obj
<trepan.debugger.Debugger instance at 0x7fbcacd514d0>
>>>
If however you want your own separate debugger instance, you can
create it from the debugger _class Debugger()_ from module
trepan.debugger::
$ python
>>> from trepan.debugger import Debugger
>>> dbgr = Debugger() # Add options as desired
>>> dbgr
<trepan.debugger.Debugger instance at 0x2e25320>
`dbg_opts' is an optional "options" dictionary that gets fed
trepan.Debugger(); `start_opts' are the optional "options"
dictionary that gets fed to trepan.Debugger.core.start().
]
if <ast.UnaryOp object at 0x7da1b05b37c0> begin[:]
name[Mdebugger].debugger_obj assign[=] call[name[Mdebugger].Trepan, parameter[name[dbg_opts]]]
call[name[Mdebugger].debugger_obj.core.add_ignore, parameter[name[debug], name[stop]]]
pass
variable[core] assign[=] name[Mdebugger].debugger_obj.core
variable[frame] assign[=] call[name[sys]._getframe, parameter[binary_operation[constant[0] + name[level]]]]
call[name[core].set_next, parameter[name[frame]]]
if <ast.BoolOp object at 0x7da1b05beb90> begin[:]
variable[dbg_initfiles] assign[=] call[name[start_opts]][constant[startup-profile]]
from relative_module[trepan] import module[options]
call[name[options].add_startup_file, parameter[name[dbg_initfiles]]]
for taget[name[init_cmdfile]] in starred[name[dbg_initfiles]] begin[:]
call[name[core].processor.queue_startfile, parameter[name[init_cmdfile]]]
if <ast.UnaryOp object at 0x7da1b03b9240> begin[:]
call[name[core].start, parameter[name[start_opts]]]
pass
if name[post_mortem] begin[:]
call[name[debugger_on_post_mortem], parameter[]]
pass
if compare[constant[0] equal[==] name[step_ignore]] begin[:]
variable[frame] assign[=] call[name[sys]._getframe, parameter[binary_operation[constant[1] + name[level]]]]
name[core].stop_reason assign[=] constant[at a debug() call]
variable[old_trace_hook_suspend] assign[=] name[core].trace_hook_suspend
name[core].trace_hook_suspend assign[=] constant[True]
call[name[core].processor.event_processor, parameter[name[frame], constant[line], constant[None]]]
name[core].trace_hook_suspend assign[=] name[old_trace_hook_suspend]
return[None] | keyword[def] identifier[debug] ( identifier[dbg_opts] = keyword[None] , identifier[start_opts] = keyword[None] , identifier[post_mortem] = keyword[True] ,
identifier[step_ignore] = literal[int] , identifier[level] = literal[int] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[Mdebugger] . identifier[debugger_obj] , identifier[Mdebugger] . identifier[Trepan] ):
identifier[Mdebugger] . identifier[debugger_obj] = identifier[Mdebugger] . identifier[Trepan] ( identifier[dbg_opts] )
identifier[Mdebugger] . identifier[debugger_obj] . identifier[core] . identifier[add_ignore] ( identifier[debug] , identifier[stop] )
keyword[pass]
identifier[core] = identifier[Mdebugger] . identifier[debugger_obj] . identifier[core]
identifier[frame] = identifier[sys] . identifier[_getframe] ( literal[int] + identifier[level] )
identifier[core] . identifier[set_next] ( identifier[frame] )
keyword[if] identifier[start_opts] keyword[and] literal[string] keyword[in] identifier[start_opts] keyword[and] identifier[start_opts] [ literal[string] ]:
identifier[dbg_initfiles] = identifier[start_opts] [ literal[string] ]
keyword[from] identifier[trepan] keyword[import] identifier[options]
identifier[options] . identifier[add_startup_file] ( identifier[dbg_initfiles] )
keyword[for] identifier[init_cmdfile] keyword[in] identifier[dbg_initfiles] :
identifier[core] . identifier[processor] . identifier[queue_startfile] ( identifier[init_cmdfile] )
keyword[if] keyword[not] identifier[core] . identifier[is_started] ():
identifier[core] . identifier[start] ( identifier[start_opts] )
keyword[pass]
keyword[if] identifier[post_mortem] :
identifier[debugger_on_post_mortem] ()
keyword[pass]
keyword[if] literal[int] == identifier[step_ignore] :
identifier[frame] = identifier[sys] . identifier[_getframe] ( literal[int] + identifier[level] )
identifier[core] . identifier[stop_reason] = literal[string]
identifier[old_trace_hook_suspend] = identifier[core] . identifier[trace_hook_suspend]
identifier[core] . identifier[trace_hook_suspend] = keyword[True]
identifier[core] . identifier[processor] . identifier[event_processor] ( identifier[frame] , literal[string] , keyword[None] )
identifier[core] . identifier[trace_hook_suspend] = identifier[old_trace_hook_suspend]
keyword[else] :
identifier[core] . identifier[step_ignore] = identifier[step_ignore] - literal[int]
keyword[pass]
keyword[return] | def debug(dbg_opts=None, start_opts=None, post_mortem=True, step_ignore=1, level=0):
"""
Enter the debugger.
Parameters
----------
level : how many stack frames go back. Usually it will be
the default 0. But sometimes though there may be calls in setup to the debugger
that you may want to skip.
step_ignore : how many line events to ignore after the
debug() call. 0 means don't even wait for the debug() call to finish.
param dbg_opts : is an optional "options" dictionary that gets fed
trepan.Debugger(); `start_opts' are the optional "options"
dictionary that gets fed to trepan.Debugger.core.start().
Use like this:
.. code-block:: python
... # Possibly some Python code
import trepan.api # Needed only once
... # Possibly some more Python code
trepan.api.debug() # You can wrap inside conditional logic too
pass # Stop will be here.
# Below is code you want to use the debugger to do things.
.... # more Python code
# If you get to a place in the program where you aren't going
# want to debug any more, but want to remove debugger trace overhead:
trepan.api.stop()
Parameter "level" specifies how many stack frames go back. Usually it will be
the default 0. But sometimes though there may be calls in setup to the debugger
that you may want to skip.
Parameter "step_ignore" specifies how many line events to ignore after the
debug() call. 0 means don't even wait for the debug() call to finish.
In situations where you want an immediate stop in the "debug" call
rather than the statement following it ("pass" above), add parameter
step_ignore=0 to debug() like this::
import trepan.api # Needed only once
# ... as before
trepan.api.debug(step_ignore=0)
# ... as before
Module variable _debugger_obj_ from module trepan.debugger is used as
the debugger instance variable; it can be subsequently used to change
settings or alter behavior. It should be of type Debugger (found in
module trepan). If not, it will get changed to that type::
$ python
>>> from trepan.debugger import debugger_obj
>>> type(debugger_obj)
<type 'NoneType'>
>>> import trepan.api
>>> trepan.api.debug()
...
(Trepan) c
>>> from trepan.debugger import debugger_obj
>>> debugger_obj
<trepan.debugger.Debugger instance at 0x7fbcacd514d0>
>>>
If however you want your own separate debugger instance, you can
create it from the debugger _class Debugger()_ from module
trepan.debugger::
$ python
>>> from trepan.debugger import Debugger
>>> dbgr = Debugger() # Add options as desired
>>> dbgr
<trepan.debugger.Debugger instance at 0x2e25320>
`dbg_opts' is an optional "options" dictionary that gets fed
trepan.Debugger(); `start_opts' are the optional "options"
dictionary that gets fed to trepan.Debugger.core.start().
"""
if not isinstance(Mdebugger.debugger_obj, Mdebugger.Trepan):
Mdebugger.debugger_obj = Mdebugger.Trepan(dbg_opts)
Mdebugger.debugger_obj.core.add_ignore(debug, stop)
pass # depends on [control=['if'], data=[]]
core = Mdebugger.debugger_obj.core
frame = sys._getframe(0 + level)
core.set_next(frame)
if start_opts and 'startup-profile' in start_opts and start_opts['startup-profile']:
dbg_initfiles = start_opts['startup-profile']
from trepan import options
options.add_startup_file(dbg_initfiles)
for init_cmdfile in dbg_initfiles:
core.processor.queue_startfile(init_cmdfile) # depends on [control=['for'], data=['init_cmdfile']] # depends on [control=['if'], data=[]]
if not core.is_started():
core.start(start_opts)
pass # depends on [control=['if'], data=[]]
if post_mortem:
debugger_on_post_mortem()
pass # depends on [control=['if'], data=[]]
if 0 == step_ignore:
frame = sys._getframe(1 + level)
core.stop_reason = 'at a debug() call'
old_trace_hook_suspend = core.trace_hook_suspend
core.trace_hook_suspend = True
core.processor.event_processor(frame, 'line', None)
core.trace_hook_suspend = old_trace_hook_suspend # depends on [control=['if'], data=[]]
else:
core.step_ignore = step_ignore - 1
pass
return |
def nextindx(self):
'''
Determine the next insert offset according to storage.
Returns:
int: The next insert offset.
'''
indx = 0
with s_lmdbslab.Scan(self.slab, self.db) as curs:
last_key = curs.last_key()
if last_key is not None:
indx = s_common.int64un(last_key) + 1
return indx | def function[nextindx, parameter[self]]:
constant[
Determine the next insert offset according to storage.
Returns:
int: The next insert offset.
]
variable[indx] assign[=] constant[0]
with call[name[s_lmdbslab].Scan, parameter[name[self].slab, name[self].db]] begin[:]
variable[last_key] assign[=] call[name[curs].last_key, parameter[]]
if compare[name[last_key] is_not constant[None]] begin[:]
variable[indx] assign[=] binary_operation[call[name[s_common].int64un, parameter[name[last_key]]] + constant[1]]
return[name[indx]] | keyword[def] identifier[nextindx] ( identifier[self] ):
literal[string]
identifier[indx] = literal[int]
keyword[with] identifier[s_lmdbslab] . identifier[Scan] ( identifier[self] . identifier[slab] , identifier[self] . identifier[db] ) keyword[as] identifier[curs] :
identifier[last_key] = identifier[curs] . identifier[last_key] ()
keyword[if] identifier[last_key] keyword[is] keyword[not] keyword[None] :
identifier[indx] = identifier[s_common] . identifier[int64un] ( identifier[last_key] )+ literal[int]
keyword[return] identifier[indx] | def nextindx(self):
"""
Determine the next insert offset according to storage.
Returns:
int: The next insert offset.
"""
indx = 0
with s_lmdbslab.Scan(self.slab, self.db) as curs:
last_key = curs.last_key()
if last_key is not None:
indx = s_common.int64un(last_key) + 1 # depends on [control=['if'], data=['last_key']] # depends on [control=['with'], data=['curs']]
return indx |
def return_dict(self):
"""Output dictionary for ``make_plot.py`` input.
Iterates through the entire MainContainer class turning its contents
into dictionary form. This dictionary becomes the input for ``make_plot.py``.
If `print_input` attribute is True, the entire dictionary will be printed
prior to returning the dicitonary.
Returns:
- **output_dict** (*dict*): Dicitonary for input into ``make_plot.py``.
"""
output_dict = {}
output_dict['general'] = self._iterate_through_class(self.general.__dict__)
output_dict['figure'] = self._iterate_through_class(self.figure.__dict__)
if self.total_plots > 1:
trans_dict = ({
str(i): self._iterate_through_class(axis.__dict__) for i, axis
in enumerate(self.ax)})
output_dict['plot_info'] = trans_dict
else:
output_dict['plot_info'] = {'0': self._iterate_through_class(self.ax.__dict__)}
if self.print_input:
print(output_dict)
return output_dict | def function[return_dict, parameter[self]]:
constant[Output dictionary for ``make_plot.py`` input.
Iterates through the entire MainContainer class turning its contents
into dictionary form. This dictionary becomes the input for ``make_plot.py``.
If `print_input` attribute is True, the entire dictionary will be printed
prior to returning the dicitonary.
Returns:
- **output_dict** (*dict*): Dicitonary for input into ``make_plot.py``.
]
variable[output_dict] assign[=] dictionary[[], []]
call[name[output_dict]][constant[general]] assign[=] call[name[self]._iterate_through_class, parameter[name[self].general.__dict__]]
call[name[output_dict]][constant[figure]] assign[=] call[name[self]._iterate_through_class, parameter[name[self].figure.__dict__]]
if compare[name[self].total_plots greater[>] constant[1]] begin[:]
variable[trans_dict] assign[=] <ast.DictComp object at 0x7da1b0bda470>
call[name[output_dict]][constant[plot_info]] assign[=] name[trans_dict]
if name[self].print_input begin[:]
call[name[print], parameter[name[output_dict]]]
return[name[output_dict]] | keyword[def] identifier[return_dict] ( identifier[self] ):
literal[string]
identifier[output_dict] ={}
identifier[output_dict] [ literal[string] ]= identifier[self] . identifier[_iterate_through_class] ( identifier[self] . identifier[general] . identifier[__dict__] )
identifier[output_dict] [ literal[string] ]= identifier[self] . identifier[_iterate_through_class] ( identifier[self] . identifier[figure] . identifier[__dict__] )
keyword[if] identifier[self] . identifier[total_plots] > literal[int] :
identifier[trans_dict] =({
identifier[str] ( identifier[i] ): identifier[self] . identifier[_iterate_through_class] ( identifier[axis] . identifier[__dict__] ) keyword[for] identifier[i] , identifier[axis]
keyword[in] identifier[enumerate] ( identifier[self] . identifier[ax] )})
identifier[output_dict] [ literal[string] ]= identifier[trans_dict]
keyword[else] :
identifier[output_dict] [ literal[string] ]={ literal[string] : identifier[self] . identifier[_iterate_through_class] ( identifier[self] . identifier[ax] . identifier[__dict__] )}
keyword[if] identifier[self] . identifier[print_input] :
identifier[print] ( identifier[output_dict] )
keyword[return] identifier[output_dict] | def return_dict(self):
"""Output dictionary for ``make_plot.py`` input.
Iterates through the entire MainContainer class turning its contents
into dictionary form. This dictionary becomes the input for ``make_plot.py``.
If `print_input` attribute is True, the entire dictionary will be printed
prior to returning the dicitonary.
Returns:
- **output_dict** (*dict*): Dicitonary for input into ``make_plot.py``.
"""
output_dict = {}
output_dict['general'] = self._iterate_through_class(self.general.__dict__)
output_dict['figure'] = self._iterate_through_class(self.figure.__dict__)
if self.total_plots > 1:
trans_dict = {str(i): self._iterate_through_class(axis.__dict__) for (i, axis) in enumerate(self.ax)}
output_dict['plot_info'] = trans_dict # depends on [control=['if'], data=[]]
else:
output_dict['plot_info'] = {'0': self._iterate_through_class(self.ax.__dict__)}
if self.print_input:
print(output_dict) # depends on [control=['if'], data=[]]
return output_dict |
def install(self, paths, maker, **kwargs):
"""
Install a wheel to the specified paths. If kwarg ``warner`` is
specified, it should be a callable, which will be called with two
tuples indicating the wheel version of this software and the wheel
version in the file, if there is a discrepancy in the versions.
This can be used to issue any warnings to raise any exceptions.
If kwarg ``lib_only`` is True, only the purelib/platlib files are
installed, and the headers, scripts, data and dist-info metadata are
not written. If kwarg ``bytecode_hashed_invalidation`` is True, written
bytecode will try to use file-hash based invalidation (PEP-552) on
supported interpreter versions (CPython 2.7+).
The return value is a :class:`InstalledDistribution` instance unless
``options.lib_only`` is True, in which case the return value is ``None``.
"""
dry_run = maker.dry_run
warner = kwargs.get('warner')
lib_only = kwargs.get('lib_only', False)
bc_hashed_invalidation = kwargs.get('bytecode_hashed_invalidation', False)
pathname = os.path.join(self.dirname, self.filename)
name_ver = '%s-%s' % (self.name, self.version)
data_dir = '%s.data' % name_ver
info_dir = '%s.dist-info' % name_ver
metadata_name = posixpath.join(info_dir, METADATA_FILENAME)
wheel_metadata_name = posixpath.join(info_dir, 'WHEEL')
record_name = posixpath.join(info_dir, 'RECORD')
wrapper = codecs.getreader('utf-8')
with ZipFile(pathname, 'r') as zf:
with zf.open(wheel_metadata_name) as bwf:
wf = wrapper(bwf)
message = message_from_file(wf)
wv = message['Wheel-Version'].split('.', 1)
file_version = tuple([int(i) for i in wv])
if (file_version != self.wheel_version) and warner:
warner(self.wheel_version, file_version)
if message['Root-Is-Purelib'] == 'true':
libdir = paths['purelib']
else:
libdir = paths['platlib']
records = {}
with zf.open(record_name) as bf:
with CSVReader(stream=bf) as reader:
for row in reader:
p = row[0]
records[p] = row
data_pfx = posixpath.join(data_dir, '')
info_pfx = posixpath.join(info_dir, '')
script_pfx = posixpath.join(data_dir, 'scripts', '')
# make a new instance rather than a copy of maker's,
# as we mutate it
fileop = FileOperator(dry_run=dry_run)
fileop.record = True # so we can rollback if needed
bc = not sys.dont_write_bytecode # Double negatives. Lovely!
outfiles = [] # for RECORD writing
# for script copying/shebang processing
workdir = tempfile.mkdtemp()
# set target dir later
# we default add_launchers to False, as the
# Python Launcher should be used instead
maker.source_dir = workdir
maker.target_dir = None
try:
for zinfo in zf.infolist():
arcname = zinfo.filename
if isinstance(arcname, text_type):
u_arcname = arcname
else:
u_arcname = arcname.decode('utf-8')
# The signature file won't be in RECORD,
# and we don't currently don't do anything with it
if u_arcname.endswith('/RECORD.jws'):
continue
row = records[u_arcname]
if row[2] and str(zinfo.file_size) != row[2]:
raise DistlibException('size mismatch for '
'%s' % u_arcname)
if row[1]:
kind, value = row[1].split('=', 1)
with zf.open(arcname) as bf:
data = bf.read()
_, digest = self.get_hash(data, kind)
if digest != value:
raise DistlibException('digest mismatch for '
'%s' % arcname)
if lib_only and u_arcname.startswith((info_pfx, data_pfx)):
logger.debug('lib_only: skipping %s', u_arcname)
continue
is_script = (u_arcname.startswith(script_pfx)
and not u_arcname.endswith('.exe'))
if u_arcname.startswith(data_pfx):
_, where, rp = u_arcname.split('/', 2)
outfile = os.path.join(paths[where], convert_path(rp))
else:
# meant for site-packages.
if u_arcname in (wheel_metadata_name, record_name):
continue
outfile = os.path.join(libdir, convert_path(u_arcname))
if not is_script:
with zf.open(arcname) as bf:
fileop.copy_stream(bf, outfile)
outfiles.append(outfile)
# Double check the digest of the written file
if not dry_run and row[1]:
with open(outfile, 'rb') as bf:
data = bf.read()
_, newdigest = self.get_hash(data, kind)
if newdigest != digest:
raise DistlibException('digest mismatch '
'on write for '
'%s' % outfile)
if bc and outfile.endswith('.py'):
try:
pyc = fileop.byte_compile(outfile,
hashed_invalidation=bc_hashed_invalidation)
outfiles.append(pyc)
except Exception:
# Don't give up if byte-compilation fails,
# but log it and perhaps warn the user
logger.warning('Byte-compilation failed',
exc_info=True)
else:
fn = os.path.basename(convert_path(arcname))
workname = os.path.join(workdir, fn)
with zf.open(arcname) as bf:
fileop.copy_stream(bf, workname)
dn, fn = os.path.split(outfile)
maker.target_dir = dn
filenames = maker.make(fn)
fileop.set_executable_mode(filenames)
outfiles.extend(filenames)
if lib_only:
logger.debug('lib_only: returning None')
dist = None
else:
# Generate scripts
# Try to get pydist.json so we can see if there are
# any commands to generate. If this fails (e.g. because
# of a legacy wheel), log a warning but don't give up.
commands = None
file_version = self.info['Wheel-Version']
if file_version == '1.0':
# Use legacy info
ep = posixpath.join(info_dir, 'entry_points.txt')
try:
with zf.open(ep) as bwf:
epdata = read_exports(bwf)
commands = {}
for key in ('console', 'gui'):
k = '%s_scripts' % key
if k in epdata:
commands['wrap_%s' % key] = d = {}
for v in epdata[k].values():
s = '%s:%s' % (v.prefix, v.suffix)
if v.flags:
s += ' %s' % v.flags
d[v.name] = s
except Exception:
logger.warning('Unable to read legacy script '
'metadata, so cannot generate '
'scripts')
else:
try:
with zf.open(metadata_name) as bwf:
wf = wrapper(bwf)
commands = json.load(wf).get('extensions')
if commands:
commands = commands.get('python.commands')
except Exception:
logger.warning('Unable to read JSON metadata, so '
'cannot generate scripts')
if commands:
console_scripts = commands.get('wrap_console', {})
gui_scripts = commands.get('wrap_gui', {})
if console_scripts or gui_scripts:
script_dir = paths.get('scripts', '')
if not os.path.isdir(script_dir):
raise ValueError('Valid script path not '
'specified')
maker.target_dir = script_dir
for k, v in console_scripts.items():
script = '%s = %s' % (k, v)
filenames = maker.make(script)
fileop.set_executable_mode(filenames)
if gui_scripts:
options = {'gui': True }
for k, v in gui_scripts.items():
script = '%s = %s' % (k, v)
filenames = maker.make(script, options)
fileop.set_executable_mode(filenames)
p = os.path.join(libdir, info_dir)
dist = InstalledDistribution(p)
# Write SHARED
paths = dict(paths) # don't change passed in dict
del paths['purelib']
del paths['platlib']
paths['lib'] = libdir
p = dist.write_shared_locations(paths, dry_run)
if p:
outfiles.append(p)
# Write RECORD
dist.write_installed_files(outfiles, paths['prefix'],
dry_run)
return dist
except Exception: # pragma: no cover
logger.exception('installation failed.')
fileop.rollback()
raise
finally:
shutil.rmtree(workdir) | def function[install, parameter[self, paths, maker]]:
constant[
Install a wheel to the specified paths. If kwarg ``warner`` is
specified, it should be a callable, which will be called with two
tuples indicating the wheel version of this software and the wheel
version in the file, if there is a discrepancy in the versions.
This can be used to issue any warnings to raise any exceptions.
If kwarg ``lib_only`` is True, only the purelib/platlib files are
installed, and the headers, scripts, data and dist-info metadata are
not written. If kwarg ``bytecode_hashed_invalidation`` is True, written
bytecode will try to use file-hash based invalidation (PEP-552) on
supported interpreter versions (CPython 2.7+).
The return value is a :class:`InstalledDistribution` instance unless
``options.lib_only`` is True, in which case the return value is ``None``.
]
variable[dry_run] assign[=] name[maker].dry_run
variable[warner] assign[=] call[name[kwargs].get, parameter[constant[warner]]]
variable[lib_only] assign[=] call[name[kwargs].get, parameter[constant[lib_only], constant[False]]]
variable[bc_hashed_invalidation] assign[=] call[name[kwargs].get, parameter[constant[bytecode_hashed_invalidation], constant[False]]]
variable[pathname] assign[=] call[name[os].path.join, parameter[name[self].dirname, name[self].filename]]
variable[name_ver] assign[=] binary_operation[constant[%s-%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da1b1e976a0>, <ast.Attribute object at 0x7da1b1e97640>]]]
variable[data_dir] assign[=] binary_operation[constant[%s.data] <ast.Mod object at 0x7da2590d6920> name[name_ver]]
variable[info_dir] assign[=] binary_operation[constant[%s.dist-info] <ast.Mod object at 0x7da2590d6920> name[name_ver]]
variable[metadata_name] assign[=] call[name[posixpath].join, parameter[name[info_dir], name[METADATA_FILENAME]]]
variable[wheel_metadata_name] assign[=] call[name[posixpath].join, parameter[name[info_dir], constant[WHEEL]]]
variable[record_name] assign[=] call[name[posixpath].join, parameter[name[info_dir], constant[RECORD]]]
variable[wrapper] assign[=] call[name[codecs].getreader, parameter[constant[utf-8]]]
with call[name[ZipFile], parameter[name[pathname], constant[r]]] begin[:]
with call[name[zf].open, parameter[name[wheel_metadata_name]]] begin[:]
variable[wf] assign[=] call[name[wrapper], parameter[name[bwf]]]
variable[message] assign[=] call[name[message_from_file], parameter[name[wf]]]
variable[wv] assign[=] call[call[name[message]][constant[Wheel-Version]].split, parameter[constant[.], constant[1]]]
variable[file_version] assign[=] call[name[tuple], parameter[<ast.ListComp object at 0x7da1b1e967a0>]]
if <ast.BoolOp object at 0x7da1b1e965f0> begin[:]
call[name[warner], parameter[name[self].wheel_version, name[file_version]]]
if compare[call[name[message]][constant[Root-Is-Purelib]] equal[==] constant[true]] begin[:]
variable[libdir] assign[=] call[name[paths]][constant[purelib]]
variable[records] assign[=] dictionary[[], []]
with call[name[zf].open, parameter[name[record_name]]] begin[:]
with call[name[CSVReader], parameter[]] begin[:]
for taget[name[row]] in starred[name[reader]] begin[:]
variable[p] assign[=] call[name[row]][constant[0]]
call[name[records]][name[p]] assign[=] name[row]
variable[data_pfx] assign[=] call[name[posixpath].join, parameter[name[data_dir], constant[]]]
variable[info_pfx] assign[=] call[name[posixpath].join, parameter[name[info_dir], constant[]]]
variable[script_pfx] assign[=] call[name[posixpath].join, parameter[name[data_dir], constant[scripts], constant[]]]
variable[fileop] assign[=] call[name[FileOperator], parameter[]]
name[fileop].record assign[=] constant[True]
variable[bc] assign[=] <ast.UnaryOp object at 0x7da1b1e7ace0>
variable[outfiles] assign[=] list[[]]
variable[workdir] assign[=] call[name[tempfile].mkdtemp, parameter[]]
name[maker].source_dir assign[=] name[workdir]
name[maker].target_dir assign[=] constant[None]
<ast.Try object at 0x7da1b1e7a950> | keyword[def] identifier[install] ( identifier[self] , identifier[paths] , identifier[maker] ,** identifier[kwargs] ):
literal[string]
identifier[dry_run] = identifier[maker] . identifier[dry_run]
identifier[warner] = identifier[kwargs] . identifier[get] ( literal[string] )
identifier[lib_only] = identifier[kwargs] . identifier[get] ( literal[string] , keyword[False] )
identifier[bc_hashed_invalidation] = identifier[kwargs] . identifier[get] ( literal[string] , keyword[False] )
identifier[pathname] = identifier[os] . identifier[path] . identifier[join] ( identifier[self] . identifier[dirname] , identifier[self] . identifier[filename] )
identifier[name_ver] = literal[string] %( identifier[self] . identifier[name] , identifier[self] . identifier[version] )
identifier[data_dir] = literal[string] % identifier[name_ver]
identifier[info_dir] = literal[string] % identifier[name_ver]
identifier[metadata_name] = identifier[posixpath] . identifier[join] ( identifier[info_dir] , identifier[METADATA_FILENAME] )
identifier[wheel_metadata_name] = identifier[posixpath] . identifier[join] ( identifier[info_dir] , literal[string] )
identifier[record_name] = identifier[posixpath] . identifier[join] ( identifier[info_dir] , literal[string] )
identifier[wrapper] = identifier[codecs] . identifier[getreader] ( literal[string] )
keyword[with] identifier[ZipFile] ( identifier[pathname] , literal[string] ) keyword[as] identifier[zf] :
keyword[with] identifier[zf] . identifier[open] ( identifier[wheel_metadata_name] ) keyword[as] identifier[bwf] :
identifier[wf] = identifier[wrapper] ( identifier[bwf] )
identifier[message] = identifier[message_from_file] ( identifier[wf] )
identifier[wv] = identifier[message] [ literal[string] ]. identifier[split] ( literal[string] , literal[int] )
identifier[file_version] = identifier[tuple] ([ identifier[int] ( identifier[i] ) keyword[for] identifier[i] keyword[in] identifier[wv] ])
keyword[if] ( identifier[file_version] != identifier[self] . identifier[wheel_version] ) keyword[and] identifier[warner] :
identifier[warner] ( identifier[self] . identifier[wheel_version] , identifier[file_version] )
keyword[if] identifier[message] [ literal[string] ]== literal[string] :
identifier[libdir] = identifier[paths] [ literal[string] ]
keyword[else] :
identifier[libdir] = identifier[paths] [ literal[string] ]
identifier[records] ={}
keyword[with] identifier[zf] . identifier[open] ( identifier[record_name] ) keyword[as] identifier[bf] :
keyword[with] identifier[CSVReader] ( identifier[stream] = identifier[bf] ) keyword[as] identifier[reader] :
keyword[for] identifier[row] keyword[in] identifier[reader] :
identifier[p] = identifier[row] [ literal[int] ]
identifier[records] [ identifier[p] ]= identifier[row]
identifier[data_pfx] = identifier[posixpath] . identifier[join] ( identifier[data_dir] , literal[string] )
identifier[info_pfx] = identifier[posixpath] . identifier[join] ( identifier[info_dir] , literal[string] )
identifier[script_pfx] = identifier[posixpath] . identifier[join] ( identifier[data_dir] , literal[string] , literal[string] )
identifier[fileop] = identifier[FileOperator] ( identifier[dry_run] = identifier[dry_run] )
identifier[fileop] . identifier[record] = keyword[True]
identifier[bc] = keyword[not] identifier[sys] . identifier[dont_write_bytecode]
identifier[outfiles] =[]
identifier[workdir] = identifier[tempfile] . identifier[mkdtemp] ()
identifier[maker] . identifier[source_dir] = identifier[workdir]
identifier[maker] . identifier[target_dir] = keyword[None]
keyword[try] :
keyword[for] identifier[zinfo] keyword[in] identifier[zf] . identifier[infolist] ():
identifier[arcname] = identifier[zinfo] . identifier[filename]
keyword[if] identifier[isinstance] ( identifier[arcname] , identifier[text_type] ):
identifier[u_arcname] = identifier[arcname]
keyword[else] :
identifier[u_arcname] = identifier[arcname] . identifier[decode] ( literal[string] )
keyword[if] identifier[u_arcname] . identifier[endswith] ( literal[string] ):
keyword[continue]
identifier[row] = identifier[records] [ identifier[u_arcname] ]
keyword[if] identifier[row] [ literal[int] ] keyword[and] identifier[str] ( identifier[zinfo] . identifier[file_size] )!= identifier[row] [ literal[int] ]:
keyword[raise] identifier[DistlibException] ( literal[string]
literal[string] % identifier[u_arcname] )
keyword[if] identifier[row] [ literal[int] ]:
identifier[kind] , identifier[value] = identifier[row] [ literal[int] ]. identifier[split] ( literal[string] , literal[int] )
keyword[with] identifier[zf] . identifier[open] ( identifier[arcname] ) keyword[as] identifier[bf] :
identifier[data] = identifier[bf] . identifier[read] ()
identifier[_] , identifier[digest] = identifier[self] . identifier[get_hash] ( identifier[data] , identifier[kind] )
keyword[if] identifier[digest] != identifier[value] :
keyword[raise] identifier[DistlibException] ( literal[string]
literal[string] % identifier[arcname] )
keyword[if] identifier[lib_only] keyword[and] identifier[u_arcname] . identifier[startswith] (( identifier[info_pfx] , identifier[data_pfx] )):
identifier[logger] . identifier[debug] ( literal[string] , identifier[u_arcname] )
keyword[continue]
identifier[is_script] =( identifier[u_arcname] . identifier[startswith] ( identifier[script_pfx] )
keyword[and] keyword[not] identifier[u_arcname] . identifier[endswith] ( literal[string] ))
keyword[if] identifier[u_arcname] . identifier[startswith] ( identifier[data_pfx] ):
identifier[_] , identifier[where] , identifier[rp] = identifier[u_arcname] . identifier[split] ( literal[string] , literal[int] )
identifier[outfile] = identifier[os] . identifier[path] . identifier[join] ( identifier[paths] [ identifier[where] ], identifier[convert_path] ( identifier[rp] ))
keyword[else] :
keyword[if] identifier[u_arcname] keyword[in] ( identifier[wheel_metadata_name] , identifier[record_name] ):
keyword[continue]
identifier[outfile] = identifier[os] . identifier[path] . identifier[join] ( identifier[libdir] , identifier[convert_path] ( identifier[u_arcname] ))
keyword[if] keyword[not] identifier[is_script] :
keyword[with] identifier[zf] . identifier[open] ( identifier[arcname] ) keyword[as] identifier[bf] :
identifier[fileop] . identifier[copy_stream] ( identifier[bf] , identifier[outfile] )
identifier[outfiles] . identifier[append] ( identifier[outfile] )
keyword[if] keyword[not] identifier[dry_run] keyword[and] identifier[row] [ literal[int] ]:
keyword[with] identifier[open] ( identifier[outfile] , literal[string] ) keyword[as] identifier[bf] :
identifier[data] = identifier[bf] . identifier[read] ()
identifier[_] , identifier[newdigest] = identifier[self] . identifier[get_hash] ( identifier[data] , identifier[kind] )
keyword[if] identifier[newdigest] != identifier[digest] :
keyword[raise] identifier[DistlibException] ( literal[string]
literal[string]
literal[string] % identifier[outfile] )
keyword[if] identifier[bc] keyword[and] identifier[outfile] . identifier[endswith] ( literal[string] ):
keyword[try] :
identifier[pyc] = identifier[fileop] . identifier[byte_compile] ( identifier[outfile] ,
identifier[hashed_invalidation] = identifier[bc_hashed_invalidation] )
identifier[outfiles] . identifier[append] ( identifier[pyc] )
keyword[except] identifier[Exception] :
identifier[logger] . identifier[warning] ( literal[string] ,
identifier[exc_info] = keyword[True] )
keyword[else] :
identifier[fn] = identifier[os] . identifier[path] . identifier[basename] ( identifier[convert_path] ( identifier[arcname] ))
identifier[workname] = identifier[os] . identifier[path] . identifier[join] ( identifier[workdir] , identifier[fn] )
keyword[with] identifier[zf] . identifier[open] ( identifier[arcname] ) keyword[as] identifier[bf] :
identifier[fileop] . identifier[copy_stream] ( identifier[bf] , identifier[workname] )
identifier[dn] , identifier[fn] = identifier[os] . identifier[path] . identifier[split] ( identifier[outfile] )
identifier[maker] . identifier[target_dir] = identifier[dn]
identifier[filenames] = identifier[maker] . identifier[make] ( identifier[fn] )
identifier[fileop] . identifier[set_executable_mode] ( identifier[filenames] )
identifier[outfiles] . identifier[extend] ( identifier[filenames] )
keyword[if] identifier[lib_only] :
identifier[logger] . identifier[debug] ( literal[string] )
identifier[dist] = keyword[None]
keyword[else] :
identifier[commands] = keyword[None]
identifier[file_version] = identifier[self] . identifier[info] [ literal[string] ]
keyword[if] identifier[file_version] == literal[string] :
identifier[ep] = identifier[posixpath] . identifier[join] ( identifier[info_dir] , literal[string] )
keyword[try] :
keyword[with] identifier[zf] . identifier[open] ( identifier[ep] ) keyword[as] identifier[bwf] :
identifier[epdata] = identifier[read_exports] ( identifier[bwf] )
identifier[commands] ={}
keyword[for] identifier[key] keyword[in] ( literal[string] , literal[string] ):
identifier[k] = literal[string] % identifier[key]
keyword[if] identifier[k] keyword[in] identifier[epdata] :
identifier[commands] [ literal[string] % identifier[key] ]= identifier[d] ={}
keyword[for] identifier[v] keyword[in] identifier[epdata] [ identifier[k] ]. identifier[values] ():
identifier[s] = literal[string] %( identifier[v] . identifier[prefix] , identifier[v] . identifier[suffix] )
keyword[if] identifier[v] . identifier[flags] :
identifier[s] += literal[string] % identifier[v] . identifier[flags]
identifier[d] [ identifier[v] . identifier[name] ]= identifier[s]
keyword[except] identifier[Exception] :
identifier[logger] . identifier[warning] ( literal[string]
literal[string]
literal[string] )
keyword[else] :
keyword[try] :
keyword[with] identifier[zf] . identifier[open] ( identifier[metadata_name] ) keyword[as] identifier[bwf] :
identifier[wf] = identifier[wrapper] ( identifier[bwf] )
identifier[commands] = identifier[json] . identifier[load] ( identifier[wf] ). identifier[get] ( literal[string] )
keyword[if] identifier[commands] :
identifier[commands] = identifier[commands] . identifier[get] ( literal[string] )
keyword[except] identifier[Exception] :
identifier[logger] . identifier[warning] ( literal[string]
literal[string] )
keyword[if] identifier[commands] :
identifier[console_scripts] = identifier[commands] . identifier[get] ( literal[string] ,{})
identifier[gui_scripts] = identifier[commands] . identifier[get] ( literal[string] ,{})
keyword[if] identifier[console_scripts] keyword[or] identifier[gui_scripts] :
identifier[script_dir] = identifier[paths] . identifier[get] ( literal[string] , literal[string] )
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[isdir] ( identifier[script_dir] ):
keyword[raise] identifier[ValueError] ( literal[string]
literal[string] )
identifier[maker] . identifier[target_dir] = identifier[script_dir]
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[console_scripts] . identifier[items] ():
identifier[script] = literal[string] %( identifier[k] , identifier[v] )
identifier[filenames] = identifier[maker] . identifier[make] ( identifier[script] )
identifier[fileop] . identifier[set_executable_mode] ( identifier[filenames] )
keyword[if] identifier[gui_scripts] :
identifier[options] ={ literal[string] : keyword[True] }
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[gui_scripts] . identifier[items] ():
identifier[script] = literal[string] %( identifier[k] , identifier[v] )
identifier[filenames] = identifier[maker] . identifier[make] ( identifier[script] , identifier[options] )
identifier[fileop] . identifier[set_executable_mode] ( identifier[filenames] )
identifier[p] = identifier[os] . identifier[path] . identifier[join] ( identifier[libdir] , identifier[info_dir] )
identifier[dist] = identifier[InstalledDistribution] ( identifier[p] )
identifier[paths] = identifier[dict] ( identifier[paths] )
keyword[del] identifier[paths] [ literal[string] ]
keyword[del] identifier[paths] [ literal[string] ]
identifier[paths] [ literal[string] ]= identifier[libdir]
identifier[p] = identifier[dist] . identifier[write_shared_locations] ( identifier[paths] , identifier[dry_run] )
keyword[if] identifier[p] :
identifier[outfiles] . identifier[append] ( identifier[p] )
identifier[dist] . identifier[write_installed_files] ( identifier[outfiles] , identifier[paths] [ literal[string] ],
identifier[dry_run] )
keyword[return] identifier[dist]
keyword[except] identifier[Exception] :
identifier[logger] . identifier[exception] ( literal[string] )
identifier[fileop] . identifier[rollback] ()
keyword[raise]
keyword[finally] :
identifier[shutil] . identifier[rmtree] ( identifier[workdir] ) | def install(self, paths, maker, **kwargs):
"""
Install a wheel to the specified paths. If kwarg ``warner`` is
specified, it should be a callable, which will be called with two
tuples indicating the wheel version of this software and the wheel
version in the file, if there is a discrepancy in the versions.
This can be used to issue any warnings to raise any exceptions.
If kwarg ``lib_only`` is True, only the purelib/platlib files are
installed, and the headers, scripts, data and dist-info metadata are
not written. If kwarg ``bytecode_hashed_invalidation`` is True, written
bytecode will try to use file-hash based invalidation (PEP-552) on
supported interpreter versions (CPython 2.7+).
The return value is a :class:`InstalledDistribution` instance unless
``options.lib_only`` is True, in which case the return value is ``None``.
"""
dry_run = maker.dry_run
warner = kwargs.get('warner')
lib_only = kwargs.get('lib_only', False)
bc_hashed_invalidation = kwargs.get('bytecode_hashed_invalidation', False)
pathname = os.path.join(self.dirname, self.filename)
name_ver = '%s-%s' % (self.name, self.version)
data_dir = '%s.data' % name_ver
info_dir = '%s.dist-info' % name_ver
metadata_name = posixpath.join(info_dir, METADATA_FILENAME)
wheel_metadata_name = posixpath.join(info_dir, 'WHEEL')
record_name = posixpath.join(info_dir, 'RECORD')
wrapper = codecs.getreader('utf-8')
with ZipFile(pathname, 'r') as zf:
with zf.open(wheel_metadata_name) as bwf:
wf = wrapper(bwf)
message = message_from_file(wf) # depends on [control=['with'], data=['bwf']]
wv = message['Wheel-Version'].split('.', 1)
file_version = tuple([int(i) for i in wv])
if file_version != self.wheel_version and warner:
warner(self.wheel_version, file_version) # depends on [control=['if'], data=[]]
if message['Root-Is-Purelib'] == 'true':
libdir = paths['purelib'] # depends on [control=['if'], data=[]]
else:
libdir = paths['platlib']
records = {}
with zf.open(record_name) as bf:
with CSVReader(stream=bf) as reader:
for row in reader:
p = row[0]
records[p] = row # depends on [control=['for'], data=['row']] # depends on [control=['with'], data=['reader']] # depends on [control=['with'], data=['bf']]
data_pfx = posixpath.join(data_dir, '')
info_pfx = posixpath.join(info_dir, '')
script_pfx = posixpath.join(data_dir, 'scripts', '')
# make a new instance rather than a copy of maker's,
# as we mutate it
fileop = FileOperator(dry_run=dry_run)
fileop.record = True # so we can rollback if needed
bc = not sys.dont_write_bytecode # Double negatives. Lovely!
outfiles = [] # for RECORD writing
# for script copying/shebang processing
workdir = tempfile.mkdtemp()
# set target dir later
# we default add_launchers to False, as the
# Python Launcher should be used instead
maker.source_dir = workdir
maker.target_dir = None
try:
for zinfo in zf.infolist():
arcname = zinfo.filename
if isinstance(arcname, text_type):
u_arcname = arcname # depends on [control=['if'], data=[]]
else:
u_arcname = arcname.decode('utf-8')
# The signature file won't be in RECORD,
# and we don't currently don't do anything with it
if u_arcname.endswith('/RECORD.jws'):
continue # depends on [control=['if'], data=[]]
row = records[u_arcname]
if row[2] and str(zinfo.file_size) != row[2]:
raise DistlibException('size mismatch for %s' % u_arcname) # depends on [control=['if'], data=[]]
if row[1]:
(kind, value) = row[1].split('=', 1)
with zf.open(arcname) as bf:
data = bf.read() # depends on [control=['with'], data=['bf']]
(_, digest) = self.get_hash(data, kind)
if digest != value:
raise DistlibException('digest mismatch for %s' % arcname) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if lib_only and u_arcname.startswith((info_pfx, data_pfx)):
logger.debug('lib_only: skipping %s', u_arcname)
continue # depends on [control=['if'], data=[]]
is_script = u_arcname.startswith(script_pfx) and (not u_arcname.endswith('.exe'))
if u_arcname.startswith(data_pfx):
(_, where, rp) = u_arcname.split('/', 2)
outfile = os.path.join(paths[where], convert_path(rp)) # depends on [control=['if'], data=[]]
else:
# meant for site-packages.
if u_arcname in (wheel_metadata_name, record_name):
continue # depends on [control=['if'], data=[]]
outfile = os.path.join(libdir, convert_path(u_arcname))
if not is_script:
with zf.open(arcname) as bf:
fileop.copy_stream(bf, outfile) # depends on [control=['with'], data=['bf']]
outfiles.append(outfile)
# Double check the digest of the written file
if not dry_run and row[1]:
with open(outfile, 'rb') as bf:
data = bf.read()
(_, newdigest) = self.get_hash(data, kind)
if newdigest != digest:
raise DistlibException('digest mismatch on write for %s' % outfile) # depends on [control=['if'], data=[]] # depends on [control=['with'], data=['bf']] # depends on [control=['if'], data=[]]
if bc and outfile.endswith('.py'):
try:
pyc = fileop.byte_compile(outfile, hashed_invalidation=bc_hashed_invalidation)
outfiles.append(pyc) # depends on [control=['try'], data=[]]
except Exception:
# Don't give up if byte-compilation fails,
# but log it and perhaps warn the user
logger.warning('Byte-compilation failed', exc_info=True) # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
fn = os.path.basename(convert_path(arcname))
workname = os.path.join(workdir, fn)
with zf.open(arcname) as bf:
fileop.copy_stream(bf, workname) # depends on [control=['with'], data=['bf']]
(dn, fn) = os.path.split(outfile)
maker.target_dir = dn
filenames = maker.make(fn)
fileop.set_executable_mode(filenames)
outfiles.extend(filenames) # depends on [control=['for'], data=['zinfo']]
if lib_only:
logger.debug('lib_only: returning None')
dist = None # depends on [control=['if'], data=[]]
else:
# Generate scripts
# Try to get pydist.json so we can see if there are
# any commands to generate. If this fails (e.g. because
# of a legacy wheel), log a warning but don't give up.
commands = None
file_version = self.info['Wheel-Version']
if file_version == '1.0':
# Use legacy info
ep = posixpath.join(info_dir, 'entry_points.txt')
try:
with zf.open(ep) as bwf:
epdata = read_exports(bwf) # depends on [control=['with'], data=['bwf']]
commands = {}
for key in ('console', 'gui'):
k = '%s_scripts' % key
if k in epdata:
commands['wrap_%s' % key] = d = {}
for v in epdata[k].values():
s = '%s:%s' % (v.prefix, v.suffix)
if v.flags:
s += ' %s' % v.flags # depends on [control=['if'], data=[]]
d[v.name] = s # depends on [control=['for'], data=['v']] # depends on [control=['if'], data=['k', 'epdata']] # depends on [control=['for'], data=['key']] # depends on [control=['try'], data=[]]
except Exception:
logger.warning('Unable to read legacy script metadata, so cannot generate scripts') # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
else:
try:
with zf.open(metadata_name) as bwf:
wf = wrapper(bwf)
commands = json.load(wf).get('extensions')
if commands:
commands = commands.get('python.commands') # depends on [control=['if'], data=[]] # depends on [control=['with'], data=['bwf']] # depends on [control=['try'], data=[]]
except Exception:
logger.warning('Unable to read JSON metadata, so cannot generate scripts') # depends on [control=['except'], data=[]]
if commands:
console_scripts = commands.get('wrap_console', {})
gui_scripts = commands.get('wrap_gui', {})
if console_scripts or gui_scripts:
script_dir = paths.get('scripts', '')
if not os.path.isdir(script_dir):
raise ValueError('Valid script path not specified') # depends on [control=['if'], data=[]]
maker.target_dir = script_dir
for (k, v) in console_scripts.items():
script = '%s = %s' % (k, v)
filenames = maker.make(script)
fileop.set_executable_mode(filenames) # depends on [control=['for'], data=[]]
if gui_scripts:
options = {'gui': True}
for (k, v) in gui_scripts.items():
script = '%s = %s' % (k, v)
filenames = maker.make(script, options)
fileop.set_executable_mode(filenames) # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
p = os.path.join(libdir, info_dir)
dist = InstalledDistribution(p)
# Write SHARED
paths = dict(paths) # don't change passed in dict
del paths['purelib']
del paths['platlib']
paths['lib'] = libdir
p = dist.write_shared_locations(paths, dry_run)
if p:
outfiles.append(p) # depends on [control=['if'], data=[]]
# Write RECORD
dist.write_installed_files(outfiles, paths['prefix'], dry_run)
return dist # depends on [control=['try'], data=[]]
except Exception: # pragma: no cover
logger.exception('installation failed.')
fileop.rollback()
raise # depends on [control=['except'], data=[]]
finally:
shutil.rmtree(workdir) # depends on [control=['with'], data=['zf']] |
def assertSignalOrdering(self, *expected_events):
"""
Assert that a signals were fired in a specific sequence.
:param expected_events:
A (varadic) list of events describing the signals that were fired
Each element is a 3-tuple (signal, args, kwargs) that describes
the event.
.. note::
If you are using :meth:`assertSignalFired()` then the return value
of that method is a single event that can be passed to this method
"""
expected_order = [self._events_seen.index(event)
for event in expected_events]
actual_order = sorted(expected_order)
self.assertEqual(
expected_order, actual_order,
"\nExpected order of fired signals:\n{}\n"
"Actual order observed:\n{}".format(
"\n".join(
"\t{}: {}".format(i, event)
for i, event in enumerate(expected_events, 1)),
"\n".join(
"\t{}: {}".format(i, event)
for i, event in enumerate(
(self._events_seen[idx] for idx in actual_order), 1)))) | def function[assertSignalOrdering, parameter[self]]:
constant[
Assert that a signals were fired in a specific sequence.
:param expected_events:
A (varadic) list of events describing the signals that were fired
Each element is a 3-tuple (signal, args, kwargs) that describes
the event.
.. note::
If you are using :meth:`assertSignalFired()` then the return value
of that method is a single event that can be passed to this method
]
variable[expected_order] assign[=] <ast.ListComp object at 0x7da18fe92da0>
variable[actual_order] assign[=] call[name[sorted], parameter[name[expected_order]]]
call[name[self].assertEqual, parameter[name[expected_order], name[actual_order], call[constant[
Expected order of fired signals:
{}
Actual order observed:
{}].format, parameter[call[constant[
].join, parameter[<ast.GeneratorExp object at 0x7da18fe916c0>]], call[constant[
].join, parameter[<ast.GeneratorExp object at 0x7da18fe921a0>]]]]]] | keyword[def] identifier[assertSignalOrdering] ( identifier[self] ,* identifier[expected_events] ):
literal[string]
identifier[expected_order] =[ identifier[self] . identifier[_events_seen] . identifier[index] ( identifier[event] )
keyword[for] identifier[event] keyword[in] identifier[expected_events] ]
identifier[actual_order] = identifier[sorted] ( identifier[expected_order] )
identifier[self] . identifier[assertEqual] (
identifier[expected_order] , identifier[actual_order] ,
literal[string]
literal[string] . identifier[format] (
literal[string] . identifier[join] (
literal[string] . identifier[format] ( identifier[i] , identifier[event] )
keyword[for] identifier[i] , identifier[event] keyword[in] identifier[enumerate] ( identifier[expected_events] , literal[int] )),
literal[string] . identifier[join] (
literal[string] . identifier[format] ( identifier[i] , identifier[event] )
keyword[for] identifier[i] , identifier[event] keyword[in] identifier[enumerate] (
( identifier[self] . identifier[_events_seen] [ identifier[idx] ] keyword[for] identifier[idx] keyword[in] identifier[actual_order] ), literal[int] )))) | def assertSignalOrdering(self, *expected_events):
"""
Assert that a signals were fired in a specific sequence.
:param expected_events:
A (varadic) list of events describing the signals that were fired
Each element is a 3-tuple (signal, args, kwargs) that describes
the event.
.. note::
If you are using :meth:`assertSignalFired()` then the return value
of that method is a single event that can be passed to this method
"""
expected_order = [self._events_seen.index(event) for event in expected_events]
actual_order = sorted(expected_order)
self.assertEqual(expected_order, actual_order, '\nExpected order of fired signals:\n{}\nActual order observed:\n{}'.format('\n'.join(('\t{}: {}'.format(i, event) for (i, event) in enumerate(expected_events, 1))), '\n'.join(('\t{}: {}'.format(i, event) for (i, event) in enumerate((self._events_seen[idx] for idx in actual_order), 1))))) |
def public_key_to_connection_id(self, public_key):
"""
Get stored connection id for a public key.
"""
with self._connections_lock:
for connection_id, connection_info in self._connections.items():
if connection_info.public_key == public_key:
return connection_id
return None | def function[public_key_to_connection_id, parameter[self, public_key]]:
constant[
Get stored connection id for a public key.
]
with name[self]._connections_lock begin[:]
for taget[tuple[[<ast.Name object at 0x7da18f00ca60>, <ast.Name object at 0x7da18f00c490>]]] in starred[call[name[self]._connections.items, parameter[]]] begin[:]
if compare[name[connection_info].public_key equal[==] name[public_key]] begin[:]
return[name[connection_id]]
return[constant[None]] | keyword[def] identifier[public_key_to_connection_id] ( identifier[self] , identifier[public_key] ):
literal[string]
keyword[with] identifier[self] . identifier[_connections_lock] :
keyword[for] identifier[connection_id] , identifier[connection_info] keyword[in] identifier[self] . identifier[_connections] . identifier[items] ():
keyword[if] identifier[connection_info] . identifier[public_key] == identifier[public_key] :
keyword[return] identifier[connection_id]
keyword[return] keyword[None] | def public_key_to_connection_id(self, public_key):
"""
Get stored connection id for a public key.
"""
with self._connections_lock:
for (connection_id, connection_info) in self._connections.items():
if connection_info.public_key == public_key:
return connection_id # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
return None # depends on [control=['with'], data=[]] |
def get_instructions(self, x, first_line=None):
"""Iterator for the opcodes in methods, functions or code
Generates a series of Instruction named tuples giving the details of
each operations in the supplied code.
If *first_line* is not None, it indicates the line number that should
be reported for the first source line in the disassembled code.
Otherwise, the source line information (if any) is taken directly from
the disassembled code object.
"""
co = get_code_object(x)
cell_names = co.co_cellvars + co.co_freevars
linestarts = dict(self.opc.findlinestarts(co))
if first_line is not None:
line_offset = first_line - co.co_firstlineno
else:
line_offset = 0
return get_instructions_bytes(co.co_code, self.opc, co.co_varnames,
co.co_names, co.co_consts, cell_names, linestarts,
line_offset) | def function[get_instructions, parameter[self, x, first_line]]:
constant[Iterator for the opcodes in methods, functions or code
Generates a series of Instruction named tuples giving the details of
each operations in the supplied code.
If *first_line* is not None, it indicates the line number that should
be reported for the first source line in the disassembled code.
Otherwise, the source line information (if any) is taken directly from
the disassembled code object.
]
variable[co] assign[=] call[name[get_code_object], parameter[name[x]]]
variable[cell_names] assign[=] binary_operation[name[co].co_cellvars + name[co].co_freevars]
variable[linestarts] assign[=] call[name[dict], parameter[call[name[self].opc.findlinestarts, parameter[name[co]]]]]
if compare[name[first_line] is_not constant[None]] begin[:]
variable[line_offset] assign[=] binary_operation[name[first_line] - name[co].co_firstlineno]
return[call[name[get_instructions_bytes], parameter[name[co].co_code, name[self].opc, name[co].co_varnames, name[co].co_names, name[co].co_consts, name[cell_names], name[linestarts], name[line_offset]]]] | keyword[def] identifier[get_instructions] ( identifier[self] , identifier[x] , identifier[first_line] = keyword[None] ):
literal[string]
identifier[co] = identifier[get_code_object] ( identifier[x] )
identifier[cell_names] = identifier[co] . identifier[co_cellvars] + identifier[co] . identifier[co_freevars]
identifier[linestarts] = identifier[dict] ( identifier[self] . identifier[opc] . identifier[findlinestarts] ( identifier[co] ))
keyword[if] identifier[first_line] keyword[is] keyword[not] keyword[None] :
identifier[line_offset] = identifier[first_line] - identifier[co] . identifier[co_firstlineno]
keyword[else] :
identifier[line_offset] = literal[int]
keyword[return] identifier[get_instructions_bytes] ( identifier[co] . identifier[co_code] , identifier[self] . identifier[opc] , identifier[co] . identifier[co_varnames] ,
identifier[co] . identifier[co_names] , identifier[co] . identifier[co_consts] , identifier[cell_names] , identifier[linestarts] ,
identifier[line_offset] ) | def get_instructions(self, x, first_line=None):
"""Iterator for the opcodes in methods, functions or code
Generates a series of Instruction named tuples giving the details of
each operations in the supplied code.
If *first_line* is not None, it indicates the line number that should
be reported for the first source line in the disassembled code.
Otherwise, the source line information (if any) is taken directly from
the disassembled code object.
"""
co = get_code_object(x)
cell_names = co.co_cellvars + co.co_freevars
linestarts = dict(self.opc.findlinestarts(co))
if first_line is not None:
line_offset = first_line - co.co_firstlineno # depends on [control=['if'], data=['first_line']]
else:
line_offset = 0
return get_instructions_bytes(co.co_code, self.opc, co.co_varnames, co.co_names, co.co_consts, cell_names, linestarts, line_offset) |
def _ensure_sparse_format(spmatrix, accept_sparse, dtype, copy,
force_all_finite):
"""Convert a sparse matrix to a given format.
Checks the sparse format of spmatrix and converts if necessary.
Parameters
----------
spmatrix : scipy sparse matrix
Input to validate and convert.
accept_sparse : string, list of string or None (default=None)
String[s] representing allowed sparse matrix formats ('csc',
'csr', 'coo', 'dok', 'bsr', 'lil', 'dia'). None means that sparse
matrix input will raise an error. If the input is sparse but not in
the allowed format, it will be converted to the first listed format.
dtype : string, type or None (default=none)
Data type of result. If None, the dtype of the input is preserved.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
Returns
-------
spmatrix_converted : scipy sparse matrix.
Matrix that is ensured to have an allowed type.
"""
if accept_sparse in [None, False]:
raise TypeError('A sparse matrix was passed, but dense '
'data is required. Use X.toarray() to '
'convert to a dense numpy array.')
if dtype is None:
dtype = spmatrix.dtype
changed_format = False
if (isinstance(accept_sparse, (list, tuple))
and spmatrix.format not in accept_sparse):
# create new with correct sparse
spmatrix = spmatrix.asformat(accept_sparse[0])
changed_format = True
if dtype != spmatrix.dtype:
# convert dtype
spmatrix = spmatrix.astype(dtype)
elif copy and not changed_format:
# force copy
spmatrix = spmatrix.copy()
if force_all_finite:
if not hasattr(spmatrix, "data"):
warnings.warn("Can't check %s sparse matrix for nan or inf."
% spmatrix.format)
else:
_assert_all_finite(spmatrix.data)
return spmatrix | def function[_ensure_sparse_format, parameter[spmatrix, accept_sparse, dtype, copy, force_all_finite]]:
constant[Convert a sparse matrix to a given format.
Checks the sparse format of spmatrix and converts if necessary.
Parameters
----------
spmatrix : scipy sparse matrix
Input to validate and convert.
accept_sparse : string, list of string or None (default=None)
String[s] representing allowed sparse matrix formats ('csc',
'csr', 'coo', 'dok', 'bsr', 'lil', 'dia'). None means that sparse
matrix input will raise an error. If the input is sparse but not in
the allowed format, it will be converted to the first listed format.
dtype : string, type or None (default=none)
Data type of result. If None, the dtype of the input is preserved.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
Returns
-------
spmatrix_converted : scipy sparse matrix.
Matrix that is ensured to have an allowed type.
]
if compare[name[accept_sparse] in list[[<ast.Constant object at 0x7da2044c3850>, <ast.Constant object at 0x7da2044c31c0>]]] begin[:]
<ast.Raise object at 0x7da2044c0520>
if compare[name[dtype] is constant[None]] begin[:]
variable[dtype] assign[=] name[spmatrix].dtype
variable[changed_format] assign[=] constant[False]
if <ast.BoolOp object at 0x7da1b13b4b20> begin[:]
variable[spmatrix] assign[=] call[name[spmatrix].asformat, parameter[call[name[accept_sparse]][constant[0]]]]
variable[changed_format] assign[=] constant[True]
if compare[name[dtype] not_equal[!=] name[spmatrix].dtype] begin[:]
variable[spmatrix] assign[=] call[name[spmatrix].astype, parameter[name[dtype]]]
if name[force_all_finite] begin[:]
if <ast.UnaryOp object at 0x7da1b13b6e90> begin[:]
call[name[warnings].warn, parameter[binary_operation[constant[Can't check %s sparse matrix for nan or inf.] <ast.Mod object at 0x7da2590d6920> name[spmatrix].format]]]
return[name[spmatrix]] | keyword[def] identifier[_ensure_sparse_format] ( identifier[spmatrix] , identifier[accept_sparse] , identifier[dtype] , identifier[copy] ,
identifier[force_all_finite] ):
literal[string]
keyword[if] identifier[accept_sparse] keyword[in] [ keyword[None] , keyword[False] ]:
keyword[raise] identifier[TypeError] ( literal[string]
literal[string]
literal[string] )
keyword[if] identifier[dtype] keyword[is] keyword[None] :
identifier[dtype] = identifier[spmatrix] . identifier[dtype]
identifier[changed_format] = keyword[False]
keyword[if] ( identifier[isinstance] ( identifier[accept_sparse] ,( identifier[list] , identifier[tuple] ))
keyword[and] identifier[spmatrix] . identifier[format] keyword[not] keyword[in] identifier[accept_sparse] ):
identifier[spmatrix] = identifier[spmatrix] . identifier[asformat] ( identifier[accept_sparse] [ literal[int] ])
identifier[changed_format] = keyword[True]
keyword[if] identifier[dtype] != identifier[spmatrix] . identifier[dtype] :
identifier[spmatrix] = identifier[spmatrix] . identifier[astype] ( identifier[dtype] )
keyword[elif] identifier[copy] keyword[and] keyword[not] identifier[changed_format] :
identifier[spmatrix] = identifier[spmatrix] . identifier[copy] ()
keyword[if] identifier[force_all_finite] :
keyword[if] keyword[not] identifier[hasattr] ( identifier[spmatrix] , literal[string] ):
identifier[warnings] . identifier[warn] ( literal[string]
% identifier[spmatrix] . identifier[format] )
keyword[else] :
identifier[_assert_all_finite] ( identifier[spmatrix] . identifier[data] )
keyword[return] identifier[spmatrix] | def _ensure_sparse_format(spmatrix, accept_sparse, dtype, copy, force_all_finite):
"""Convert a sparse matrix to a given format.
Checks the sparse format of spmatrix and converts if necessary.
Parameters
----------
spmatrix : scipy sparse matrix
Input to validate and convert.
accept_sparse : string, list of string or None (default=None)
String[s] representing allowed sparse matrix formats ('csc',
'csr', 'coo', 'dok', 'bsr', 'lil', 'dia'). None means that sparse
matrix input will raise an error. If the input is sparse but not in
the allowed format, it will be converted to the first listed format.
dtype : string, type or None (default=none)
Data type of result. If None, the dtype of the input is preserved.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
Returns
-------
spmatrix_converted : scipy sparse matrix.
Matrix that is ensured to have an allowed type.
"""
if accept_sparse in [None, False]:
raise TypeError('A sparse matrix was passed, but dense data is required. Use X.toarray() to convert to a dense numpy array.') # depends on [control=['if'], data=[]]
if dtype is None:
dtype = spmatrix.dtype # depends on [control=['if'], data=['dtype']]
changed_format = False
if isinstance(accept_sparse, (list, tuple)) and spmatrix.format not in accept_sparse:
# create new with correct sparse
spmatrix = spmatrix.asformat(accept_sparse[0])
changed_format = True # depends on [control=['if'], data=[]]
if dtype != spmatrix.dtype:
# convert dtype
spmatrix = spmatrix.astype(dtype) # depends on [control=['if'], data=['dtype']]
elif copy and (not changed_format):
# force copy
spmatrix = spmatrix.copy() # depends on [control=['if'], data=[]]
if force_all_finite:
if not hasattr(spmatrix, 'data'):
warnings.warn("Can't check %s sparse matrix for nan or inf." % spmatrix.format) # depends on [control=['if'], data=[]]
else:
_assert_all_finite(spmatrix.data) # depends on [control=['if'], data=[]]
return spmatrix |
def branch(self, node, **kwargs):
"""create a new Identifiers for a new Node, with
this Identifiers as the parent."""
return _Identifiers(self.compiler, node, self, **kwargs) | def function[branch, parameter[self, node]]:
constant[create a new Identifiers for a new Node, with
this Identifiers as the parent.]
return[call[name[_Identifiers], parameter[name[self].compiler, name[node], name[self]]]] | keyword[def] identifier[branch] ( identifier[self] , identifier[node] ,** identifier[kwargs] ):
literal[string]
keyword[return] identifier[_Identifiers] ( identifier[self] . identifier[compiler] , identifier[node] , identifier[self] ,** identifier[kwargs] ) | def branch(self, node, **kwargs):
"""create a new Identifiers for a new Node, with
this Identifiers as the parent."""
return _Identifiers(self.compiler, node, self, **kwargs) |
def coherence(self, other, fftlength=None, overlap=None,
window='hann', **kwargs):
"""Calculate the frequency-coherence between this `TimeSeries`
and another.
Parameters
----------
other : `TimeSeries`
`TimeSeries` signal to calculate coherence with
fftlength : `float`, optional
number of seconds in single FFT, defaults to a single FFT
covering the full duration
overlap : `float`, optional
number of seconds of overlap between FFTs, defaults to the
recommended overlap for the given window (if given), or 0
window : `str`, `numpy.ndarray`, optional
window function to apply to timeseries prior to FFT,
see :func:`scipy.signal.get_window` for details on acceptable
formats
**kwargs
any other keyword arguments accepted by
:func:`matplotlib.mlab.cohere` except ``NFFT``, ``window``,
and ``noverlap`` which are superceded by the above keyword
arguments
Returns
-------
coherence : `~gwpy.frequencyseries.FrequencySeries`
the coherence `FrequencySeries` of this `TimeSeries`
with the other
Notes
-----
If `self` and `other` have difference
:attr:`TimeSeries.sample_rate` values, the higher sampled
`TimeSeries` will be down-sampled to match the lower.
See Also
--------
:func:`matplotlib.mlab.cohere`
for details of the coherence calculator
"""
from matplotlib import mlab
from ..frequencyseries import FrequencySeries
# check sampling rates
if self.sample_rate.to('Hertz') != other.sample_rate.to('Hertz'):
sampling = min(self.sample_rate.value, other.sample_rate.value)
# resample higher rate series
if self.sample_rate.value == sampling:
other = other.resample(sampling)
self_ = self
else:
self_ = self.resample(sampling)
else:
sampling = self.sample_rate.value
self_ = self
# check fft lengths
if overlap is None:
overlap = 0
else:
overlap = int((overlap * self_.sample_rate).decompose().value)
if fftlength is None:
fftlength = int(self_.size/2. + overlap/2.)
else:
fftlength = int((fftlength * self_.sample_rate).decompose().value)
if window is not None:
kwargs['window'] = signal.get_window(window, fftlength)
coh, freqs = mlab.cohere(self_.value, other.value, NFFT=fftlength,
Fs=sampling, noverlap=overlap, **kwargs)
out = coh.view(FrequencySeries)
out.xindex = freqs
out.epoch = self.epoch
out.name = 'Coherence between %s and %s' % (self.name, other.name)
out.unit = 'coherence'
return out | def function[coherence, parameter[self, other, fftlength, overlap, window]]:
constant[Calculate the frequency-coherence between this `TimeSeries`
and another.
Parameters
----------
other : `TimeSeries`
`TimeSeries` signal to calculate coherence with
fftlength : `float`, optional
number of seconds in single FFT, defaults to a single FFT
covering the full duration
overlap : `float`, optional
number of seconds of overlap between FFTs, defaults to the
recommended overlap for the given window (if given), or 0
window : `str`, `numpy.ndarray`, optional
window function to apply to timeseries prior to FFT,
see :func:`scipy.signal.get_window` for details on acceptable
formats
**kwargs
any other keyword arguments accepted by
:func:`matplotlib.mlab.cohere` except ``NFFT``, ``window``,
and ``noverlap`` which are superceded by the above keyword
arguments
Returns
-------
coherence : `~gwpy.frequencyseries.FrequencySeries`
the coherence `FrequencySeries` of this `TimeSeries`
with the other
Notes
-----
If `self` and `other` have difference
:attr:`TimeSeries.sample_rate` values, the higher sampled
`TimeSeries` will be down-sampled to match the lower.
See Also
--------
:func:`matplotlib.mlab.cohere`
for details of the coherence calculator
]
from relative_module[matplotlib] import module[mlab]
from relative_module[frequencyseries] import module[FrequencySeries]
if compare[call[name[self].sample_rate.to, parameter[constant[Hertz]]] not_equal[!=] call[name[other].sample_rate.to, parameter[constant[Hertz]]]] begin[:]
variable[sampling] assign[=] call[name[min], parameter[name[self].sample_rate.value, name[other].sample_rate.value]]
if compare[name[self].sample_rate.value equal[==] name[sampling]] begin[:]
variable[other] assign[=] call[name[other].resample, parameter[name[sampling]]]
variable[self_] assign[=] name[self]
if compare[name[overlap] is constant[None]] begin[:]
variable[overlap] assign[=] constant[0]
if compare[name[fftlength] is constant[None]] begin[:]
variable[fftlength] assign[=] call[name[int], parameter[binary_operation[binary_operation[name[self_].size / constant[2.0]] + binary_operation[name[overlap] / constant[2.0]]]]]
if compare[name[window] is_not constant[None]] begin[:]
call[name[kwargs]][constant[window]] assign[=] call[name[signal].get_window, parameter[name[window], name[fftlength]]]
<ast.Tuple object at 0x7da20e9b0a00> assign[=] call[name[mlab].cohere, parameter[name[self_].value, name[other].value]]
variable[out] assign[=] call[name[coh].view, parameter[name[FrequencySeries]]]
name[out].xindex assign[=] name[freqs]
name[out].epoch assign[=] name[self].epoch
name[out].name assign[=] binary_operation[constant[Coherence between %s and %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da20e9b1780>, <ast.Attribute object at 0x7da20e9b2890>]]]
name[out].unit assign[=] constant[coherence]
return[name[out]] | keyword[def] identifier[coherence] ( identifier[self] , identifier[other] , identifier[fftlength] = keyword[None] , identifier[overlap] = keyword[None] ,
identifier[window] = literal[string] ,** identifier[kwargs] ):
literal[string]
keyword[from] identifier[matplotlib] keyword[import] identifier[mlab]
keyword[from] .. identifier[frequencyseries] keyword[import] identifier[FrequencySeries]
keyword[if] identifier[self] . identifier[sample_rate] . identifier[to] ( literal[string] )!= identifier[other] . identifier[sample_rate] . identifier[to] ( literal[string] ):
identifier[sampling] = identifier[min] ( identifier[self] . identifier[sample_rate] . identifier[value] , identifier[other] . identifier[sample_rate] . identifier[value] )
keyword[if] identifier[self] . identifier[sample_rate] . identifier[value] == identifier[sampling] :
identifier[other] = identifier[other] . identifier[resample] ( identifier[sampling] )
identifier[self_] = identifier[self]
keyword[else] :
identifier[self_] = identifier[self] . identifier[resample] ( identifier[sampling] )
keyword[else] :
identifier[sampling] = identifier[self] . identifier[sample_rate] . identifier[value]
identifier[self_] = identifier[self]
keyword[if] identifier[overlap] keyword[is] keyword[None] :
identifier[overlap] = literal[int]
keyword[else] :
identifier[overlap] = identifier[int] (( identifier[overlap] * identifier[self_] . identifier[sample_rate] ). identifier[decompose] (). identifier[value] )
keyword[if] identifier[fftlength] keyword[is] keyword[None] :
identifier[fftlength] = identifier[int] ( identifier[self_] . identifier[size] / literal[int] + identifier[overlap] / literal[int] )
keyword[else] :
identifier[fftlength] = identifier[int] (( identifier[fftlength] * identifier[self_] . identifier[sample_rate] ). identifier[decompose] (). identifier[value] )
keyword[if] identifier[window] keyword[is] keyword[not] keyword[None] :
identifier[kwargs] [ literal[string] ]= identifier[signal] . identifier[get_window] ( identifier[window] , identifier[fftlength] )
identifier[coh] , identifier[freqs] = identifier[mlab] . identifier[cohere] ( identifier[self_] . identifier[value] , identifier[other] . identifier[value] , identifier[NFFT] = identifier[fftlength] ,
identifier[Fs] = identifier[sampling] , identifier[noverlap] = identifier[overlap] ,** identifier[kwargs] )
identifier[out] = identifier[coh] . identifier[view] ( identifier[FrequencySeries] )
identifier[out] . identifier[xindex] = identifier[freqs]
identifier[out] . identifier[epoch] = identifier[self] . identifier[epoch]
identifier[out] . identifier[name] = literal[string] %( identifier[self] . identifier[name] , identifier[other] . identifier[name] )
identifier[out] . identifier[unit] = literal[string]
keyword[return] identifier[out] | def coherence(self, other, fftlength=None, overlap=None, window='hann', **kwargs):
"""Calculate the frequency-coherence between this `TimeSeries`
and another.
Parameters
----------
other : `TimeSeries`
`TimeSeries` signal to calculate coherence with
fftlength : `float`, optional
number of seconds in single FFT, defaults to a single FFT
covering the full duration
overlap : `float`, optional
number of seconds of overlap between FFTs, defaults to the
recommended overlap for the given window (if given), or 0
window : `str`, `numpy.ndarray`, optional
window function to apply to timeseries prior to FFT,
see :func:`scipy.signal.get_window` for details on acceptable
formats
**kwargs
any other keyword arguments accepted by
:func:`matplotlib.mlab.cohere` except ``NFFT``, ``window``,
and ``noverlap`` which are superceded by the above keyword
arguments
Returns
-------
coherence : `~gwpy.frequencyseries.FrequencySeries`
the coherence `FrequencySeries` of this `TimeSeries`
with the other
Notes
-----
If `self` and `other` have difference
:attr:`TimeSeries.sample_rate` values, the higher sampled
`TimeSeries` will be down-sampled to match the lower.
See Also
--------
:func:`matplotlib.mlab.cohere`
for details of the coherence calculator
"""
from matplotlib import mlab
from ..frequencyseries import FrequencySeries
# check sampling rates
if self.sample_rate.to('Hertz') != other.sample_rate.to('Hertz'):
sampling = min(self.sample_rate.value, other.sample_rate.value)
# resample higher rate series
if self.sample_rate.value == sampling:
other = other.resample(sampling)
self_ = self # depends on [control=['if'], data=['sampling']]
else:
self_ = self.resample(sampling) # depends on [control=['if'], data=[]]
else:
sampling = self.sample_rate.value
self_ = self
# check fft lengths
if overlap is None:
overlap = 0 # depends on [control=['if'], data=['overlap']]
else:
overlap = int((overlap * self_.sample_rate).decompose().value)
if fftlength is None:
fftlength = int(self_.size / 2.0 + overlap / 2.0) # depends on [control=['if'], data=['fftlength']]
else:
fftlength = int((fftlength * self_.sample_rate).decompose().value)
if window is not None:
kwargs['window'] = signal.get_window(window, fftlength) # depends on [control=['if'], data=['window']]
(coh, freqs) = mlab.cohere(self_.value, other.value, NFFT=fftlength, Fs=sampling, noverlap=overlap, **kwargs)
out = coh.view(FrequencySeries)
out.xindex = freqs
out.epoch = self.epoch
out.name = 'Coherence between %s and %s' % (self.name, other.name)
out.unit = 'coherence'
return out |
def _get_path_from_parent(self, parent):
"""
Return a list of PathInfos containing the path from the parent
model to the current model, or an empty list if parent is not a
parent of the current model.
"""
if hasattr(self, 'get_path_from_parent'):
return self.get_path_from_parent(parent)
if self.model is parent:
return []
model = self.concrete_model
# Get a reversed base chain including both the current and parent
# models.
chain = model._meta.get_base_chain(parent) or []
chain.reverse()
chain.append(model)
# Construct a list of the PathInfos between models in chain.
path = []
for i, ancestor in enumerate(chain[:-1]):
child = chain[i + 1]
link = child._meta.get_ancestor_link(ancestor)
path.extend(link.get_reverse_path_info())
return path | def function[_get_path_from_parent, parameter[self, parent]]:
constant[
Return a list of PathInfos containing the path from the parent
model to the current model, or an empty list if parent is not a
parent of the current model.
]
if call[name[hasattr], parameter[name[self], constant[get_path_from_parent]]] begin[:]
return[call[name[self].get_path_from_parent, parameter[name[parent]]]]
if compare[name[self].model is name[parent]] begin[:]
return[list[[]]]
variable[model] assign[=] name[self].concrete_model
variable[chain] assign[=] <ast.BoolOp object at 0x7da2047ebbe0>
call[name[chain].reverse, parameter[]]
call[name[chain].append, parameter[name[model]]]
variable[path] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da2047eaad0>, <ast.Name object at 0x7da2047eafb0>]]] in starred[call[name[enumerate], parameter[call[name[chain]][<ast.Slice object at 0x7da2047ea140>]]]] begin[:]
variable[child] assign[=] call[name[chain]][binary_operation[name[i] + constant[1]]]
variable[link] assign[=] call[name[child]._meta.get_ancestor_link, parameter[name[ancestor]]]
call[name[path].extend, parameter[call[name[link].get_reverse_path_info, parameter[]]]]
return[name[path]] | keyword[def] identifier[_get_path_from_parent] ( identifier[self] , identifier[parent] ):
literal[string]
keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ):
keyword[return] identifier[self] . identifier[get_path_from_parent] ( identifier[parent] )
keyword[if] identifier[self] . identifier[model] keyword[is] identifier[parent] :
keyword[return] []
identifier[model] = identifier[self] . identifier[concrete_model]
identifier[chain] = identifier[model] . identifier[_meta] . identifier[get_base_chain] ( identifier[parent] ) keyword[or] []
identifier[chain] . identifier[reverse] ()
identifier[chain] . identifier[append] ( identifier[model] )
identifier[path] =[]
keyword[for] identifier[i] , identifier[ancestor] keyword[in] identifier[enumerate] ( identifier[chain] [:- literal[int] ]):
identifier[child] = identifier[chain] [ identifier[i] + literal[int] ]
identifier[link] = identifier[child] . identifier[_meta] . identifier[get_ancestor_link] ( identifier[ancestor] )
identifier[path] . identifier[extend] ( identifier[link] . identifier[get_reverse_path_info] ())
keyword[return] identifier[path] | def _get_path_from_parent(self, parent):
"""
Return a list of PathInfos containing the path from the parent
model to the current model, or an empty list if parent is not a
parent of the current model.
"""
if hasattr(self, 'get_path_from_parent'):
return self.get_path_from_parent(parent) # depends on [control=['if'], data=[]]
if self.model is parent:
return [] # depends on [control=['if'], data=[]]
model = self.concrete_model
# Get a reversed base chain including both the current and parent
# models.
chain = model._meta.get_base_chain(parent) or []
chain.reverse()
chain.append(model)
# Construct a list of the PathInfos between models in chain.
path = []
for (i, ancestor) in enumerate(chain[:-1]):
child = chain[i + 1]
link = child._meta.get_ancestor_link(ancestor)
path.extend(link.get_reverse_path_info()) # depends on [control=['for'], data=[]]
return path |
def delete(self):
    """Delete this object.
    :returns: The json response from the server.
    """
    session = self.reddit_session
    response = session.request_json(
        session.config['del'], data={'id': self.fullname})
    # Drop the cached listing for the authenticated user so the deleted
    # item no longer shows up.
    session.evict(session.config['user'])
    return response
constant[Delete this object.
:returns: The json response from the server.
]
variable[url] assign[=] call[name[self].reddit_session.config][constant[del]]
variable[data] assign[=] dictionary[[<ast.Constant object at 0x7da20e9b38e0>], [<ast.Attribute object at 0x7da20e9b05b0>]]
variable[response] assign[=] call[name[self].reddit_session.request_json, parameter[name[url]]]
call[name[self].reddit_session.evict, parameter[call[name[self].reddit_session.config][constant[user]]]]
return[name[response]] | keyword[def] identifier[delete] ( identifier[self] ):
literal[string]
identifier[url] = identifier[self] . identifier[reddit_session] . identifier[config] [ literal[string] ]
identifier[data] ={ literal[string] : identifier[self] . identifier[fullname] }
identifier[response] = identifier[self] . identifier[reddit_session] . identifier[request_json] ( identifier[url] , identifier[data] = identifier[data] )
identifier[self] . identifier[reddit_session] . identifier[evict] ( identifier[self] . identifier[reddit_session] . identifier[config] [ literal[string] ])
keyword[return] identifier[response] | def delete(self):
"""Delete this object.
:returns: The json response from the server.
"""
url = self.reddit_session.config['del']
data = {'id': self.fullname}
response = self.reddit_session.request_json(url, data=data)
self.reddit_session.evict(self.reddit_session.config['user'])
return response |
def delete(self):
    """Remove this node together with all of its descendants."""
    manager = self.__class__.objects
    manager.filter(pk=self.pk).delete()
constant[Removes a node and all it's descendants.]
call[call[name[self].__class__.objects.filter, parameter[]].delete, parameter[]] | keyword[def] identifier[delete] ( identifier[self] ):
literal[string]
identifier[self] . identifier[__class__] . identifier[objects] . identifier[filter] ( identifier[pk] = identifier[self] . identifier[pk] ). identifier[delete] () | def delete(self):
"""Removes a node and all it's descendants."""
self.__class__.objects.filter(pk=self.pk).delete() |
def save(self, *args, **kwargs):
    """Keep the unique order in sync.

    Repositions the row inside its ordered group (via ``_save``) before
    delegating to the normal model ``save``.  If the first reorder
    attempt hits the unique (group, sort_order) constraint, the current
    position is re-read from the database and the move is retried once.
    """
    objects = self.get_filtered_manager()
    # Position the instance held when it was loaded; None when no
    # original sort order was recorded (e.g. a freshly created row).
    old_pos = getattr(self, '_original_sort_order', None)
    new_pos = self.sort_order
    # If the fields making up the unique-together grouping changed, the
    # remembered position is meaningless in the new group, so clear the
    # sort order and let _save append/recompute it.
    if old_pos is None and self._unique_togethers_changed():
        self.sort_order = None
        new_pos = None
    try:
        with transaction.atomic():
            self._save(objects, old_pos, new_pos)
    except IntegrityError:
        # A concurrent write shifted this row's position; re-read the
        # actual sort_order from the database and retry the move once.
        with transaction.atomic():
            old_pos = objects.filter(pk=self.pk).values_list(
                'sort_order', flat=True)[0]
            self._save(objects, old_pos, new_pos)
    # Call the "real" save() method.
    super(Orderable, self).save(*args, **kwargs)
constant[Keep the unique order in sync.]
variable[objects] assign[=] call[name[self].get_filtered_manager, parameter[]]
variable[old_pos] assign[=] call[name[getattr], parameter[name[self], constant[_original_sort_order], constant[None]]]
variable[new_pos] assign[=] name[self].sort_order
if <ast.BoolOp object at 0x7da18bcc9e70> begin[:]
name[self].sort_order assign[=] constant[None]
variable[new_pos] assign[=] constant[None]
<ast.Try object at 0x7da18bcc8670>
call[call[name[super], parameter[name[Orderable], name[self]]].save, parameter[<ast.Starred object at 0x7da2041d8520>]] | keyword[def] identifier[save] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
identifier[objects] = identifier[self] . identifier[get_filtered_manager] ()
identifier[old_pos] = identifier[getattr] ( identifier[self] , literal[string] , keyword[None] )
identifier[new_pos] = identifier[self] . identifier[sort_order]
keyword[if] identifier[old_pos] keyword[is] keyword[None] keyword[and] identifier[self] . identifier[_unique_togethers_changed] ():
identifier[self] . identifier[sort_order] = keyword[None]
identifier[new_pos] = keyword[None]
keyword[try] :
keyword[with] identifier[transaction] . identifier[atomic] ():
identifier[self] . identifier[_save] ( identifier[objects] , identifier[old_pos] , identifier[new_pos] )
keyword[except] identifier[IntegrityError] :
keyword[with] identifier[transaction] . identifier[atomic] ():
identifier[old_pos] = identifier[objects] . identifier[filter] ( identifier[pk] = identifier[self] . identifier[pk] ). identifier[values_list] (
literal[string] , identifier[flat] = keyword[True] )[ literal[int] ]
identifier[self] . identifier[_save] ( identifier[objects] , identifier[old_pos] , identifier[new_pos] )
identifier[super] ( identifier[Orderable] , identifier[self] ). identifier[save] (* identifier[args] ,** identifier[kwargs] ) | def save(self, *args, **kwargs):
"""Keep the unique order in sync."""
objects = self.get_filtered_manager()
old_pos = getattr(self, '_original_sort_order', None)
new_pos = self.sort_order
if old_pos is None and self._unique_togethers_changed():
self.sort_order = None
new_pos = None # depends on [control=['if'], data=[]]
try:
with transaction.atomic():
self._save(objects, old_pos, new_pos) # depends on [control=['with'], data=[]] # depends on [control=['try'], data=[]]
except IntegrityError:
with transaction.atomic():
old_pos = objects.filter(pk=self.pk).values_list('sort_order', flat=True)[0]
self._save(objects, old_pos, new_pos) # depends on [control=['with'], data=[]] # depends on [control=['except'], data=[]]
# Call the "real" save() method.
super(Orderable, self).save(*args, **kwargs) |
def fit(
    self,
    img_data,
    save_freq=-1,
    pic_freq=-1,
    n_epochs=100,
    batch_size=50,
    weight_decay=True,
    model_path='./GAN_training_model/',
    img_path='./GAN_training_images/',
    img_out_width=10,
    mirroring=False
):
    '''Fit the GAN model to the image data.
    Parameters
    ----------
    img_data : array-like shape (n_images, n_colors, image_width, image_height)
        Images used to fit VAE model.
    save_freq [optional] : int
        Sets the number of epochs to wait before saving the model and optimizer states.
        Also saves image files of randomly generated images using those states in a
        separate directory. Does not save if negative valued.
    pic_freq [optional] : int
        Sets the number of batches to wait before displaying a picture of randomly
        generated images using the current model state.
        Does not display if negative valued.
    n_epochs [optional] : int
        Gives the number of training epochs to run through for the fitting
        process.
    batch_size [optional] : int
        The size of the batch to use when training. Note: generally larger
        batch sizes will result in faster epoch iteration, but at the cost
        of lower granularity when updating the layer weights.
    weight_decay [optional] : bool
        Flag that controls adding weight decay hooks to the optimizer.
    model_path [optional] : str
        Directory where the model and optimizer state files will be saved.
    img_path [optional] : str
        Directory where the end of epoch training image files will be saved.
    img_out_width : int
        Controls the number of randomly generated images per row in the output
        saved images.
    mirroring [optional] : bool
        Controls whether images are randomly mirrored along the vertical axis with
        a .5 probability. Artificially increases image variance for training set.
    '''
    width = img_out_width
    # Bind the generator (dec) and discriminator (disc) to their optimizers.
    self.dec_opt.setup(self.dec)
    self.disc_opt.setup(self.disc)
    if weight_decay:
        self.dec_opt.add_hook(chainer.optimizer.WeightDecay(0.00001))
        self.disc_opt.add_hook(chainer.optimizer.WeightDecay(0.00001))
    n_data = img_data.shape[0]
    # Batch start offsets into the (shuffled) data set.
    batch_iter = list(range(0, n_data, batch_size))
    n_batches = len(batch_iter)
    # Fixed latent samples reused every epoch so progress pictures are
    # comparable across the training run.
    c_samples = np.random.standard_normal((width, self.latent_width)).astype(np.float32)
    save_counter = 0
    for epoch in range(1, n_epochs + 1):
        print('epoch: %i' % epoch)
        t1 = time.time()
        # Fresh shuffle of the training data each epoch.
        indexes = np.random.permutation(n_data)
        last_loss_dec = 0.
        last_loss_disc = 0.
        count = 0
        for i in tqdm.tqdm(batch_iter):
            x = img_data[indexes[i: i + batch_size]]
            size = x.shape[0]
            if mirroring:
                # Flip each image horizontally with probability .5
                # (augmentation; mutates the sliced batch in place).
                for j in range(size):
                    if np.random.randint(2):
                        x[j, :, :, :] = x[j, :, :, ::-1]
            x_batch = Variable(x)
            # Label vectors: zeros = "generated", ones = "real".
            zeros = Variable(np.zeros(size, dtype=np.int32))
            ones = Variable(np.ones(size, dtype=np.int32))
            if self.flag_gpu:
                x_batch.to_gpu()
                zeros.to_gpu()
                ones.to_gpu()
            # disc_samp: discriminator output on generated images,
            # disc_batch: discriminator output on the real batch.
            disc_samp, disc_batch = self._forward(x_batch)
            # Generator loss: fool the discriminator into labeling
            # generated samples as real.
            L_dec = F.softmax_cross_entropy(disc_samp, ones)
            # Discriminator loss: average of "call fakes fake" and
            # "call reals real".
            L_disc = F.softmax_cross_entropy(disc_samp, zeros)
            L_disc += F.softmax_cross_entropy(disc_batch, ones)
            L_disc /= 2.
            # Separate backward/update passes for each network.
            self.dec_opt.zero_grads()
            L_dec.backward()
            self.dec_opt.update()
            self.disc_opt.zero_grads()
            L_disc.backward()
            self.disc_opt.update()
            last_loss_dec += L_dec.data
            last_loss_disc += L_disc.data
            count += 1
            if pic_freq > 0:
                assert type(pic_freq) == int, "pic_freq must be an integer."
                if count % pic_freq == 0:
                    fig = self._plot_img(
                        c_samples,
                        img_path=img_path,
                        epoch=epoch
                    )
                    display(fig)
        if save_freq > 0:
            save_counter += 1
            assert type(save_freq) == int, "save_freq must be an integer."
            if epoch % save_freq == 0:
                name = "gan_epoch%s" % str(epoch)
                # Only write the meta file on the first checkpoint.
                if save_counter == 1:
                    save_meta = True
                else:
                    save_meta = False
                self.save(model_path, name, save_meta=save_meta)
                fig = self._plot_img(
                    c_samples,
                    img_path=img_path,
                    epoch=epoch,
                    batch=n_batches,
                    save_pic=True
                )
        # Report mean per-batch losses and wall-clock time for the epoch.
        msg = "dec_loss = {0} , disc_loss = {1}"
        print(msg.format(last_loss_dec/n_batches, last_loss_disc/n_batches))
        t_diff = time.time()-t1
        print("time: %f\n\n" % t_diff)
constant[Fit the GAN model to the image data.
Parameters
----------
img_data : array-like shape (n_images, n_colors, image_width, image_height)
Images used to fit VAE model.
save_freq [optional] : int
Sets the number of epochs to wait before saving the model and optimizer states.
Also saves image files of randomly generated images using those states in a
separate directory. Does not save if negative valued.
pic_freq [optional] : int
Sets the number of batches to wait before displaying a picture or randomly
generated images using the current model state.
Does not display if negative valued.
n_epochs [optional] : int
Gives the number of training epochs to run through for the fitting
process.
batch_size [optional] : int
The size of the batch to use when training. Note: generally larger
batch sizes will result in fater epoch iteration, but at the const
of lower granulatity when updating the layer weights.
weight_decay [optional] : bool
Flag that controls adding weight decay hooks to the optimizer.
model_path [optional] : str
Directory where the model and optimizer state files will be saved.
img_path [optional] : str
Directory where the end of epoch training image files will be saved.
img_out_width : int
Controls the number of randomly genreated images per row in the output
saved imags.
mirroring [optional] : bool
Controls whether images are randomly mirrored along the verical axis with
a .5 probability. Artificially increases images variance for training set.
]
variable[width] assign[=] name[img_out_width]
call[name[self].dec_opt.setup, parameter[name[self].dec]]
call[name[self].disc_opt.setup, parameter[name[self].disc]]
if name[weight_decay] begin[:]
call[name[self].dec_opt.add_hook, parameter[call[name[chainer].optimizer.WeightDecay, parameter[constant[1e-05]]]]]
call[name[self].disc_opt.add_hook, parameter[call[name[chainer].optimizer.WeightDecay, parameter[constant[1e-05]]]]]
variable[n_data] assign[=] call[name[img_data].shape][constant[0]]
variable[batch_iter] assign[=] call[name[list], parameter[call[name[range], parameter[constant[0], name[n_data], name[batch_size]]]]]
variable[n_batches] assign[=] call[name[len], parameter[name[batch_iter]]]
variable[c_samples] assign[=] call[call[name[np].random.standard_normal, parameter[tuple[[<ast.Name object at 0x7da1b18482b0>, <ast.Attribute object at 0x7da1b184b250>]]]].astype, parameter[name[np].float32]]
variable[save_counter] assign[=] constant[0]
for taget[name[epoch]] in starred[call[name[range], parameter[constant[1], binary_operation[name[n_epochs] + constant[1]]]]] begin[:]
call[name[print], parameter[binary_operation[constant[epoch: %i] <ast.Mod object at 0x7da2590d6920> name[epoch]]]]
variable[t1] assign[=] call[name[time].time, parameter[]]
variable[indexes] assign[=] call[name[np].random.permutation, parameter[name[n_data]]]
variable[last_loss_dec] assign[=] constant[0.0]
variable[last_loss_disc] assign[=] constant[0.0]
variable[count] assign[=] constant[0]
for taget[name[i]] in starred[call[name[tqdm].tqdm, parameter[name[batch_iter]]]] begin[:]
variable[x] assign[=] call[name[img_data]][call[name[indexes]][<ast.Slice object at 0x7da18dc98250>]]
variable[size] assign[=] call[name[x].shape][constant[0]]
if name[mirroring] begin[:]
for taget[name[j]] in starred[call[name[range], parameter[name[size]]]] begin[:]
if call[name[np].random.randint, parameter[constant[2]]] begin[:]
call[name[x]][tuple[[<ast.Name object at 0x7da18dc9ada0>, <ast.Slice object at 0x7da18dc9afb0>, <ast.Slice object at 0x7da18dc9b490>, <ast.Slice object at 0x7da18dc98460>]]] assign[=] call[name[x]][tuple[[<ast.Name object at 0x7da18dc9a2c0>, <ast.Slice object at 0x7da18dc987f0>, <ast.Slice object at 0x7da18dc98220>, <ast.Slice object at 0x7da18dc98040>]]]
variable[x_batch] assign[=] call[name[Variable], parameter[name[x]]]
variable[zeros] assign[=] call[name[Variable], parameter[call[name[np].zeros, parameter[name[size]]]]]
variable[ones] assign[=] call[name[Variable], parameter[call[name[np].ones, parameter[name[size]]]]]
if name[self].flag_gpu begin[:]
call[name[x_batch].to_gpu, parameter[]]
call[name[zeros].to_gpu, parameter[]]
call[name[ones].to_gpu, parameter[]]
<ast.Tuple object at 0x7da18dc99bd0> assign[=] call[name[self]._forward, parameter[name[x_batch]]]
variable[L_dec] assign[=] call[name[F].softmax_cross_entropy, parameter[name[disc_samp], name[ones]]]
variable[L_disc] assign[=] call[name[F].softmax_cross_entropy, parameter[name[disc_samp], name[zeros]]]
<ast.AugAssign object at 0x7da20c6c4580>
<ast.AugAssign object at 0x7da20c6c4a00>
call[name[self].dec_opt.zero_grads, parameter[]]
call[name[L_dec].backward, parameter[]]
call[name[self].dec_opt.update, parameter[]]
call[name[self].disc_opt.zero_grads, parameter[]]
call[name[L_disc].backward, parameter[]]
call[name[self].disc_opt.update, parameter[]]
<ast.AugAssign object at 0x7da20c6c5240>
<ast.AugAssign object at 0x7da20c6c48b0>
<ast.AugAssign object at 0x7da20c6c7fa0>
if compare[name[pic_freq] greater[>] constant[0]] begin[:]
assert[compare[call[name[type], parameter[name[pic_freq]]] equal[==] name[int]]]
if compare[binary_operation[name[count] <ast.Mod object at 0x7da2590d6920> name[pic_freq]] equal[==] constant[0]] begin[:]
variable[fig] assign[=] call[name[self]._plot_img, parameter[name[c_samples]]]
call[name[display], parameter[name[fig]]]
if compare[name[save_freq] greater[>] constant[0]] begin[:]
<ast.AugAssign object at 0x7da20c6c77c0>
assert[compare[call[name[type], parameter[name[save_freq]]] equal[==] name[int]]]
if compare[binary_operation[name[epoch] <ast.Mod object at 0x7da2590d6920> name[save_freq]] equal[==] constant[0]] begin[:]
variable[name] assign[=] binary_operation[constant[gan_epoch%s] <ast.Mod object at 0x7da2590d6920> call[name[str], parameter[name[epoch]]]]
if compare[name[save_counter] equal[==] constant[1]] begin[:]
variable[save_meta] assign[=] constant[True]
call[name[self].save, parameter[name[model_path], name[name]]]
variable[fig] assign[=] call[name[self]._plot_img, parameter[name[c_samples]]]
variable[msg] assign[=] constant[dec_loss = {0} , disc_loss = {1}]
call[name[print], parameter[call[name[msg].format, parameter[binary_operation[name[last_loss_dec] / name[n_batches]], binary_operation[name[last_loss_disc] / name[n_batches]]]]]]
variable[t_diff] assign[=] binary_operation[call[name[time].time, parameter[]] - name[t1]]
call[name[print], parameter[binary_operation[constant[time: %f
] <ast.Mod object at 0x7da2590d6920> name[t_diff]]]] | keyword[def] identifier[fit] (
identifier[self] ,
identifier[img_data] ,
identifier[save_freq] =- literal[int] ,
identifier[pic_freq] =- literal[int] ,
identifier[n_epochs] = literal[int] ,
identifier[batch_size] = literal[int] ,
identifier[weight_decay] = keyword[True] ,
identifier[model_path] = literal[string] ,
identifier[img_path] = literal[string] ,
identifier[img_out_width] = literal[int] ,
identifier[mirroring] = keyword[False]
):
literal[string]
identifier[width] = identifier[img_out_width]
identifier[self] . identifier[dec_opt] . identifier[setup] ( identifier[self] . identifier[dec] )
identifier[self] . identifier[disc_opt] . identifier[setup] ( identifier[self] . identifier[disc] )
keyword[if] identifier[weight_decay] :
identifier[self] . identifier[dec_opt] . identifier[add_hook] ( identifier[chainer] . identifier[optimizer] . identifier[WeightDecay] ( literal[int] ))
identifier[self] . identifier[disc_opt] . identifier[add_hook] ( identifier[chainer] . identifier[optimizer] . identifier[WeightDecay] ( literal[int] ))
identifier[n_data] = identifier[img_data] . identifier[shape] [ literal[int] ]
identifier[batch_iter] = identifier[list] ( identifier[range] ( literal[int] , identifier[n_data] , identifier[batch_size] ))
identifier[n_batches] = identifier[len] ( identifier[batch_iter] )
identifier[c_samples] = identifier[np] . identifier[random] . identifier[standard_normal] (( identifier[width] , identifier[self] . identifier[latent_width] )). identifier[astype] ( identifier[np] . identifier[float32] )
identifier[save_counter] = literal[int]
keyword[for] identifier[epoch] keyword[in] identifier[range] ( literal[int] , identifier[n_epochs] + literal[int] ):
identifier[print] ( literal[string] % identifier[epoch] )
identifier[t1] = identifier[time] . identifier[time] ()
identifier[indexes] = identifier[np] . identifier[random] . identifier[permutation] ( identifier[n_data] )
identifier[last_loss_dec] = literal[int]
identifier[last_loss_disc] = literal[int]
identifier[count] = literal[int]
keyword[for] identifier[i] keyword[in] identifier[tqdm] . identifier[tqdm] ( identifier[batch_iter] ):
identifier[x] = identifier[img_data] [ identifier[indexes] [ identifier[i] : identifier[i] + identifier[batch_size] ]]
identifier[size] = identifier[x] . identifier[shape] [ literal[int] ]
keyword[if] identifier[mirroring] :
keyword[for] identifier[j] keyword[in] identifier[range] ( identifier[size] ):
keyword[if] identifier[np] . identifier[random] . identifier[randint] ( literal[int] ):
identifier[x] [ identifier[j] ,:,:,:]= identifier[x] [ identifier[j] ,:,:,::- literal[int] ]
identifier[x_batch] = identifier[Variable] ( identifier[x] )
identifier[zeros] = identifier[Variable] ( identifier[np] . identifier[zeros] ( identifier[size] , identifier[dtype] = identifier[np] . identifier[int32] ))
identifier[ones] = identifier[Variable] ( identifier[np] . identifier[ones] ( identifier[size] , identifier[dtype] = identifier[np] . identifier[int32] ))
keyword[if] identifier[self] . identifier[flag_gpu] :
identifier[x_batch] . identifier[to_gpu] ()
identifier[zeros] . identifier[to_gpu] ()
identifier[ones] . identifier[to_gpu] ()
identifier[disc_samp] , identifier[disc_batch] = identifier[self] . identifier[_forward] ( identifier[x_batch] )
identifier[L_dec] = identifier[F] . identifier[softmax_cross_entropy] ( identifier[disc_samp] , identifier[ones] )
identifier[L_disc] = identifier[F] . identifier[softmax_cross_entropy] ( identifier[disc_samp] , identifier[zeros] )
identifier[L_disc] += identifier[F] . identifier[softmax_cross_entropy] ( identifier[disc_batch] , identifier[ones] )
identifier[L_disc] /= literal[int]
identifier[self] . identifier[dec_opt] . identifier[zero_grads] ()
identifier[L_dec] . identifier[backward] ()
identifier[self] . identifier[dec_opt] . identifier[update] ()
identifier[self] . identifier[disc_opt] . identifier[zero_grads] ()
identifier[L_disc] . identifier[backward] ()
identifier[self] . identifier[disc_opt] . identifier[update] ()
identifier[last_loss_dec] += identifier[L_dec] . identifier[data]
identifier[last_loss_disc] += identifier[L_disc] . identifier[data]
identifier[count] += literal[int]
keyword[if] identifier[pic_freq] > literal[int] :
keyword[assert] identifier[type] ( identifier[pic_freq] )== identifier[int] , literal[string]
keyword[if] identifier[count] % identifier[pic_freq] == literal[int] :
identifier[fig] = identifier[self] . identifier[_plot_img] (
identifier[c_samples] ,
identifier[img_path] = identifier[img_path] ,
identifier[epoch] = identifier[epoch]
)
identifier[display] ( identifier[fig] )
keyword[if] identifier[save_freq] > literal[int] :
identifier[save_counter] += literal[int]
keyword[assert] identifier[type] ( identifier[save_freq] )== identifier[int] , literal[string]
keyword[if] identifier[epoch] % identifier[save_freq] == literal[int] :
identifier[name] = literal[string] % identifier[str] ( identifier[epoch] )
keyword[if] identifier[save_counter] == literal[int] :
identifier[save_meta] = keyword[True]
keyword[else] :
identifier[save_meta] = keyword[False]
identifier[self] . identifier[save] ( identifier[model_path] , identifier[name] , identifier[save_meta] = identifier[save_meta] )
identifier[fig] = identifier[self] . identifier[_plot_img] (
identifier[c_samples] ,
identifier[img_path] = identifier[img_path] ,
identifier[epoch] = identifier[epoch] ,
identifier[batch] = identifier[n_batches] ,
identifier[save_pic] = keyword[True]
)
identifier[msg] = literal[string]
identifier[print] ( identifier[msg] . identifier[format] ( identifier[last_loss_dec] / identifier[n_batches] , identifier[last_loss_disc] / identifier[n_batches] ))
identifier[t_diff] = identifier[time] . identifier[time] ()- identifier[t1]
identifier[print] ( literal[string] % identifier[t_diff] ) | def fit(self, img_data, save_freq=-1, pic_freq=-1, n_epochs=100, batch_size=50, weight_decay=True, model_path='./GAN_training_model/', img_path='./GAN_training_images/', img_out_width=10, mirroring=False):
"""Fit the GAN model to the image data.
Parameters
----------
img_data : array-like shape (n_images, n_colors, image_width, image_height)
Images used to fit VAE model.
save_freq [optional] : int
Sets the number of epochs to wait before saving the model and optimizer states.
Also saves image files of randomly generated images using those states in a
separate directory. Does not save if negative valued.
pic_freq [optional] : int
Sets the number of batches to wait before displaying a picture or randomly
generated images using the current model state.
Does not display if negative valued.
n_epochs [optional] : int
Gives the number of training epochs to run through for the fitting
process.
batch_size [optional] : int
The size of the batch to use when training. Note: generally larger
batch sizes will result in fater epoch iteration, but at the const
of lower granulatity when updating the layer weights.
weight_decay [optional] : bool
Flag that controls adding weight decay hooks to the optimizer.
model_path [optional] : str
Directory where the model and optimizer state files will be saved.
img_path [optional] : str
Directory where the end of epoch training image files will be saved.
img_out_width : int
Controls the number of randomly genreated images per row in the output
saved imags.
mirroring [optional] : bool
Controls whether images are randomly mirrored along the verical axis with
a .5 probability. Artificially increases images variance for training set.
"""
width = img_out_width
self.dec_opt.setup(self.dec)
self.disc_opt.setup(self.disc)
if weight_decay:
self.dec_opt.add_hook(chainer.optimizer.WeightDecay(1e-05))
self.disc_opt.add_hook(chainer.optimizer.WeightDecay(1e-05)) # depends on [control=['if'], data=[]]
n_data = img_data.shape[0]
batch_iter = list(range(0, n_data, batch_size))
n_batches = len(batch_iter)
c_samples = np.random.standard_normal((width, self.latent_width)).astype(np.float32)
save_counter = 0
for epoch in range(1, n_epochs + 1):
print('epoch: %i' % epoch)
t1 = time.time()
indexes = np.random.permutation(n_data)
last_loss_dec = 0.0
last_loss_disc = 0.0
count = 0
for i in tqdm.tqdm(batch_iter):
x = img_data[indexes[i:i + batch_size]]
size = x.shape[0]
if mirroring:
for j in range(size):
if np.random.randint(2):
x[j, :, :, :] = x[j, :, :, ::-1] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['j']] # depends on [control=['if'], data=[]]
x_batch = Variable(x)
zeros = Variable(np.zeros(size, dtype=np.int32))
ones = Variable(np.ones(size, dtype=np.int32))
if self.flag_gpu:
x_batch.to_gpu()
zeros.to_gpu()
ones.to_gpu() # depends on [control=['if'], data=[]]
(disc_samp, disc_batch) = self._forward(x_batch)
L_dec = F.softmax_cross_entropy(disc_samp, ones)
L_disc = F.softmax_cross_entropy(disc_samp, zeros)
L_disc += F.softmax_cross_entropy(disc_batch, ones)
L_disc /= 2.0
self.dec_opt.zero_grads()
L_dec.backward()
self.dec_opt.update()
self.disc_opt.zero_grads()
L_disc.backward()
self.disc_opt.update()
last_loss_dec += L_dec.data
last_loss_disc += L_disc.data
count += 1
if pic_freq > 0:
assert type(pic_freq) == int, 'pic_freq must be an integer.'
if count % pic_freq == 0:
fig = self._plot_img(c_samples, img_path=img_path, epoch=epoch)
display(fig) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['pic_freq']] # depends on [control=['for'], data=['i']]
if save_freq > 0:
save_counter += 1
assert type(save_freq) == int, 'save_freq must be an integer.'
if epoch % save_freq == 0:
name = 'gan_epoch%s' % str(epoch)
if save_counter == 1:
save_meta = True # depends on [control=['if'], data=[]]
else:
save_meta = False
self.save(model_path, name, save_meta=save_meta)
fig = self._plot_img(c_samples, img_path=img_path, epoch=epoch, batch=n_batches, save_pic=True) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['save_freq']]
msg = 'dec_loss = {0} , disc_loss = {1}'
print(msg.format(last_loss_dec / n_batches, last_loss_disc / n_batches))
t_diff = time.time() - t1
print('time: %f\n\n' % t_diff) # depends on [control=['for'], data=['epoch']] |
def execute(self):
        """
        Decode, and execute one instruction pointed by register PC.

        Raises:
            ConcretizeRegister: if PC is symbolic and must be concretized first.
            InvalidMemoryAccess: if the memory at PC is not executable.
        """
        # PC must be a concrete value before we can fetch bytes from memory.
        if issymbolic(self.PC):
            raise ConcretizeRegister(self, 'PC', policy='ALL')
        # The page holding PC must be mapped with execute permission.
        if not self.memory.access_ok(self.PC, 'x'):
            raise InvalidMemoryAccess(self.PC, 'x')
        self._publish('will_decode_instruction', self.PC)
        insn = self.decode_instruction(self.PC)
        self._last_pc = self.PC
        self._publish('will_execute_instruction', self.PC, insn)
        # FIXME (theo) why just return here?
        # NOTE(review): presumably a 'will_execute_instruction' subscriber may
        # have moved PC, making the decoded instruction stale -- confirm intent.
        if insn.address != self.PC:
            return
        name = self.canonicalize_instruction_name(insn)
        if logger.level == logging.DEBUG:
            logger.debug(self.render_instruction(insn))
            for l in self.render_registers():
                register_logger.debug(l)
        try:
            # Concrete (emulator-backed) path: sync emulator state before a
            # syscall, otherwise emulate the instruction natively.
            if self._concrete and 'SYSCALL' in name:
                self.emu.sync_unicorn_to_manticore()
            if self._concrete and 'SYSCALL' not in name:
                self.emulate(insn)
                # Reaching the break address switches back to pure Manticore
                # (symbolic) execution.
                if self.PC == self._break_unicorn_at:
                    logger.debug("Switching from Unicorn to Manticore")
                    self._break_unicorn_at = None
                    self._concrete = False
            else:
                # Symbolic path: dispatch to a Python implementation of the
                # instruction if one exists, else fall back to emulation.
                implementation = getattr(self, name, None)
                if implementation is not None:
                    implementation(*insn.operands)
                else:
                    text_bytes = ' '.join('%02x' % x for x in insn.bytes)
                    logger.warning("Unimplemented instruction: 0x%016x:\t%s\t%s\t%s",
                                   insn.address, text_bytes, insn.mnemonic, insn.op_str)
                    self.backup_emulate(insn)
        except (Interruption, Syscall) as e:
            # Defer the executed-instruction event until the trap is handled.
            e.on_handled = lambda: self._publish_instruction_as_executed(insn)
            raise e
        else:
            self._publish_instruction_as_executed(insn)
constant[
Decode, and execute one instruction pointed by register PC
]
if call[name[issymbolic], parameter[name[self].PC]] begin[:]
<ast.Raise object at 0x7da20e9579a0>
if <ast.UnaryOp object at 0x7da20e9542e0> begin[:]
<ast.Raise object at 0x7da20e954d00>
call[name[self]._publish, parameter[constant[will_decode_instruction], name[self].PC]]
variable[insn] assign[=] call[name[self].decode_instruction, parameter[name[self].PC]]
name[self]._last_pc assign[=] name[self].PC
call[name[self]._publish, parameter[constant[will_execute_instruction], name[self].PC, name[insn]]]
if compare[name[insn].address not_equal[!=] name[self].PC] begin[:]
return[None]
variable[name] assign[=] call[name[self].canonicalize_instruction_name, parameter[name[insn]]]
if compare[name[logger].level equal[==] name[logging].DEBUG] begin[:]
call[name[logger].debug, parameter[call[name[self].render_instruction, parameter[name[insn]]]]]
for taget[name[l]] in starred[call[name[self].render_registers, parameter[]]] begin[:]
call[name[register_logger].debug, parameter[name[l]]]
<ast.Try object at 0x7da20e9540d0> | keyword[def] identifier[execute] ( identifier[self] ):
literal[string]
keyword[if] identifier[issymbolic] ( identifier[self] . identifier[PC] ):
keyword[raise] identifier[ConcretizeRegister] ( identifier[self] , literal[string] , identifier[policy] = literal[string] )
keyword[if] keyword[not] identifier[self] . identifier[memory] . identifier[access_ok] ( identifier[self] . identifier[PC] , literal[string] ):
keyword[raise] identifier[InvalidMemoryAccess] ( identifier[self] . identifier[PC] , literal[string] )
identifier[self] . identifier[_publish] ( literal[string] , identifier[self] . identifier[PC] )
identifier[insn] = identifier[self] . identifier[decode_instruction] ( identifier[self] . identifier[PC] )
identifier[self] . identifier[_last_pc] = identifier[self] . identifier[PC]
identifier[self] . identifier[_publish] ( literal[string] , identifier[self] . identifier[PC] , identifier[insn] )
keyword[if] identifier[insn] . identifier[address] != identifier[self] . identifier[PC] :
keyword[return]
identifier[name] = identifier[self] . identifier[canonicalize_instruction_name] ( identifier[insn] )
keyword[if] identifier[logger] . identifier[level] == identifier[logging] . identifier[DEBUG] :
identifier[logger] . identifier[debug] ( identifier[self] . identifier[render_instruction] ( identifier[insn] ))
keyword[for] identifier[l] keyword[in] identifier[self] . identifier[render_registers] ():
identifier[register_logger] . identifier[debug] ( identifier[l] )
keyword[try] :
keyword[if] identifier[self] . identifier[_concrete] keyword[and] literal[string] keyword[in] identifier[name] :
identifier[self] . identifier[emu] . identifier[sync_unicorn_to_manticore] ()
keyword[if] identifier[self] . identifier[_concrete] keyword[and] literal[string] keyword[not] keyword[in] identifier[name] :
identifier[self] . identifier[emulate] ( identifier[insn] )
keyword[if] identifier[self] . identifier[PC] == identifier[self] . identifier[_break_unicorn_at] :
identifier[logger] . identifier[debug] ( literal[string] )
identifier[self] . identifier[_break_unicorn_at] = keyword[None]
identifier[self] . identifier[_concrete] = keyword[False]
keyword[else] :
identifier[implementation] = identifier[getattr] ( identifier[self] , identifier[name] , keyword[None] )
keyword[if] identifier[implementation] keyword[is] keyword[not] keyword[None] :
identifier[implementation] (* identifier[insn] . identifier[operands] )
keyword[else] :
identifier[text_bytes] = literal[string] . identifier[join] ( literal[string] % identifier[x] keyword[for] identifier[x] keyword[in] identifier[insn] . identifier[bytes] )
identifier[logger] . identifier[warning] ( literal[string] ,
identifier[insn] . identifier[address] , identifier[text_bytes] , identifier[insn] . identifier[mnemonic] , identifier[insn] . identifier[op_str] )
identifier[self] . identifier[backup_emulate] ( identifier[insn] )
keyword[except] ( identifier[Interruption] , identifier[Syscall] ) keyword[as] identifier[e] :
identifier[e] . identifier[on_handled] = keyword[lambda] : identifier[self] . identifier[_publish_instruction_as_executed] ( identifier[insn] )
keyword[raise] identifier[e]
keyword[else] :
identifier[self] . identifier[_publish_instruction_as_executed] ( identifier[insn] ) | def execute(self):
"""
Decode, and execute one instruction pointed by register PC
"""
if issymbolic(self.PC):
raise ConcretizeRegister(self, 'PC', policy='ALL') # depends on [control=['if'], data=[]]
if not self.memory.access_ok(self.PC, 'x'):
raise InvalidMemoryAccess(self.PC, 'x') # depends on [control=['if'], data=[]]
self._publish('will_decode_instruction', self.PC)
insn = self.decode_instruction(self.PC)
self._last_pc = self.PC
self._publish('will_execute_instruction', self.PC, insn)
# FIXME (theo) why just return here?
if insn.address != self.PC:
return # depends on [control=['if'], data=[]]
name = self.canonicalize_instruction_name(insn)
if logger.level == logging.DEBUG:
logger.debug(self.render_instruction(insn))
for l in self.render_registers():
register_logger.debug(l) # depends on [control=['for'], data=['l']] # depends on [control=['if'], data=[]]
try:
if self._concrete and 'SYSCALL' in name:
self.emu.sync_unicorn_to_manticore() # depends on [control=['if'], data=[]]
if self._concrete and 'SYSCALL' not in name:
self.emulate(insn)
if self.PC == self._break_unicorn_at:
logger.debug('Switching from Unicorn to Manticore')
self._break_unicorn_at = None
self._concrete = False # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
implementation = getattr(self, name, None)
if implementation is not None:
implementation(*insn.operands) # depends on [control=['if'], data=['implementation']]
else:
text_bytes = ' '.join(('%02x' % x for x in insn.bytes))
logger.warning('Unimplemented instruction: 0x%016x:\t%s\t%s\t%s', insn.address, text_bytes, insn.mnemonic, insn.op_str)
self.backup_emulate(insn) # depends on [control=['try'], data=[]]
except (Interruption, Syscall) as e:
e.on_handled = lambda : self._publish_instruction_as_executed(insn)
raise e # depends on [control=['except'], data=['e']]
else:
self._publish_instruction_as_executed(insn) |
def check_format(self, sm_format):
        """
        Return ``True`` if the given sync map format is allowed,
        and ``False`` otherwise.

        :param sm_format: the sync map format to be checked
        :type sm_format: Unicode string
        :rtype: bool
        """
        if sm_format in SyncMapFormat.ALLOWED_VALUES:
            return True
        # Unknown format: report the error and list the accepted values
        self.print_error(u"Sync map format '%s' is not allowed" % (sm_format))
        self.print_info(u"Allowed formats:")
        self.print_generic(u" ".join(SyncMapFormat.ALLOWED_VALUES))
        return False
constant[
Return ``True`` if the given sync map format is allowed,
and ``False`` otherwise.
:param sm_format: the sync map format to be checked
:type sm_format: Unicode string
:rtype: bool
]
if compare[name[sm_format] <ast.NotIn object at 0x7da2590d7190> name[SyncMapFormat].ALLOWED_VALUES] begin[:]
call[name[self].print_error, parameter[binary_operation[constant[Sync map format '%s' is not allowed] <ast.Mod object at 0x7da2590d6920> name[sm_format]]]]
call[name[self].print_info, parameter[constant[Allowed formats:]]]
call[name[self].print_generic, parameter[call[constant[ ].join, parameter[name[SyncMapFormat].ALLOWED_VALUES]]]]
return[constant[False]]
return[constant[True]] | keyword[def] identifier[check_format] ( identifier[self] , identifier[sm_format] ):
literal[string]
keyword[if] identifier[sm_format] keyword[not] keyword[in] identifier[SyncMapFormat] . identifier[ALLOWED_VALUES] :
identifier[self] . identifier[print_error] ( literal[string] %( identifier[sm_format] ))
identifier[self] . identifier[print_info] ( literal[string] )
identifier[self] . identifier[print_generic] ( literal[string] . identifier[join] ( identifier[SyncMapFormat] . identifier[ALLOWED_VALUES] ))
keyword[return] keyword[False]
keyword[return] keyword[True] | def check_format(self, sm_format):
"""
Return ``True`` if the given sync map format is allowed,
and ``False`` otherwise.
:param sm_format: the sync map format to be checked
:type sm_format: Unicode string
:rtype: bool
"""
if sm_format not in SyncMapFormat.ALLOWED_VALUES:
self.print_error(u"Sync map format '%s' is not allowed" % sm_format)
self.print_info(u'Allowed formats:')
self.print_generic(u' '.join(SyncMapFormat.ALLOWED_VALUES))
return False # depends on [control=['if'], data=['sm_format']]
return True |
def create_logout_response(self, request, bindings=None, status=None,
                               sign=False, issuer=None, sign_alg=None,
                               digest_alg=None):
        """ Create a LogoutResponse.

        :param request: The request this is a response to
        :param bindings: Which bindings that can be used for the response
            If None the preferred bindings are gathered from the configuration
        :param status: The return status of the response operation
            If None the operation is regarded as a Success.
        :param issuer: The issuer of the message
        :return: HTTP args
        """
        response_kwargs = self.response_args(request, bindings)
        # Fall back to this entity's own issuer when none was supplied
        msg_issuer = issuer if issuer else self._issuer()
        response = self._status_response(
            samlp.LogoutResponse, msg_issuer, status, sign,
            sign_alg=sign_alg, digest_alg=digest_alg, **response_kwargs)
        logger.info("Response: %s", response)
        return response
constant[ Create a LogoutResponse.
:param request: The request this is a response to
:param bindings: Which bindings that can be used for the response
If None the preferred bindings are gathered from the configuration
:param status: The return status of the response operation
If None the operation is regarded as a Success.
:param issuer: The issuer of the message
:return: HTTP args
]
variable[rinfo] assign[=] call[name[self].response_args, parameter[name[request], name[bindings]]]
if <ast.UnaryOp object at 0x7da1b20b64a0> begin[:]
variable[issuer] assign[=] call[name[self]._issuer, parameter[]]
variable[response] assign[=] call[name[self]._status_response, parameter[name[samlp].LogoutResponse, name[issuer], name[status], name[sign]]]
call[name[logger].info, parameter[constant[Response: %s], name[response]]]
return[name[response]] | keyword[def] identifier[create_logout_response] ( identifier[self] , identifier[request] , identifier[bindings] = keyword[None] , identifier[status] = keyword[None] ,
identifier[sign] = keyword[False] , identifier[issuer] = keyword[None] , identifier[sign_alg] = keyword[None] ,
identifier[digest_alg] = keyword[None] ):
literal[string]
identifier[rinfo] = identifier[self] . identifier[response_args] ( identifier[request] , identifier[bindings] )
keyword[if] keyword[not] identifier[issuer] :
identifier[issuer] = identifier[self] . identifier[_issuer] ()
identifier[response] = identifier[self] . identifier[_status_response] ( identifier[samlp] . identifier[LogoutResponse] , identifier[issuer] , identifier[status] ,
identifier[sign] , identifier[sign_alg] = identifier[sign_alg] ,
identifier[digest_alg] = identifier[digest_alg] ,** identifier[rinfo] )
identifier[logger] . identifier[info] ( literal[string] , identifier[response] )
keyword[return] identifier[response] | def create_logout_response(self, request, bindings=None, status=None, sign=False, issuer=None, sign_alg=None, digest_alg=None):
""" Create a LogoutResponse.
:param request: The request this is a response to
:param bindings: Which bindings that can be used for the response
If None the preferred bindings are gathered from the configuration
:param status: The return status of the response operation
If None the operation is regarded as a Success.
:param issuer: The issuer of the message
:return: HTTP args
"""
rinfo = self.response_args(request, bindings)
if not issuer:
issuer = self._issuer() # depends on [control=['if'], data=[]]
response = self._status_response(samlp.LogoutResponse, issuer, status, sign, sign_alg=sign_alg, digest_alg=digest_alg, **rinfo)
logger.info('Response: %s', response)
return response |
def set_alarm_state(self, state):
        """
        Enable or disable the alarm on this device.

        :param state: True to turn the alarm on, False to turn it off
        :return: nothing
        """
        desired = {"alarm_enabled": state}
        # Push the desired state to the API and refresh the local state
        # from whatever the service reports back.
        response = self.api_interface.set_device_state(
            self, {"desired_state": desired})
        self._update_state_from_response(response)
constant[
:param state: a boolean of ture (on) or false ('off')
:return: nothing
]
variable[values] assign[=] dictionary[[<ast.Constant object at 0x7da1b255fac0>], [<ast.Dict object at 0x7da1b255e7d0>]]
variable[response] assign[=] call[name[self].api_interface.set_device_state, parameter[name[self], name[values]]]
call[name[self]._update_state_from_response, parameter[name[response]]] | keyword[def] identifier[set_alarm_state] ( identifier[self] , identifier[state] ):
literal[string]
identifier[values] ={ literal[string] :{ literal[string] : identifier[state] }}
identifier[response] = identifier[self] . identifier[api_interface] . identifier[set_device_state] ( identifier[self] , identifier[values] )
identifier[self] . identifier[_update_state_from_response] ( identifier[response] ) | def set_alarm_state(self, state):
"""
:param state: a boolean of ture (on) or false ('off')
:return: nothing
"""
values = {'desired_state': {'alarm_enabled': state}}
response = self.api_interface.set_device_state(self, values)
self._update_state_from_response(response) |
def decode(self, probs, sizes=None):
        """
        Greedy (argmax) decoding of the probability matrix. Collapses
        repeated elements in each sequence and removes blanks.

        Arguments:
            probs: Tensor of character probabilities from the network. Expected shape of seq_length x batch x output_dim
            sizes(optional): Size of each sequence in the mini-batch

        Returns:
            strings: sequences of the model's best guess for the transcription on inputs
        """
        # Batch-first layout, then pick the most likely class per timestep
        # (index [1] of torch.max is the argmax indices).
        batch_first = probs.transpose(0, 1)
        best_indices = torch.max(batch_first, 2)[1]
        decoded = self.convert_to_strings(
            best_indices.view(best_indices.size(0), best_indices.size(1)), sizes)
        return self.process_strings(decoded, remove_repetitions=True)
constant[
Returns the argmax decoding given the probability matrix. Removes
repeated elements in the sequence, as well as blanks.
Arguments:
probs: Tensor of character probabilities from the network. Expected shape of seq_length x batch x output_dim
sizes(optional): Size of each sequence in the mini-batch
Returns:
strings: sequences of the model's best guess for the transcription on inputs
]
<ast.Tuple object at 0x7da18dc069b0> assign[=] call[name[torch].max, parameter[call[name[probs].transpose, parameter[constant[0], constant[1]]], constant[2]]]
variable[strings] assign[=] call[name[self].convert_to_strings, parameter[call[name[max_probs].view, parameter[call[name[max_probs].size, parameter[constant[0]]], call[name[max_probs].size, parameter[constant[1]]]]], name[sizes]]]
return[call[name[self].process_strings, parameter[name[strings]]]] | keyword[def] identifier[decode] ( identifier[self] , identifier[probs] , identifier[sizes] = keyword[None] ):
literal[string]
identifier[_] , identifier[max_probs] = identifier[torch] . identifier[max] ( identifier[probs] . identifier[transpose] ( literal[int] , literal[int] ), literal[int] )
identifier[strings] = identifier[self] . identifier[convert_to_strings] ( identifier[max_probs] . identifier[view] ( identifier[max_probs] . identifier[size] ( literal[int] ), identifier[max_probs] . identifier[size] ( literal[int] )), identifier[sizes] )
keyword[return] identifier[self] . identifier[process_strings] ( identifier[strings] , identifier[remove_repetitions] = keyword[True] ) | def decode(self, probs, sizes=None):
"""
Returns the argmax decoding given the probability matrix. Removes
repeated elements in the sequence, as well as blanks.
Arguments:
probs: Tensor of character probabilities from the network. Expected shape of seq_length x batch x output_dim
sizes(optional): Size of each sequence in the mini-batch
Returns:
strings: sequences of the model's best guess for the transcription on inputs
"""
(_, max_probs) = torch.max(probs.transpose(0, 1), 2)
strings = self.convert_to_strings(max_probs.view(max_probs.size(0), max_probs.size(1)), sizes)
return self.process_strings(strings, remove_repetitions=True) |
def _find_ramps(self) -> List[Ramp]:
        """Calculate (self.pathing_grid - self.placement_grid) (for sets) and then find ramps by comparing heights."""
        # A cell belongs to a ramp when it is neither pathable nor placeable.
        ramp_points = {
            Point2((x, y))
            for x in range(self.pathing_grid.width)
            for y in range(self.pathing_grid.height)
            if self.pathing_grid[(x, y)] == 0 and self.placement_grid[(x, y)] == 0
        }
        # Cluster adjacent ramp cells, then wrap each cluster in a Ramp object.
        return [Ramp(group, self) for group in self._find_groups(ramp_points)]
constant[Calculate (self.pathing_grid - self.placement_grid) (for sets) and then find ramps by comparing heights.]
variable[rampDict] assign[=] <ast.DictComp object at 0x7da20c6e72e0>
variable[rampPoints] assign[=] <ast.SetComp object at 0x7da18bc70fd0>
variable[rampGroups] assign[=] call[name[self]._find_groups, parameter[name[rampPoints]]]
return[<ast.ListComp object at 0x7da18bc70fa0>] | keyword[def] identifier[_find_ramps] ( identifier[self] )-> identifier[List] [ identifier[Ramp] ]:
literal[string]
identifier[rampDict] ={
identifier[Point2] (( identifier[x] , identifier[y] )): identifier[self] . identifier[pathing_grid] [( identifier[x] , identifier[y] )]== literal[int] keyword[and] identifier[self] . identifier[placement_grid] [( identifier[x] , identifier[y] )]== literal[int]
keyword[for] identifier[x] keyword[in] identifier[range] ( identifier[self] . identifier[pathing_grid] . identifier[width] )
keyword[for] identifier[y] keyword[in] identifier[range] ( identifier[self] . identifier[pathing_grid] . identifier[height] )
}
identifier[rampPoints] ={ identifier[p] keyword[for] identifier[p] keyword[in] identifier[rampDict] keyword[if] identifier[rampDict] [ identifier[p] ]}
identifier[rampGroups] = identifier[self] . identifier[_find_groups] ( identifier[rampPoints] )
keyword[return] [ identifier[Ramp] ( identifier[group] , identifier[self] ) keyword[for] identifier[group] keyword[in] identifier[rampGroups] ] | def _find_ramps(self) -> List[Ramp]:
"""Calculate (self.pathing_grid - self.placement_grid) (for sets) and then find ramps by comparing heights."""
rampDict = {Point2((x, y)): self.pathing_grid[x, y] == 0 and self.placement_grid[x, y] == 0 for x in range(self.pathing_grid.width) for y in range(self.pathing_grid.height)}
rampPoints = {p for p in rampDict if rampDict[p]} # filter only points part of ramp
rampGroups = self._find_groups(rampPoints)
return [Ramp(group, self) for group in rampGroups] |
def exit(self, timeperiods, hosts, services):
        """Remove ref in scheduled downtime and raise downtime log entry (exit)

        :param timeperiods: timeperiods objects, used to resolve the item's
            notification period
        :param hosts: hosts objects to get item ref
        :type hosts: alignak.objects.host.Hosts
        :param services: services objects to get item ref
        :type services: alignak.objects.service.Services
        :return: broks raised for the downtime expiry (empty if the downtime
            never went into effect)
        :rtype: list
        """
        # Resolve the item (host or service) this downtime is attached to
        if self.ref in hosts:
            item = hosts[self.ref]
        else:
            item = services[self.ref]
        broks = []
        # If not is_in_effect means that it was probably a flexible downtime which was
        # not triggered. In this case, nothing special to do...
        if self.is_in_effect is True:
            # This was a fixed or a flexible+triggered downtime
            self.is_in_effect = False
            item.scheduled_downtime_depth -= 1
            # Only when the last of possibly overlapping downtimes ends do we
            # log, notify and emit the expire brok
            if item.scheduled_downtime_depth == 0:
                item.raise_exit_downtime_log_entry()
                notification_period = timeperiods[item.notification_period]
                # Notification author data
                # todo: note that alias and name are not implemented yet
                author_data = {
                    'author': self.author, 'author_name': u'Not available',
                    'author_alias': u'Not available', 'author_comment': self.comment
                }
                item.create_notifications(u'DOWNTIMEEND', notification_period, hosts, services,
                                          author_data=author_data)
                item.in_scheduled_downtime = False
                # A service expire brok also carries the host name
                if self.ref in hosts:
                    broks.append(self.get_expire_brok(item.get_name()))
                else:
                    broks.append(self.get_expire_brok(item.host_name, item.get_name()))
        item.del_comment(self.comment_id)
        self.can_be_deleted = True
        # when a downtime ends and the concerned item was a problem
        # a notification should be sent with the next critical check
        # So we should set a flag here which informs the consume_result function
        # to send a notification
        item.in_scheduled_downtime_during_last_check = True
        return broks
constant[Remove ref in scheduled downtime and raise downtime log entry (exit)
:param hosts: hosts objects to get item ref
:type hosts: alignak.objects.host.Hosts
:param services: services objects to get item ref
:type services: alignak.objects.service.Services
:return: [], always | None
:rtype: list
]
if compare[name[self].ref in name[hosts]] begin[:]
variable[item] assign[=] call[name[hosts]][name[self].ref]
variable[broks] assign[=] list[[]]
if compare[name[self].is_in_effect is constant[True]] begin[:]
name[self].is_in_effect assign[=] constant[False]
<ast.AugAssign object at 0x7da237eee740>
if compare[name[item].scheduled_downtime_depth equal[==] constant[0]] begin[:]
call[name[item].raise_exit_downtime_log_entry, parameter[]]
variable[notification_period] assign[=] call[name[timeperiods]][name[item].notification_period]
variable[author_data] assign[=] dictionary[[<ast.Constant object at 0x7da207f03280>, <ast.Constant object at 0x7da207f02ec0>, <ast.Constant object at 0x7da207f033d0>, <ast.Constant object at 0x7da207f008e0>], [<ast.Attribute object at 0x7da207f019c0>, <ast.Constant object at 0x7da207f03460>, <ast.Constant object at 0x7da207f01c60>, <ast.Attribute object at 0x7da207f02680>]]
call[name[item].create_notifications, parameter[constant[DOWNTIMEEND], name[notification_period], name[hosts], name[services]]]
name[item].in_scheduled_downtime assign[=] constant[False]
if compare[name[self].ref in name[hosts]] begin[:]
call[name[broks].append, parameter[call[name[self].get_expire_brok, parameter[call[name[item].get_name, parameter[]]]]]]
call[name[item].del_comment, parameter[name[self].comment_id]]
name[self].can_be_deleted assign[=] constant[True]
name[item].in_scheduled_downtime_during_last_check assign[=] constant[True]
return[name[broks]] | keyword[def] identifier[exit] ( identifier[self] , identifier[timeperiods] , identifier[hosts] , identifier[services] ):
literal[string]
keyword[if] identifier[self] . identifier[ref] keyword[in] identifier[hosts] :
identifier[item] = identifier[hosts] [ identifier[self] . identifier[ref] ]
keyword[else] :
identifier[item] = identifier[services] [ identifier[self] . identifier[ref] ]
identifier[broks] =[]
keyword[if] identifier[self] . identifier[is_in_effect] keyword[is] keyword[True] :
identifier[self] . identifier[is_in_effect] = keyword[False]
identifier[item] . identifier[scheduled_downtime_depth] -= literal[int]
keyword[if] identifier[item] . identifier[scheduled_downtime_depth] == literal[int] :
identifier[item] . identifier[raise_exit_downtime_log_entry] ()
identifier[notification_period] = identifier[timeperiods] [ identifier[item] . identifier[notification_period] ]
identifier[author_data] ={
literal[string] : identifier[self] . identifier[author] , literal[string] : literal[string] ,
literal[string] : literal[string] , literal[string] : identifier[self] . identifier[comment]
}
identifier[item] . identifier[create_notifications] ( literal[string] , identifier[notification_period] , identifier[hosts] , identifier[services] ,
identifier[author_data] = identifier[author_data] )
identifier[item] . identifier[in_scheduled_downtime] = keyword[False]
keyword[if] identifier[self] . identifier[ref] keyword[in] identifier[hosts] :
identifier[broks] . identifier[append] ( identifier[self] . identifier[get_expire_brok] ( identifier[item] . identifier[get_name] ()))
keyword[else] :
identifier[broks] . identifier[append] ( identifier[self] . identifier[get_expire_brok] ( identifier[item] . identifier[host_name] , identifier[item] . identifier[get_name] ()))
identifier[item] . identifier[del_comment] ( identifier[self] . identifier[comment_id] )
identifier[self] . identifier[can_be_deleted] = keyword[True]
identifier[item] . identifier[in_scheduled_downtime_during_last_check] = keyword[True]
keyword[return] identifier[broks] | def exit(self, timeperiods, hosts, services):
"""Remove ref in scheduled downtime and raise downtime log entry (exit)
:param hosts: hosts objects to get item ref
:type hosts: alignak.objects.host.Hosts
:param services: services objects to get item ref
:type services: alignak.objects.service.Services
:return: [], always | None
:rtype: list
"""
if self.ref in hosts:
item = hosts[self.ref] # depends on [control=['if'], data=['hosts']]
else:
item = services[self.ref]
broks = []
# If not is_in_effect means that ot was probably a flexible downtime which was
# not triggered. In this case, nothing special to do...
if self.is_in_effect is True:
# This was a fixed or a flexible+triggered downtime
self.is_in_effect = False
item.scheduled_downtime_depth -= 1
if item.scheduled_downtime_depth == 0:
item.raise_exit_downtime_log_entry()
notification_period = timeperiods[item.notification_period]
# Notification author data
# todo: note that alias and name are not implemented yet
author_data = {'author': self.author, 'author_name': u'Not available', 'author_alias': u'Not available', 'author_comment': self.comment}
item.create_notifications(u'DOWNTIMEEND', notification_period, hosts, services, author_data=author_data)
item.in_scheduled_downtime = False
if self.ref in hosts:
broks.append(self.get_expire_brok(item.get_name())) # depends on [control=['if'], data=[]]
else:
broks.append(self.get_expire_brok(item.host_name, item.get_name())) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
item.del_comment(self.comment_id)
self.can_be_deleted = True
# when a downtime ends and the concerned item was a problem
# a notification should be sent with the next critical check
# So we should set a flag here which informs the consume_result function
# to send a notification
item.in_scheduled_downtime_during_last_check = True
return broks |
def _read_rrd(self, rrd_path, hostname, device_name, tags):
''' Main metric fetching method '''
metric_count = 0
try:
info = self._get_rrd_info(rrd_path)
except Exception:
# Unable to read RRD file, ignore it
self.log.exception("Unable to read RRD file at %s" % rrd_path)
return metric_count
# Find the consolidation functions for the RRD metrics
c_funcs = set([v for k, v in info.items() if k.endswith('.cf')])
for c in list(c_funcs):
last_ts_key = '%s.%s' % (rrd_path, c)
if last_ts_key not in self.last_ts:
self.last_ts[last_ts_key] = int(time.time())
continue
start = self.last_ts[last_ts_key]
last_ts = start
try:
fetched = self._get_rrd_fetch(rrd_path, c, start)
except rrdtool.error:
# Start time was out of range, skip this RRD
self.log.warn("Time %s out of range for %s" % (rrd_path, start))
return metric_count
# Extract the data
(start_ts, end_ts, interval) = fetched[0]
metric_names = fetched[1]
points = fetched[2]
for k, m_name in enumerate(metric_names):
m_name = self._format_metric_name(m_name, c)
for i, p in enumerate(points):
ts = start_ts + (i * interval)
if p[k] is None:
continue
# Save this metric as a gauge
val = self._transform_metric(m_name, p[k])
self.gauge(m_name, val, hostname=hostname, device_name=device_name, tags=tags)
metric_count += 1
last_ts = ts + interval
# Update the last timestamp based on the last valid metric
self.last_ts[last_ts_key] = last_ts
return metric_count | def function[_read_rrd, parameter[self, rrd_path, hostname, device_name, tags]]:
constant[ Main metric fetching method ]
variable[metric_count] assign[=] constant[0]
<ast.Try object at 0x7da20c6e4ee0>
variable[c_funcs] assign[=] call[name[set], parameter[<ast.ListComp object at 0x7da20c6e6710>]]
for taget[name[c]] in starred[call[name[list], parameter[name[c_funcs]]]] begin[:]
variable[last_ts_key] assign[=] binary_operation[constant[%s.%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da20c6e5690>, <ast.Name object at 0x7da20c6e7dc0>]]]
if compare[name[last_ts_key] <ast.NotIn object at 0x7da2590d7190> name[self].last_ts] begin[:]
call[name[self].last_ts][name[last_ts_key]] assign[=] call[name[int], parameter[call[name[time].time, parameter[]]]]
continue
variable[start] assign[=] call[name[self].last_ts][name[last_ts_key]]
variable[last_ts] assign[=] name[start]
<ast.Try object at 0x7da20c6e5c60>
<ast.Tuple object at 0x7da20c6e6530> assign[=] call[name[fetched]][constant[0]]
variable[metric_names] assign[=] call[name[fetched]][constant[1]]
variable[points] assign[=] call[name[fetched]][constant[2]]
for taget[tuple[[<ast.Name object at 0x7da20c6e6020>, <ast.Name object at 0x7da20c6e6e60>]]] in starred[call[name[enumerate], parameter[name[metric_names]]]] begin[:]
variable[m_name] assign[=] call[name[self]._format_metric_name, parameter[name[m_name], name[c]]]
for taget[tuple[[<ast.Name object at 0x7da20c6e63e0>, <ast.Name object at 0x7da20c6e4250>]]] in starred[call[name[enumerate], parameter[name[points]]]] begin[:]
variable[ts] assign[=] binary_operation[name[start_ts] + binary_operation[name[i] * name[interval]]]
if compare[call[name[p]][name[k]] is constant[None]] begin[:]
continue
variable[val] assign[=] call[name[self]._transform_metric, parameter[name[m_name], call[name[p]][name[k]]]]
call[name[self].gauge, parameter[name[m_name], name[val]]]
<ast.AugAssign object at 0x7da20c6e7850>
variable[last_ts] assign[=] binary_operation[name[ts] + name[interval]]
call[name[self].last_ts][name[last_ts_key]] assign[=] name[last_ts]
return[name[metric_count]] | keyword[def] identifier[_read_rrd] ( identifier[self] , identifier[rrd_path] , identifier[hostname] , identifier[device_name] , identifier[tags] ):
literal[string]
identifier[metric_count] = literal[int]
keyword[try] :
identifier[info] = identifier[self] . identifier[_get_rrd_info] ( identifier[rrd_path] )
keyword[except] identifier[Exception] :
identifier[self] . identifier[log] . identifier[exception] ( literal[string] % identifier[rrd_path] )
keyword[return] identifier[metric_count]
identifier[c_funcs] = identifier[set] ([ identifier[v] keyword[for] identifier[k] , identifier[v] keyword[in] identifier[info] . identifier[items] () keyword[if] identifier[k] . identifier[endswith] ( literal[string] )])
keyword[for] identifier[c] keyword[in] identifier[list] ( identifier[c_funcs] ):
identifier[last_ts_key] = literal[string] %( identifier[rrd_path] , identifier[c] )
keyword[if] identifier[last_ts_key] keyword[not] keyword[in] identifier[self] . identifier[last_ts] :
identifier[self] . identifier[last_ts] [ identifier[last_ts_key] ]= identifier[int] ( identifier[time] . identifier[time] ())
keyword[continue]
identifier[start] = identifier[self] . identifier[last_ts] [ identifier[last_ts_key] ]
identifier[last_ts] = identifier[start]
keyword[try] :
identifier[fetched] = identifier[self] . identifier[_get_rrd_fetch] ( identifier[rrd_path] , identifier[c] , identifier[start] )
keyword[except] identifier[rrdtool] . identifier[error] :
identifier[self] . identifier[log] . identifier[warn] ( literal[string] %( identifier[rrd_path] , identifier[start] ))
keyword[return] identifier[metric_count]
( identifier[start_ts] , identifier[end_ts] , identifier[interval] )= identifier[fetched] [ literal[int] ]
identifier[metric_names] = identifier[fetched] [ literal[int] ]
identifier[points] = identifier[fetched] [ literal[int] ]
keyword[for] identifier[k] , identifier[m_name] keyword[in] identifier[enumerate] ( identifier[metric_names] ):
identifier[m_name] = identifier[self] . identifier[_format_metric_name] ( identifier[m_name] , identifier[c] )
keyword[for] identifier[i] , identifier[p] keyword[in] identifier[enumerate] ( identifier[points] ):
identifier[ts] = identifier[start_ts] +( identifier[i] * identifier[interval] )
keyword[if] identifier[p] [ identifier[k] ] keyword[is] keyword[None] :
keyword[continue]
identifier[val] = identifier[self] . identifier[_transform_metric] ( identifier[m_name] , identifier[p] [ identifier[k] ])
identifier[self] . identifier[gauge] ( identifier[m_name] , identifier[val] , identifier[hostname] = identifier[hostname] , identifier[device_name] = identifier[device_name] , identifier[tags] = identifier[tags] )
identifier[metric_count] += literal[int]
identifier[last_ts] = identifier[ts] + identifier[interval]
identifier[self] . identifier[last_ts] [ identifier[last_ts_key] ]= identifier[last_ts]
keyword[return] identifier[metric_count] | def _read_rrd(self, rrd_path, hostname, device_name, tags):
""" Main metric fetching method """
metric_count = 0
try:
info = self._get_rrd_info(rrd_path) # depends on [control=['try'], data=[]]
except Exception:
# Unable to read RRD file, ignore it
self.log.exception('Unable to read RRD file at %s' % rrd_path)
return metric_count # depends on [control=['except'], data=[]]
# Find the consolidation functions for the RRD metrics
c_funcs = set([v for (k, v) in info.items() if k.endswith('.cf')])
for c in list(c_funcs):
last_ts_key = '%s.%s' % (rrd_path, c)
if last_ts_key not in self.last_ts:
self.last_ts[last_ts_key] = int(time.time())
continue # depends on [control=['if'], data=['last_ts_key']]
start = self.last_ts[last_ts_key]
last_ts = start
try:
fetched = self._get_rrd_fetch(rrd_path, c, start) # depends on [control=['try'], data=[]]
except rrdtool.error:
# Start time was out of range, skip this RRD
self.log.warn('Time %s out of range for %s' % (rrd_path, start))
return metric_count # depends on [control=['except'], data=[]]
# Extract the data
(start_ts, end_ts, interval) = fetched[0]
metric_names = fetched[1]
points = fetched[2]
for (k, m_name) in enumerate(metric_names):
m_name = self._format_metric_name(m_name, c)
for (i, p) in enumerate(points):
ts = start_ts + i * interval
if p[k] is None:
continue # depends on [control=['if'], data=[]]
# Save this metric as a gauge
val = self._transform_metric(m_name, p[k])
self.gauge(m_name, val, hostname=hostname, device_name=device_name, tags=tags)
metric_count += 1
last_ts = ts + interval # depends on [control=['for'], data=[]] # depends on [control=['for'], data=[]]
# Update the last timestamp based on the last valid metric
self.last_ts[last_ts_key] = last_ts # depends on [control=['for'], data=['c']]
return metric_count |
def get_window_name(self, win_id):
        """
        Get a window's name, if any.
        Returns the name reported by xdo_get_window_name() (or None when
        the window has no name set).
        """
        # Wrap the raw id in the libxdo window handle type.
        window = window_t(win_id)
        # Out-parameters populated by xdo_get_window_name() below.
        name_ptr = ctypes.c_char_p()
        name_len = ctypes.c_int(0)
        name_type = ctypes.c_int(0)
        _libxdo.xdo_get_window_name(
            self._xdo, window, ctypes.byref(name_ptr),
            ctypes.byref(name_len), ctypes.byref(name_type))
        # Copy the value out before releasing the buffer Xlib allocated.
        name = name_ptr.value
        _libX11.XFree(name_ptr) # Free the string allocated by Xlib
        return name | def function[get_window_name, parameter[self, win_id]]:
constant[
Get a window's name, if any.
]
variable[window] assign[=] call[name[window_t], parameter[name[win_id]]]
variable[name_ptr] assign[=] call[name[ctypes].c_char_p, parameter[]]
variable[name_len] assign[=] call[name[ctypes].c_int, parameter[constant[0]]]
variable[name_type] assign[=] call[name[ctypes].c_int, parameter[constant[0]]]
call[name[_libxdo].xdo_get_window_name, parameter[name[self]._xdo, name[window], call[name[ctypes].byref, parameter[name[name_ptr]]], call[name[ctypes].byref, parameter[name[name_len]]], call[name[ctypes].byref, parameter[name[name_type]]]]]
variable[name] assign[=] name[name_ptr].value
call[name[_libX11].XFree, parameter[name[name_ptr]]]
return[name[name]] | keyword[def] identifier[get_window_name] ( identifier[self] , identifier[win_id] ):
literal[string]
identifier[window] = identifier[window_t] ( identifier[win_id] )
identifier[name_ptr] = identifier[ctypes] . identifier[c_char_p] ()
identifier[name_len] = identifier[ctypes] . identifier[c_int] ( literal[int] )
identifier[name_type] = identifier[ctypes] . identifier[c_int] ( literal[int] )
identifier[_libxdo] . identifier[xdo_get_window_name] (
identifier[self] . identifier[_xdo] , identifier[window] , identifier[ctypes] . identifier[byref] ( identifier[name_ptr] ),
identifier[ctypes] . identifier[byref] ( identifier[name_len] ), identifier[ctypes] . identifier[byref] ( identifier[name_type] ))
identifier[name] = identifier[name_ptr] . identifier[value]
identifier[_libX11] . identifier[XFree] ( identifier[name_ptr] )
keyword[return] identifier[name] | def get_window_name(self, win_id):
"""
Get a window's name, if any.
"""
window = window_t(win_id)
name_ptr = ctypes.c_char_p()
name_len = ctypes.c_int(0)
name_type = ctypes.c_int(0)
_libxdo.xdo_get_window_name(self._xdo, window, ctypes.byref(name_ptr), ctypes.byref(name_len), ctypes.byref(name_type))
name = name_ptr.value
_libX11.XFree(name_ptr) # Free the string allocated by Xlib
return name |
def sg_mean(tensor, opt):
    r"""Computes the mean of elements across axis of a tensor.
    See `tf.reduce_mean()` in tensorflow.
    Args:
      tensor: A `Tensor` (automatically given by chain).
      opt:
        axis : A tuple/list of integers or an integer. The axis to reduce.
        keep_dims: If true, retains reduced dimensions with length 1.
        name: If provided, replace current tensor's name.
    Returns:
      A `Tensor`.
    """
    # NOTE(review): `keep_dims` was renamed `keepdims` in newer TensorFlow
    # releases; left as-is to match the TF version this file targets — confirm.
    return tf.reduce_mean(tensor, axis=opt.axis, keep_dims=opt.keep_dims, name=opt.name) | def function[sg_mean, parameter[tensor, opt]]:
constant[Computes the mean of elements across axis of a tensor.
See `tf.reduce_mean()` in tensorflow.
Args:
tensor: A `Tensor` (automatically given by chain).
opt:
axis : A tuple/list of integers or an integer. The axis to reduce.
keep_dims: If true, retains reduced dimensions with length 1.
name: If provided, replace current tensor's name.
Returns:
A `Tensor`.
]
return[call[name[tf].reduce_mean, parameter[name[tensor]]]] | keyword[def] identifier[sg_mean] ( identifier[tensor] , identifier[opt] ):
literal[string]
keyword[return] identifier[tf] . identifier[reduce_mean] ( identifier[tensor] , identifier[axis] = identifier[opt] . identifier[axis] , identifier[keep_dims] = identifier[opt] . identifier[keep_dims] , identifier[name] = identifier[opt] . identifier[name] ) | def sg_mean(tensor, opt):
"""Computes the mean of elements across axis of a tensor.
See `tf.reduce_mean()` in tensorflow.
Args:
tensor: A `Tensor` (automatically given by chain).
opt:
axis : A tuple/list of integers or an integer. The axis to reduce.
keep_dims: If true, retains reduced dimensions with length 1.
name: If provided, replace current tensor's name.
Returns:
A `Tensor`.
"""
return tf.reduce_mean(tensor, axis=opt.axis, keep_dims=opt.keep_dims, name=opt.name) |
def fit_predict(self, X, y=None, **kwargs):
        """Compute cluster centroids and predict cluster index for each sample.
        Convenience method; equivalent to calling fit(X) followed by
        predict(X).
        """
        # NOTE(review): `y` is ignored here — presumably accepted only for
        # scikit-learn estimator API compatibility; confirm before relying on it.
        return self.fit(X, **kwargs).predict(X, **kwargs) | def function[fit_predict, parameter[self, X, y]]:
constant[Compute cluster centroids and predict cluster index for each sample.
Convenience method; equivalent to calling fit(X) followed by
predict(X).
]
return[call[call[name[self].fit, parameter[name[X]]].predict, parameter[name[X]]]] | keyword[def] identifier[fit_predict] ( identifier[self] , identifier[X] , identifier[y] = keyword[None] ,** identifier[kwargs] ):
literal[string]
keyword[return] identifier[self] . identifier[fit] ( identifier[X] ,** identifier[kwargs] ). identifier[predict] ( identifier[X] ,** identifier[kwargs] ) | def fit_predict(self, X, y=None, **kwargs):
"""Compute cluster centroids and predict cluster index for each sample.
Convenience method; equivalent to calling fit(X) followed by
predict(X).
"""
return self.fit(X, **kwargs).predict(X, **kwargs) |
def in_dir(config_dir=CONFIG_DIR, extensions=['.yml', '.yaml', '.json']):
    """Return a list of configs in ``config_dir``.
    :param config_dir: directory to search
    :type config_dir: str
    :param extensions: filetypes to check (e.g. ``['.yaml', '.json']``).
    :type extensions: list
    :rtype: list
    """
    # NOTE: the mutable default for `extensions` is safe here because the
    # list is only read, never mutated.
    configs = []
    for filename in os.listdir(config_dir):
        # Skip hidden files; `is_config_file` performs the extension check.
        if is_config_file(filename, extensions) and not filename.startswith('.'):
            configs.append(filename)
    return configs | def function[in_dir, parameter[config_dir, extensions]]:
constant[Return a list of configs in ``config_dir``.
:param config_dir: directory to search
:type config_dir: str
:param extensions: filetypes to check (e.g. ``['.yaml', '.json']``).
:type extensions: list
:rtype: list
]
variable[configs] assign[=] list[[]]
for taget[name[filename]] in starred[call[name[os].listdir, parameter[name[config_dir]]]] begin[:]
if <ast.BoolOp object at 0x7da18ede7d90> begin[:]
call[name[configs].append, parameter[name[filename]]]
return[name[configs]] | keyword[def] identifier[in_dir] ( identifier[config_dir] = identifier[CONFIG_DIR] , identifier[extensions] =[ literal[string] , literal[string] , literal[string] ]):
literal[string]
identifier[configs] =[]
keyword[for] identifier[filename] keyword[in] identifier[os] . identifier[listdir] ( identifier[config_dir] ):
keyword[if] identifier[is_config_file] ( identifier[filename] , identifier[extensions] ) keyword[and] keyword[not] identifier[filename] . identifier[startswith] ( literal[string] ):
identifier[configs] . identifier[append] ( identifier[filename] )
keyword[return] identifier[configs] | def in_dir(config_dir=CONFIG_DIR, extensions=['.yml', '.yaml', '.json']):
"""Return a list of configs in ``config_dir``.
:param config_dir: directory to search
:type config_dir: str
:param extensions: filetypes to check (e.g. ``['.yaml', '.json']``).
:type extensions: list
:rtype: list
"""
configs = []
for filename in os.listdir(config_dir):
if is_config_file(filename, extensions) and (not filename.startswith('.')):
configs.append(filename) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['filename']]
return configs |
def get_count(self, using):
        # TODO maybe can be removed soon
        """
        Performs a COUNT() query using the current filter constraints.
        Returns the count as an int (0 when the aggregation yields None).
        """
        # Clone first so the COUNT annotation does not pollute this query object.
        obj = self.clone()
        obj.add_annotation(Count('pk'), alias='x_sf_count', is_summary=True) # pylint: disable=no-member
        number = obj.get_aggregation(using, ['x_sf_count'])['x_sf_count'] # pylint: disable=no-member
        # Aggregation over an empty result set can yield None; normalize to 0.
        if number is None:
            number = 0
        return number | def function[get_count, parameter[self, using]]:
constant[
Performs a COUNT() query using the current filter constraints.
]
variable[obj] assign[=] call[name[self].clone, parameter[]]
call[name[obj].add_annotation, parameter[call[name[Count], parameter[constant[pk]]]]]
variable[number] assign[=] call[call[name[obj].get_aggregation, parameter[name[using], list[[<ast.Constant object at 0x7da1b1208250>]]]]][constant[x_sf_count]]
if compare[name[number] is constant[None]] begin[:]
variable[number] assign[=] constant[0]
return[name[number]] | keyword[def] identifier[get_count] ( identifier[self] , identifier[using] ):
literal[string]
identifier[obj] = identifier[self] . identifier[clone] ()
identifier[obj] . identifier[add_annotation] ( identifier[Count] ( literal[string] ), identifier[alias] = literal[string] , identifier[is_summary] = keyword[True] )
identifier[number] = identifier[obj] . identifier[get_aggregation] ( identifier[using] ,[ literal[string] ])[ literal[string] ]
keyword[if] identifier[number] keyword[is] keyword[None] :
identifier[number] = literal[int]
keyword[return] identifier[number] | def get_count(self, using):
# TODO maybe can be removed soon
'\n Performs a COUNT() query using the current filter constraints.\n '
obj = self.clone()
obj.add_annotation(Count('pk'), alias='x_sf_count', is_summary=True) # pylint: disable=no-member
number = obj.get_aggregation(using, ['x_sf_count'])['x_sf_count'] # pylint: disable=no-member
if number is None:
number = 0 # depends on [control=['if'], data=['number']]
return number |
def start_readout(self, *args, **kwargs):
        ''' Starting the FIFO readout.
        Starting of the FIFO readout is executed only once by a random thread.
        Starting of the FIFO readout is synchronized between all threads reading out the FIFO.
        Raises:
            RuntimeError: if the current module handle is not a valid scan
                thread name, or is already actively reading the FIFO.
        '''
        # Pop parameters for fifo_readout.start
        callback = kwargs.pop('callback', self.handle_data)
        errback = kwargs.pop('errback', self.handle_err)
        reset_rx = kwargs.pop('reset_rx', True)
        reset_fifo = kwargs.pop('reset_fifo', True)
        fill_buffer = kwargs.pop('fill_buffer', False)
        no_data_timeout = kwargs.pop('no_data_timeout', None)
        enabled_fe_channels = kwargs.pop('enabled_fe_channels', self._enabled_fe_channels)
        # Any remaining positional/keyword arguments are scan parameters.
        if args or kwargs:
            self.set_scan_parameters(*args, **kwargs)
        # Fix: the original referenced the comprehension variable `t` outside
        # its scope (NameError on Python 3); report the offending handle instead.
        if self._scan_threads and self.current_module_handle not in [t.name for t in self._scan_threads]:
            raise RuntimeError('Thread name "%s" is not valid.' % self.current_module_handle)
        # Fix: the original format string was never applied to an argument.
        if self._scan_threads and self.current_module_handle in self._curr_readout_threads:
            raise RuntimeError('Thread "%s" is already actively reading FIFO.' % self.current_module_handle)
        with self._readout_lock:
            self._curr_readout_threads.append(self.current_module_handle)
            self._starting_readout_event.clear()
        # Poll until every alive scan thread has registered for readout; the
        # last thread to arrive actually starts the FIFO readout.
        while not self._starting_readout_event.wait(0.01):
            if self.abort_run.is_set():
                break
            with self._readout_lock:
                if len(set(self._curr_readout_threads) & set([t.name for t in self._scan_threads if t.is_alive()])) == len(set([t.name for t in self._scan_threads if t.is_alive()])) or not self._scan_threads:
                    if not self.fifo_readout.is_running:
                        self.fifo_readout.start(fifos=self._selected_fifos, callback=callback, errback=errback, reset_rx=reset_rx, reset_fifo=reset_fifo, fill_buffer=fill_buffer, no_data_timeout=no_data_timeout, filter_func=self._filter, converter_func=self._converter, fifo_select=self._readout_fifos, enabled_fe_channels=enabled_fe_channels)
self._starting_readout_event.set() | def function[start_readout, parameter[self]]:
constant[ Starting the FIFO readout.
Starting of the FIFO readout is executed only once by a random thread.
Starting of the FIFO readout is synchronized between all threads reading out the FIFO.
]
variable[callback] assign[=] call[name[kwargs].pop, parameter[constant[callback], name[self].handle_data]]
variable[errback] assign[=] call[name[kwargs].pop, parameter[constant[errback], name[self].handle_err]]
variable[reset_rx] assign[=] call[name[kwargs].pop, parameter[constant[reset_rx], constant[True]]]
variable[reset_fifo] assign[=] call[name[kwargs].pop, parameter[constant[reset_fifo], constant[True]]]
variable[fill_buffer] assign[=] call[name[kwargs].pop, parameter[constant[fill_buffer], constant[False]]]
variable[no_data_timeout] assign[=] call[name[kwargs].pop, parameter[constant[no_data_timeout], constant[None]]]
variable[enabled_fe_channels] assign[=] call[name[kwargs].pop, parameter[constant[enabled_fe_channels], name[self]._enabled_fe_channels]]
if <ast.BoolOp object at 0x7da1b11de1d0> begin[:]
call[name[self].set_scan_parameters, parameter[<ast.Starred object at 0x7da1b11dfb80>]]
if <ast.BoolOp object at 0x7da1b11df430> begin[:]
<ast.Raise object at 0x7da1b11dc100>
if <ast.BoolOp object at 0x7da1b11dc670> begin[:]
<ast.Raise object at 0x7da1b11dc790>
with name[self]._readout_lock begin[:]
call[name[self]._curr_readout_threads.append, parameter[name[self].current_module_handle]]
call[name[self]._starting_readout_event.clear, parameter[]]
while <ast.UnaryOp object at 0x7da1b11de080> begin[:]
if call[name[self].abort_run.is_set, parameter[]] begin[:]
break
with name[self]._readout_lock begin[:]
if <ast.BoolOp object at 0x7da1b11dcfd0> begin[:]
if <ast.UnaryOp object at 0x7da1b11ddb40> begin[:]
call[name[self].fifo_readout.start, parameter[]]
call[name[self]._starting_readout_event.set, parameter[]] | keyword[def] identifier[start_readout] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
identifier[callback] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[self] . identifier[handle_data] )
identifier[errback] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[self] . identifier[handle_err] )
identifier[reset_rx] = identifier[kwargs] . identifier[pop] ( literal[string] , keyword[True] )
identifier[reset_fifo] = identifier[kwargs] . identifier[pop] ( literal[string] , keyword[True] )
identifier[fill_buffer] = identifier[kwargs] . identifier[pop] ( literal[string] , keyword[False] )
identifier[no_data_timeout] = identifier[kwargs] . identifier[pop] ( literal[string] , keyword[None] )
identifier[enabled_fe_channels] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[self] . identifier[_enabled_fe_channels] )
keyword[if] identifier[args] keyword[or] identifier[kwargs] :
identifier[self] . identifier[set_scan_parameters] (* identifier[args] ,** identifier[kwargs] )
keyword[if] identifier[self] . identifier[_scan_threads] keyword[and] identifier[self] . identifier[current_module_handle] keyword[not] keyword[in] [ identifier[t] . identifier[name] keyword[for] identifier[t] keyword[in] identifier[self] . identifier[_scan_threads] ]:
keyword[raise] identifier[RuntimeError] ( literal[string] % identifier[t] . identifier[name] )
keyword[if] identifier[self] . identifier[_scan_threads] keyword[and] identifier[self] . identifier[current_module_handle] keyword[in] identifier[self] . identifier[_curr_readout_threads] :
keyword[raise] identifier[RuntimeError] ( literal[string] )
keyword[with] identifier[self] . identifier[_readout_lock] :
identifier[self] . identifier[_curr_readout_threads] . identifier[append] ( identifier[self] . identifier[current_module_handle] )
identifier[self] . identifier[_starting_readout_event] . identifier[clear] ()
keyword[while] keyword[not] identifier[self] . identifier[_starting_readout_event] . identifier[wait] ( literal[int] ):
keyword[if] identifier[self] . identifier[abort_run] . identifier[is_set] ():
keyword[break]
keyword[with] identifier[self] . identifier[_readout_lock] :
keyword[if] identifier[len] ( identifier[set] ( identifier[self] . identifier[_curr_readout_threads] )& identifier[set] ([ identifier[t] . identifier[name] keyword[for] identifier[t] keyword[in] identifier[self] . identifier[_scan_threads] keyword[if] identifier[t] . identifier[is_alive] ()]))== identifier[len] ( identifier[set] ([ identifier[t] . identifier[name] keyword[for] identifier[t] keyword[in] identifier[self] . identifier[_scan_threads] keyword[if] identifier[t] . identifier[is_alive] ()])) keyword[or] keyword[not] identifier[self] . identifier[_scan_threads] :
keyword[if] keyword[not] identifier[self] . identifier[fifo_readout] . identifier[is_running] :
identifier[self] . identifier[fifo_readout] . identifier[start] ( identifier[fifos] = identifier[self] . identifier[_selected_fifos] , identifier[callback] = identifier[callback] , identifier[errback] = identifier[errback] , identifier[reset_rx] = identifier[reset_rx] , identifier[reset_fifo] = identifier[reset_fifo] , identifier[fill_buffer] = identifier[fill_buffer] , identifier[no_data_timeout] = identifier[no_data_timeout] , identifier[filter_func] = identifier[self] . identifier[_filter] , identifier[converter_func] = identifier[self] . identifier[_converter] , identifier[fifo_select] = identifier[self] . identifier[_readout_fifos] , identifier[enabled_fe_channels] = identifier[enabled_fe_channels] )
identifier[self] . identifier[_starting_readout_event] . identifier[set] () | def start_readout(self, *args, **kwargs):
""" Starting the FIFO readout.
Starting of the FIFO readout is executed only once by a random thread.
Starting of the FIFO readout is synchronized between all threads reading out the FIFO.
"""
# Pop parameters for fifo_readout.start
callback = kwargs.pop('callback', self.handle_data)
errback = kwargs.pop('errback', self.handle_err)
reset_rx = kwargs.pop('reset_rx', True)
reset_fifo = kwargs.pop('reset_fifo', True)
fill_buffer = kwargs.pop('fill_buffer', False)
no_data_timeout = kwargs.pop('no_data_timeout', None)
enabled_fe_channels = kwargs.pop('enabled_fe_channels', self._enabled_fe_channels)
if args or kwargs:
self.set_scan_parameters(*args, **kwargs) # depends on [control=['if'], data=[]]
if self._scan_threads and self.current_module_handle not in [t.name for t in self._scan_threads]:
raise RuntimeError('Thread name "%s" is not valid.' % t.name) # depends on [control=['if'], data=[]]
if self._scan_threads and self.current_module_handle in self._curr_readout_threads:
raise RuntimeError('Thread "%s" is already actively reading FIFO.') # depends on [control=['if'], data=[]]
with self._readout_lock:
self._curr_readout_threads.append(self.current_module_handle) # depends on [control=['with'], data=[]]
self._starting_readout_event.clear()
while not self._starting_readout_event.wait(0.01):
if self.abort_run.is_set():
break # depends on [control=['if'], data=[]]
with self._readout_lock:
if len(set(self._curr_readout_threads) & set([t.name for t in self._scan_threads if t.is_alive()])) == len(set([t.name for t in self._scan_threads if t.is_alive()])) or not self._scan_threads:
if not self.fifo_readout.is_running:
self.fifo_readout.start(fifos=self._selected_fifos, callback=callback, errback=errback, reset_rx=reset_rx, reset_fifo=reset_fifo, fill_buffer=fill_buffer, no_data_timeout=no_data_timeout, filter_func=self._filter, converter_func=self._converter, fifo_select=self._readout_fifos, enabled_fe_channels=enabled_fe_channels)
self._starting_readout_event.set() # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['with'], data=[]] # depends on [control=['while'], data=[]] |
def status():
    '''Show version, available job types and name of service.
    **Results:**
    :rtype: A dictionary with the following keys
    :param version: Version of the service provider
    :type version: float
    :param job_types: Available job types
    :type job_types: list of strings
    :param name: Name of the service
    :type name: string
    :param stats: Shows stats for jobs in queue
    :type stats: dictionary
    '''
    # NOTE(review): concatenating dict.keys() with `+` only works on
    # Python 2; under Python 3 this raises TypeError — confirm target version.
    job_types = async_types.keys() + sync_types.keys()
    counts = {}
    # One COUNT query per known job status.
    for job_status in job_statuses:
        counts[job_status] = db.ENGINE.execute(
            db.JOBS_TABLE.count()
            .where(db.JOBS_TABLE.c.status == job_status)
        ).first()[0]
    return flask.jsonify(
        version=0.1,
        job_types=job_types,
        name=app.config.get('NAME', 'example'),
        stats=counts
    ) | def function[status, parameter[]]:
constant[Show version, available job types and name of service.
**Results:**
:rtype: A dictionary with the following keys
:param version: Version of the service provider
:type version: float
:param job_types: Available job types
:type job_types: list of strings
:param name: Name of the service
:type name: string
:param stats: Shows stats for jobs in queue
:type stats: dictionary
]
variable[job_types] assign[=] binary_operation[call[name[async_types].keys, parameter[]] + call[name[sync_types].keys, parameter[]]]
variable[counts] assign[=] dictionary[[], []]
for taget[name[job_status]] in starred[name[job_statuses]] begin[:]
call[name[counts]][name[job_status]] assign[=] call[call[call[name[db].ENGINE.execute, parameter[call[call[name[db].JOBS_TABLE.count, parameter[]].where, parameter[compare[name[db].JOBS_TABLE.c.status equal[==] name[job_status]]]]]].first, parameter[]]][constant[0]]
return[call[name[flask].jsonify, parameter[]]] | keyword[def] identifier[status] ():
literal[string]
identifier[job_types] = identifier[async_types] . identifier[keys] ()+ identifier[sync_types] . identifier[keys] ()
identifier[counts] ={}
keyword[for] identifier[job_status] keyword[in] identifier[job_statuses] :
identifier[counts] [ identifier[job_status] ]= identifier[db] . identifier[ENGINE] . identifier[execute] (
identifier[db] . identifier[JOBS_TABLE] . identifier[count] ()
. identifier[where] ( identifier[db] . identifier[JOBS_TABLE] . identifier[c] . identifier[status] == identifier[job_status] )
). identifier[first] ()[ literal[int] ]
keyword[return] identifier[flask] . identifier[jsonify] (
identifier[version] = literal[int] ,
identifier[job_types] = identifier[job_types] ,
identifier[name] = identifier[app] . identifier[config] . identifier[get] ( literal[string] , literal[string] ),
identifier[stats] = identifier[counts]
) | def status():
"""Show version, available job types and name of service.
**Results:**
:rtype: A dictionary with the following keys
:param version: Version of the service provider
:type version: float
:param job_types: Available job types
:type job_types: list of strings
:param name: Name of the service
:type name: string
:param stats: Shows stats for jobs in queue
:type stats: dictionary
"""
job_types = async_types.keys() + sync_types.keys()
counts = {}
for job_status in job_statuses:
counts[job_status] = db.ENGINE.execute(db.JOBS_TABLE.count().where(db.JOBS_TABLE.c.status == job_status)).first()[0] # depends on [control=['for'], data=['job_status']]
return flask.jsonify(version=0.1, job_types=job_types, name=app.config.get('NAME', 'example'), stats=counts) |
def __translate(self, parameter_values):
        """
        This method is unused and a Work In Progress
        """
        # NOTE(review): despite its name, `template_copy` aliases
        # self.template — no copy is made here; confirm the translator
        # does not mutate it.
        template_copy = self.template
        sam_parser = Parser()
        sam_translator = Translator(managed_policy_map=self.__managed_policy_map(),
                                    sam_parser=sam_parser,
                                    # Default plugins are already initialized within the Translator
                                    plugins=self.extra_plugins)
        return sam_translator.translate(sam_template=template_copy,
                                        parameter_values=parameter_values) | def function[__translate, parameter[self, parameter_values]]:
constant[
This method is unused and a Work In Progress
]
variable[template_copy] assign[=] name[self].template
variable[sam_parser] assign[=] call[name[Parser], parameter[]]
variable[sam_translator] assign[=] call[name[Translator], parameter[]]
return[call[name[sam_translator].translate, parameter[]]] | keyword[def] identifier[__translate] ( identifier[self] , identifier[parameter_values] ):
literal[string]
identifier[template_copy] = identifier[self] . identifier[template]
identifier[sam_parser] = identifier[Parser] ()
identifier[sam_translator] = identifier[Translator] ( identifier[managed_policy_map] = identifier[self] . identifier[__managed_policy_map] (),
identifier[sam_parser] = identifier[sam_parser] ,
identifier[plugins] = identifier[self] . identifier[extra_plugins] )
keyword[return] identifier[sam_translator] . identifier[translate] ( identifier[sam_template] = identifier[template_copy] ,
identifier[parameter_values] = identifier[parameter_values] ) | def __translate(self, parameter_values):
"""
This method is unused and a Work In Progress
"""
template_copy = self.template
sam_parser = Parser()
# Default plugins are already initialized within the Translator
sam_translator = Translator(managed_policy_map=self.__managed_policy_map(), sam_parser=sam_parser, plugins=self.extra_plugins)
return sam_translator.translate(sam_template=template_copy, parameter_values=parameter_values) |
def _qualified_names(modname):
"""Split the names of the given module into subparts
For example,
_qualified_names('pylint.checkers.ImportsChecker')
returns
['pylint', 'pylint.checkers', 'pylint.checkers.ImportsChecker']
"""
names = modname.split(".")
return [".".join(names[0 : i + 1]) for i in range(len(names))] | def function[_qualified_names, parameter[modname]]:
constant[Split the names of the given module into subparts
For example,
_qualified_names('pylint.checkers.ImportsChecker')
returns
['pylint', 'pylint.checkers', 'pylint.checkers.ImportsChecker']
]
variable[names] assign[=] call[name[modname].split, parameter[constant[.]]]
return[<ast.ListComp object at 0x7da1b02869b0>] | keyword[def] identifier[_qualified_names] ( identifier[modname] ):
literal[string]
identifier[names] = identifier[modname] . identifier[split] ( literal[string] )
keyword[return] [ literal[string] . identifier[join] ( identifier[names] [ literal[int] : identifier[i] + literal[int] ]) keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[names] ))] | def _qualified_names(modname):
"""Split the names of the given module into subparts
For example,
_qualified_names('pylint.checkers.ImportsChecker')
returns
['pylint', 'pylint.checkers', 'pylint.checkers.ImportsChecker']
"""
names = modname.split('.')
return ['.'.join(names[0:i + 1]) for i in range(len(names))] |
def calc_freefree_snu_ujy(ne, t, width, elongation, dist, ghz):
    """Calculate a flux density, in microjansky, from pure free-free emission.
    """
    freq_hz = 1e9 * ghz  # convert GHz to Hz
    emissivity = calc_freefree_eta(ne, t, freq_hz)
    absorption = calc_freefree_kappa(ne, t, freq_hz)
    flux_cgs = calc_snu(emissivity, absorption, width, elongation, dist)
    # Convert the cgs flux density to uJy.
    return flux_cgs * cgs.jypercgs * 1e6
constant[Calculate a flux density from pure free-free emission.
]
variable[hz] assign[=] binary_operation[name[ghz] * constant[1000000000.0]]
variable[eta] assign[=] call[name[calc_freefree_eta], parameter[name[ne], name[t], name[hz]]]
variable[kappa] assign[=] call[name[calc_freefree_kappa], parameter[name[ne], name[t], name[hz]]]
variable[snu] assign[=] call[name[calc_snu], parameter[name[eta], name[kappa], name[width], name[elongation], name[dist]]]
variable[ujy] assign[=] binary_operation[binary_operation[name[snu] * name[cgs].jypercgs] * constant[1000000.0]]
return[name[ujy]] | keyword[def] identifier[calc_freefree_snu_ujy] ( identifier[ne] , identifier[t] , identifier[width] , identifier[elongation] , identifier[dist] , identifier[ghz] ):
literal[string]
identifier[hz] = identifier[ghz] * literal[int]
identifier[eta] = identifier[calc_freefree_eta] ( identifier[ne] , identifier[t] , identifier[hz] )
identifier[kappa] = identifier[calc_freefree_kappa] ( identifier[ne] , identifier[t] , identifier[hz] )
identifier[snu] = identifier[calc_snu] ( identifier[eta] , identifier[kappa] , identifier[width] , identifier[elongation] , identifier[dist] )
identifier[ujy] = identifier[snu] * identifier[cgs] . identifier[jypercgs] * literal[int]
keyword[return] identifier[ujy] | def calc_freefree_snu_ujy(ne, t, width, elongation, dist, ghz):
"""Calculate a flux density from pure free-free emission.
"""
hz = ghz * 1000000000.0
eta = calc_freefree_eta(ne, t, hz)
kappa = calc_freefree_kappa(ne, t, hz)
snu = calc_snu(eta, kappa, width, elongation, dist)
ujy = snu * cgs.jypercgs * 1000000.0
return ujy |
def send_command_return(self, obj, command, *arguments):
    """ Send command and wait for single line output. """
    chassis = self.chassis_list[obj.chassis]
    raw_reply = chassis.sendQuery(obj._build_index_command(command, *arguments))
    return obj._extract_return(command, raw_reply)
constant[ Send command and wait for single line output. ]
variable[index_command] assign[=] call[name[obj]._build_index_command, parameter[name[command], <ast.Starred object at 0x7da18f09ece0>]]
return[call[name[obj]._extract_return, parameter[name[command], call[call[name[self].chassis_list][name[obj].chassis].sendQuery, parameter[name[index_command]]]]]] | keyword[def] identifier[send_command_return] ( identifier[self] , identifier[obj] , identifier[command] ,* identifier[arguments] ):
literal[string]
identifier[index_command] = identifier[obj] . identifier[_build_index_command] ( identifier[command] ,* identifier[arguments] )
keyword[return] identifier[obj] . identifier[_extract_return] ( identifier[command] , identifier[self] . identifier[chassis_list] [ identifier[obj] . identifier[chassis] ]. identifier[sendQuery] ( identifier[index_command] )) | def send_command_return(self, obj, command, *arguments):
""" Send command and wait for single line output. """
index_command = obj._build_index_command(command, *arguments)
return obj._extract_return(command, self.chassis_list[obj.chassis].sendQuery(index_command)) |
def chunk_string(string, length):
    """Split *string* into fixed-length chunks.

    Returns a lazy generator that yields consecutive slices of at most
    ``length`` characters; the final chunk may be shorter when
    ``len(string)`` is not a multiple of ``length``.

    Reference: http://stackoverflow.com/questions/18854620
    """
    # The redundant "0 + i" of the original is dropped; slicing past the
    # end of the string is safe and simply yields a shorter final chunk.
    return (string[i:i + length] for i in range(0, len(string), length))
constant[
Splits a string into fixed-length chunks.
This function returns a generator, using a generator comprehension. The
generator returns the string sliced, from 0 + a multiple of the length
of the chunks, to the length of the chunks + a multiple of the length
of the chunks.
Reference: http://stackoverflow.com/questions/18854620
]
return[<ast.GeneratorExp object at 0x7da1b271d8a0>] | keyword[def] identifier[chunk_string] ( identifier[string] , identifier[length] ):
literal[string]
keyword[return] ( identifier[string] [ literal[int] + identifier[i] : identifier[length] + identifier[i] ] keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[len] ( identifier[string] ), identifier[length] )) | def chunk_string(string, length):
"""
Splits a string into fixed-length chunks.
This function returns a generator, using a generator comprehension. The
generator returns the string sliced, from 0 + a multiple of the length
of the chunks, to the length of the chunks + a multiple of the length
of the chunks.
Reference: http://stackoverflow.com/questions/18854620
"""
return (string[0 + i:length + i] for i in range(0, len(string), length)) |
def convert_node(self, node):
    """
    Convert the given rupture node into a hazardlib rupture, depending
    on the node tag.

    :param node: a node representing a rupture
    """
    # Dispatch to the convert_<tag> method matching the node's tag.
    handler = getattr(self, 'convert_' + striptag(node.tag))
    return handler(node)
constant[
Convert the given rupture node into a hazardlib rupture, depending
on the node tag.
:param node: a node representing a rupture
]
variable[convert] assign[=] call[name[getattr], parameter[name[self], binary_operation[constant[convert_] + call[name[striptag], parameter[name[node].tag]]]]]
return[call[name[convert], parameter[name[node]]]] | keyword[def] identifier[convert_node] ( identifier[self] , identifier[node] ):
literal[string]
identifier[convert] = identifier[getattr] ( identifier[self] , literal[string] + identifier[striptag] ( identifier[node] . identifier[tag] ))
keyword[return] identifier[convert] ( identifier[node] ) | def convert_node(self, node):
"""
Convert the given rupture node into a hazardlib rupture, depending
on the node tag.
:param node: a node representing a rupture
"""
convert = getattr(self, 'convert_' + striptag(node.tag))
return convert(node) |
def is_resource_class_resource_attribute(rc, attr_name):
    """
    Checks if the given attribute name is a resource attribute (i.e., either
    a member or a collection attribute) of the given registered resource.
    """
    # Look up the attribute of the registered resource class.
    attr = get_resource_class_attribute(rc, attr_name)
    # Anything that does not compare equal to the TERMINAL kind is treated
    # as a resource attribute.
    # NOTE(review): if get_resource_class_attribute can return None for an
    # unknown attribute name, this expression also yields True for it --
    # confirm against that function's contract.
    return attr != RESOURCE_ATTRIBUTE_KINDS.TERMINAL
constant[
Checks if the given attribute name is a resource attribute (i.e., either
a member or a collection attribute) of the given registered resource.
]
variable[attr] assign[=] call[name[get_resource_class_attribute], parameter[name[rc], name[attr_name]]]
return[compare[name[attr] not_equal[!=] name[RESOURCE_ATTRIBUTE_KINDS].TERMINAL]] | keyword[def] identifier[is_resource_class_resource_attribute] ( identifier[rc] , identifier[attr_name] ):
literal[string]
identifier[attr] = identifier[get_resource_class_attribute] ( identifier[rc] , identifier[attr_name] )
keyword[return] identifier[attr] != identifier[RESOURCE_ATTRIBUTE_KINDS] . identifier[TERMINAL] | def is_resource_class_resource_attribute(rc, attr_name):
"""
Checks if the given attribute name is a resource attribute (i.e., either
a member or a collection attribute) of the given registered resource.
"""
attr = get_resource_class_attribute(rc, attr_name)
return attr != RESOURCE_ATTRIBUTE_KINDS.TERMINAL |
def get_latitude(self, ip):
    ''' Get latitude '''
    record = self.get_all(ip)
    if not record:
        # Propagate the falsy lookup result (e.g. None) unchanged.
        return record
    return record.latitude
constant[ Get latitude ]
variable[rec] assign[=] call[name[self].get_all, parameter[name[ip]]]
return[<ast.BoolOp object at 0x7da1b0d20790>] | keyword[def] identifier[get_latitude] ( identifier[self] , identifier[ip] ):
literal[string]
identifier[rec] = identifier[self] . identifier[get_all] ( identifier[ip] )
keyword[return] identifier[rec] keyword[and] identifier[rec] . identifier[latitude] | def get_latitude(self, ip):
""" Get latitude """
rec = self.get_all(ip)
return rec and rec.latitude |
def get_shutit_pexpect_session_from_child(self, shutit_pexpect_child):
    """Given a pexpect/child object, return the shutit_pexpect_session object.
    """
    shutit_global.shutit_global_object.yield_to_draw()
    # Reject anything that is not a real pexpect spawn object up front.
    if not isinstance(shutit_pexpect_child, pexpect.pty_spawn.spawn):
        self.fail('Wrong type in get_shutit_pexpect_session_child: ' + str(type(shutit_pexpect_child)),throw_exception=True) # pragma: no cover
    # Linear scan over the registered sessions for a matching child.
    for session in self.shutit_pexpect_sessions.values():
        if session.pexpect_child == shutit_pexpect_child:
            return session
    return self.fail('Should not get here in get_shutit_pexpect_session',throw_exception=True)
constant[Given a pexpect/child object, return the shutit_pexpect_session object.
]
call[name[shutit_global].shutit_global_object.yield_to_draw, parameter[]]
if <ast.UnaryOp object at 0x7da18fe91840> begin[:]
call[name[self].fail, parameter[binary_operation[constant[Wrong type in get_shutit_pexpect_session_child: ] + call[name[str], parameter[call[name[type], parameter[name[shutit_pexpect_child]]]]]]]]
for taget[name[key]] in starred[name[self].shutit_pexpect_sessions] begin[:]
if compare[call[name[self].shutit_pexpect_sessions][name[key]].pexpect_child equal[==] name[shutit_pexpect_child]] begin[:]
return[call[name[self].shutit_pexpect_sessions][name[key]]]
return[call[name[self].fail, parameter[constant[Should not get here in get_shutit_pexpect_session]]]] | keyword[def] identifier[get_shutit_pexpect_session_from_child] ( identifier[self] , identifier[shutit_pexpect_child] ):
literal[string]
identifier[shutit_global] . identifier[shutit_global_object] . identifier[yield_to_draw] ()
keyword[if] keyword[not] identifier[isinstance] ( identifier[shutit_pexpect_child] , identifier[pexpect] . identifier[pty_spawn] . identifier[spawn] ):
identifier[self] . identifier[fail] ( literal[string] + identifier[str] ( identifier[type] ( identifier[shutit_pexpect_child] )), identifier[throw_exception] = keyword[True] )
keyword[for] identifier[key] keyword[in] identifier[self] . identifier[shutit_pexpect_sessions] :
keyword[if] identifier[self] . identifier[shutit_pexpect_sessions] [ identifier[key] ]. identifier[pexpect_child] == identifier[shutit_pexpect_child] :
keyword[return] identifier[self] . identifier[shutit_pexpect_sessions] [ identifier[key] ]
keyword[return] identifier[self] . identifier[fail] ( literal[string] , identifier[throw_exception] = keyword[True] ) | def get_shutit_pexpect_session_from_child(self, shutit_pexpect_child):
"""Given a pexpect/child object, return the shutit_pexpect_session object.
"""
shutit_global.shutit_global_object.yield_to_draw()
if not isinstance(shutit_pexpect_child, pexpect.pty_spawn.spawn):
self.fail('Wrong type in get_shutit_pexpect_session_child: ' + str(type(shutit_pexpect_child)), throw_exception=True) # pragma: no cover # depends on [control=['if'], data=[]]
for key in self.shutit_pexpect_sessions:
if self.shutit_pexpect_sessions[key].pexpect_child == shutit_pexpect_child:
return self.shutit_pexpect_sessions[key] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['key']]
return self.fail('Should not get here in get_shutit_pexpect_session', throw_exception=True) |
def _get_alphanumeric_index(query_string):
""" Given an input string of either int or char, returns what index in the alphabet and case it is
:param query_string: str, query string
:return: (int, str), list of the index and type
"""
# TODO: could probably rework this. it works, but it's ugly as hell.
try:
return [int(query_string), 'int']
except ValueError:
if len(query_string) == 1:
if query_string.isupper():
return [string.ascii_uppercase.index(query_string), 'char_hi']
elif query_string.islower():
return [string.ascii_lowercase.index(query_string), 'char_lo']
else:
raise IOError('The input is a string longer than one character') | def function[_get_alphanumeric_index, parameter[query_string]]:
constant[ Given an input string of either int or char, returns what index in the alphabet and case it is
:param query_string: str, query string
:return: (int, str), list of the index and type
]
<ast.Try object at 0x7da207f9b250> | keyword[def] identifier[_get_alphanumeric_index] ( identifier[query_string] ):
literal[string]
keyword[try] :
keyword[return] [ identifier[int] ( identifier[query_string] ), literal[string] ]
keyword[except] identifier[ValueError] :
keyword[if] identifier[len] ( identifier[query_string] )== literal[int] :
keyword[if] identifier[query_string] . identifier[isupper] ():
keyword[return] [ identifier[string] . identifier[ascii_uppercase] . identifier[index] ( identifier[query_string] ), literal[string] ]
keyword[elif] identifier[query_string] . identifier[islower] ():
keyword[return] [ identifier[string] . identifier[ascii_lowercase] . identifier[index] ( identifier[query_string] ), literal[string] ]
keyword[else] :
keyword[raise] identifier[IOError] ( literal[string] ) | def _get_alphanumeric_index(query_string):
""" Given an input string of either int or char, returns what index in the alphabet and case it is
:param query_string: str, query string
:return: (int, str), list of the index and type
"""
# TODO: could probably rework this. it works, but it's ugly as hell.
try:
return [int(query_string), 'int'] # depends on [control=['try'], data=[]]
except ValueError:
if len(query_string) == 1:
if query_string.isupper():
return [string.ascii_uppercase.index(query_string), 'char_hi'] # depends on [control=['if'], data=[]]
elif query_string.islower():
return [string.ascii_lowercase.index(query_string), 'char_lo'] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
raise IOError('The input is a string longer than one character') # depends on [control=['except'], data=[]] |
def get_root_objective_bank_ids(self, alias):
    """Gets the root objective bank Ids in this hierarchy.

    return: (osid.id.IdList) - the root objective bank Ids
    raise: OperationFailed - unable to complete request
    raise: PermissionDenied - authorization failure
    compliance: mandatory - This method must be implemented.
    """
    # Build the roots URL for this alias and issue the GET request.
    return self._get_request(self._urls.roots(alias))
constant[Gets the root objective bank Ids in this hierarchy.
return: (osid.id.IdList) - the root objective bank Ids
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
compliance: mandatory - This method must be implemented.
]
variable[url_path] assign[=] call[name[self]._urls.roots, parameter[name[alias]]]
return[call[name[self]._get_request, parameter[name[url_path]]]] | keyword[def] identifier[get_root_objective_bank_ids] ( identifier[self] , identifier[alias] ):
literal[string]
identifier[url_path] = identifier[self] . identifier[_urls] . identifier[roots] ( identifier[alias] )
keyword[return] identifier[self] . identifier[_get_request] ( identifier[url_path] ) | def get_root_objective_bank_ids(self, alias):
"""Gets the root objective bank Ids in this hierarchy.
return: (osid.id.IdList) - the root objective bank Ids
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
compliance: mandatory - This method must be implemented.
"""
url_path = self._urls.roots(alias)
return self._get_request(url_path) |
def get_n_cluster_in_events(event_numbers):
    '''Calculates the number of cluster in every given event.
    An external C++ library is used since there is no sufficient solution in python possible.
    Because of np.bincount # BUG #225 for values > int32 and the different handling under 32/64 bit operating systems.
    Parameters
    ----------
    event_numbers : numpy.array
        List of event numbers to be checked.
    Returns
    -------
    numpy.array
        First dimension is the event number.
        Second dimension is the number of cluster of the event.
    '''
    logging.debug("Calculate the number of cluster in every given event")
    event_numbers = np.ascontiguousarray(event_numbers)  # the C++ library requires a contiguous memory layout
    # Pre-allocated output buffers that the C++ routine fills in place;
    # counts fit in uint32, event numbers keep the input dtype.
    result_event_numbers = np.empty_like(event_numbers)
    result_count = np.empty_like(event_numbers, dtype=np.uint32)
    # Returns how many entries of the output buffers are actually valid.
    result_size = analysis_functions.get_n_cluster_in_events(event_numbers, result_event_numbers, result_count)
    # Pair up (event_number, count) rows, trimmed to the valid region.
    return np.vstack((result_event_numbers[:result_size], result_count[:result_size])).T
constant[Calculates the number of cluster in every given event.
An external C++ library is used since there is no sufficient solution in python possible.
Because of np.bincount # BUG #225 for values > int32 and the different handling under 32/64 bit operating systems.
Parameters
----------
event_numbers : numpy.array
List of event numbers to be checked.
Returns
-------
numpy.array
First dimension is the event number.
Second dimension is the number of cluster of the event.
]
call[name[logging].debug, parameter[constant[Calculate the number of cluster in every given event]]]
variable[event_numbers] assign[=] call[name[np].ascontiguousarray, parameter[name[event_numbers]]]
variable[result_event_numbers] assign[=] call[name[np].empty_like, parameter[name[event_numbers]]]
variable[result_count] assign[=] call[name[np].empty_like, parameter[name[event_numbers]]]
variable[result_size] assign[=] call[name[analysis_functions].get_n_cluster_in_events, parameter[name[event_numbers], name[result_event_numbers], name[result_count]]]
return[call[name[np].vstack, parameter[tuple[[<ast.Subscript object at 0x7da1b094be80>, <ast.Subscript object at 0x7da1b0948520>]]]].T] | keyword[def] identifier[get_n_cluster_in_events] ( identifier[event_numbers] ):
literal[string]
identifier[logging] . identifier[debug] ( literal[string] )
identifier[event_numbers] = identifier[np] . identifier[ascontiguousarray] ( identifier[event_numbers] )
identifier[result_event_numbers] = identifier[np] . identifier[empty_like] ( identifier[event_numbers] )
identifier[result_count] = identifier[np] . identifier[empty_like] ( identifier[event_numbers] , identifier[dtype] = identifier[np] . identifier[uint32] )
identifier[result_size] = identifier[analysis_functions] . identifier[get_n_cluster_in_events] ( identifier[event_numbers] , identifier[result_event_numbers] , identifier[result_count] )
keyword[return] identifier[np] . identifier[vstack] (( identifier[result_event_numbers] [: identifier[result_size] ], identifier[result_count] [: identifier[result_size] ])). identifier[T] | def get_n_cluster_in_events(event_numbers):
"""Calculates the number of cluster in every given event.
An external C++ library is used since there is no sufficient solution in python possible.
Because of np.bincount # BUG #225 for values > int32 and the different handling under 32/64 bit operating systems.
Parameters
----------
event_numbers : numpy.array
List of event numbers to be checked.
Returns
-------
numpy.array
First dimension is the event number.
Second dimension is the number of cluster of the event.
"""
logging.debug('Calculate the number of cluster in every given event')
event_numbers = np.ascontiguousarray(event_numbers) # change memory alignement for c++ library
result_event_numbers = np.empty_like(event_numbers)
result_count = np.empty_like(event_numbers, dtype=np.uint32)
result_size = analysis_functions.get_n_cluster_in_events(event_numbers, result_event_numbers, result_count)
return np.vstack((result_event_numbers[:result_size], result_count[:result_size])).T |
def codes_match_any(self, codes):
    """Match any code."""
    # True as soon as any selector's code is contained in *codes*.
    return any(selector.code in codes for selector in self.code_selectors)
constant[Match any code.]
for taget[name[selector]] in starred[name[self].code_selectors] begin[:]
if compare[name[selector].code in name[codes]] begin[:]
return[constant[True]]
return[constant[False]] | keyword[def] identifier[codes_match_any] ( identifier[self] , identifier[codes] ):
literal[string]
keyword[for] identifier[selector] keyword[in] identifier[self] . identifier[code_selectors] :
keyword[if] identifier[selector] . identifier[code] keyword[in] identifier[codes] :
keyword[return] keyword[True]
keyword[return] keyword[False] | def codes_match_any(self, codes):
"""Match any code."""
for selector in self.code_selectors:
if selector.code in codes:
return True # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['selector']]
return False |
async def setup():
    """ main function """
    # Connect to a QTM instance on localhost; returns None on failure.
    connection = await qtm.connect("127.0.0.1")
    if connection is None:
        return -1
    # Take control of the QTM host so we may start a new measurement.
    async with qtm.TakeControl(connection, "password"):
        state = await connection.get_state()
        if state != qtm.QRTEvent.EventConnected:
            # Not in the Connected state yet: start a new measurement and
            # wait (up to 10 s) for the Connected event to arrive.
            await connection.new()
            try:
                await connection.await_event(qtm.QRTEvent.EventConnected, timeout=10)
            except asyncio.TimeoutError:
                LOG.error("Failed to start new measurement")
                return -1
        # Stream 2D frames into a queue drained by package_receiver, and
        # schedule a shutdown of the whole pipeline after 30 seconds.
        queue = asyncio.Queue()
        receiver_future = asyncio.ensure_future(package_receiver(queue))
        await connection.stream_frames(components=["2d"], on_packet=queue.put_nowait)
        asyncio.ensure_future(shutdown(30, connection, receiver_future, queue))
literal[string]
identifier[connection] = keyword[await] identifier[qtm] . identifier[connect] ( literal[string] )
keyword[if] identifier[connection] keyword[is] keyword[None] :
keyword[return] - literal[int]
keyword[async] keyword[with] identifier[qtm] . identifier[TakeControl] ( identifier[connection] , literal[string] ):
identifier[state] = keyword[await] identifier[connection] . identifier[get_state] ()
keyword[if] identifier[state] != identifier[qtm] . identifier[QRTEvent] . identifier[EventConnected] :
keyword[await] identifier[connection] . identifier[new] ()
keyword[try] :
keyword[await] identifier[connection] . identifier[await_event] ( identifier[qtm] . identifier[QRTEvent] . identifier[EventConnected] , identifier[timeout] = literal[int] )
keyword[except] identifier[asyncio] . identifier[TimeoutError] :
identifier[LOG] . identifier[error] ( literal[string] )
keyword[return] - literal[int]
identifier[queue] = identifier[asyncio] . identifier[Queue] ()
identifier[receiver_future] = identifier[asyncio] . identifier[ensure_future] ( identifier[package_receiver] ( identifier[queue] ))
keyword[await] identifier[connection] . identifier[stream_frames] ( identifier[components] =[ literal[string] ], identifier[on_packet] = identifier[queue] . identifier[put_nowait] )
identifier[asyncio] . identifier[ensure_future] ( identifier[shutdown] ( literal[int] , identifier[connection] , identifier[receiver_future] , identifier[queue] )) | async def setup():
""" main function """
connection = await qtm.connect('127.0.0.1')
if connection is None:
return -1 # depends on [control=['if'], data=[]]
async with qtm.TakeControl(connection, 'password'):
state = await connection.get_state()
if state != qtm.QRTEvent.EventConnected:
await connection.new()
try:
await connection.await_event(qtm.QRTEvent.EventConnected, timeout=10) # depends on [control=['try'], data=[]]
except asyncio.TimeoutError:
LOG.error('Failed to start new measurement')
return -1 # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
queue = asyncio.Queue()
receiver_future = asyncio.ensure_future(package_receiver(queue))
await connection.stream_frames(components=['2d'], on_packet=queue.put_nowait)
asyncio.ensure_future(shutdown(30, connection, receiver_future, queue)) |
def readable(value,
             allow_empty = False,
             **kwargs):
    """Validate that ``value`` is a path to a readable file.

    .. caution::

      **Use of this validator is an anti-pattern and should be used with caution.**

      Checking readability *before* reading exposes callers to a
      `TOCTOU <https://en.wikipedia.org/wiki/Time_of_check_to_time_of_use>`_
      race. Prefer the EAFP pattern instead: attempt the read inside a
      ``try ... except (OSError, IOError)`` block.

    :param value: The path to a file on the local filesystem whose readability
      is to be validated.
    :type value: Path-like object

    :param allow_empty: If ``True``, returns :obj:`None <python:None>` if
      ``value`` is empty. If ``False``, raises a
      :class:`EmptyValueError <validator_collection.errors.EmptyValueError>`
      if ``value`` is empty. Defaults to ``False``.
    :type allow_empty: :class:`bool <python:bool>`

    :returns: Validated path-like object or :obj:`None <python:None>`
    :rtype: Path-like object or :obj:`None <python:None>`

    :raises EmptyValueError: if ``allow_empty`` is ``False`` and ``value`` is empty
    :raises NotPathlikeError: if ``value`` is not a path-like object
    :raises PathExistsError: if ``value`` does not exist on the local filesystem
    :raises NotAFileError: if ``value`` is not a valid file
    :raises NotReadableError: if ``value`` cannot be opened for reading
    """
    if not value:
        if allow_empty:
            return None
        raise errors.EmptyValueError('value (%s) was empty' % value)

    # Verify the path exists and is a file; returns the validated value.
    value = file_exists(value, force_run = True)  # pylint: disable=E1123

    # Probe readability by opening the file once and closing it again.
    try:
        handle = open(value, mode='r')
    except (OSError, IOError):
        raise errors.NotReadableError('file at %s could not be opened for '
                                      'reading' % value)
    else:
        handle.close()

    return value
constant[Validate that ``value`` is a path to a readable file.
.. caution::
**Use of this validator is an anti-pattern and should be used with caution.**
Validating the readability of a file *before* attempting to read it
exposes your code to a bug called
`TOCTOU <https://en.wikipedia.org/wiki/Time_of_check_to_time_of_use>`_.
This particular class of bug can expose your code to **security vulnerabilities**
and so this validator should only be used if you are an advanced user.
A better pattern to use when reading from a file is to apply the principle of
EAFP ("easier to ask forgiveness than permission"), and simply attempt to
write to the file using a ``try ... except`` block:
.. code-block:: python
try:
with open('path/to/filename.txt', mode = 'r') as file_object:
# read from file here
except (OSError, IOError) as error:
# Handle an error if unable to write.
:param value: The path to a file on the local filesystem whose readability
is to be validated.
:type value: Path-like object
:param allow_empty: If ``True``, returns :obj:`None <python:None>` if
``value`` is empty. If ``False``, raises a
:class:`EmptyValueError <validator_collection.errors.EmptyValueError>`
if ``value`` is empty. Defaults to ``False``.
:type allow_empty: :class:`bool <python:bool>`
:returns: Validated path-like object or :obj:`None <python:None>`
:rtype: Path-like object or :obj:`None <python:None>`
:raises EmptyValueError: if ``allow_empty`` is ``False`` and ``value``
is empty
:raises NotPathlikeError: if ``value`` is not a path-like object
:raises PathExistsError: if ``value`` does not exist on the local filesystem
:raises NotAFileError: if ``value`` is not a valid file
:raises NotReadableError: if ``value`` cannot be opened for reading
]
if <ast.BoolOp object at 0x7da1b06f08b0> begin[:]
<ast.Raise object at 0x7da1b06f2530>
variable[value] assign[=] call[name[file_exists], parameter[name[value]]]
<ast.Try object at 0x7da1b06f0af0>
return[name[value]] | keyword[def] identifier[readable] ( identifier[value] ,
identifier[allow_empty] = keyword[False] ,
** identifier[kwargs] ):
literal[string]
keyword[if] keyword[not] identifier[value] keyword[and] keyword[not] identifier[allow_empty] :
keyword[raise] identifier[errors] . identifier[EmptyValueError] ( literal[string] % identifier[value] )
keyword[elif] keyword[not] identifier[value] :
keyword[return] keyword[None]
identifier[value] = identifier[file_exists] ( identifier[value] , identifier[force_run] = keyword[True] )
keyword[try] :
keyword[with] identifier[open] ( identifier[value] , identifier[mode] = literal[string] ):
keyword[pass]
keyword[except] ( identifier[OSError] , identifier[IOError] ):
keyword[raise] identifier[errors] . identifier[NotReadableError] ( literal[string]
literal[string] % identifier[value] )
keyword[return] identifier[value] | def readable(value, allow_empty=False, **kwargs):
"""Validate that ``value`` is a path to a readable file.
.. caution::
**Use of this validator is an anti-pattern and should be used with caution.**
Validating the readability of a file *before* attempting to read it
exposes your code to a bug called
`TOCTOU <https://en.wikipedia.org/wiki/Time_of_check_to_time_of_use>`_.
This particular class of bug can expose your code to **security vulnerabilities**
and so this validator should only be used if you are an advanced user.
A better pattern to use when reading from a file is to apply the principle of
EAFP ("easier to ask forgiveness than permission"), and simply attempt to
write to the file using a ``try ... except`` block:
.. code-block:: python
try:
with open('path/to/filename.txt', mode = 'r') as file_object:
# read from file here
except (OSError, IOError) as error:
# Handle an error if unable to write.
:param value: The path to a file on the local filesystem whose readability
is to be validated.
:type value: Path-like object
:param allow_empty: If ``True``, returns :obj:`None <python:None>` if
``value`` is empty. If ``False``, raises a
:class:`EmptyValueError <validator_collection.errors.EmptyValueError>`
if ``value`` is empty. Defaults to ``False``.
:type allow_empty: :class:`bool <python:bool>`
:returns: Validated path-like object or :obj:`None <python:None>`
:rtype: Path-like object or :obj:`None <python:None>`
:raises EmptyValueError: if ``allow_empty`` is ``False`` and ``value``
is empty
:raises NotPathlikeError: if ``value`` is not a path-like object
:raises PathExistsError: if ``value`` does not exist on the local filesystem
:raises NotAFileError: if ``value`` is not a valid file
:raises NotReadableError: if ``value`` cannot be opened for reading
"""
if not value and (not allow_empty):
raise errors.EmptyValueError('value (%s) was empty' % value) # depends on [control=['if'], data=[]]
elif not value:
return None # depends on [control=['if'], data=[]]
value = file_exists(value, force_run=True) # pylint: disable=E1123
try:
with open(value, mode='r'):
pass # depends on [control=['with'], data=[]] # depends on [control=['try'], data=[]]
except (OSError, IOError):
raise errors.NotReadableError('file at %s could not be opened for reading' % value) # depends on [control=['except'], data=[]]
return value |
def cancelEdit( self ):
    """
    Rejects the current edit and shows the parts widget.

    :return: ``True`` if the edit was cancelled, ``False`` when the
             parts widget is already visible (nothing to cancel).
    """
    # While the parts widget is showing there is no inline edit to cancel.
    if self._partsWidget.isVisible():
        return False

    # Dismiss the completion UI, then restore the pre-edit text.
    self._completerTree.hide()
    popup = self.completer().popup()
    popup.hide()
    self.setText(self._originalText)
    return True
constant[
Rejects the current edit and shows the parts widget.
]
if call[name[self]._partsWidget.isVisible, parameter[]] begin[:]
return[constant[False]]
call[name[self]._completerTree.hide, parameter[]]
call[call[call[name[self].completer, parameter[]].popup, parameter[]].hide, parameter[]]
call[name[self].setText, parameter[name[self]._originalText]]
return[constant[True]] | keyword[def] identifier[cancelEdit] ( identifier[self] ):
literal[string]
keyword[if] ( identifier[self] . identifier[_partsWidget] . identifier[isVisible] ()):
keyword[return] keyword[False]
identifier[self] . identifier[_completerTree] . identifier[hide] ()
identifier[self] . identifier[completer] (). identifier[popup] (). identifier[hide] ()
identifier[self] . identifier[setText] ( identifier[self] . identifier[_originalText] )
keyword[return] keyword[True] | def cancelEdit(self):
"""
Rejects the current edit and shows the parts widget.
"""
if self._partsWidget.isVisible():
return False # depends on [control=['if'], data=[]]
self._completerTree.hide()
self.completer().popup().hide()
self.setText(self._originalText)
return True |
def to_html(self, codebase):
    """
    Convert this `FunctionDoc` to HTML.

    Renders the params/options/exceptions sections (when non-empty) as
    ``<dl>`` lists, appends the "see also" links, and wraps everything in
    an anchored ``<div class="function">``.
    """
    sections_html = ''
    for section_name in ('params', 'options', 'exceptions'):
        entries = getattr(self, section_name)
        if entries:
            rendered = '\n'.join(entry.to_html() for entry in entries)
            sections_html += '<h5>%s</h5>\n<dl class = "%s">%s</dl>' % (
                printable(section_name), section_name, rendered)
    sections_html += codebase.build_see_html(self.see, 'h5', self)
    template = ('<a name = "%s" />\n<div class = "function">\n'
                '<h4>%s</h4>\n%s\n%s\n</div>\n')
    return template % (
        self.name, self.name,
        htmlize_paragraphs(codebase.translate_links(self.doc, self)),
        sections_html)
constant[
Convert this `FunctionDoc` to HTML.
]
variable[body] assign[=] constant[]
for taget[name[section]] in starred[tuple[[<ast.Constant object at 0x7da18f58f6a0>, <ast.Constant object at 0x7da18f58d900>, <ast.Constant object at 0x7da18f58ebf0>]]] begin[:]
variable[val] assign[=] call[name[getattr], parameter[name[self], name[section]]]
if name[val] begin[:]
<ast.AugAssign object at 0x7da18f58f370>
<ast.AugAssign object at 0x7da18f58d480>
return[binary_operation[binary_operation[constant[<a name = "%s" />
<div class = "function">
] + constant[<h4>%s</h4>
%s
%s
</div>
]] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da18f58e590>, <ast.Attribute object at 0x7da18f58d600>, <ast.Call object at 0x7da18f58e200>, <ast.Name object at 0x7da18f58f6d0>]]]] | keyword[def] identifier[to_html] ( identifier[self] , identifier[codebase] ):
literal[string]
identifier[body] = literal[string]
keyword[for] identifier[section] keyword[in] ( literal[string] , literal[string] , literal[string] ):
identifier[val] = identifier[getattr] ( identifier[self] , identifier[section] )
keyword[if] identifier[val] :
identifier[body] += literal[string] %(
identifier[printable] ( identifier[section] ), identifier[section] ,
literal[string] . identifier[join] ( identifier[param] . identifier[to_html] () keyword[for] identifier[param] keyword[in] identifier[val] ))
identifier[body] += identifier[codebase] . identifier[build_see_html] ( identifier[self] . identifier[see] , literal[string] , identifier[self] )
keyword[return] ( literal[string] +
literal[string] )%( identifier[self] . identifier[name] , identifier[self] . identifier[name] ,
identifier[htmlize_paragraphs] ( identifier[codebase] . identifier[translate_links] ( identifier[self] . identifier[doc] , identifier[self] )), identifier[body] ) | def to_html(self, codebase):
"""
Convert this `FunctionDoc` to HTML.
"""
body = ''
for section in ('params', 'options', 'exceptions'):
val = getattr(self, section)
if val:
body += '<h5>%s</h5>\n<dl class = "%s">%s</dl>' % (printable(section), section, '\n'.join((param.to_html() for param in val))) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['section']]
body += codebase.build_see_html(self.see, 'h5', self)
return ('<a name = "%s" />\n<div class = "function">\n' + '<h4>%s</h4>\n%s\n%s\n</div>\n') % (self.name, self.name, htmlize_paragraphs(codebase.translate_links(self.doc, self)), body) |
def show_letter(letter, text_color=None, back_color=None):
    '''
    Displays a single letter on the LED matrix.
    letter
        The letter to display
    text_color
        The color in which the letter is shown. Defaults to '[255, 255, 255]' (white).
    back_color
        The background color of the display. Defaults to '[0, 0, 0]' (black).
    CLI Example:
    .. code-block:: bash
        salt 'raspberry' sensehat.show_letter O
        salt 'raspberry' sensehat.show_letter X '[255, 0, 0]'
        salt 'raspberry' sensehat.show_letter B '[0, 0, 255]' '[255, 255, 0]'
    '''
    # Fall back to white-on-black for any falsy color argument
    # (mirrors the original ``x or default`` truthiness behavior).
    if not text_color:
        text_color = [255, 255, 255]
    if not back_color:
        back_color = [0, 0, 0]
    _sensehat.show_letter(letter, text_color, back_color)
    return {'letter': letter}
constant[
Displays a single letter on the LED matrix.
letter
The letter to display
text_color
The color in which the letter is shown. Defaults to '[255, 255, 255]' (white).
back_color
The background color of the display. Defaults to '[0, 0, 0]' (black).
CLI Example:
.. code-block:: bash
salt 'raspberry' sensehat.show_letter O
salt 'raspberry' sensehat.show_letter X '[255, 0, 0]'
salt 'raspberry' sensehat.show_letter B '[0, 0, 255]' '[255, 255, 0]'
]
variable[text_color] assign[=] <ast.BoolOp object at 0x7da18ede46d0>
variable[back_color] assign[=] <ast.BoolOp object at 0x7da18ede6a10>
call[name[_sensehat].show_letter, parameter[name[letter], name[text_color], name[back_color]]]
return[dictionary[[<ast.Constant object at 0x7da18ede6e90>], [<ast.Name object at 0x7da18ede5f90>]]] | keyword[def] identifier[show_letter] ( identifier[letter] , identifier[text_color] = keyword[None] , identifier[back_color] = keyword[None] ):
literal[string]
identifier[text_color] = identifier[text_color] keyword[or] [ literal[int] , literal[int] , literal[int] ]
identifier[back_color] = identifier[back_color] keyword[or] [ literal[int] , literal[int] , literal[int] ]
identifier[_sensehat] . identifier[show_letter] ( identifier[letter] , identifier[text_color] , identifier[back_color] )
keyword[return] { literal[string] : identifier[letter] } | def show_letter(letter, text_color=None, back_color=None):
"""
Displays a single letter on the LED matrix.
letter
The letter to display
text_color
The color in which the letter is shown. Defaults to '[255, 255, 255]' (white).
back_color
The background color of the display. Defaults to '[0, 0, 0]' (black).
CLI Example:
.. code-block:: bash
salt 'raspberry' sensehat.show_letter O
salt 'raspberry' sensehat.show_letter X '[255, 0, 0]'
salt 'raspberry' sensehat.show_letter B '[0, 0, 255]' '[255, 255, 0]'
"""
text_color = text_color or [255, 255, 255]
back_color = back_color or [0, 0, 0]
_sensehat.show_letter(letter, text_color, back_color)
return {'letter': letter} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.