code stringlengths 75 104k | code_sememe stringlengths 47 309k | token_type stringlengths 215 214k | code_dependency stringlengths 75 155k |
|---|---|---|---|
def list_all_currencies(cls, **kwargs):
"""List Currencies
Return a list of Currencies
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.list_all_currencies(async=True)
>>> result = thread.get()
:param async bool
:param int page: page number
:param int size: page size
:param str sort: page order
:return: page[Currency]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._list_all_currencies_with_http_info(**kwargs)
else:
(data) = cls._list_all_currencies_with_http_info(**kwargs)
return data | def function[list_all_currencies, parameter[cls]]:
constant[List Currencies
Return a list of Currencies
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.list_all_currencies(async=True)
>>> result = thread.get()
:param async bool
:param int page: page number
:param int size: page size
:param str sort: page order
:return: page[Currency]
If the method is called asynchronously,
returns the request thread.
]
call[name[kwargs]][constant[_return_http_data_only]] assign[=] constant[True]
if call[name[kwargs].get, parameter[constant[async]]] begin[:]
return[call[name[cls]._list_all_currencies_with_http_info, parameter[]]] | keyword[def] identifier[list_all_currencies] ( identifier[cls] ,** identifier[kwargs] ):
literal[string]
identifier[kwargs] [ literal[string] ]= keyword[True]
keyword[if] identifier[kwargs] . identifier[get] ( literal[string] ):
keyword[return] identifier[cls] . identifier[_list_all_currencies_with_http_info] (** identifier[kwargs] )
keyword[else] :
( identifier[data] )= identifier[cls] . identifier[_list_all_currencies_with_http_info] (** identifier[kwargs] )
keyword[return] identifier[data] | def list_all_currencies(cls, **kwargs):
"""List Currencies
Return a list of Currencies
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.list_all_currencies(async=True)
>>> result = thread.get()
:param async bool
:param int page: page number
:param int size: page size
:param str sort: page order
:return: page[Currency]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._list_all_currencies_with_http_info(**kwargs) # depends on [control=['if'], data=[]]
else:
data = cls._list_all_currencies_with_http_info(**kwargs)
return data |
def prt_hier_up(self, goids, prt=sys.stdout):
"""Write hierarchy for all GO IDs below GO ID in arg, goid."""
go2goterm_all = {go:self.gosubdag.go2obj[go] for go in goids}
objp = GoPaths()
items_list = []
for namespace, go2term_ns in self._get_namespace2go2term(go2goterm_all).items():
goids_all = set() # GO IDs from user-specfied GO to root
for goid_usr, goterm in go2term_ns.items():
goids_all.add(goid_usr)
paths = objp.get_paths_from_to(goterm, goid_end=None, dn0_up1=True)
goids_all.update(set(o.id for p in paths for o in p))
# Only include GO IDs from user-specified GO to the root
if 'include_only' not in self.usrdct:
self.usrdct['include_only'] = set()
self.usrdct['include_only'].update(goids_all)
# Mark the user-specfied GO term
if 'item_marks' not in self.usrdct:
self.usrdct['item_marks'] = {}
for goid_usr in go2term_ns.keys():
if goid_usr not in self.usrdct['item_marks']:
self.usrdct['item_marks'][goid_usr] = '*'
# Write the hierarchy
wrhiercfg = self._get_wrhiercfg()
obj = WrHierPrt(self.gosubdag.go2obj, self.gosubdag.go2nt, wrhiercfg, prt)
go_root = self._get_goroot(goids_all, namespace)
obj.prt_hier_rec(go_root)
items_list.extend(obj.items_list)
return items_list | def function[prt_hier_up, parameter[self, goids, prt]]:
constant[Write hierarchy for all GO IDs below GO ID in arg, goid.]
variable[go2goterm_all] assign[=] <ast.DictComp object at 0x7da1b26acfa0>
variable[objp] assign[=] call[name[GoPaths], parameter[]]
variable[items_list] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1b26ad480>, <ast.Name object at 0x7da1b26aea40>]]] in starred[call[call[name[self]._get_namespace2go2term, parameter[name[go2goterm_all]]].items, parameter[]]] begin[:]
variable[goids_all] assign[=] call[name[set], parameter[]]
for taget[tuple[[<ast.Name object at 0x7da2054a6920>, <ast.Name object at 0x7da18bccba30>]]] in starred[call[name[go2term_ns].items, parameter[]]] begin[:]
call[name[goids_all].add, parameter[name[goid_usr]]]
variable[paths] assign[=] call[name[objp].get_paths_from_to, parameter[name[goterm]]]
call[name[goids_all].update, parameter[call[name[set], parameter[<ast.GeneratorExp object at 0x7da18bcc9c30>]]]]
if compare[constant[include_only] <ast.NotIn object at 0x7da2590d7190> name[self].usrdct] begin[:]
call[name[self].usrdct][constant[include_only]] assign[=] call[name[set], parameter[]]
call[call[name[self].usrdct][constant[include_only]].update, parameter[name[goids_all]]]
if compare[constant[item_marks] <ast.NotIn object at 0x7da2590d7190> name[self].usrdct] begin[:]
call[name[self].usrdct][constant[item_marks]] assign[=] dictionary[[], []]
for taget[name[goid_usr]] in starred[call[name[go2term_ns].keys, parameter[]]] begin[:]
if compare[name[goid_usr] <ast.NotIn object at 0x7da2590d7190> call[name[self].usrdct][constant[item_marks]]] begin[:]
call[call[name[self].usrdct][constant[item_marks]]][name[goid_usr]] assign[=] constant[*]
variable[wrhiercfg] assign[=] call[name[self]._get_wrhiercfg, parameter[]]
variable[obj] assign[=] call[name[WrHierPrt], parameter[name[self].gosubdag.go2obj, name[self].gosubdag.go2nt, name[wrhiercfg], name[prt]]]
variable[go_root] assign[=] call[name[self]._get_goroot, parameter[name[goids_all], name[namespace]]]
call[name[obj].prt_hier_rec, parameter[name[go_root]]]
call[name[items_list].extend, parameter[name[obj].items_list]]
return[name[items_list]] | keyword[def] identifier[prt_hier_up] ( identifier[self] , identifier[goids] , identifier[prt] = identifier[sys] . identifier[stdout] ):
literal[string]
identifier[go2goterm_all] ={ identifier[go] : identifier[self] . identifier[gosubdag] . identifier[go2obj] [ identifier[go] ] keyword[for] identifier[go] keyword[in] identifier[goids] }
identifier[objp] = identifier[GoPaths] ()
identifier[items_list] =[]
keyword[for] identifier[namespace] , identifier[go2term_ns] keyword[in] identifier[self] . identifier[_get_namespace2go2term] ( identifier[go2goterm_all] ). identifier[items] ():
identifier[goids_all] = identifier[set] ()
keyword[for] identifier[goid_usr] , identifier[goterm] keyword[in] identifier[go2term_ns] . identifier[items] ():
identifier[goids_all] . identifier[add] ( identifier[goid_usr] )
identifier[paths] = identifier[objp] . identifier[get_paths_from_to] ( identifier[goterm] , identifier[goid_end] = keyword[None] , identifier[dn0_up1] = keyword[True] )
identifier[goids_all] . identifier[update] ( identifier[set] ( identifier[o] . identifier[id] keyword[for] identifier[p] keyword[in] identifier[paths] keyword[for] identifier[o] keyword[in] identifier[p] ))
keyword[if] literal[string] keyword[not] keyword[in] identifier[self] . identifier[usrdct] :
identifier[self] . identifier[usrdct] [ literal[string] ]= identifier[set] ()
identifier[self] . identifier[usrdct] [ literal[string] ]. identifier[update] ( identifier[goids_all] )
keyword[if] literal[string] keyword[not] keyword[in] identifier[self] . identifier[usrdct] :
identifier[self] . identifier[usrdct] [ literal[string] ]={}
keyword[for] identifier[goid_usr] keyword[in] identifier[go2term_ns] . identifier[keys] ():
keyword[if] identifier[goid_usr] keyword[not] keyword[in] identifier[self] . identifier[usrdct] [ literal[string] ]:
identifier[self] . identifier[usrdct] [ literal[string] ][ identifier[goid_usr] ]= literal[string]
identifier[wrhiercfg] = identifier[self] . identifier[_get_wrhiercfg] ()
identifier[obj] = identifier[WrHierPrt] ( identifier[self] . identifier[gosubdag] . identifier[go2obj] , identifier[self] . identifier[gosubdag] . identifier[go2nt] , identifier[wrhiercfg] , identifier[prt] )
identifier[go_root] = identifier[self] . identifier[_get_goroot] ( identifier[goids_all] , identifier[namespace] )
identifier[obj] . identifier[prt_hier_rec] ( identifier[go_root] )
identifier[items_list] . identifier[extend] ( identifier[obj] . identifier[items_list] )
keyword[return] identifier[items_list] | def prt_hier_up(self, goids, prt=sys.stdout):
"""Write hierarchy for all GO IDs below GO ID in arg, goid."""
go2goterm_all = {go: self.gosubdag.go2obj[go] for go in goids}
objp = GoPaths()
items_list = []
for (namespace, go2term_ns) in self._get_namespace2go2term(go2goterm_all).items():
goids_all = set() # GO IDs from user-specfied GO to root
for (goid_usr, goterm) in go2term_ns.items():
goids_all.add(goid_usr)
paths = objp.get_paths_from_to(goterm, goid_end=None, dn0_up1=True)
goids_all.update(set((o.id for p in paths for o in p))) # depends on [control=['for'], data=[]]
# Only include GO IDs from user-specified GO to the root
if 'include_only' not in self.usrdct:
self.usrdct['include_only'] = set() # depends on [control=['if'], data=[]]
self.usrdct['include_only'].update(goids_all)
# Mark the user-specfied GO term
if 'item_marks' not in self.usrdct:
self.usrdct['item_marks'] = {} # depends on [control=['if'], data=[]]
for goid_usr in go2term_ns.keys():
if goid_usr not in self.usrdct['item_marks']:
self.usrdct['item_marks'][goid_usr] = '*' # depends on [control=['if'], data=['goid_usr']] # depends on [control=['for'], data=['goid_usr']]
# Write the hierarchy
wrhiercfg = self._get_wrhiercfg()
obj = WrHierPrt(self.gosubdag.go2obj, self.gosubdag.go2nt, wrhiercfg, prt)
go_root = self._get_goroot(goids_all, namespace)
obj.prt_hier_rec(go_root)
items_list.extend(obj.items_list) # depends on [control=['for'], data=[]]
return items_list |
def read_header(self):
""" Read next header (multiple headers in file)
Returns:
(header, data_idx) - a dictionary of keyword:value header data and
also the byte index of where the corresponding data block resides.
"""
start_idx = self.file_obj.tell()
key, val = '', ''
header_dict = {}
keep_reading = True
first_line = self.file_obj
try:
while keep_reading:
if start_idx + 80 > self.filesize:
keep_reading = False
raise EndOfFileError("End Of Data File")
line = self.file_obj.read(80)
if PYTHON3:
line = line.decode("utf-8")
# print line
if line.startswith('END'):
keep_reading = False
break
else:
key, val = line.split('=')
key, val = key.strip(), val.strip()
if "'" in val:
# Items in quotes are strings
val = str(val.strip("'").strip())
elif "." in val:
# Items with periods are floats (if not a string)
val = float(val)
else:
# Otherwise it's an integer
val = int(val)
header_dict[key] = val
except ValueError:
print("CURRENT LINE: ", line)
print("BLOCK START IDX: ", start_idx)
print("FILE SIZE: ", self.filesize)
print("NEXT 512 BYTES: \n")
print(self.file_obj.read(512))
raise
data_idx = self.file_obj.tell()
# Seek past padding if DIRECTIO is being used
if "DIRECTIO" in header_dict.keys():
if int(header_dict["DIRECTIO"]) == 1:
if data_idx % 512:
data_idx += (512 - data_idx % 512)
self.file_obj.seek(start_idx)
return header_dict, data_idx | def function[read_header, parameter[self]]:
constant[ Read next header (multiple headers in file)
Returns:
(header, data_idx) - a dictionary of keyword:value header data and
also the byte index of where the corresponding data block resides.
]
variable[start_idx] assign[=] call[name[self].file_obj.tell, parameter[]]
<ast.Tuple object at 0x7da18f09cd60> assign[=] tuple[[<ast.Constant object at 0x7da18f09edd0>, <ast.Constant object at 0x7da18f09e2c0>]]
variable[header_dict] assign[=] dictionary[[], []]
variable[keep_reading] assign[=] constant[True]
variable[first_line] assign[=] name[self].file_obj
<ast.Try object at 0x7da18f09f280>
variable[data_idx] assign[=] call[name[self].file_obj.tell, parameter[]]
if compare[constant[DIRECTIO] in call[name[header_dict].keys, parameter[]]] begin[:]
if compare[call[name[int], parameter[call[name[header_dict]][constant[DIRECTIO]]]] equal[==] constant[1]] begin[:]
if binary_operation[name[data_idx] <ast.Mod object at 0x7da2590d6920> constant[512]] begin[:]
<ast.AugAssign object at 0x7da18f812650>
call[name[self].file_obj.seek, parameter[name[start_idx]]]
return[tuple[[<ast.Name object at 0x7da18f812380>, <ast.Name object at 0x7da18f811240>]]] | keyword[def] identifier[read_header] ( identifier[self] ):
literal[string]
identifier[start_idx] = identifier[self] . identifier[file_obj] . identifier[tell] ()
identifier[key] , identifier[val] = literal[string] , literal[string]
identifier[header_dict] ={}
identifier[keep_reading] = keyword[True]
identifier[first_line] = identifier[self] . identifier[file_obj]
keyword[try] :
keyword[while] identifier[keep_reading] :
keyword[if] identifier[start_idx] + literal[int] > identifier[self] . identifier[filesize] :
identifier[keep_reading] = keyword[False]
keyword[raise] identifier[EndOfFileError] ( literal[string] )
identifier[line] = identifier[self] . identifier[file_obj] . identifier[read] ( literal[int] )
keyword[if] identifier[PYTHON3] :
identifier[line] = identifier[line] . identifier[decode] ( literal[string] )
keyword[if] identifier[line] . identifier[startswith] ( literal[string] ):
identifier[keep_reading] = keyword[False]
keyword[break]
keyword[else] :
identifier[key] , identifier[val] = identifier[line] . identifier[split] ( literal[string] )
identifier[key] , identifier[val] = identifier[key] . identifier[strip] (), identifier[val] . identifier[strip] ()
keyword[if] literal[string] keyword[in] identifier[val] :
identifier[val] = identifier[str] ( identifier[val] . identifier[strip] ( literal[string] ). identifier[strip] ())
keyword[elif] literal[string] keyword[in] identifier[val] :
identifier[val] = identifier[float] ( identifier[val] )
keyword[else] :
identifier[val] = identifier[int] ( identifier[val] )
identifier[header_dict] [ identifier[key] ]= identifier[val]
keyword[except] identifier[ValueError] :
identifier[print] ( literal[string] , identifier[line] )
identifier[print] ( literal[string] , identifier[start_idx] )
identifier[print] ( literal[string] , identifier[self] . identifier[filesize] )
identifier[print] ( literal[string] )
identifier[print] ( identifier[self] . identifier[file_obj] . identifier[read] ( literal[int] ))
keyword[raise]
identifier[data_idx] = identifier[self] . identifier[file_obj] . identifier[tell] ()
keyword[if] literal[string] keyword[in] identifier[header_dict] . identifier[keys] ():
keyword[if] identifier[int] ( identifier[header_dict] [ literal[string] ])== literal[int] :
keyword[if] identifier[data_idx] % literal[int] :
identifier[data_idx] +=( literal[int] - identifier[data_idx] % literal[int] )
identifier[self] . identifier[file_obj] . identifier[seek] ( identifier[start_idx] )
keyword[return] identifier[header_dict] , identifier[data_idx] | def read_header(self):
""" Read next header (multiple headers in file)
Returns:
(header, data_idx) - a dictionary of keyword:value header data and
also the byte index of where the corresponding data block resides.
"""
start_idx = self.file_obj.tell()
(key, val) = ('', '')
header_dict = {}
keep_reading = True
first_line = self.file_obj
try:
while keep_reading:
if start_idx + 80 > self.filesize:
keep_reading = False
raise EndOfFileError('End Of Data File') # depends on [control=['if'], data=[]]
line = self.file_obj.read(80)
if PYTHON3:
line = line.decode('utf-8') # depends on [control=['if'], data=[]]
# print line
if line.startswith('END'):
keep_reading = False
break # depends on [control=['if'], data=[]]
else:
(key, val) = line.split('=')
(key, val) = (key.strip(), val.strip())
if "'" in val:
# Items in quotes are strings
val = str(val.strip("'").strip()) # depends on [control=['if'], data=['val']]
elif '.' in val:
# Items with periods are floats (if not a string)
val = float(val) # depends on [control=['if'], data=['val']]
else:
# Otherwise it's an integer
val = int(val)
header_dict[key] = val # depends on [control=['while'], data=[]] # depends on [control=['try'], data=[]]
except ValueError:
print('CURRENT LINE: ', line)
print('BLOCK START IDX: ', start_idx)
print('FILE SIZE: ', self.filesize)
print('NEXT 512 BYTES: \n')
print(self.file_obj.read(512))
raise # depends on [control=['except'], data=[]]
data_idx = self.file_obj.tell()
# Seek past padding if DIRECTIO is being used
if 'DIRECTIO' in header_dict.keys():
if int(header_dict['DIRECTIO']) == 1:
if data_idx % 512:
data_idx += 512 - data_idx % 512 # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
self.file_obj.seek(start_idx)
return (header_dict, data_idx) |
def __load_profile(self, profile, uuid, verbose):
"""Create a new profile when the unique identity does not have any."""
def is_empty_profile(prf):
return not (prf.name or prf.email or
prf.gender or prf.gender_acc or
prf.is_bot or prf.country_code)
uid = api.unique_identities(self.db, uuid)[0]
if profile:
self.__create_profile(profile, uuid, verbose)
elif is_empty_profile(uid.profile):
self.__create_profile_from_identities(uid.identities, uuid, verbose)
else:
self.log("-- empty profile given for %s. Not updated" % uuid, verbose) | def function[__load_profile, parameter[self, profile, uuid, verbose]]:
constant[Create a new profile when the unique identity does not have any.]
def function[is_empty_profile, parameter[prf]]:
return[<ast.UnaryOp object at 0x7da1b0e177c0>]
variable[uid] assign[=] call[call[name[api].unique_identities, parameter[name[self].db, name[uuid]]]][constant[0]]
if name[profile] begin[:]
call[name[self].__create_profile, parameter[name[profile], name[uuid], name[verbose]]] | keyword[def] identifier[__load_profile] ( identifier[self] , identifier[profile] , identifier[uuid] , identifier[verbose] ):
literal[string]
keyword[def] identifier[is_empty_profile] ( identifier[prf] ):
keyword[return] keyword[not] ( identifier[prf] . identifier[name] keyword[or] identifier[prf] . identifier[email] keyword[or]
identifier[prf] . identifier[gender] keyword[or] identifier[prf] . identifier[gender_acc] keyword[or]
identifier[prf] . identifier[is_bot] keyword[or] identifier[prf] . identifier[country_code] )
identifier[uid] = identifier[api] . identifier[unique_identities] ( identifier[self] . identifier[db] , identifier[uuid] )[ literal[int] ]
keyword[if] identifier[profile] :
identifier[self] . identifier[__create_profile] ( identifier[profile] , identifier[uuid] , identifier[verbose] )
keyword[elif] identifier[is_empty_profile] ( identifier[uid] . identifier[profile] ):
identifier[self] . identifier[__create_profile_from_identities] ( identifier[uid] . identifier[identities] , identifier[uuid] , identifier[verbose] )
keyword[else] :
identifier[self] . identifier[log] ( literal[string] % identifier[uuid] , identifier[verbose] ) | def __load_profile(self, profile, uuid, verbose):
"""Create a new profile when the unique identity does not have any."""
def is_empty_profile(prf):
return not (prf.name or prf.email or prf.gender or prf.gender_acc or prf.is_bot or prf.country_code)
uid = api.unique_identities(self.db, uuid)[0]
if profile:
self.__create_profile(profile, uuid, verbose) # depends on [control=['if'], data=[]]
elif is_empty_profile(uid.profile):
self.__create_profile_from_identities(uid.identities, uuid, verbose) # depends on [control=['if'], data=[]]
else:
self.log('-- empty profile given for %s. Not updated' % uuid, verbose) |
def get_changes_since(self, change_number, app_changes=True, package_changes=False):
"""Get changes since a change number
:param change_number: change number to use as stating point
:type change_number: :class:`int`
:param app_changes: whether to inclued app changes
:type app_changes: :class:`bool`
:param package_changes: whether to inclued package changes
:type package_changes: :class:`bool`
:return: `CMsgClientPICSChangesSinceResponse <https://github.com/ValvePython/steam/blob/39627fe883feeed2206016bacd92cf0e4580ead6/protobufs/steammessages_clientserver.proto#L1171-L1191>`_
:rtype: proto message instance, or :class:`None` on timeout
"""
return self.send_job_and_wait(MsgProto(EMsg.ClientPICSChangesSinceRequest),
{
'since_change_number': change_number,
'send_app_info_changes': app_changes,
'send_package_info_changes': package_changes,
},
timeout=15
) | def function[get_changes_since, parameter[self, change_number, app_changes, package_changes]]:
constant[Get changes since a change number
:param change_number: change number to use as stating point
:type change_number: :class:`int`
:param app_changes: whether to inclued app changes
:type app_changes: :class:`bool`
:param package_changes: whether to inclued package changes
:type package_changes: :class:`bool`
:return: `CMsgClientPICSChangesSinceResponse <https://github.com/ValvePython/steam/blob/39627fe883feeed2206016bacd92cf0e4580ead6/protobufs/steammessages_clientserver.proto#L1171-L1191>`_
:rtype: proto message instance, or :class:`None` on timeout
]
return[call[name[self].send_job_and_wait, parameter[call[name[MsgProto], parameter[name[EMsg].ClientPICSChangesSinceRequest]], dictionary[[<ast.Constant object at 0x7da1b23838b0>, <ast.Constant object at 0x7da1b2381690>, <ast.Constant object at 0x7da1b2381660>], [<ast.Name object at 0x7da1b2381720>, <ast.Name object at 0x7da1b2383880>, <ast.Name object at 0x7da1b2381e40>]]]]] | keyword[def] identifier[get_changes_since] ( identifier[self] , identifier[change_number] , identifier[app_changes] = keyword[True] , identifier[package_changes] = keyword[False] ):
literal[string]
keyword[return] identifier[self] . identifier[send_job_and_wait] ( identifier[MsgProto] ( identifier[EMsg] . identifier[ClientPICSChangesSinceRequest] ),
{
literal[string] : identifier[change_number] ,
literal[string] : identifier[app_changes] ,
literal[string] : identifier[package_changes] ,
},
identifier[timeout] = literal[int]
) | def get_changes_since(self, change_number, app_changes=True, package_changes=False):
"""Get changes since a change number
:param change_number: change number to use as stating point
:type change_number: :class:`int`
:param app_changes: whether to inclued app changes
:type app_changes: :class:`bool`
:param package_changes: whether to inclued package changes
:type package_changes: :class:`bool`
:return: `CMsgClientPICSChangesSinceResponse <https://github.com/ValvePython/steam/blob/39627fe883feeed2206016bacd92cf0e4580ead6/protobufs/steammessages_clientserver.proto#L1171-L1191>`_
:rtype: proto message instance, or :class:`None` on timeout
"""
return self.send_job_and_wait(MsgProto(EMsg.ClientPICSChangesSinceRequest), {'since_change_number': change_number, 'send_app_info_changes': app_changes, 'send_package_info_changes': package_changes}, timeout=15) |
def observe(self, amount):
"""Observe the given amount."""
self._sum.inc(amount)
for i, bound in enumerate(self._upper_bounds):
if amount <= bound:
self._buckets[i].inc(1)
break | def function[observe, parameter[self, amount]]:
constant[Observe the given amount.]
call[name[self]._sum.inc, parameter[name[amount]]]
for taget[tuple[[<ast.Name object at 0x7da1b21e3490>, <ast.Name object at 0x7da1b21e0af0>]]] in starred[call[name[enumerate], parameter[name[self]._upper_bounds]]] begin[:]
if compare[name[amount] less_or_equal[<=] name[bound]] begin[:]
call[call[name[self]._buckets][name[i]].inc, parameter[constant[1]]]
break | keyword[def] identifier[observe] ( identifier[self] , identifier[amount] ):
literal[string]
identifier[self] . identifier[_sum] . identifier[inc] ( identifier[amount] )
keyword[for] identifier[i] , identifier[bound] keyword[in] identifier[enumerate] ( identifier[self] . identifier[_upper_bounds] ):
keyword[if] identifier[amount] <= identifier[bound] :
identifier[self] . identifier[_buckets] [ identifier[i] ]. identifier[inc] ( literal[int] )
keyword[break] | def observe(self, amount):
"""Observe the given amount."""
self._sum.inc(amount)
for (i, bound) in enumerate(self._upper_bounds):
if amount <= bound:
self._buckets[i].inc(1)
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] |
def getAttr(self, node, name, nsuri=None, default=join):
"""Return the value of the attribute named 'name' with the
optional nsuri, or the default if one is specified. If
nsuri is not specified, an attribute that matches the
given name will be returned regardless of namespace."""
if nsuri is None:
result = node._attrs.get(name, None)
if result is None:
for item in node._attrsNS.keys():
if item[1] == name:
result = node._attrsNS[item]
break
else:
result = node._attrsNS.get((nsuri, name), None)
if result is not None:
return result.value
if default is not join:
return default
return '' | def function[getAttr, parameter[self, node, name, nsuri, default]]:
constant[Return the value of the attribute named 'name' with the
optional nsuri, or the default if one is specified. If
nsuri is not specified, an attribute that matches the
given name will be returned regardless of namespace.]
if compare[name[nsuri] is constant[None]] begin[:]
variable[result] assign[=] call[name[node]._attrs.get, parameter[name[name], constant[None]]]
if compare[name[result] is constant[None]] begin[:]
for taget[name[item]] in starred[call[name[node]._attrsNS.keys, parameter[]]] begin[:]
if compare[call[name[item]][constant[1]] equal[==] name[name]] begin[:]
variable[result] assign[=] call[name[node]._attrsNS][name[item]]
break
if compare[name[result] is_not constant[None]] begin[:]
return[name[result].value]
if compare[name[default] is_not name[join]] begin[:]
return[name[default]]
return[constant[]] | keyword[def] identifier[getAttr] ( identifier[self] , identifier[node] , identifier[name] , identifier[nsuri] = keyword[None] , identifier[default] = identifier[join] ):
literal[string]
keyword[if] identifier[nsuri] keyword[is] keyword[None] :
identifier[result] = identifier[node] . identifier[_attrs] . identifier[get] ( identifier[name] , keyword[None] )
keyword[if] identifier[result] keyword[is] keyword[None] :
keyword[for] identifier[item] keyword[in] identifier[node] . identifier[_attrsNS] . identifier[keys] ():
keyword[if] identifier[item] [ literal[int] ]== identifier[name] :
identifier[result] = identifier[node] . identifier[_attrsNS] [ identifier[item] ]
keyword[break]
keyword[else] :
identifier[result] = identifier[node] . identifier[_attrsNS] . identifier[get] (( identifier[nsuri] , identifier[name] ), keyword[None] )
keyword[if] identifier[result] keyword[is] keyword[not] keyword[None] :
keyword[return] identifier[result] . identifier[value]
keyword[if] identifier[default] keyword[is] keyword[not] identifier[join] :
keyword[return] identifier[default]
keyword[return] literal[string] | def getAttr(self, node, name, nsuri=None, default=join):
"""Return the value of the attribute named 'name' with the
optional nsuri, or the default if one is specified. If
nsuri is not specified, an attribute that matches the
given name will be returned regardless of namespace."""
if nsuri is None:
result = node._attrs.get(name, None)
if result is None:
for item in node._attrsNS.keys():
if item[1] == name:
result = node._attrsNS[item]
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['item']] # depends on [control=['if'], data=['result']] # depends on [control=['if'], data=[]]
else:
result = node._attrsNS.get((nsuri, name), None)
if result is not None:
return result.value # depends on [control=['if'], data=['result']]
if default is not join:
return default # depends on [control=['if'], data=['default']]
return '' |
def GeneralGuinierPorod(q, factor, *args, **kwargs):
"""Empirical generalized multi-part Guinier-Porod scattering
Inputs:
-------
``q``: independent variable
``factor``: factor for the first branch
other arguments (*args): the defining arguments of the consecutive
parts: radius of gyration (``Rg``) and dimensionality
parameter (``s``) for Guinier and exponent (``alpha``) for
power-law parts.
supported keyword arguments:
``startswithguinier``: True if the first segment is a Guinier-type
scattering (this is the default) or False if it is a power-law
Formula:
--------
The intensity is a piecewise function with continuous first derivatives.
The separating points in ``q`` between the consecutive parts and the
intensity factors of them (except the first) are determined from
conditions of smoothness (continuity of the function and its first
derivative) at the border points of the intervals. Guinier-type
(``G*q**(3-s)*exp(-q^2*Rg1^2/s)``) and Power-law type (``A*q^alpha``)
parts follow each other in alternating sequence. The exact number of
parts is determined from the number of positional arguments (*args).
Literature:
-----------
B. Hammouda: A new Guinier-Porod model. J. Appl. Crystallogr. (2010) 43,
716-719.
"""
if kwargs.get('startswithguinier', True):
funcs = [lambda q, A = factor:GeneralGuinier(q, A, args[0], args[1])]
i = 2
guiniernext = False
else:
funcs = [lambda q, A = factor: Powerlaw(q, A, args[0])]
i = 1
guiniernext = True
indices = np.ones_like(q, dtype=np.bool)
constraints = []
while i < len(args):
if guiniernext:
# args[i] is a radius of gyration, args[i+1] is a dimensionality parameter, args[i-1] is a power-law exponent
qsep = _PGgen_qsep(args[i - 1], args[i], args[i + 1])
factor = _PGgen_G(args[i - 1], args[i], args[i + 1], factor)
funcs.append(lambda q, G=factor, Rg=args[i], s=args[i + 1]: GeneralGuinier(q, G, Rg, s))
guiniernext = False
i += 2
else:
# args[i] is an exponent, args[i-2] is a radius of gyration, args[i-1] is a dimensionality parameter
qsep = _PGgen_qsep(args[i], args[i - 2], args[i - 1])
factor = _PGgen_A(args[i], args[i - 2], args[i - 1], factor)
funcs.append(lambda q, a=factor, alpha=args[i]: a * q ** alpha)
guiniernext = True
i += 1
# this belongs to the previous
constraints.append(indices & (q < qsep))
indices[q < qsep] = False
constraints.append(indices)
return np.piecewise(q, constraints, funcs) | def function[GeneralGuinierPorod, parameter[q, factor]]:
constant[Empirical generalized multi-part Guinier-Porod scattering
Inputs:
-------
``q``: independent variable
``factor``: factor for the first branch
other arguments (*args): the defining arguments of the consecutive
parts: radius of gyration (``Rg``) and dimensionality
parameter (``s``) for Guinier and exponent (``alpha``) for
power-law parts.
supported keyword arguments:
``startswithguinier``: True if the first segment is a Guinier-type
scattering (this is the default) or False if it is a power-law
Formula:
--------
The intensity is a piecewise function with continuous first derivatives.
The separating points in ``q`` between the consecutive parts and the
intensity factors of them (except the first) are determined from
conditions of smoothness (continuity of the function and its first
derivative) at the border points of the intervals. Guinier-type
(``G*q**(3-s)*exp(-q^2*Rg1^2/s)``) and Power-law type (``A*q^alpha``)
parts follow each other in alternating sequence. The exact number of
parts is determined from the number of positional arguments (*args).
Literature:
-----------
B. Hammouda: A new Guinier-Porod model. J. Appl. Crystallogr. (2010) 43,
716-719.
]
if call[name[kwargs].get, parameter[constant[startswithguinier], constant[True]]] begin[:]
variable[funcs] assign[=] list[[<ast.Lambda object at 0x7da1b10e5de0>]]
variable[i] assign[=] constant[2]
variable[guiniernext] assign[=] constant[False]
variable[indices] assign[=] call[name[np].ones_like, parameter[name[q]]]
variable[constraints] assign[=] list[[]]
while compare[name[i] less[<] call[name[len], parameter[name[args]]]] begin[:]
if name[guiniernext] begin[:]
variable[qsep] assign[=] call[name[_PGgen_qsep], parameter[call[name[args]][binary_operation[name[i] - constant[1]]], call[name[args]][name[i]], call[name[args]][binary_operation[name[i] + constant[1]]]]]
variable[factor] assign[=] call[name[_PGgen_G], parameter[call[name[args]][binary_operation[name[i] - constant[1]]], call[name[args]][name[i]], call[name[args]][binary_operation[name[i] + constant[1]]], name[factor]]]
call[name[funcs].append, parameter[<ast.Lambda object at 0x7da20c76f910>]]
variable[guiniernext] assign[=] constant[False]
<ast.AugAssign object at 0x7da20c76ceb0>
call[name[constraints].append, parameter[binary_operation[name[indices] <ast.BitAnd object at 0x7da2590d6b60> compare[name[q] less[<] name[qsep]]]]]
call[name[indices]][compare[name[q] less[<] name[qsep]]] assign[=] constant[False]
call[name[constraints].append, parameter[name[indices]]]
return[call[name[np].piecewise, parameter[name[q], name[constraints], name[funcs]]]] | keyword[def] identifier[GeneralGuinierPorod] ( identifier[q] , identifier[factor] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
keyword[if] identifier[kwargs] . identifier[get] ( literal[string] , keyword[True] ):
identifier[funcs] =[ keyword[lambda] identifier[q] , identifier[A] = identifier[factor] : identifier[GeneralGuinier] ( identifier[q] , identifier[A] , identifier[args] [ literal[int] ], identifier[args] [ literal[int] ])]
identifier[i] = literal[int]
identifier[guiniernext] = keyword[False]
keyword[else] :
identifier[funcs] =[ keyword[lambda] identifier[q] , identifier[A] = identifier[factor] : identifier[Powerlaw] ( identifier[q] , identifier[A] , identifier[args] [ literal[int] ])]
identifier[i] = literal[int]
identifier[guiniernext] = keyword[True]
identifier[indices] = identifier[np] . identifier[ones_like] ( identifier[q] , identifier[dtype] = identifier[np] . identifier[bool] )
identifier[constraints] =[]
keyword[while] identifier[i] < identifier[len] ( identifier[args] ):
keyword[if] identifier[guiniernext] :
identifier[qsep] = identifier[_PGgen_qsep] ( identifier[args] [ identifier[i] - literal[int] ], identifier[args] [ identifier[i] ], identifier[args] [ identifier[i] + literal[int] ])
identifier[factor] = identifier[_PGgen_G] ( identifier[args] [ identifier[i] - literal[int] ], identifier[args] [ identifier[i] ], identifier[args] [ identifier[i] + literal[int] ], identifier[factor] )
identifier[funcs] . identifier[append] ( keyword[lambda] identifier[q] , identifier[G] = identifier[factor] , identifier[Rg] = identifier[args] [ identifier[i] ], identifier[s] = identifier[args] [ identifier[i] + literal[int] ]: identifier[GeneralGuinier] ( identifier[q] , identifier[G] , identifier[Rg] , identifier[s] ))
identifier[guiniernext] = keyword[False]
identifier[i] += literal[int]
keyword[else] :
identifier[qsep] = identifier[_PGgen_qsep] ( identifier[args] [ identifier[i] ], identifier[args] [ identifier[i] - literal[int] ], identifier[args] [ identifier[i] - literal[int] ])
identifier[factor] = identifier[_PGgen_A] ( identifier[args] [ identifier[i] ], identifier[args] [ identifier[i] - literal[int] ], identifier[args] [ identifier[i] - literal[int] ], identifier[factor] )
identifier[funcs] . identifier[append] ( keyword[lambda] identifier[q] , identifier[a] = identifier[factor] , identifier[alpha] = identifier[args] [ identifier[i] ]: identifier[a] * identifier[q] ** identifier[alpha] )
identifier[guiniernext] = keyword[True]
identifier[i] += literal[int]
identifier[constraints] . identifier[append] ( identifier[indices] &( identifier[q] < identifier[qsep] ))
identifier[indices] [ identifier[q] < identifier[qsep] ]= keyword[False]
identifier[constraints] . identifier[append] ( identifier[indices] )
keyword[return] identifier[np] . identifier[piecewise] ( identifier[q] , identifier[constraints] , identifier[funcs] ) | def GeneralGuinierPorod(q, factor, *args, **kwargs):
"""Empirical generalized multi-part Guinier-Porod scattering
Inputs:
-------
``q``: independent variable
``factor``: factor for the first branch
other arguments (*args): the defining arguments of the consecutive
parts: radius of gyration (``Rg``) and dimensionality
parameter (``s``) for Guinier and exponent (``alpha``) for
power-law parts.
supported keyword arguments:
``startswithguinier``: True if the first segment is a Guinier-type
scattering (this is the default) or False if it is a power-law
Formula:
--------
The intensity is a piecewise function with continuous first derivatives.
The separating points in ``q`` between the consecutive parts and the
intensity factors of them (except the first) are determined from
conditions of smoothness (continuity of the function and its first
derivative) at the border points of the intervals. Guinier-type
(``G*q**(3-s)*exp(-q^2*Rg1^2/s)``) and Power-law type (``A*q^alpha``)
parts follow each other in alternating sequence. The exact number of
parts is determined from the number of positional arguments (*args).
Literature:
-----------
B. Hammouda: A new Guinier-Porod model. J. Appl. Crystallogr. (2010) 43,
716-719.
"""
if kwargs.get('startswithguinier', True):
funcs = [lambda q, A=factor: GeneralGuinier(q, A, args[0], args[1])]
i = 2
guiniernext = False # depends on [control=['if'], data=[]]
else:
funcs = [lambda q, A=factor: Powerlaw(q, A, args[0])]
i = 1
guiniernext = True
indices = np.ones_like(q, dtype=np.bool)
constraints = []
while i < len(args):
if guiniernext:
# args[i] is a radius of gyration, args[i+1] is a dimensionality parameter, args[i-1] is a power-law exponent
qsep = _PGgen_qsep(args[i - 1], args[i], args[i + 1])
factor = _PGgen_G(args[i - 1], args[i], args[i + 1], factor)
funcs.append(lambda q, G=factor, Rg=args[i], s=args[i + 1]: GeneralGuinier(q, G, Rg, s))
guiniernext = False
i += 2 # depends on [control=['if'], data=[]]
else:
# args[i] is an exponent, args[i-2] is a radius of gyration, args[i-1] is a dimensionality parameter
qsep = _PGgen_qsep(args[i], args[i - 2], args[i - 1])
factor = _PGgen_A(args[i], args[i - 2], args[i - 1], factor)
funcs.append(lambda q, a=factor, alpha=args[i]: a * q ** alpha)
guiniernext = True
i += 1
# this belongs to the previous
constraints.append(indices & (q < qsep))
indices[q < qsep] = False # depends on [control=['while'], data=['i']]
constraints.append(indices)
return np.piecewise(q, constraints, funcs) |
def listscripts(self):
"""List available scripts.
See MANAGESIEVE specifications, section 2.7
:returns: a 2-uple (active script, [script1, ...])
"""
code, data, listing = self.__send_command(
"LISTSCRIPTS", withcontent=True)
if code == "NO":
return None
ret = []
active_script = None
for l in listing.splitlines():
if self.__size_expr.match(l):
continue
m = re.match(br'"([^"]+)"\s*(.+)', l)
if m is None:
ret += [l.strip(b'"').decode("utf-8")]
continue
script = m.group(1).decode("utf-8")
if self.__active_expr.match(m.group(2)):
active_script = script
continue
ret += [script]
self.__dprint(ret)
return (active_script, ret) | def function[listscripts, parameter[self]]:
constant[List available scripts.
See MANAGESIEVE specifications, section 2.7
:returns: a 2-uple (active script, [script1, ...])
]
<ast.Tuple object at 0x7da2041db880> assign[=] call[name[self].__send_command, parameter[constant[LISTSCRIPTS]]]
if compare[name[code] equal[==] constant[NO]] begin[:]
return[constant[None]]
variable[ret] assign[=] list[[]]
variable[active_script] assign[=] constant[None]
for taget[name[l]] in starred[call[name[listing].splitlines, parameter[]]] begin[:]
if call[name[self].__size_expr.match, parameter[name[l]]] begin[:]
continue
variable[m] assign[=] call[name[re].match, parameter[constant[b'"([^"]+)"\\s*(.+)'], name[l]]]
if compare[name[m] is constant[None]] begin[:]
<ast.AugAssign object at 0x7da2041d97b0>
continue
variable[script] assign[=] call[call[name[m].group, parameter[constant[1]]].decode, parameter[constant[utf-8]]]
if call[name[self].__active_expr.match, parameter[call[name[m].group, parameter[constant[2]]]]] begin[:]
variable[active_script] assign[=] name[script]
continue
<ast.AugAssign object at 0x7da2041dadd0>
call[name[self].__dprint, parameter[name[ret]]]
return[tuple[[<ast.Name object at 0x7da2041da1d0>, <ast.Name object at 0x7da2041dbfa0>]]] | keyword[def] identifier[listscripts] ( identifier[self] ):
literal[string]
identifier[code] , identifier[data] , identifier[listing] = identifier[self] . identifier[__send_command] (
literal[string] , identifier[withcontent] = keyword[True] )
keyword[if] identifier[code] == literal[string] :
keyword[return] keyword[None]
identifier[ret] =[]
identifier[active_script] = keyword[None]
keyword[for] identifier[l] keyword[in] identifier[listing] . identifier[splitlines] ():
keyword[if] identifier[self] . identifier[__size_expr] . identifier[match] ( identifier[l] ):
keyword[continue]
identifier[m] = identifier[re] . identifier[match] ( literal[string] , identifier[l] )
keyword[if] identifier[m] keyword[is] keyword[None] :
identifier[ret] +=[ identifier[l] . identifier[strip] ( literal[string] ). identifier[decode] ( literal[string] )]
keyword[continue]
identifier[script] = identifier[m] . identifier[group] ( literal[int] ). identifier[decode] ( literal[string] )
keyword[if] identifier[self] . identifier[__active_expr] . identifier[match] ( identifier[m] . identifier[group] ( literal[int] )):
identifier[active_script] = identifier[script]
keyword[continue]
identifier[ret] +=[ identifier[script] ]
identifier[self] . identifier[__dprint] ( identifier[ret] )
keyword[return] ( identifier[active_script] , identifier[ret] ) | def listscripts(self):
"""List available scripts.
See MANAGESIEVE specifications, section 2.7
:returns: a 2-uple (active script, [script1, ...])
"""
(code, data, listing) = self.__send_command('LISTSCRIPTS', withcontent=True)
if code == 'NO':
return None # depends on [control=['if'], data=[]]
ret = []
active_script = None
for l in listing.splitlines():
if self.__size_expr.match(l):
continue # depends on [control=['if'], data=[]]
m = re.match(b'"([^"]+)"\\s*(.+)', l)
if m is None:
ret += [l.strip(b'"').decode('utf-8')]
continue # depends on [control=['if'], data=[]]
script = m.group(1).decode('utf-8')
if self.__active_expr.match(m.group(2)):
active_script = script
continue # depends on [control=['if'], data=[]]
ret += [script] # depends on [control=['for'], data=['l']]
self.__dprint(ret)
return (active_script, ret) |
def get_dilation_rates(hparams, width):
"""Get a list of valid dilation rates.
Args:
hparams: HParams.
width: spatial dimension. Ensures that the effective filter size is
not larger than the spatial dimension.
Returns:
allowed_dilations: A list of dilation rates.
"""
# dil_rate=1 means no dilation.
allowed_dilations = [[1]*5]
apply_dilations = hparams.get("latent_apply_dilations", False)
dilation_rates = hparams.get("latent_dilation_rates", [1, 3])
if apply_dilations:
for rate in dilation_rates:
# k + (k - 1) * rate but k is harcoded to be 3 everywhere.
filter_size = 3 + 2 * rate
if filter_size <= width:
curr_dilation = [1, 1, rate+1, rate+1, 1]
allowed_dilations.append(curr_dilation)
return allowed_dilations | def function[get_dilation_rates, parameter[hparams, width]]:
constant[Get a list of valid dilation rates.
Args:
hparams: HParams.
width: spatial dimension. Ensures that the effective filter size is
not larger than the spatial dimension.
Returns:
allowed_dilations: A list of dilation rates.
]
variable[allowed_dilations] assign[=] list[[<ast.BinOp object at 0x7da1b2088c10>]]
variable[apply_dilations] assign[=] call[name[hparams].get, parameter[constant[latent_apply_dilations], constant[False]]]
variable[dilation_rates] assign[=] call[name[hparams].get, parameter[constant[latent_dilation_rates], list[[<ast.Constant object at 0x7da1b2089a20>, <ast.Constant object at 0x7da1b208bb50>]]]]
if name[apply_dilations] begin[:]
for taget[name[rate]] in starred[name[dilation_rates]] begin[:]
variable[filter_size] assign[=] binary_operation[constant[3] + binary_operation[constant[2] * name[rate]]]
if compare[name[filter_size] less_or_equal[<=] name[width]] begin[:]
variable[curr_dilation] assign[=] list[[<ast.Constant object at 0x7da1b208bb20>, <ast.Constant object at 0x7da1b2089c00>, <ast.BinOp object at 0x7da1b208b520>, <ast.BinOp object at 0x7da1b20880d0>, <ast.Constant object at 0x7da1b20895a0>]]
call[name[allowed_dilations].append, parameter[name[curr_dilation]]]
return[name[allowed_dilations]] | keyword[def] identifier[get_dilation_rates] ( identifier[hparams] , identifier[width] ):
literal[string]
identifier[allowed_dilations] =[[ literal[int] ]* literal[int] ]
identifier[apply_dilations] = identifier[hparams] . identifier[get] ( literal[string] , keyword[False] )
identifier[dilation_rates] = identifier[hparams] . identifier[get] ( literal[string] ,[ literal[int] , literal[int] ])
keyword[if] identifier[apply_dilations] :
keyword[for] identifier[rate] keyword[in] identifier[dilation_rates] :
identifier[filter_size] = literal[int] + literal[int] * identifier[rate]
keyword[if] identifier[filter_size] <= identifier[width] :
identifier[curr_dilation] =[ literal[int] , literal[int] , identifier[rate] + literal[int] , identifier[rate] + literal[int] , literal[int] ]
identifier[allowed_dilations] . identifier[append] ( identifier[curr_dilation] )
keyword[return] identifier[allowed_dilations] | def get_dilation_rates(hparams, width):
"""Get a list of valid dilation rates.
Args:
hparams: HParams.
width: spatial dimension. Ensures that the effective filter size is
not larger than the spatial dimension.
Returns:
allowed_dilations: A list of dilation rates.
"""
# dil_rate=1 means no dilation.
allowed_dilations = [[1] * 5]
apply_dilations = hparams.get('latent_apply_dilations', False)
dilation_rates = hparams.get('latent_dilation_rates', [1, 3])
if apply_dilations:
for rate in dilation_rates:
# k + (k - 1) * rate but k is harcoded to be 3 everywhere.
filter_size = 3 + 2 * rate
if filter_size <= width:
curr_dilation = [1, 1, rate + 1, rate + 1, 1]
allowed_dilations.append(curr_dilation) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['rate']] # depends on [control=['if'], data=[]]
return allowed_dilations |
def sleep(self, seconds=0.05, dt=0.01):
"""
A "smooth" version of time.sleep(): waits for the time to pass but
processes events every dt as well.
Note this requires that the object is either a window or embedded
somewhere within a window.
"""
t0 = _t.time()
while _t.time()-t0 < seconds:
# Pause a bit to avoid heavy CPU
_t.sleep(dt)
# process events
self.process_events() | def function[sleep, parameter[self, seconds, dt]]:
constant[
A "smooth" version of time.sleep(): waits for the time to pass but
processes events every dt as well.
Note this requires that the object is either a window or embedded
somewhere within a window.
]
variable[t0] assign[=] call[name[_t].time, parameter[]]
while compare[binary_operation[call[name[_t].time, parameter[]] - name[t0]] less[<] name[seconds]] begin[:]
call[name[_t].sleep, parameter[name[dt]]]
call[name[self].process_events, parameter[]] | keyword[def] identifier[sleep] ( identifier[self] , identifier[seconds] = literal[int] , identifier[dt] = literal[int] ):
literal[string]
identifier[t0] = identifier[_t] . identifier[time] ()
keyword[while] identifier[_t] . identifier[time] ()- identifier[t0] < identifier[seconds] :
identifier[_t] . identifier[sleep] ( identifier[dt] )
identifier[self] . identifier[process_events] () | def sleep(self, seconds=0.05, dt=0.01):
"""
A "smooth" version of time.sleep(): waits for the time to pass but
processes events every dt as well.
Note this requires that the object is either a window or embedded
somewhere within a window.
"""
t0 = _t.time()
while _t.time() - t0 < seconds:
# Pause a bit to avoid heavy CPU
_t.sleep(dt)
# process events
self.process_events() # depends on [control=['while'], data=[]] |
def _figure_data(self, plot, fmt='html', doc=None, as_script=False, **kwargs):
"""
Given a plot instance, an output format and an optional bokeh
document, return the corresponding data. If as_script is True,
the content will be split in an HTML and a JS component.
"""
model = plot.state
if doc is None:
doc = plot.document
else:
plot.document = doc
for m in model.references():
m._document = None
doc.theme = self.theme
doc.add_root(model)
# Bokeh raises warnings about duplicate tools and empty subplots
# but at the holoviews level these are not issues
logger = logging.getLogger(bokeh.core.validation.check.__file__)
logger.disabled = True
if fmt == 'png':
from bokeh.io.export import get_screenshot_as_png
img = get_screenshot_as_png(plot.state, None)
imgByteArr = BytesIO()
img.save(imgByteArr, format='PNG')
data = imgByteArr.getvalue()
if as_script:
b64 = base64.b64encode(data).decode("utf-8")
(mime_type, tag) = MIME_TYPES[fmt], HTML_TAGS[fmt]
src = HTML_TAGS['base64'].format(mime_type=mime_type, b64=b64)
div = tag.format(src=src, mime_type=mime_type, css='')
js = ''
else:
try:
with silence_warnings(EMPTY_LAYOUT, MISSING_RENDERERS):
js, div, _ = notebook_content(model)
html = NOTEBOOK_DIV.format(plot_script=js, plot_div=div)
data = encode_utf8(html)
doc.hold()
except:
logger.disabled = False
raise
logger.disabled = False
plot.document = doc
if as_script:
return div, js
return data | def function[_figure_data, parameter[self, plot, fmt, doc, as_script]]:
constant[
Given a plot instance, an output format and an optional bokeh
document, return the corresponding data. If as_script is True,
the content will be split in an HTML and a JS component.
]
variable[model] assign[=] name[plot].state
if compare[name[doc] is constant[None]] begin[:]
variable[doc] assign[=] name[plot].document
for taget[name[m]] in starred[call[name[model].references, parameter[]]] begin[:]
name[m]._document assign[=] constant[None]
name[doc].theme assign[=] name[self].theme
call[name[doc].add_root, parameter[name[model]]]
variable[logger] assign[=] call[name[logging].getLogger, parameter[name[bokeh].core.validation.check.__file__]]
name[logger].disabled assign[=] constant[True]
if compare[name[fmt] equal[==] constant[png]] begin[:]
from relative_module[bokeh.io.export] import module[get_screenshot_as_png]
variable[img] assign[=] call[name[get_screenshot_as_png], parameter[name[plot].state, constant[None]]]
variable[imgByteArr] assign[=] call[name[BytesIO], parameter[]]
call[name[img].save, parameter[name[imgByteArr]]]
variable[data] assign[=] call[name[imgByteArr].getvalue, parameter[]]
if name[as_script] begin[:]
variable[b64] assign[=] call[call[name[base64].b64encode, parameter[name[data]]].decode, parameter[constant[utf-8]]]
<ast.Tuple object at 0x7da18f00ee30> assign[=] tuple[[<ast.Subscript object at 0x7da18f00e560>, <ast.Subscript object at 0x7da18f00e110>]]
variable[src] assign[=] call[call[name[HTML_TAGS]][constant[base64]].format, parameter[]]
variable[div] assign[=] call[name[tag].format, parameter[]]
variable[js] assign[=] constant[]
name[plot].document assign[=] name[doc]
if name[as_script] begin[:]
return[tuple[[<ast.Name object at 0x7da18dc07fd0>, <ast.Name object at 0x7da18dc04b20>]]]
return[name[data]] | keyword[def] identifier[_figure_data] ( identifier[self] , identifier[plot] , identifier[fmt] = literal[string] , identifier[doc] = keyword[None] , identifier[as_script] = keyword[False] ,** identifier[kwargs] ):
literal[string]
identifier[model] = identifier[plot] . identifier[state]
keyword[if] identifier[doc] keyword[is] keyword[None] :
identifier[doc] = identifier[plot] . identifier[document]
keyword[else] :
identifier[plot] . identifier[document] = identifier[doc]
keyword[for] identifier[m] keyword[in] identifier[model] . identifier[references] ():
identifier[m] . identifier[_document] = keyword[None]
identifier[doc] . identifier[theme] = identifier[self] . identifier[theme]
identifier[doc] . identifier[add_root] ( identifier[model] )
identifier[logger] = identifier[logging] . identifier[getLogger] ( identifier[bokeh] . identifier[core] . identifier[validation] . identifier[check] . identifier[__file__] )
identifier[logger] . identifier[disabled] = keyword[True]
keyword[if] identifier[fmt] == literal[string] :
keyword[from] identifier[bokeh] . identifier[io] . identifier[export] keyword[import] identifier[get_screenshot_as_png]
identifier[img] = identifier[get_screenshot_as_png] ( identifier[plot] . identifier[state] , keyword[None] )
identifier[imgByteArr] = identifier[BytesIO] ()
identifier[img] . identifier[save] ( identifier[imgByteArr] , identifier[format] = literal[string] )
identifier[data] = identifier[imgByteArr] . identifier[getvalue] ()
keyword[if] identifier[as_script] :
identifier[b64] = identifier[base64] . identifier[b64encode] ( identifier[data] ). identifier[decode] ( literal[string] )
( identifier[mime_type] , identifier[tag] )= identifier[MIME_TYPES] [ identifier[fmt] ], identifier[HTML_TAGS] [ identifier[fmt] ]
identifier[src] = identifier[HTML_TAGS] [ literal[string] ]. identifier[format] ( identifier[mime_type] = identifier[mime_type] , identifier[b64] = identifier[b64] )
identifier[div] = identifier[tag] . identifier[format] ( identifier[src] = identifier[src] , identifier[mime_type] = identifier[mime_type] , identifier[css] = literal[string] )
identifier[js] = literal[string]
keyword[else] :
keyword[try] :
keyword[with] identifier[silence_warnings] ( identifier[EMPTY_LAYOUT] , identifier[MISSING_RENDERERS] ):
identifier[js] , identifier[div] , identifier[_] = identifier[notebook_content] ( identifier[model] )
identifier[html] = identifier[NOTEBOOK_DIV] . identifier[format] ( identifier[plot_script] = identifier[js] , identifier[plot_div] = identifier[div] )
identifier[data] = identifier[encode_utf8] ( identifier[html] )
identifier[doc] . identifier[hold] ()
keyword[except] :
identifier[logger] . identifier[disabled] = keyword[False]
keyword[raise]
identifier[logger] . identifier[disabled] = keyword[False]
identifier[plot] . identifier[document] = identifier[doc]
keyword[if] identifier[as_script] :
keyword[return] identifier[div] , identifier[js]
keyword[return] identifier[data] | def _figure_data(self, plot, fmt='html', doc=None, as_script=False, **kwargs):
"""
Given a plot instance, an output format and an optional bokeh
document, return the corresponding data. If as_script is True,
the content will be split in an HTML and a JS component.
"""
model = plot.state
if doc is None:
doc = plot.document # depends on [control=['if'], data=['doc']]
else:
plot.document = doc
for m in model.references():
m._document = None # depends on [control=['for'], data=['m']]
doc.theme = self.theme
doc.add_root(model)
# Bokeh raises warnings about duplicate tools and empty subplots
# but at the holoviews level these are not issues
logger = logging.getLogger(bokeh.core.validation.check.__file__)
logger.disabled = True
if fmt == 'png':
from bokeh.io.export import get_screenshot_as_png
img = get_screenshot_as_png(plot.state, None)
imgByteArr = BytesIO()
img.save(imgByteArr, format='PNG')
data = imgByteArr.getvalue()
if as_script:
b64 = base64.b64encode(data).decode('utf-8')
(mime_type, tag) = (MIME_TYPES[fmt], HTML_TAGS[fmt])
src = HTML_TAGS['base64'].format(mime_type=mime_type, b64=b64)
div = tag.format(src=src, mime_type=mime_type, css='')
js = '' # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['fmt']]
else:
try:
with silence_warnings(EMPTY_LAYOUT, MISSING_RENDERERS):
(js, div, _) = notebook_content(model) # depends on [control=['with'], data=[]]
html = NOTEBOOK_DIV.format(plot_script=js, plot_div=div)
data = encode_utf8(html)
doc.hold() # depends on [control=['try'], data=[]]
except:
logger.disabled = False
raise # depends on [control=['except'], data=[]]
logger.disabled = False
plot.document = doc
if as_script:
return (div, js) # depends on [control=['if'], data=[]]
return data |
def previous_active_pane(self):
"""
The previous active :class:`.Pane` or `None` if unknown.
"""
p = self._prev_active_pane and self._prev_active_pane()
# Only return when this pane actually still exists in the current
# window.
if p and p in self.panes:
return p | def function[previous_active_pane, parameter[self]]:
constant[
The previous active :class:`.Pane` or `None` if unknown.
]
variable[p] assign[=] <ast.BoolOp object at 0x7da20c6ab490>
if <ast.BoolOp object at 0x7da20c6a9d80> begin[:]
return[name[p]] | keyword[def] identifier[previous_active_pane] ( identifier[self] ):
literal[string]
identifier[p] = identifier[self] . identifier[_prev_active_pane] keyword[and] identifier[self] . identifier[_prev_active_pane] ()
keyword[if] identifier[p] keyword[and] identifier[p] keyword[in] identifier[self] . identifier[panes] :
keyword[return] identifier[p] | def previous_active_pane(self):
"""
The previous active :class:`.Pane` or `None` if unknown.
"""
p = self._prev_active_pane and self._prev_active_pane()
# Only return when this pane actually still exists in the current
# window.
if p and p in self.panes:
return p # depends on [control=['if'], data=[]] |
def nwise(iterable, n):
"""
Iterate through a sequence with a defined length window
>>> list(nwise(range(8), 3))
[(0, 1, 2), (1, 2, 3), (2, 3, 4), (3, 4, 5), (5, 6, 7)]
>>> list(nwise(range(3), 5))
[]
Parameters
----------
iterable
n : length of each sequence
Yields
------
Tuples of length n
"""
iters = itertools.tee(iterable, n)
iters = (itertools.islice(it, i, None) for i, it in enumerate(iters))
return itertools.izip(*iters) | def function[nwise, parameter[iterable, n]]:
constant[
Iterate through a sequence with a defined length window
>>> list(nwise(range(8), 3))
[(0, 1, 2), (1, 2, 3), (2, 3, 4), (3, 4, 5), (5, 6, 7)]
>>> list(nwise(range(3), 5))
[]
Parameters
----------
iterable
n : length of each sequence
Yields
------
Tuples of length n
]
variable[iters] assign[=] call[name[itertools].tee, parameter[name[iterable], name[n]]]
variable[iters] assign[=] <ast.GeneratorExp object at 0x7da1b0417f10>
return[call[name[itertools].izip, parameter[<ast.Starred object at 0x7da1b0415600>]]] | keyword[def] identifier[nwise] ( identifier[iterable] , identifier[n] ):
literal[string]
identifier[iters] = identifier[itertools] . identifier[tee] ( identifier[iterable] , identifier[n] )
identifier[iters] =( identifier[itertools] . identifier[islice] ( identifier[it] , identifier[i] , keyword[None] ) keyword[for] identifier[i] , identifier[it] keyword[in] identifier[enumerate] ( identifier[iters] ))
keyword[return] identifier[itertools] . identifier[izip] (* identifier[iters] ) | def nwise(iterable, n):
"""
Iterate through a sequence with a defined length window
>>> list(nwise(range(8), 3))
[(0, 1, 2), (1, 2, 3), (2, 3, 4), (3, 4, 5), (5, 6, 7)]
>>> list(nwise(range(3), 5))
[]
Parameters
----------
iterable
n : length of each sequence
Yields
------
Tuples of length n
"""
iters = itertools.tee(iterable, n)
iters = (itertools.islice(it, i, None) for (i, it) in enumerate(iters))
return itertools.izip(*iters) |
def __substitute_replace_pairs(self):
"""
Substitutes all replace pairs in the source of the stored routine.
"""
self._set_magic_constants()
routine_source = []
i = 0
for line in self._routine_source_code_lines:
self._replace['__LINE__'] = "'%d'" % (i + 1)
for search, replace in self._replace.items():
tmp = re.findall(search, line, re.IGNORECASE)
if tmp:
line = line.replace(tmp[0], replace)
routine_source.append(line)
i += 1
self._routine_source_code = "\n".join(routine_source) | def function[__substitute_replace_pairs, parameter[self]]:
constant[
Substitutes all replace pairs in the source of the stored routine.
]
call[name[self]._set_magic_constants, parameter[]]
variable[routine_source] assign[=] list[[]]
variable[i] assign[=] constant[0]
for taget[name[line]] in starred[name[self]._routine_source_code_lines] begin[:]
call[name[self]._replace][constant[__LINE__]] assign[=] binary_operation[constant['%d'] <ast.Mod object at 0x7da2590d6920> binary_operation[name[i] + constant[1]]]
for taget[tuple[[<ast.Name object at 0x7da1b18ba6e0>, <ast.Name object at 0x7da1b18ba140>]]] in starred[call[name[self]._replace.items, parameter[]]] begin[:]
variable[tmp] assign[=] call[name[re].findall, parameter[name[search], name[line], name[re].IGNORECASE]]
if name[tmp] begin[:]
variable[line] assign[=] call[name[line].replace, parameter[call[name[tmp]][constant[0]], name[replace]]]
call[name[routine_source].append, parameter[name[line]]]
<ast.AugAssign object at 0x7da18bccb940>
name[self]._routine_source_code assign[=] call[constant[
].join, parameter[name[routine_source]]] | keyword[def] identifier[__substitute_replace_pairs] ( identifier[self] ):
literal[string]
identifier[self] . identifier[_set_magic_constants] ()
identifier[routine_source] =[]
identifier[i] = literal[int]
keyword[for] identifier[line] keyword[in] identifier[self] . identifier[_routine_source_code_lines] :
identifier[self] . identifier[_replace] [ literal[string] ]= literal[string] %( identifier[i] + literal[int] )
keyword[for] identifier[search] , identifier[replace] keyword[in] identifier[self] . identifier[_replace] . identifier[items] ():
identifier[tmp] = identifier[re] . identifier[findall] ( identifier[search] , identifier[line] , identifier[re] . identifier[IGNORECASE] )
keyword[if] identifier[tmp] :
identifier[line] = identifier[line] . identifier[replace] ( identifier[tmp] [ literal[int] ], identifier[replace] )
identifier[routine_source] . identifier[append] ( identifier[line] )
identifier[i] += literal[int]
identifier[self] . identifier[_routine_source_code] = literal[string] . identifier[join] ( identifier[routine_source] ) | def __substitute_replace_pairs(self):
"""
Substitutes all replace pairs in the source of the stored routine.
"""
self._set_magic_constants()
routine_source = []
i = 0
for line in self._routine_source_code_lines:
self._replace['__LINE__'] = "'%d'" % (i + 1)
for (search, replace) in self._replace.items():
tmp = re.findall(search, line, re.IGNORECASE)
if tmp:
line = line.replace(tmp[0], replace) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
routine_source.append(line)
i += 1 # depends on [control=['for'], data=['line']]
self._routine_source_code = '\n'.join(routine_source) |
def filter(self, record):
'''Returns True for all records if running in the CLI, else returns
True.
When running inside XBMC it calls the xbmc.log() method and prevents
the message from being double printed to STDOUT.
'''
# When running in XBMC, any logged statements will be double printed
# since we are calling xbmc.log() explicitly. Therefore we return False
# so every log message is filtered out and not printed again.
if CLI_MODE:
return True
else:
# Must not be imported until here because of import order issues
# when running in CLI
from xbmcswift2 import xbmc
xbmc_level = XBMCFilter.xbmc_levels.get(
XBMCFilter.python_to_xbmc.get(record.levelname))
xbmc.log('%s%s' % (self.prefix, record.getMessage()), xbmc_level)
return False | def function[filter, parameter[self, record]]:
constant[Returns True for all records if running in the CLI, else returns
True.
When running inside XBMC it calls the xbmc.log() method and prevents
the message from being double printed to STDOUT.
]
if name[CLI_MODE] begin[:]
return[constant[True]] | keyword[def] identifier[filter] ( identifier[self] , identifier[record] ):
literal[string]
keyword[if] identifier[CLI_MODE] :
keyword[return] keyword[True]
keyword[else] :
keyword[from] identifier[xbmcswift2] keyword[import] identifier[xbmc]
identifier[xbmc_level] = identifier[XBMCFilter] . identifier[xbmc_levels] . identifier[get] (
identifier[XBMCFilter] . identifier[python_to_xbmc] . identifier[get] ( identifier[record] . identifier[levelname] ))
identifier[xbmc] . identifier[log] ( literal[string] %( identifier[self] . identifier[prefix] , identifier[record] . identifier[getMessage] ()), identifier[xbmc_level] )
keyword[return] keyword[False] | def filter(self, record):
"""Returns True for all records if running in the CLI, else returns
True.
When running inside XBMC it calls the xbmc.log() method and prevents
the message from being double printed to STDOUT.
"""
# When running in XBMC, any logged statements will be double printed
# since we are calling xbmc.log() explicitly. Therefore we return False
# so every log message is filtered out and not printed again.
if CLI_MODE:
return True # depends on [control=['if'], data=[]]
else:
# Must not be imported until here because of import order issues
# when running in CLI
from xbmcswift2 import xbmc
xbmc_level = XBMCFilter.xbmc_levels.get(XBMCFilter.python_to_xbmc.get(record.levelname))
xbmc.log('%s%s' % (self.prefix, record.getMessage()), xbmc_level)
return False |
def get_user_list(host_name, client_name, client_pass):
    """
    Fetch the ids of all users registered under a PServer client.

    Inputs: - host_name: A string containing the address of the machine where the PServer instance is hosted.
            - client_name: The PServer client name.
            - client_pass: The PServer client's password.
    Output: - A python list of user ids (empty if the request failed).
    """
    # Build and send the "get all users" request for this client.
    request = construct_request(model_type="pers",
                                client_name=client_name,
                                client_pass=client_pass,
                                command="getusrs",
                                values="whr=*")
    response = send_request(host_name, request)
    if response is None:
        # The request failed; behave as if no users were found.
        return list()
    # Parse the xml payload and collect the text of every <usr> node.
    document = etree.parse(StringIO(response.text))
    user_nodes = document.getroot().findall("./result/row/usr")
    return [node.text for node in user_nodes]
constant[
Pulls the list of users in a client.
Inputs: - host_name: A string containing the address of the machine where the PServer instance is hosted.
- client_name: The PServer client name.
- client_pass: The PServer client's password.
Output: - user_id_list: A python list of user ids.
]
variable[request] assign[=] call[name[construct_request], parameter[]]
variable[request_result] assign[=] call[name[send_request], parameter[name[host_name], name[request]]]
variable[user_id_list] assign[=] call[name[list], parameter[]]
variable[append_user_id] assign[=] name[user_id_list].append
if compare[name[request_result] is_not constant[None]] begin[:]
variable[user_list_xml] assign[=] name[request_result].text
variable[tree] assign[=] call[name[etree].parse, parameter[call[name[StringIO], parameter[name[user_list_xml]]]]]
variable[root] assign[=] call[name[tree].getroot, parameter[]]
variable[xml_rows] assign[=] call[name[root].findall, parameter[constant[./result/row/usr]]]
for taget[name[xml_row]] in starred[name[xml_rows]] begin[:]
call[name[append_user_id], parameter[name[xml_row].text]]
return[name[user_id_list]] | keyword[def] identifier[get_user_list] ( identifier[host_name] , identifier[client_name] , identifier[client_pass] ):
literal[string]
identifier[request] = identifier[construct_request] ( identifier[model_type] = literal[string] ,
identifier[client_name] = identifier[client_name] ,
identifier[client_pass] = identifier[client_pass] ,
identifier[command] = literal[string] ,
identifier[values] = literal[string] )
identifier[request_result] = identifier[send_request] ( identifier[host_name] , identifier[request] )
identifier[user_id_list] = identifier[list] ()
identifier[append_user_id] = identifier[user_id_list] . identifier[append]
keyword[if] identifier[request_result] keyword[is] keyword[not] keyword[None] :
identifier[user_list_xml] = identifier[request_result] . identifier[text]
identifier[tree] = identifier[etree] . identifier[parse] ( identifier[StringIO] ( identifier[user_list_xml] ))
identifier[root] = identifier[tree] . identifier[getroot] ()
identifier[xml_rows] = identifier[root] . identifier[findall] ( literal[string] )
keyword[for] identifier[xml_row] keyword[in] identifier[xml_rows] :
identifier[append_user_id] ( identifier[xml_row] . identifier[text] )
keyword[return] identifier[user_id_list] | def get_user_list(host_name, client_name, client_pass):
"""
Pulls the list of users in a client.
Inputs: - host_name: A string containing the address of the machine where the PServer instance is hosted.
- client_name: The PServer client name.
- client_pass: The PServer client's password.
Output: - user_id_list: A python list of user ids.
"""
# Construct request.
request = construct_request(model_type='pers', client_name=client_name, client_pass=client_pass, command='getusrs', values='whr=*')
# Make request.
request_result = send_request(host_name, request)
# Extract a python list from xml object.
user_id_list = list()
append_user_id = user_id_list.append
if request_result is not None:
user_list_xml = request_result.text
tree = etree.parse(StringIO(user_list_xml))
root = tree.getroot()
xml_rows = root.findall('./result/row/usr')
for xml_row in xml_rows:
append_user_id(xml_row.text) # depends on [control=['for'], data=['xml_row']] # depends on [control=['if'], data=['request_result']]
return user_id_list |
def update_tcs_table(self):
    """
    Periodically update a table of info from the TCS.

    Only works at GTC; when the TCS is disabled or the telescope is not
    GTC, the method simply re-schedules itself. In every case the next
    poll is scheduled 60 seconds later.
    """
    g = get_root(self).globals
    # Guard: polling the TCS only makes sense when it is enabled and we
    # are actually configured for the GTC telescope.
    if not g.cpars['tcs_on'] or not g.cpars['telins_name'].lower() == 'gtc':
        self.after(60000, self.update_tcs_table)
        return
    try:
        tel_server = tcs.get_telescope_server()
        telpars = tel_server.getTelescopeParams()
        add_gtc_header_table_row(self.tcs_table, telpars)
    except Exception as err:
        # BUGFIX: `err` was previously bound but never reported, which
        # hid the cause of the failure from the log.
        g.clog.warn('Could not update table of TCS info: ' + str(err))
    # schedule next call for 60s later
    self.after(60000, self.update_tcs_table)
constant[
Periodically update a table of info from the TCS.
Only works at GTC
]
variable[g] assign[=] call[name[get_root], parameter[name[self]]].globals
if <ast.BoolOp object at 0x7da20e954040> begin[:]
call[name[self].after, parameter[constant[60000], name[self].update_tcs_table]]
return[None]
<ast.Try object at 0x7da20e957b80>
call[name[self].after, parameter[constant[60000], name[self].update_tcs_table]] | keyword[def] identifier[update_tcs_table] ( identifier[self] ):
literal[string]
identifier[g] = identifier[get_root] ( identifier[self] ). identifier[globals]
keyword[if] keyword[not] identifier[g] . identifier[cpars] [ literal[string] ] keyword[or] keyword[not] identifier[g] . identifier[cpars] [ literal[string] ]. identifier[lower] ()== literal[string] :
identifier[self] . identifier[after] ( literal[int] , identifier[self] . identifier[update_tcs_table] )
keyword[return]
keyword[try] :
identifier[tel_server] = identifier[tcs] . identifier[get_telescope_server] ()
identifier[telpars] = identifier[tel_server] . identifier[getTelescopeParams] ()
identifier[add_gtc_header_table_row] ( identifier[self] . identifier[tcs_table] , identifier[telpars] )
keyword[except] identifier[Exception] keyword[as] identifier[err] :
identifier[g] . identifier[clog] . identifier[warn] ( literal[string] )
identifier[self] . identifier[after] ( literal[int] , identifier[self] . identifier[update_tcs_table] ) | def update_tcs_table(self):
"""
Periodically update a table of info from the TCS.
Only works at GTC
"""
g = get_root(self).globals
if not g.cpars['tcs_on'] or not g.cpars['telins_name'].lower() == 'gtc':
self.after(60000, self.update_tcs_table)
return # depends on [control=['if'], data=[]]
try:
tel_server = tcs.get_telescope_server()
telpars = tel_server.getTelescopeParams()
add_gtc_header_table_row(self.tcs_table, telpars) # depends on [control=['try'], data=[]]
except Exception as err:
g.clog.warn('Could not update table of TCS info') # depends on [control=['except'], data=[]]
# schedule next call for 60s later
self.after(60000, self.update_tcs_table) |
async def issuer_revoke_credential(wallet_handle: int,
                                   blob_storage_reader_handle: int,
                                   rev_reg_id: str,
                                   cred_revoc_id: str) -> str:
    """
    Revoke a credential identified by a cred_revoc_id (returned by issuer_create_credential).
    The corresponding credential definition and revocation registry must be already
    created an stored into the wallet.
    This call returns revoc registry delta as json file intended to be shared as REVOC_REG_ENTRY transaction.
    Note that it is possible to accumulate deltas to reduce ledger load.
    :param wallet_handle: wallet handler (created by open_wallet).
    :param blob_storage_reader_handle: pre-configured blob storage reader instance handle that will allow
           to read revocation tails
    :param rev_reg_id: id of revocation registry stored in wallet
    :param cred_revoc_id: local id for revocation info
    :return: Revocation registry delta json with a revoked credential.
    """
    logger = logging.getLogger(__name__)
    logger.debug(
        "issuer_revoke_credential: >>> wallet_handle: %r, blob_storage_reader_handle: %r, rev_reg_id: %r, "
        "cred_revoc_id: %r",
        wallet_handle,
        blob_storage_reader_handle,
        rev_reg_id,
        cred_revoc_id)
    # The ctypes callback is created lazily once and memoized as an attribute
    # on the function object, so every call reuses the same callback instance
    # (presumably this also keeps it referenced while native code may invoke
    # it — TODO confirm against create_cb's lifetime handling).
    if not hasattr(issuer_revoke_credential, "cb"):
        logger.debug("issuer_revoke_credential: Creating callback")
        issuer_revoke_credential.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32, c_char_p))
    # Marshal Python arguments into the C types expected by the native call;
    # strings are passed as UTF-8 encoded char pointers.
    c_wallet_handle = c_int32(wallet_handle)
    c_blob_storage_reader_handle = c_int32(blob_storage_reader_handle)
    c_rev_reg_id = c_char_p(rev_reg_id.encode('utf-8'))
    c_cred_revoc_id = c_char_p(cred_revoc_id.encode('utf-8'))
    # Await the native 'indy_issuer_revoke_credential' call; the result is the
    # raw (bytes) revocation registry delta, decoded to str before returning.
    revoc_reg_delta_json = await do_call('indy_issuer_revoke_credential',
                                         c_wallet_handle,
                                         c_blob_storage_reader_handle,
                                         c_rev_reg_id,
                                         c_cred_revoc_id,
                                         issuer_revoke_credential.cb)
    res = revoc_reg_delta_json.decode()
    logger.debug("issuer_revoke_credential: <<< res: %r", res)
    return res
identifier[blob_storage_reader_handle] : identifier[int] ,
identifier[rev_reg_id] : identifier[str] ,
identifier[cred_revoc_id] : identifier[str] )-> identifier[str] :
literal[string]
identifier[logger] = identifier[logging] . identifier[getLogger] ( identifier[__name__] )
identifier[logger] . identifier[debug] (
literal[string]
literal[string] ,
identifier[wallet_handle] ,
identifier[blob_storage_reader_handle] ,
identifier[rev_reg_id] ,
identifier[cred_revoc_id] )
keyword[if] keyword[not] identifier[hasattr] ( identifier[issuer_revoke_credential] , literal[string] ):
identifier[logger] . identifier[debug] ( literal[string] )
identifier[issuer_revoke_credential] . identifier[cb] = identifier[create_cb] ( identifier[CFUNCTYPE] ( keyword[None] , identifier[c_int32] , identifier[c_int32] , identifier[c_char_p] ))
identifier[c_wallet_handle] = identifier[c_int32] ( identifier[wallet_handle] )
identifier[c_blob_storage_reader_handle] = identifier[c_int32] ( identifier[blob_storage_reader_handle] )
identifier[c_rev_reg_id] = identifier[c_char_p] ( identifier[rev_reg_id] . identifier[encode] ( literal[string] ))
identifier[c_cred_revoc_id] = identifier[c_char_p] ( identifier[cred_revoc_id] . identifier[encode] ( literal[string] ))
identifier[revoc_reg_delta_json] = keyword[await] identifier[do_call] ( literal[string] ,
identifier[c_wallet_handle] ,
identifier[c_blob_storage_reader_handle] ,
identifier[c_rev_reg_id] ,
identifier[c_cred_revoc_id] ,
identifier[issuer_revoke_credential] . identifier[cb] )
identifier[res] = identifier[revoc_reg_delta_json] . identifier[decode] ()
identifier[logger] . identifier[debug] ( literal[string] , identifier[res] )
keyword[return] identifier[res] | async def issuer_revoke_credential(wallet_handle: int, blob_storage_reader_handle: int, rev_reg_id: str, cred_revoc_id: str) -> str:
"""
Revoke a credential identified by a cred_revoc_id (returned by issuer_create_credential).
The corresponding credential definition and revocation registry must be already
created an stored into the wallet.
This call returns revoc registry delta as json file intended to be shared as REVOC_REG_ENTRY transaction.
Note that it is possible to accumulate deltas to reduce ledger load.
:param wallet_handle: wallet handler (created by open_wallet).
:param blob_storage_reader_handle: pre-configured blob storage reader instance handle that will allow
to read revocation tails
:param rev_reg_id: id of revocation registry stored in wallet
:param cred_revoc_id: local id for revocation info
:return: Revocation registry delta json with a revoked credential.
"""
logger = logging.getLogger(__name__)
logger.debug('issuer_revoke_credential: >>> wallet_handle: %r, blob_storage_reader_handle: %r, rev_reg_id: %r, cred_revoc_id: %r', wallet_handle, blob_storage_reader_handle, rev_reg_id, cred_revoc_id)
if not hasattr(issuer_revoke_credential, 'cb'):
logger.debug('issuer_revoke_credential: Creating callback')
issuer_revoke_credential.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32, c_char_p)) # depends on [control=['if'], data=[]]
c_wallet_handle = c_int32(wallet_handle)
c_blob_storage_reader_handle = c_int32(blob_storage_reader_handle)
c_rev_reg_id = c_char_p(rev_reg_id.encode('utf-8'))
c_cred_revoc_id = c_char_p(cred_revoc_id.encode('utf-8'))
revoc_reg_delta_json = await do_call('indy_issuer_revoke_credential', c_wallet_handle, c_blob_storage_reader_handle, c_rev_reg_id, c_cred_revoc_id, issuer_revoke_credential.cb)
res = revoc_reg_delta_json.decode()
logger.debug('issuer_revoke_credential: <<< res: %r', res)
return res |
def send_reset_password_email(person):
    """Email a person a tokenized link that lets them set a new password."""
    # Encode the primary key and build a one-time token for the reset URL.
    encoded_pk = urlsafe_base64_encode(force_bytes(person.pk)).decode("ascii")
    reset_token = default_token_generator.make_token(person)
    url = '%s/persons/reset/%s/%s/' % (
        settings.REGISTRATION_BASE_URL, encoded_pk, reset_token)
    # Render subject and body from the shared template context plus link data.
    context = CONTEXT.copy()
    context['url'] = url
    context['receiver'] = person
    subject, body = render_email('reset_password', context)
    send_mail(subject, body, settings.ACCOUNTS_EMAIL, [person.email])
constant[Sends an email to user allowing them to set their password.]
variable[uid] assign[=] call[call[name[urlsafe_base64_encode], parameter[call[name[force_bytes], parameter[name[person].pk]]]].decode, parameter[constant[ascii]]]
variable[token] assign[=] call[name[default_token_generator].make_token, parameter[name[person]]]
variable[url] assign[=] binary_operation[constant[%s/persons/reset/%s/%s/] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da18ede42b0>, <ast.Name object at 0x7da18ede72b0>, <ast.Name object at 0x7da18ede62f0>]]]
variable[context] assign[=] call[name[CONTEXT].copy, parameter[]]
call[name[context].update, parameter[dictionary[[<ast.Constant object at 0x7da18ede7a90>, <ast.Constant object at 0x7da18ede5450>], [<ast.Name object at 0x7da18ede5ff0>, <ast.Name object at 0x7da18ede4190>]]]]
variable[to_email] assign[=] name[person].email
<ast.Tuple object at 0x7da18ede5c30> assign[=] call[name[render_email], parameter[constant[reset_password], name[context]]]
call[name[send_mail], parameter[name[subject], name[body], name[settings].ACCOUNTS_EMAIL, list[[<ast.Name object at 0x7da18ede5b40>]]]] | keyword[def] identifier[send_reset_password_email] ( identifier[person] ):
literal[string]
identifier[uid] = identifier[urlsafe_base64_encode] ( identifier[force_bytes] ( identifier[person] . identifier[pk] )). identifier[decode] ( literal[string] )
identifier[token] = identifier[default_token_generator] . identifier[make_token] ( identifier[person] )
identifier[url] = literal[string] %(
identifier[settings] . identifier[REGISTRATION_BASE_URL] , identifier[uid] , identifier[token] )
identifier[context] = identifier[CONTEXT] . identifier[copy] ()
identifier[context] . identifier[update] ({
literal[string] : identifier[url] ,
literal[string] : identifier[person] ,
})
identifier[to_email] = identifier[person] . identifier[email]
identifier[subject] , identifier[body] = identifier[render_email] ( literal[string] , identifier[context] )
identifier[send_mail] ( identifier[subject] , identifier[body] , identifier[settings] . identifier[ACCOUNTS_EMAIL] ,[ identifier[to_email] ]) | def send_reset_password_email(person):
"""Sends an email to user allowing them to set their password."""
uid = urlsafe_base64_encode(force_bytes(person.pk)).decode('ascii')
token = default_token_generator.make_token(person)
url = '%s/persons/reset/%s/%s/' % (settings.REGISTRATION_BASE_URL, uid, token)
context = CONTEXT.copy()
context.update({'url': url, 'receiver': person})
to_email = person.email
(subject, body) = render_email('reset_password', context)
send_mail(subject, body, settings.ACCOUNTS_EMAIL, [to_email]) |
def export(self, nidm_version, export_dir):
    """
    Create prov entities and activities.

    Normalises an integer voxel-connectivity value (6/18/26) to the
    corresponding NIDM vocabulary term, then records this cluster
    definition criteria's attributes.

    :param nidm_version: NIDM version being exported (not used here).
    :param export_dir: export directory (not used here).
    """
    # Create "Cluster definition criteria" entity
    # Map integer connectivity codes onto NIDM terms.
    if isinstance(self.connectivity, int):
        if self.connectivity == 6:
            self.connectivity = NIDM_VOXEL6CONNECTED
        elif self.connectivity == 18:
            self.connectivity = NIDM_VOXEL18CONNECTED
        elif self.connectivity == 26:
            self.connectivity = NIDM_VOXEL26CONNECTED
    # FIXME if connectivity is missing
    if self.connectivity is not None:
        atts = (
            (PROV['type'], self.type),
            (PROV['label'], self.label),
            (NIDM_HAS_CONNECTIVITY_CRITERION, self.connectivity))
    else:
        # BUGFIX: this branch previously referenced the undefined name
        # `label`, raising NameError whenever connectivity was None.
        atts = (
            (PROV['type'], NIDM_CLUSTER_DEFINITION_CRITERIA),
            (PROV['label'], self.label))
    self.add_attributes(atts)
constant[
Create prov entities and activities.
]
if call[name[isinstance], parameter[name[self].connectivity, name[int]]] begin[:]
if compare[name[self].connectivity equal[==] constant[6]] begin[:]
name[self].connectivity assign[=] name[NIDM_VOXEL6CONNECTED]
if compare[name[self].connectivity is_not constant[None]] begin[:]
variable[atts] assign[=] tuple[[<ast.Tuple object at 0x7da1b0b591b0>, <ast.Tuple object at 0x7da1b0b5a170>, <ast.Tuple object at 0x7da1b0b59a80>]]
call[name[self].add_attributes, parameter[name[atts]]] | keyword[def] identifier[export] ( identifier[self] , identifier[nidm_version] , identifier[export_dir] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[self] . identifier[connectivity] , identifier[int] ):
keyword[if] identifier[self] . identifier[connectivity] == literal[int] :
identifier[self] . identifier[connectivity] = identifier[NIDM_VOXEL6CONNECTED]
keyword[elif] identifier[self] . identifier[connectivity] == literal[int] :
identifier[self] . identifier[connectivity] = identifier[NIDM_VOXEL18CONNECTED]
keyword[elif] identifier[self] . identifier[connectivity] == literal[int] :
identifier[self] . identifier[connectivity] = identifier[NIDM_VOXEL26CONNECTED]
keyword[if] identifier[self] . identifier[connectivity] keyword[is] keyword[not] keyword[None] :
identifier[atts] =(
( identifier[PROV] [ literal[string] ], identifier[self] . identifier[type] ),
( identifier[PROV] [ literal[string] ], identifier[self] . identifier[label] ),
( identifier[NIDM_HAS_CONNECTIVITY_CRITERION] , identifier[self] . identifier[connectivity] ))
keyword[else] :
identifier[atts] =(
( identifier[PROV] [ literal[string] ], identifier[NIDM_CLUSTER_DEFINITION_CRITERIA] ),
( identifier[PROV] [ literal[string] ], identifier[label] ))
identifier[self] . identifier[add_attributes] ( identifier[atts] ) | def export(self, nidm_version, export_dir):
"""
Create prov entities and activities.
"""
# Create "Cluster definition criteria" entity
if isinstance(self.connectivity, int):
if self.connectivity == 6:
self.connectivity = NIDM_VOXEL6CONNECTED # depends on [control=['if'], data=[]]
elif self.connectivity == 18:
self.connectivity = NIDM_VOXEL18CONNECTED # depends on [control=['if'], data=[]]
elif self.connectivity == 26:
self.connectivity = NIDM_VOXEL26CONNECTED # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
# FIXME if connectivity is missing
if self.connectivity is not None:
atts = ((PROV['type'], self.type), (PROV['label'], self.label), (NIDM_HAS_CONNECTIVITY_CRITERION, self.connectivity)) # depends on [control=['if'], data=[]]
else:
atts = ((PROV['type'], NIDM_CLUSTER_DEFINITION_CRITERIA), (PROV['label'], label))
self.add_attributes(atts) |
def child_context(self, *args, **kwargs):
    """
    Context setup first in child process, before returning from start() call in parent.
    Result is passed in as argument of update
    :return:
    """
    # Defaults for the interface arguments we may pull from config.
    defaults = {
        'services': [],
        'topics': [],  # bwcompat
        'subscribers': [],
        'publishers': [],
        'params': [],
        # TODO : all of them !
    }
    # Each value comes from the config (keyed by the upper-cased name),
    # falling back to the default above when absent.
    setup_kwargs = {}
    for name, fallback in defaults.items():
        setup_kwargs[name] = self.config_handler.config.get(name.upper(), fallback)
    # Explicit keyword arguments override whatever the config provided.
    setup_kwargs.update(kwargs)
    # storing passed args in config in case of reset
    # calling setup on child context enter call
    if self.interface is None:
        # for BW compat
        # TODO : change API to use the child_context from pyzmp coprocess
        self.setup(*args, **setup_kwargs)
    with super(PyrosBase, self).child_context(*args, **kwargs) as ctx:
        yield ctx
constant[
Context setup first in child process, before returning from start() call in parent.
Result is passed in as argument of update
:return:
]
variable[expected_args] assign[=] dictionary[[<ast.Constant object at 0x7da20c7cbc40>, <ast.Constant object at 0x7da20c7cabc0>, <ast.Constant object at 0x7da20c7c87f0>, <ast.Constant object at 0x7da20c7ca5c0>, <ast.Constant object at 0x7da20c7cbee0>], [<ast.List object at 0x7da20c7cb3a0>, <ast.List object at 0x7da20c7ca1a0>, <ast.List object at 0x7da20c7cb790>, <ast.List object at 0x7da20c7cba00>, <ast.List object at 0x7da20c7ca110>]]
variable[ifargs] assign[=] <ast.DictComp object at 0x7da20c7c9270>
call[name[ifargs].update, parameter[name[kwargs]]]
if compare[name[self].interface is constant[None]] begin[:]
call[name[self].setup, parameter[<ast.Starred object at 0x7da20c7ca2c0>]]
with call[call[name[super], parameter[name[PyrosBase], name[self]]].child_context, parameter[<ast.Starred object at 0x7da18eb558d0>]] begin[:]
<ast.Yield object at 0x7da18eb56ad0> | keyword[def] identifier[child_context] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
identifier[expected_args] ={
literal[string] :[],
literal[string] :[],
literal[string] :[],
literal[string] :[],
literal[string] :[],
}
identifier[ifargs] ={
identifier[arg] : identifier[self] . identifier[config_handler] . identifier[config] . identifier[get] ( identifier[arg] . identifier[upper] (), identifier[default] ) keyword[for] identifier[arg] , identifier[default] keyword[in] identifier[expected_args] . identifier[items] ()
}
identifier[ifargs] . identifier[update] ( identifier[kwargs] )
keyword[if] identifier[self] . identifier[interface] keyword[is] keyword[None] :
identifier[self] . identifier[setup] (* identifier[args] ,** identifier[ifargs] )
keyword[with] identifier[super] ( identifier[PyrosBase] , identifier[self] ). identifier[child_context] (* identifier[args] ,** identifier[kwargs] ) keyword[as] identifier[cctxt] :
keyword[yield] identifier[cctxt] | def child_context(self, *args, **kwargs):
"""
Context setup first in child process, before returning from start() call in parent.
Result is passed in as argument of update
:return:
"""
# Now we can extract config values
# bwcompat
# TODO : all of them !
expected_args = {'services': [], 'topics': [], 'subscribers': [], 'publishers': [], 'params': []}
ifargs = {arg: self.config_handler.config.get(arg.upper(), default) for (arg, default) in expected_args.items()}
# overriding with kwargs
ifargs.update(kwargs)
# storing passed args in config in case of reset
# calling setup on child context enter call
if self.interface is None:
#for BW compat
# TODO : change API to use the child_context from pyzmp coprocess
self.setup(*args, **ifargs) # depends on [control=['if'], data=[]]
with super(PyrosBase, self).child_context(*args, **kwargs) as cctxt:
yield cctxt # depends on [control=['with'], data=['cctxt']] |
def minimize(self, model, session=None, var_list=None, feed_dict=None,
             maxiter=1000, initialize=False, anchor=True, step_callback=None, **kwargs):
    """
    Minimizes objective function of the model.
    :param model: GPflow model with objective tensor.
    :param session: Session where optimization will be run.
    :param var_list: List of extra variables which should be trained during optimization.
    :param feed_dict: Feed dictionary of tensors passed to session run method.
    :param maxiter: Number of run interation.
    :param initialize: If `True` model parameters will be re-initialized even if they were
        initialized before for gotten session.
        NOTE(review): this flag is accepted but not referenced in this body —
        confirm whether initialization is handled by make_optimize_action.
    :param anchor: If `True` trained variable values computed during optimization at
        particular session will be synchronized with internal parameter values.
    :param step_callback: A callback function to execute at each optimization step.
        The callback should accept variable argument list, where first argument is
        optimization step number.
    :type step_callback: Callable[[], None]
    :param kwargs: This is a dictionary of extra parameters for session run method.
    """
    if model is None or not isinstance(model, Model):
        raise ValueError('The `model` argument must be a GPflow model.')
    opt = self.make_optimize_action(model,
                                    session=session,
                                    var_list=var_list,
                                    feed_dict=feed_dict, **kwargs)
    # Keep references to the optimized model and its training op.
    self._model = opt.model
    self._minimize_operation = opt.optimizer_tensor
    session = model.enquire_session(session)
    with session.as_default():
        step = 0
        while step < maxiter:
            opt()
            if step_callback is not None:
                step_callback(step)
            step += 1
    # Synchronize trained session values back into the model parameters.
    if anchor:
        opt.model.anchor(session)
constant[
Minimizes objective function of the model.
:param model: GPflow model with objective tensor.
:param session: Session where optimization will be run.
:param var_list: List of extra variables which should be trained during optimization.
:param feed_dict: Feed dictionary of tensors passed to session run method.
:param maxiter: Number of run interation.
:param initialize: If `True` model parameters will be re-initialized even if they were
initialized before for gotten session.
:param anchor: If `True` trained variable values computed during optimization at
particular session will be synchronized with internal parameter values.
:param step_callback: A callback function to execute at each optimization step.
The callback should accept variable argument list, where first argument is
optimization step number.
:type step_callback: Callable[[], None]
:param kwargs: This is a dictionary of extra parameters for session run method.
]
if <ast.BoolOp object at 0x7da1b21253c0> begin[:]
<ast.Raise object at 0x7da1b2125c30>
variable[opt] assign[=] call[name[self].make_optimize_action, parameter[name[model]]]
name[self]._model assign[=] name[opt].model
name[self]._minimize_operation assign[=] name[opt].optimizer_tensor
variable[session] assign[=] call[name[model].enquire_session, parameter[name[session]]]
with call[name[session].as_default, parameter[]] begin[:]
for taget[name[step]] in starred[call[name[range], parameter[name[maxiter]]]] begin[:]
call[name[opt], parameter[]]
if compare[name[step_callback] is_not constant[None]] begin[:]
call[name[step_callback], parameter[name[step]]]
if name[anchor] begin[:]
call[name[opt].model.anchor, parameter[name[session]]] | keyword[def] identifier[minimize] ( identifier[self] , identifier[model] , identifier[session] = keyword[None] , identifier[var_list] = keyword[None] , identifier[feed_dict] = keyword[None] ,
identifier[maxiter] = literal[int] , identifier[initialize] = keyword[False] , identifier[anchor] = keyword[True] , identifier[step_callback] = keyword[None] ,** identifier[kwargs] ):
literal[string]
keyword[if] identifier[model] keyword[is] keyword[None] keyword[or] keyword[not] identifier[isinstance] ( identifier[model] , identifier[Model] ):
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[opt] = identifier[self] . identifier[make_optimize_action] ( identifier[model] ,
identifier[session] = identifier[session] ,
identifier[var_list] = identifier[var_list] ,
identifier[feed_dict] = identifier[feed_dict] ,** identifier[kwargs] )
identifier[self] . identifier[_model] = identifier[opt] . identifier[model]
identifier[self] . identifier[_minimize_operation] = identifier[opt] . identifier[optimizer_tensor]
identifier[session] = identifier[model] . identifier[enquire_session] ( identifier[session] )
keyword[with] identifier[session] . identifier[as_default] ():
keyword[for] identifier[step] keyword[in] identifier[range] ( identifier[maxiter] ):
identifier[opt] ()
keyword[if] identifier[step_callback] keyword[is] keyword[not] keyword[None] :
identifier[step_callback] ( identifier[step] )
keyword[if] identifier[anchor] :
identifier[opt] . identifier[model] . identifier[anchor] ( identifier[session] ) | def minimize(self, model, session=None, var_list=None, feed_dict=None, maxiter=1000, initialize=False, anchor=True, step_callback=None, **kwargs):
"""
Minimizes objective function of the model.
:param model: GPflow model with objective tensor.
:param session: Session where optimization will be run.
:param var_list: List of extra variables which should be trained during optimization.
:param feed_dict: Feed dictionary of tensors passed to session run method.
:param maxiter: Number of run interation.
:param initialize: If `True` model parameters will be re-initialized even if they were
initialized before for gotten session.
:param anchor: If `True` trained variable values computed during optimization at
particular session will be synchronized with internal parameter values.
:param step_callback: A callback function to execute at each optimization step.
The callback should accept variable argument list, where first argument is
optimization step number.
:type step_callback: Callable[[], None]
:param kwargs: This is a dictionary of extra parameters for session run method.
"""
if model is None or not isinstance(model, Model):
raise ValueError('The `model` argument must be a GPflow model.') # depends on [control=['if'], data=[]]
opt = self.make_optimize_action(model, session=session, var_list=var_list, feed_dict=feed_dict, **kwargs)
self._model = opt.model
self._minimize_operation = opt.optimizer_tensor
session = model.enquire_session(session)
with session.as_default():
for step in range(maxiter):
opt()
if step_callback is not None:
step_callback(step) # depends on [control=['if'], data=['step_callback']] # depends on [control=['for'], data=['step']] # depends on [control=['with'], data=[]]
if anchor:
opt.model.anchor(session) # depends on [control=['if'], data=[]] |
def segments_colinear(ab, cd):
    '''
    segments_colinear((a, b), (c, d)) yields True if either a or b is on the line segment (c,d) or
    if c or d is on the line segment (a,b) and the lines are colinear; otherwise yields False. All
    of a, b, c, and d must be (x,y) coordinates or 2xN (x,y) coordinate matrices, or (x,y,z) or 3xN
    matrices.
    '''
    (a,b) = ab
    (c,d) = cd
    # Four endpoint-on-segment incidence tests; the sum over them (elementwise
    # for matrix inputs) must exceed 1, i.e. at least two incidences are
    # required before the segments count as colinear/overlapping.
    ss = [point_on_segment(ab, c), point_on_segment(ab, d),
          point_on_segment(cd, a), point_on_segment(cd, b)]
    return np.sum(ss, axis=0) > 1
constant[
segments_colinear_2D((a, b), (c, d)) yields True if either a or b is on the line segment (c,d) or
if c or d is on the line segment (a,b) and the lines are colinear; otherwise yields False. All
of a, b, c, and d must be (x,y) coordinates or 2xN (x,y) coordinate matrices, or (x,y,z) or 3xN
matrices.
]
<ast.Tuple object at 0x7da1b0e3bee0> assign[=] name[ab]
<ast.Tuple object at 0x7da1b0e3afb0> assign[=] name[cd]
variable[ss] assign[=] list[[<ast.Call object at 0x7da1b0e3b5e0>, <ast.Call object at 0x7da1b0e3a230>, <ast.Call object at 0x7da1b0e383d0>, <ast.Call object at 0x7da1b0e39ab0>]]
return[compare[call[name[np].sum, parameter[name[ss]]] greater[>] constant[1]]] | keyword[def] identifier[segments_colinear] ( identifier[ab] , identifier[cd] ):
literal[string]
( identifier[a] , identifier[b] )= identifier[ab]
( identifier[c] , identifier[d] )= identifier[cd]
identifier[ss] =[ identifier[point_on_segment] ( identifier[ab] , identifier[c] ), identifier[point_on_segment] ( identifier[ab] , identifier[d] ),
identifier[point_on_segment] ( identifier[cd] , identifier[a] ), identifier[point_on_segment] ( identifier[cd] , identifier[b] )]
keyword[return] identifier[np] . identifier[sum] ( identifier[ss] , identifier[axis] = literal[int] )> literal[int] | def segments_colinear(ab, cd):
"""
segments_colinear_2D((a, b), (c, d)) yields True if either a or b is on the line segment (c,d) or
if c or d is on the line segment (a,b) and the lines are colinear; otherwise yields False. All
of a, b, c, and d must be (x,y) coordinates or 2xN (x,y) coordinate matrices, or (x,y,z) or 3xN
matrices.
"""
(a, b) = ab
(c, d) = cd
ss = [point_on_segment(ab, c), point_on_segment(ab, d), point_on_segment(cd, a), point_on_segment(cd, b)]
return np.sum(ss, axis=0) > 1 |
def take_nd(self, indexer, axis, new_mgr_locs=None, fill_tuple=None):
"""
Take values according to indexer and return them as a block.bb
"""
# algos.take_nd dispatches for DatetimeTZBlock, CategoricalBlock
# so need to preserve types
# sparse is treated like an ndarray, but needs .get_values() shaping
values = self.values
if self.is_sparse:
values = self.get_values()
if fill_tuple is None:
fill_value = self.fill_value
new_values = algos.take_nd(values, indexer, axis=axis,
allow_fill=False, fill_value=fill_value)
else:
fill_value = fill_tuple[0]
new_values = algos.take_nd(values, indexer, axis=axis,
allow_fill=True, fill_value=fill_value)
if new_mgr_locs is None:
if axis == 0:
slc = libinternals.indexer_as_slice(indexer)
if slc is not None:
new_mgr_locs = self.mgr_locs[slc]
else:
new_mgr_locs = self.mgr_locs[indexer]
else:
new_mgr_locs = self.mgr_locs
if not is_dtype_equal(new_values.dtype, self.dtype):
return self.make_block(new_values, new_mgr_locs)
else:
return self.make_block_same_class(new_values, new_mgr_locs) | def function[take_nd, parameter[self, indexer, axis, new_mgr_locs, fill_tuple]]:
constant[
Take values according to indexer and return them as a block.bb
]
variable[values] assign[=] name[self].values
if name[self].is_sparse begin[:]
variable[values] assign[=] call[name[self].get_values, parameter[]]
if compare[name[fill_tuple] is constant[None]] begin[:]
variable[fill_value] assign[=] name[self].fill_value
variable[new_values] assign[=] call[name[algos].take_nd, parameter[name[values], name[indexer]]]
if compare[name[new_mgr_locs] is constant[None]] begin[:]
if compare[name[axis] equal[==] constant[0]] begin[:]
variable[slc] assign[=] call[name[libinternals].indexer_as_slice, parameter[name[indexer]]]
if compare[name[slc] is_not constant[None]] begin[:]
variable[new_mgr_locs] assign[=] call[name[self].mgr_locs][name[slc]]
if <ast.UnaryOp object at 0x7da204346c50> begin[:]
return[call[name[self].make_block, parameter[name[new_values], name[new_mgr_locs]]]] | keyword[def] identifier[take_nd] ( identifier[self] , identifier[indexer] , identifier[axis] , identifier[new_mgr_locs] = keyword[None] , identifier[fill_tuple] = keyword[None] ):
literal[string]
identifier[values] = identifier[self] . identifier[values]
keyword[if] identifier[self] . identifier[is_sparse] :
identifier[values] = identifier[self] . identifier[get_values] ()
keyword[if] identifier[fill_tuple] keyword[is] keyword[None] :
identifier[fill_value] = identifier[self] . identifier[fill_value]
identifier[new_values] = identifier[algos] . identifier[take_nd] ( identifier[values] , identifier[indexer] , identifier[axis] = identifier[axis] ,
identifier[allow_fill] = keyword[False] , identifier[fill_value] = identifier[fill_value] )
keyword[else] :
identifier[fill_value] = identifier[fill_tuple] [ literal[int] ]
identifier[new_values] = identifier[algos] . identifier[take_nd] ( identifier[values] , identifier[indexer] , identifier[axis] = identifier[axis] ,
identifier[allow_fill] = keyword[True] , identifier[fill_value] = identifier[fill_value] )
keyword[if] identifier[new_mgr_locs] keyword[is] keyword[None] :
keyword[if] identifier[axis] == literal[int] :
identifier[slc] = identifier[libinternals] . identifier[indexer_as_slice] ( identifier[indexer] )
keyword[if] identifier[slc] keyword[is] keyword[not] keyword[None] :
identifier[new_mgr_locs] = identifier[self] . identifier[mgr_locs] [ identifier[slc] ]
keyword[else] :
identifier[new_mgr_locs] = identifier[self] . identifier[mgr_locs] [ identifier[indexer] ]
keyword[else] :
identifier[new_mgr_locs] = identifier[self] . identifier[mgr_locs]
keyword[if] keyword[not] identifier[is_dtype_equal] ( identifier[new_values] . identifier[dtype] , identifier[self] . identifier[dtype] ):
keyword[return] identifier[self] . identifier[make_block] ( identifier[new_values] , identifier[new_mgr_locs] )
keyword[else] :
keyword[return] identifier[self] . identifier[make_block_same_class] ( identifier[new_values] , identifier[new_mgr_locs] ) | def take_nd(self, indexer, axis, new_mgr_locs=None, fill_tuple=None):
"""
Take values according to indexer and return them as a block.bb
"""
# algos.take_nd dispatches for DatetimeTZBlock, CategoricalBlock
# so need to preserve types
# sparse is treated like an ndarray, but needs .get_values() shaping
values = self.values
if self.is_sparse:
values = self.get_values() # depends on [control=['if'], data=[]]
if fill_tuple is None:
fill_value = self.fill_value
new_values = algos.take_nd(values, indexer, axis=axis, allow_fill=False, fill_value=fill_value) # depends on [control=['if'], data=[]]
else:
fill_value = fill_tuple[0]
new_values = algos.take_nd(values, indexer, axis=axis, allow_fill=True, fill_value=fill_value)
if new_mgr_locs is None:
if axis == 0:
slc = libinternals.indexer_as_slice(indexer)
if slc is not None:
new_mgr_locs = self.mgr_locs[slc] # depends on [control=['if'], data=['slc']]
else:
new_mgr_locs = self.mgr_locs[indexer] # depends on [control=['if'], data=[]]
else:
new_mgr_locs = self.mgr_locs # depends on [control=['if'], data=['new_mgr_locs']]
if not is_dtype_equal(new_values.dtype, self.dtype):
return self.make_block(new_values, new_mgr_locs) # depends on [control=['if'], data=[]]
else:
return self.make_block_same_class(new_values, new_mgr_locs) |
def run_in_background(coroutine: "Callable[[concurrent.futures.Future[T], Coroutine[Any, Any, None]]", *, debug: bool = False, _policy_lock: threading.Lock = threading.Lock()) -> T:
"""
Runs ``coroutine(future)`` in a new event loop on a background thread.
Blocks and returns the *future* result as soon as it is resolved.
The coroutine and all remaining tasks continue running in the background
until it is complete.
Note: This installs a :class:`chess.engine.EventLoopPolicy` for the entire
process.
"""
assert asyncio.iscoroutinefunction(coroutine)
with _policy_lock:
if not isinstance(asyncio.get_event_loop_policy(), EventLoopPolicy):
asyncio.set_event_loop_policy(EventLoopPolicy())
future = concurrent.futures.Future() # type: concurrent.futures.Future[T]
def background() -> None:
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
loop.set_debug(debug)
try:
loop.run_until_complete(coroutine(future))
future.cancel()
except Exception as exc:
future.set_exception(exc)
return
finally:
try:
# Finish all remaining tasks.
pending = _all_tasks(loop)
loop.run_until_complete(asyncio.gather(*pending, loop=loop, return_exceptions=True))
# Shutdown async generators.
try:
loop.run_until_complete(loop.shutdown_asyncgens())
except AttributeError:
# Before Python 3.6.
pass
finally:
loop.close()
threading.Thread(target=background).start()
return future.result() | def function[run_in_background, parameter[coroutine]]:
constant[
Runs ``coroutine(future)`` in a new event loop on a background thread.
Blocks and returns the *future* result as soon as it is resolved.
The coroutine and all remaining tasks continue running in the background
until it is complete.
Note: This installs a :class:`chess.engine.EventLoopPolicy` for the entire
process.
]
assert[call[name[asyncio].iscoroutinefunction, parameter[name[coroutine]]]]
with name[_policy_lock] begin[:]
if <ast.UnaryOp object at 0x7da1b17e3d30> begin[:]
call[name[asyncio].set_event_loop_policy, parameter[call[name[EventLoopPolicy], parameter[]]]]
variable[future] assign[=] call[name[concurrent].futures.Future, parameter[]]
def function[background, parameter[]]:
variable[loop] assign[=] call[name[asyncio].new_event_loop, parameter[]]
call[name[asyncio].set_event_loop, parameter[name[loop]]]
call[name[loop].set_debug, parameter[name[debug]]]
<ast.Try object at 0x7da1b17912a0>
call[call[name[threading].Thread, parameter[]].start, parameter[]]
return[call[name[future].result, parameter[]]] | keyword[def] identifier[run_in_background] ( identifier[coroutine] : literal[string] ,*, identifier[debug] : identifier[bool] = keyword[False] , identifier[_policy_lock] : identifier[threading] . identifier[Lock] = identifier[threading] . identifier[Lock] ())-> identifier[T] :
literal[string]
keyword[assert] identifier[asyncio] . identifier[iscoroutinefunction] ( identifier[coroutine] )
keyword[with] identifier[_policy_lock] :
keyword[if] keyword[not] identifier[isinstance] ( identifier[asyncio] . identifier[get_event_loop_policy] (), identifier[EventLoopPolicy] ):
identifier[asyncio] . identifier[set_event_loop_policy] ( identifier[EventLoopPolicy] ())
identifier[future] = identifier[concurrent] . identifier[futures] . identifier[Future] ()
keyword[def] identifier[background] ()-> keyword[None] :
identifier[loop] = identifier[asyncio] . identifier[new_event_loop] ()
identifier[asyncio] . identifier[set_event_loop] ( identifier[loop] )
identifier[loop] . identifier[set_debug] ( identifier[debug] )
keyword[try] :
identifier[loop] . identifier[run_until_complete] ( identifier[coroutine] ( identifier[future] ))
identifier[future] . identifier[cancel] ()
keyword[except] identifier[Exception] keyword[as] identifier[exc] :
identifier[future] . identifier[set_exception] ( identifier[exc] )
keyword[return]
keyword[finally] :
keyword[try] :
identifier[pending] = identifier[_all_tasks] ( identifier[loop] )
identifier[loop] . identifier[run_until_complete] ( identifier[asyncio] . identifier[gather] (* identifier[pending] , identifier[loop] = identifier[loop] , identifier[return_exceptions] = keyword[True] ))
keyword[try] :
identifier[loop] . identifier[run_until_complete] ( identifier[loop] . identifier[shutdown_asyncgens] ())
keyword[except] identifier[AttributeError] :
keyword[pass]
keyword[finally] :
identifier[loop] . identifier[close] ()
identifier[threading] . identifier[Thread] ( identifier[target] = identifier[background] ). identifier[start] ()
keyword[return] identifier[future] . identifier[result] () | def run_in_background(coroutine: 'Callable[[concurrent.futures.Future[T], Coroutine[Any, Any, None]]', *, debug: bool=False, _policy_lock: threading.Lock=threading.Lock()) -> T:
"""
Runs ``coroutine(future)`` in a new event loop on a background thread.
Blocks and returns the *future* result as soon as it is resolved.
The coroutine and all remaining tasks continue running in the background
until it is complete.
Note: This installs a :class:`chess.engine.EventLoopPolicy` for the entire
process.
"""
assert asyncio.iscoroutinefunction(coroutine)
with _policy_lock:
if not isinstance(asyncio.get_event_loop_policy(), EventLoopPolicy):
asyncio.set_event_loop_policy(EventLoopPolicy()) # depends on [control=['if'], data=[]] # depends on [control=['with'], data=[]]
future = concurrent.futures.Future() # type: concurrent.futures.Future[T]
def background() -> None:
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
loop.set_debug(debug)
try:
loop.run_until_complete(coroutine(future))
future.cancel() # depends on [control=['try'], data=[]]
except Exception as exc:
future.set_exception(exc)
return # depends on [control=['except'], data=['exc']]
finally:
try:
# Finish all remaining tasks.
pending = _all_tasks(loop)
loop.run_until_complete(asyncio.gather(*pending, loop=loop, return_exceptions=True))
# Shutdown async generators.
try:
loop.run_until_complete(loop.shutdown_asyncgens()) # depends on [control=['try'], data=[]]
except AttributeError:
# Before Python 3.6.
pass # depends on [control=['except'], data=[]] # depends on [control=['try'], data=[]]
finally:
loop.close()
threading.Thread(target=background).start()
return future.result() |
def map_agents(self, stmts, do_rename=True):
"""Return a new list of statements whose agents have been mapped
Parameters
----------
stmts : list of :py:class:`indra.statements.Statement`
The statements whose agents need mapping
do_rename: Optional[bool]
If True, the Agent name is updated based on the mapped grounding.
If do_rename is True the priority for setting the name is
FamPlex ID, HGNC symbol, then the gene name
from Uniprot. Default: True
Returns
-------
mapped_stmts : list of :py:class:`indra.statements.Statement`
A list of statements given by mapping the agents from each
statement in the input list
"""
# Make a copy of the stmts
mapped_stmts = []
num_skipped = 0
# Iterate over the statements
for stmt in stmts:
mapped_stmt = self.map_agents_for_stmt(stmt, do_rename)
# Check if we should skip the statement
if mapped_stmt is not None:
mapped_stmts.append(mapped_stmt)
else:
num_skipped += 1
logger.info('%s statements filtered out' % num_skipped)
return mapped_stmts | def function[map_agents, parameter[self, stmts, do_rename]]:
constant[Return a new list of statements whose agents have been mapped
Parameters
----------
stmts : list of :py:class:`indra.statements.Statement`
The statements whose agents need mapping
do_rename: Optional[bool]
If True, the Agent name is updated based on the mapped grounding.
If do_rename is True the priority for setting the name is
FamPlex ID, HGNC symbol, then the gene name
from Uniprot. Default: True
Returns
-------
mapped_stmts : list of :py:class:`indra.statements.Statement`
A list of statements given by mapping the agents from each
statement in the input list
]
variable[mapped_stmts] assign[=] list[[]]
variable[num_skipped] assign[=] constant[0]
for taget[name[stmt]] in starred[name[stmts]] begin[:]
variable[mapped_stmt] assign[=] call[name[self].map_agents_for_stmt, parameter[name[stmt], name[do_rename]]]
if compare[name[mapped_stmt] is_not constant[None]] begin[:]
call[name[mapped_stmts].append, parameter[name[mapped_stmt]]]
call[name[logger].info, parameter[binary_operation[constant[%s statements filtered out] <ast.Mod object at 0x7da2590d6920> name[num_skipped]]]]
return[name[mapped_stmts]] | keyword[def] identifier[map_agents] ( identifier[self] , identifier[stmts] , identifier[do_rename] = keyword[True] ):
literal[string]
identifier[mapped_stmts] =[]
identifier[num_skipped] = literal[int]
keyword[for] identifier[stmt] keyword[in] identifier[stmts] :
identifier[mapped_stmt] = identifier[self] . identifier[map_agents_for_stmt] ( identifier[stmt] , identifier[do_rename] )
keyword[if] identifier[mapped_stmt] keyword[is] keyword[not] keyword[None] :
identifier[mapped_stmts] . identifier[append] ( identifier[mapped_stmt] )
keyword[else] :
identifier[num_skipped] += literal[int]
identifier[logger] . identifier[info] ( literal[string] % identifier[num_skipped] )
keyword[return] identifier[mapped_stmts] | def map_agents(self, stmts, do_rename=True):
"""Return a new list of statements whose agents have been mapped
Parameters
----------
stmts : list of :py:class:`indra.statements.Statement`
The statements whose agents need mapping
do_rename: Optional[bool]
If True, the Agent name is updated based on the mapped grounding.
If do_rename is True the priority for setting the name is
FamPlex ID, HGNC symbol, then the gene name
from Uniprot. Default: True
Returns
-------
mapped_stmts : list of :py:class:`indra.statements.Statement`
A list of statements given by mapping the agents from each
statement in the input list
"""
# Make a copy of the stmts
mapped_stmts = []
num_skipped = 0
# Iterate over the statements
for stmt in stmts:
mapped_stmt = self.map_agents_for_stmt(stmt, do_rename)
# Check if we should skip the statement
if mapped_stmt is not None:
mapped_stmts.append(mapped_stmt) # depends on [control=['if'], data=['mapped_stmt']]
else:
num_skipped += 1 # depends on [control=['for'], data=['stmt']]
logger.info('%s statements filtered out' % num_skipped)
return mapped_stmts |
def cprint(self, cstr):
"""
Clear line, then reprint on same line
:param cstr: string to print on current line
"""
cstr = str(cstr) # Force it to be a string
cstr_len = len(cstr)
prev_cstr_len = len(self._prev_cstr)
num_spaces = 0
if cstr_len < prev_cstr_len:
num_spaces = abs(prev_cstr_len - cstr_len)
try:
print(cstr + " " * num_spaces, end='\r')
self._prev_cstr = cstr
except UnicodeEncodeError:
print('Processing...', end='\r')
self._prev_cstr = 'Processing...' | def function[cprint, parameter[self, cstr]]:
constant[
Clear line, then reprint on same line
:param cstr: string to print on current line
]
variable[cstr] assign[=] call[name[str], parameter[name[cstr]]]
variable[cstr_len] assign[=] call[name[len], parameter[name[cstr]]]
variable[prev_cstr_len] assign[=] call[name[len], parameter[name[self]._prev_cstr]]
variable[num_spaces] assign[=] constant[0]
if compare[name[cstr_len] less[<] name[prev_cstr_len]] begin[:]
variable[num_spaces] assign[=] call[name[abs], parameter[binary_operation[name[prev_cstr_len] - name[cstr_len]]]]
<ast.Try object at 0x7da20c7cbd00> | keyword[def] identifier[cprint] ( identifier[self] , identifier[cstr] ):
literal[string]
identifier[cstr] = identifier[str] ( identifier[cstr] )
identifier[cstr_len] = identifier[len] ( identifier[cstr] )
identifier[prev_cstr_len] = identifier[len] ( identifier[self] . identifier[_prev_cstr] )
identifier[num_spaces] = literal[int]
keyword[if] identifier[cstr_len] < identifier[prev_cstr_len] :
identifier[num_spaces] = identifier[abs] ( identifier[prev_cstr_len] - identifier[cstr_len] )
keyword[try] :
identifier[print] ( identifier[cstr] + literal[string] * identifier[num_spaces] , identifier[end] = literal[string] )
identifier[self] . identifier[_prev_cstr] = identifier[cstr]
keyword[except] identifier[UnicodeEncodeError] :
identifier[print] ( literal[string] , identifier[end] = literal[string] )
identifier[self] . identifier[_prev_cstr] = literal[string] | def cprint(self, cstr):
"""
Clear line, then reprint on same line
:param cstr: string to print on current line
"""
cstr = str(cstr) # Force it to be a string
cstr_len = len(cstr)
prev_cstr_len = len(self._prev_cstr)
num_spaces = 0
if cstr_len < prev_cstr_len:
num_spaces = abs(prev_cstr_len - cstr_len) # depends on [control=['if'], data=['cstr_len', 'prev_cstr_len']]
try:
print(cstr + ' ' * num_spaces, end='\r')
self._prev_cstr = cstr # depends on [control=['try'], data=[]]
except UnicodeEncodeError:
print('Processing...', end='\r')
self._prev_cstr = 'Processing...' # depends on [control=['except'], data=[]] |
def build_command_tree(pattern, cmd_params):
"""
Recursively fill in a command tree in cmd_params according to a docopt-parsed "pattern" object.
"""
from docopt import Either, Optional, OneOrMore, Required, Option, Command, Argument
if type(pattern) in [Either, Optional, OneOrMore]:
for child in pattern.children:
build_command_tree(child, cmd_params)
elif type(pattern) in [Required]:
for child in pattern.children:
cmd_params = build_command_tree(child, cmd_params)
elif type(pattern) in [Option]:
suffix = "=" if pattern.argcount else ""
if pattern.short:
cmd_params.options.append(pattern.short + suffix)
if pattern.long:
cmd_params.options.append(pattern.long + suffix)
elif type(pattern) in [Command]:
cmd_params = cmd_params.get_subcommand(pattern.name)
elif type(pattern) in [Argument]:
cmd_params.arguments.append(pattern.name)
return cmd_params | def function[build_command_tree, parameter[pattern, cmd_params]]:
constant[
Recursively fill in a command tree in cmd_params according to a docopt-parsed "pattern" object.
]
from relative_module[docopt] import module[Either], module[Optional], module[OneOrMore], module[Required], module[Option], module[Command], module[Argument]
if compare[call[name[type], parameter[name[pattern]]] in list[[<ast.Name object at 0x7da2044c1e40>, <ast.Name object at 0x7da2044c3280>, <ast.Name object at 0x7da2044c0490>]]] begin[:]
for taget[name[child]] in starred[name[pattern].children] begin[:]
call[name[build_command_tree], parameter[name[child], name[cmd_params]]]
return[name[cmd_params]] | keyword[def] identifier[build_command_tree] ( identifier[pattern] , identifier[cmd_params] ):
literal[string]
keyword[from] identifier[docopt] keyword[import] identifier[Either] , identifier[Optional] , identifier[OneOrMore] , identifier[Required] , identifier[Option] , identifier[Command] , identifier[Argument]
keyword[if] identifier[type] ( identifier[pattern] ) keyword[in] [ identifier[Either] , identifier[Optional] , identifier[OneOrMore] ]:
keyword[for] identifier[child] keyword[in] identifier[pattern] . identifier[children] :
identifier[build_command_tree] ( identifier[child] , identifier[cmd_params] )
keyword[elif] identifier[type] ( identifier[pattern] ) keyword[in] [ identifier[Required] ]:
keyword[for] identifier[child] keyword[in] identifier[pattern] . identifier[children] :
identifier[cmd_params] = identifier[build_command_tree] ( identifier[child] , identifier[cmd_params] )
keyword[elif] identifier[type] ( identifier[pattern] ) keyword[in] [ identifier[Option] ]:
identifier[suffix] = literal[string] keyword[if] identifier[pattern] . identifier[argcount] keyword[else] literal[string]
keyword[if] identifier[pattern] . identifier[short] :
identifier[cmd_params] . identifier[options] . identifier[append] ( identifier[pattern] . identifier[short] + identifier[suffix] )
keyword[if] identifier[pattern] . identifier[long] :
identifier[cmd_params] . identifier[options] . identifier[append] ( identifier[pattern] . identifier[long] + identifier[suffix] )
keyword[elif] identifier[type] ( identifier[pattern] ) keyword[in] [ identifier[Command] ]:
identifier[cmd_params] = identifier[cmd_params] . identifier[get_subcommand] ( identifier[pattern] . identifier[name] )
keyword[elif] identifier[type] ( identifier[pattern] ) keyword[in] [ identifier[Argument] ]:
identifier[cmd_params] . identifier[arguments] . identifier[append] ( identifier[pattern] . identifier[name] )
keyword[return] identifier[cmd_params] | def build_command_tree(pattern, cmd_params):
"""
Recursively fill in a command tree in cmd_params according to a docopt-parsed "pattern" object.
"""
from docopt import Either, Optional, OneOrMore, Required, Option, Command, Argument
if type(pattern) in [Either, Optional, OneOrMore]:
for child in pattern.children:
build_command_tree(child, cmd_params) # depends on [control=['for'], data=['child']] # depends on [control=['if'], data=[]]
elif type(pattern) in [Required]:
for child in pattern.children:
cmd_params = build_command_tree(child, cmd_params) # depends on [control=['for'], data=['child']] # depends on [control=['if'], data=[]]
elif type(pattern) in [Option]:
suffix = '=' if pattern.argcount else ''
if pattern.short:
cmd_params.options.append(pattern.short + suffix) # depends on [control=['if'], data=[]]
if pattern.long:
cmd_params.options.append(pattern.long + suffix) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif type(pattern) in [Command]:
cmd_params = cmd_params.get_subcommand(pattern.name) # depends on [control=['if'], data=[]]
elif type(pattern) in [Argument]:
cmd_params.arguments.append(pattern.name) # depends on [control=['if'], data=[]]
return cmd_params |
def check_grammar(self, ok_start_symbols = set(),
out=sys.stderr):
'''
Check grammar for:
- unused left-hand side nonterminals that are neither start symbols
or listed in ok_start_symbols
- unused right-hand side nonterminals, i.e. not tokens
- right-recursive rules. These can slow down parsing.
'''
warnings = 0
(lhs, rhs, tokens, right_recursive,
dup_rhs) = self.check_sets()
if lhs - ok_start_symbols:
warnings += 1
out.write("LHS symbols not used on the RHS:\n")
out.write(" " + (', '.join(sorted(lhs)) + "\n"))
if rhs:
warnings += 1
out.write("RHS symbols not used on the LHS:\n")
out.write((', '.join(sorted(rhs))) + "\n" )
if right_recursive:
warnings += 1
out.write("Right recursive rules:\n")
for rule in sorted(right_recursive):
out.write(" %s ::= %s\n" % (rule[0], ' '.join(rule[1])))
pass
pass
if dup_rhs:
warnings += 1
out.write("Nonterminals with the same RHS\n")
for rhs in sorted(dup_rhs.keys()):
out.write(" RHS: %s\n" % ' '.join(rhs))
out.write(" LHS: %s\n" % ', '.join(dup_rhs[rhs]))
out.write(" ---\n")
pass
pass
return warnings | def function[check_grammar, parameter[self, ok_start_symbols, out]]:
constant[
Check grammar for:
- unused left-hand side nonterminals that are neither start symbols
or listed in ok_start_symbols
- unused right-hand side nonterminals, i.e. not tokens
- right-recursive rules. These can slow down parsing.
]
variable[warnings] assign[=] constant[0]
<ast.Tuple object at 0x7da20c7c9a50> assign[=] call[name[self].check_sets, parameter[]]
if binary_operation[name[lhs] - name[ok_start_symbols]] begin[:]
<ast.AugAssign object at 0x7da20c7cae00>
call[name[out].write, parameter[constant[LHS symbols not used on the RHS:
]]]
call[name[out].write, parameter[binary_operation[constant[ ] + binary_operation[call[constant[, ].join, parameter[call[name[sorted], parameter[name[lhs]]]]] + constant[
]]]]]
if name[rhs] begin[:]
<ast.AugAssign object at 0x7da20c7c9f00>
call[name[out].write, parameter[constant[RHS symbols not used on the LHS:
]]]
call[name[out].write, parameter[binary_operation[call[constant[, ].join, parameter[call[name[sorted], parameter[name[rhs]]]]] + constant[
]]]]
if name[right_recursive] begin[:]
<ast.AugAssign object at 0x7da20c7c9ab0>
call[name[out].write, parameter[constant[Right recursive rules:
]]]
for taget[name[rule]] in starred[call[name[sorted], parameter[name[right_recursive]]]] begin[:]
call[name[out].write, parameter[binary_operation[constant[ %s ::= %s
] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Subscript object at 0x7da20c7cb250>, <ast.Call object at 0x7da20c7c9930>]]]]]
pass
pass
if name[dup_rhs] begin[:]
<ast.AugAssign object at 0x7da20c7c8fd0>
call[name[out].write, parameter[constant[Nonterminals with the same RHS
]]]
for taget[name[rhs]] in starred[call[name[sorted], parameter[call[name[dup_rhs].keys, parameter[]]]]] begin[:]
call[name[out].write, parameter[binary_operation[constant[ RHS: %s
] <ast.Mod object at 0x7da2590d6920> call[constant[ ].join, parameter[name[rhs]]]]]]
call[name[out].write, parameter[binary_operation[constant[ LHS: %s
] <ast.Mod object at 0x7da2590d6920> call[constant[, ].join, parameter[call[name[dup_rhs]][name[rhs]]]]]]]
call[name[out].write, parameter[constant[ ---
]]]
pass
pass
return[name[warnings]] | keyword[def] identifier[check_grammar] ( identifier[self] , identifier[ok_start_symbols] = identifier[set] (),
identifier[out] = identifier[sys] . identifier[stderr] ):
literal[string]
identifier[warnings] = literal[int]
( identifier[lhs] , identifier[rhs] , identifier[tokens] , identifier[right_recursive] ,
identifier[dup_rhs] )= identifier[self] . identifier[check_sets] ()
keyword[if] identifier[lhs] - identifier[ok_start_symbols] :
identifier[warnings] += literal[int]
identifier[out] . identifier[write] ( literal[string] )
identifier[out] . identifier[write] ( literal[string] +( literal[string] . identifier[join] ( identifier[sorted] ( identifier[lhs] ))+ literal[string] ))
keyword[if] identifier[rhs] :
identifier[warnings] += literal[int]
identifier[out] . identifier[write] ( literal[string] )
identifier[out] . identifier[write] (( literal[string] . identifier[join] ( identifier[sorted] ( identifier[rhs] )))+ literal[string] )
keyword[if] identifier[right_recursive] :
identifier[warnings] += literal[int]
identifier[out] . identifier[write] ( literal[string] )
keyword[for] identifier[rule] keyword[in] identifier[sorted] ( identifier[right_recursive] ):
identifier[out] . identifier[write] ( literal[string] %( identifier[rule] [ literal[int] ], literal[string] . identifier[join] ( identifier[rule] [ literal[int] ])))
keyword[pass]
keyword[pass]
keyword[if] identifier[dup_rhs] :
identifier[warnings] += literal[int]
identifier[out] . identifier[write] ( literal[string] )
keyword[for] identifier[rhs] keyword[in] identifier[sorted] ( identifier[dup_rhs] . identifier[keys] ()):
identifier[out] . identifier[write] ( literal[string] % literal[string] . identifier[join] ( identifier[rhs] ))
identifier[out] . identifier[write] ( literal[string] % literal[string] . identifier[join] ( identifier[dup_rhs] [ identifier[rhs] ]))
identifier[out] . identifier[write] ( literal[string] )
keyword[pass]
keyword[pass]
keyword[return] identifier[warnings] | def check_grammar(self, ok_start_symbols=set(), out=sys.stderr):
"""
Check grammar for:
- unused left-hand side nonterminals that are neither start symbols
or listed in ok_start_symbols
- unused right-hand side nonterminals, i.e. not tokens
- right-recursive rules. These can slow down parsing.
"""
warnings = 0
(lhs, rhs, tokens, right_recursive, dup_rhs) = self.check_sets()
if lhs - ok_start_symbols:
warnings += 1
out.write('LHS symbols not used on the RHS:\n')
out.write(' ' + (', '.join(sorted(lhs)) + '\n')) # depends on [control=['if'], data=[]]
if rhs:
warnings += 1
out.write('RHS symbols not used on the LHS:\n')
out.write(', '.join(sorted(rhs)) + '\n') # depends on [control=['if'], data=[]]
if right_recursive:
warnings += 1
out.write('Right recursive rules:\n')
for rule in sorted(right_recursive):
out.write(' %s ::= %s\n' % (rule[0], ' '.join(rule[1])))
pass # depends on [control=['for'], data=['rule']]
pass # depends on [control=['if'], data=[]]
if dup_rhs:
warnings += 1
out.write('Nonterminals with the same RHS\n')
for rhs in sorted(dup_rhs.keys()):
out.write(' RHS: %s\n' % ' '.join(rhs))
out.write(' LHS: %s\n' % ', '.join(dup_rhs[rhs]))
out.write(' ---\n')
pass # depends on [control=['for'], data=['rhs']]
pass # depends on [control=['if'], data=[]]
return warnings |
def _tp_finder(self, dcycle): # Private routine
    """
    Find thermal pulses in the given star.

    The routine looks for the C/O ratio repeatedly jumping up, so it is
    only useful for a TP-AGB star.  A vector is given back that
    indicates the position of the cycle that is at 95% of each thermal
    pulse interval (to make sure it's not in the next one and that most
    of the processing is done), together with the C/O ratio at each
    detected pulse.

    Parameters
    ----------
    dcycle : int
        Stride (in cycles) used when sampling the abundance data; pulse
        detection operates on every ``dcycle``-th cycle only.

    Returns
    -------
    tp_pos : list of int
        Cycle numbers at 95% of each inter-pulse interval.
    co_return : ndarray
        C/O ratio at each detected pulse start -- the number fraction
        scaled by 15.9994/12.0107 (presumably converting to a mass
        ratio; confirm against callers).
    """
    # Sample C and O isotope abundances every `dcycle` cycles up to the
    # last available cycle.
    last_cycle = int(self.se.cycles[len(self.se.cycles)-1])
    cyc_tp = list(range(1,last_cycle + dcycle, dcycle))
    all_data = array(self.get(cyc_tp,['C-12','C-13','O-16','O-17','O-18']))
    c_nf = np.zeros(len(all_data))
    o_nf = np.zeros(len(all_data))
    for i in range(len(all_data)):
        # total carbon / total oxygen number fraction per sampled cycle
        c_nf[i] = all_data[i][0] + all_data[i][1]
        o_nf[i] = all_data[i][2] + all_data[i][3] + all_data[i][4]
    # search for thermal pulses via jumps in the C/O ratio
    co_ratio = (old_div(c_nf, o_nf)) * 15.9994 / 12.0107
    tp_guess = 200 # this should be an upper limit!
    tp_guess_max = 200 # to throw an error if exceeded
    # guess variables: gi is the actual break criterion, gn a max counter
    gi = 0
    gn = 0
    # Adaptive search: detect pulse starts using a slope threshold that
    # depends on tp_guess; if the detected count is not within a factor
    # of ~2 of the guess, halve the guess and retry (max 10000 rounds).
    while gi != 1 and gn < 10000:
        tp_ind = list()
        i = 0
        while i < len(co_ratio)-2:
            # threshold and the two local forward slopes of the C/O curve
            gcompar= old_div(1., (dcycle*tp_guess*100.))
            slope1 = old_div((co_ratio[i+1]-co_ratio[i]),(dcycle))
            slope2 = old_div((co_ratio[i+2]-co_ratio[i+1]),dcycle)
            # pulse start signature: steep rise followed by flattening
            if slope1 > gcompar and slope2 < gcompar and co_ratio[i+1] > co_ratio[i]:
                tp_ind.append(i+1)
                i += 3 # jump three cycles to avoid defining a single cycle twice!
            else:
                i += 1
        if abs(len(tp_ind) - tp_guess) < old_div(tp_guess,2): # gotta be within factor two of guess
            gi = 1
        else:
            gn += 1
            tp_guess /= 2
    # check w/ maximum of thermal pulses allowed
    if len(tp_ind) > tp_guess_max:
        print('Problem detected with number of pulses')
    # cycle number at which each detected pulse starts
    tp_startf = zeros(len(tp_ind)) # found start
    for i in range(len(tp_startf)):
        tp_startf[i] = cyc_tp[tp_ind[i]]
    # interval limits: each pulse start, closed off by the final cycle
    tp_limits = zeros(len(tp_startf)+1)
    for i in range(len(tp_startf)):
        tp_limits[i] = tp_startf[i]
    tp_limits[len(tp_limits)-1] = int(self.se.cycles[len(self.se.cycles)-1])
    # thermal pulse position (where to read the isotope ratio): 95% into
    # each inter-pulse interval
    tp_pos = list()
    for i in range(len(tp_startf)):
        tp_pos.append(int(tp_limits[i] + 0.95 * (tp_limits[i+1] - tp_limits[i])))
    # C/O ratio at each pulse.
    # NOTE(review): this reads co_ratio at the pulse-start index tp_ind,
    # not at the 95% position tp_pos computed above -- confirm intended.
    co_return = zeros(len(tp_pos))
    for i in range(len(tp_pos)):
        co_return[i] = co_ratio[tp_ind[i]]
    # return the two vectors
    return tp_pos,co_return
constant[
Routine to find thermal pulses in given star and returns an
index vector that gives the cycle number in which the thermal
pulse occure.
The routine looks for the C/O ratio jumping up and up, so only
useful in TP-AGB star. A vector is given back that indicates
the position of the cycle that is at 95% of the thermal pulse
(to make sure it's not in the next one and that most of the
processing is done). The script also returns the co_ratio
vector - the C/O ratio (number fraction) at the given thermal
pulse.
]
variable[last_cycle] assign[=] call[name[int], parameter[call[name[self].se.cycles][binary_operation[call[name[len], parameter[name[self].se.cycles]] - constant[1]]]]]
variable[cyc_tp] assign[=] call[name[list], parameter[call[name[range], parameter[constant[1], binary_operation[name[last_cycle] + name[dcycle]], name[dcycle]]]]]
variable[all_data] assign[=] call[name[array], parameter[call[name[self].get, parameter[name[cyc_tp], list[[<ast.Constant object at 0x7da20c990ee0>, <ast.Constant object at 0x7da20c993ca0>, <ast.Constant object at 0x7da20c990730>, <ast.Constant object at 0x7da20c991de0>, <ast.Constant object at 0x7da20c9915a0>]]]]]]
variable[c_nf] assign[=] call[name[np].zeros, parameter[call[name[len], parameter[name[all_data]]]]]
variable[o_nf] assign[=] call[name[np].zeros, parameter[call[name[len], parameter[name[all_data]]]]]
for taget[name[i]] in starred[call[name[range], parameter[call[name[len], parameter[name[all_data]]]]]] begin[:]
call[name[c_nf]][name[i]] assign[=] binary_operation[call[call[name[all_data]][name[i]]][constant[0]] + call[call[name[all_data]][name[i]]][constant[1]]]
call[name[o_nf]][name[i]] assign[=] binary_operation[binary_operation[call[call[name[all_data]][name[i]]][constant[2]] + call[call[name[all_data]][name[i]]][constant[3]]] + call[call[name[all_data]][name[i]]][constant[4]]]
variable[co_ratio] assign[=] binary_operation[binary_operation[call[name[old_div], parameter[name[c_nf], name[o_nf]]] * constant[15.9994]] / constant[12.0107]]
variable[tp_guess] assign[=] constant[200]
variable[tp_guess_max] assign[=] constant[200]
variable[gi] assign[=] constant[0]
variable[gn] assign[=] constant[0]
while <ast.BoolOp object at 0x7da20c991360> begin[:]
variable[tp_ind] assign[=] call[name[list], parameter[]]
variable[i] assign[=] constant[0]
while compare[name[i] less[<] binary_operation[call[name[len], parameter[name[co_ratio]]] - constant[2]]] begin[:]
variable[gcompar] assign[=] call[name[old_div], parameter[constant[1.0], binary_operation[binary_operation[name[dcycle] * name[tp_guess]] * constant[100.0]]]]
variable[slope1] assign[=] call[name[old_div], parameter[binary_operation[call[name[co_ratio]][binary_operation[name[i] + constant[1]]] - call[name[co_ratio]][name[i]]], name[dcycle]]]
variable[slope2] assign[=] call[name[old_div], parameter[binary_operation[call[name[co_ratio]][binary_operation[name[i] + constant[2]]] - call[name[co_ratio]][binary_operation[name[i] + constant[1]]]], name[dcycle]]]
if <ast.BoolOp object at 0x7da18f810f40> begin[:]
call[name[tp_ind].append, parameter[binary_operation[name[i] + constant[1]]]]
<ast.AugAssign object at 0x7da18f813730>
if compare[call[name[abs], parameter[binary_operation[call[name[len], parameter[name[tp_ind]]] - name[tp_guess]]]] less[<] call[name[old_div], parameter[name[tp_guess], constant[2]]]] begin[:]
variable[gi] assign[=] constant[1]
if compare[call[name[len], parameter[name[tp_ind]]] greater[>] name[tp_guess_max]] begin[:]
call[name[print], parameter[constant[Problem detected with number of pulses]]]
variable[tp_startf] assign[=] call[name[zeros], parameter[call[name[len], parameter[name[tp_ind]]]]]
for taget[name[i]] in starred[call[name[range], parameter[call[name[len], parameter[name[tp_startf]]]]]] begin[:]
call[name[tp_startf]][name[i]] assign[=] call[name[cyc_tp]][call[name[tp_ind]][name[i]]]
variable[tp_limits] assign[=] call[name[zeros], parameter[binary_operation[call[name[len], parameter[name[tp_startf]]] + constant[1]]]]
for taget[name[i]] in starred[call[name[range], parameter[call[name[len], parameter[name[tp_startf]]]]]] begin[:]
call[name[tp_limits]][name[i]] assign[=] call[name[tp_startf]][name[i]]
call[name[tp_limits]][binary_operation[call[name[len], parameter[name[tp_limits]]] - constant[1]]] assign[=] call[name[int], parameter[call[name[self].se.cycles][binary_operation[call[name[len], parameter[name[self].se.cycles]] - constant[1]]]]]
variable[tp_pos] assign[=] call[name[list], parameter[]]
for taget[name[i]] in starred[call[name[range], parameter[call[name[len], parameter[name[tp_startf]]]]]] begin[:]
call[name[tp_pos].append, parameter[call[name[int], parameter[binary_operation[call[name[tp_limits]][name[i]] + binary_operation[constant[0.95] * binary_operation[call[name[tp_limits]][binary_operation[name[i] + constant[1]]] - call[name[tp_limits]][name[i]]]]]]]]]
variable[co_return] assign[=] call[name[zeros], parameter[call[name[len], parameter[name[tp_pos]]]]]
for taget[name[i]] in starred[call[name[range], parameter[call[name[len], parameter[name[tp_pos]]]]]] begin[:]
call[name[co_return]][name[i]] assign[=] call[name[co_ratio]][call[name[tp_ind]][name[i]]]
return[tuple[[<ast.Name object at 0x7da1b1a8e710>, <ast.Name object at 0x7da1b1a8f910>]]] | keyword[def] identifier[_tp_finder] ( identifier[self] , identifier[dcycle] ):
literal[string]
identifier[last_cycle] = identifier[int] ( identifier[self] . identifier[se] . identifier[cycles] [ identifier[len] ( identifier[self] . identifier[se] . identifier[cycles] )- literal[int] ])
identifier[cyc_tp] = identifier[list] ( identifier[range] ( literal[int] , identifier[last_cycle] + identifier[dcycle] , identifier[dcycle] ))
identifier[all_data] = identifier[array] ( identifier[self] . identifier[get] ( identifier[cyc_tp] ,[ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ]))
identifier[c_nf] = identifier[np] . identifier[zeros] ( identifier[len] ( identifier[all_data] ))
identifier[o_nf] = identifier[np] . identifier[zeros] ( identifier[len] ( identifier[all_data] ))
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[all_data] )):
identifier[c_nf] [ identifier[i] ]= identifier[all_data] [ identifier[i] ][ literal[int] ]+ identifier[all_data] [ identifier[i] ][ literal[int] ]
identifier[o_nf] [ identifier[i] ]= identifier[all_data] [ identifier[i] ][ literal[int] ]+ identifier[all_data] [ identifier[i] ][ literal[int] ]+ identifier[all_data] [ identifier[i] ][ literal[int] ]
identifier[co_ratio] =( identifier[old_div] ( identifier[c_nf] , identifier[o_nf] ))* literal[int] / literal[int]
identifier[tp_guess] = literal[int]
identifier[tp_guess_max] = literal[int]
identifier[gi] = literal[int]
identifier[gn] = literal[int]
keyword[while] identifier[gi] != literal[int] keyword[and] identifier[gn] < literal[int] :
identifier[tp_ind] = identifier[list] ()
identifier[i] = literal[int]
keyword[while] identifier[i] < identifier[len] ( identifier[co_ratio] )- literal[int] :
identifier[gcompar] = identifier[old_div] ( literal[int] ,( identifier[dcycle] * identifier[tp_guess] * literal[int] ))
identifier[slope1] = identifier[old_div] (( identifier[co_ratio] [ identifier[i] + literal[int] ]- identifier[co_ratio] [ identifier[i] ]),( identifier[dcycle] ))
identifier[slope2] = identifier[old_div] (( identifier[co_ratio] [ identifier[i] + literal[int] ]- identifier[co_ratio] [ identifier[i] + literal[int] ]), identifier[dcycle] )
keyword[if] identifier[slope1] > identifier[gcompar] keyword[and] identifier[slope2] < identifier[gcompar] keyword[and] identifier[co_ratio] [ identifier[i] + literal[int] ]> identifier[co_ratio] [ identifier[i] ]:
identifier[tp_ind] . identifier[append] ( identifier[i] + literal[int] )
identifier[i] += literal[int]
keyword[else] :
identifier[i] += literal[int]
keyword[if] identifier[abs] ( identifier[len] ( identifier[tp_ind] )- identifier[tp_guess] )< identifier[old_div] ( identifier[tp_guess] , literal[int] ):
identifier[gi] = literal[int]
keyword[else] :
identifier[gn] += literal[int]
identifier[tp_guess] /= literal[int]
keyword[if] identifier[len] ( identifier[tp_ind] )> identifier[tp_guess_max] :
identifier[print] ( literal[string] )
identifier[tp_startf] = identifier[zeros] ( identifier[len] ( identifier[tp_ind] ))
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[tp_startf] )):
identifier[tp_startf] [ identifier[i] ]= identifier[cyc_tp] [ identifier[tp_ind] [ identifier[i] ]]
identifier[tp_limits] = identifier[zeros] ( identifier[len] ( identifier[tp_startf] )+ literal[int] )
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[tp_startf] )):
identifier[tp_limits] [ identifier[i] ]= identifier[tp_startf] [ identifier[i] ]
identifier[tp_limits] [ identifier[len] ( identifier[tp_limits] )- literal[int] ]= identifier[int] ( identifier[self] . identifier[se] . identifier[cycles] [ identifier[len] ( identifier[self] . identifier[se] . identifier[cycles] )- literal[int] ])
identifier[tp_pos] = identifier[list] ()
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[tp_startf] )):
identifier[tp_pos] . identifier[append] ( identifier[int] ( identifier[tp_limits] [ identifier[i] ]+ literal[int] *( identifier[tp_limits] [ identifier[i] + literal[int] ]- identifier[tp_limits] [ identifier[i] ])))
identifier[co_return] = identifier[zeros] ( identifier[len] ( identifier[tp_pos] ))
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[tp_pos] )):
identifier[co_return] [ identifier[i] ]= identifier[co_ratio] [ identifier[tp_ind] [ identifier[i] ]]
keyword[return] identifier[tp_pos] , identifier[co_return] | def _tp_finder(self, dcycle): # Private routine
"\n Routine to find thermal pulses in given star and returns an\n index vector that gives the cycle number in which the thermal\n pulse occure.\n\n The routine looks for the C/O ratio jumping up and up, so only\n useful in TP-AGB star. A vector is given back that indicates\n the position of the cycle that is at 95% of the thermal pulse\n (to make sure it's not in the next one and that most of the\n processing is done). The script also returns the co_ratio\n vector - the C/O ratio (number fraction) at the given thermal\n pulse.\n\n "
# read in c and o isotopes for all cycles, regarding deltacycle
last_cycle = int(self.se.cycles[len(self.se.cycles) - 1])
cyc_tp = list(range(1, last_cycle + dcycle, dcycle))
all_data = array(self.get(cyc_tp, ['C-12', 'C-13', 'O-16', 'O-17', 'O-18']))
c_nf = np.zeros(len(all_data))
o_nf = np.zeros(len(all_data))
for i in range(len(all_data)):
c_nf[i] = all_data[i][0] + all_data[i][1]
o_nf[i] = all_data[i][2] + all_data[i][3] + all_data[i][4] # depends on [control=['for'], data=['i']]
# search for thermal pulses
co_ratio = old_div(c_nf, o_nf) * 15.9994 / 12.0107
tp_guess = 200 # this should be an upper limit!
tp_guess_max = 200 # to through an error
# guess variables, i is the actual break criterion, n a max counter
gi = 0
gn = 0
while gi != 1 and gn < 10000:
tp_ind = list()
i = 0
while i < len(co_ratio) - 2:
gcompar = old_div(1.0, dcycle * tp_guess * 100.0)
slope1 = old_div(co_ratio[i + 1] - co_ratio[i], dcycle)
slope2 = old_div(co_ratio[i + 2] - co_ratio[i + 1], dcycle)
if slope1 > gcompar and slope2 < gcompar and (co_ratio[i + 1] > co_ratio[i]):
tp_ind.append(i + 1)
i += 3 # jump three cycles to avoid defining a single cycle twice! # depends on [control=['if'], data=[]]
else:
i += 1 # depends on [control=['while'], data=['i']]
if abs(len(tp_ind) - tp_guess) < old_div(tp_guess, 2): # gotta be within factor two of guess
gi = 1 # depends on [control=['if'], data=[]]
else:
gn += 1
tp_guess /= 2 # depends on [control=['while'], data=[]]
# check w/ maximum of thermal pulses allowed
if len(tp_ind) > tp_guess_max:
print('Problem detected with number of pulses') # depends on [control=['if'], data=[]]
# create thermal pulse vector
tp_startf = zeros(len(tp_ind)) # found start
for i in range(len(tp_startf)):
tp_startf[i] = cyc_tp[tp_ind[i]] # depends on [control=['for'], data=['i']]
# read out isotopic composition at 95% of the thermal pulse and the initial of the star
# set up thermal pulse positions
tp_limits = zeros(len(tp_startf) + 1)
for i in range(len(tp_startf)):
tp_limits[i] = tp_startf[i] # depends on [control=['for'], data=['i']]
tp_limits[len(tp_limits) - 1] = int(self.se.cycles[len(self.se.cycles) - 1])
# thermal pulse position (where to read the isotope ratio)
tp_pos = list()
for i in range(len(tp_startf)):
tp_pos.append(int(tp_limits[i] + 0.95 * (tp_limits[i + 1] - tp_limits[i]))) # depends on [control=['for'], data=['i']]
# create co_ret vector to return c/o ratio vector
co_return = zeros(len(tp_pos))
for i in range(len(tp_pos)):
co_return[i] = co_ratio[tp_ind[i]] # depends on [control=['for'], data=['i']]
# return the two vectors
return (tp_pos, co_return) |
def is_readable(path):
    """Check whether *path* exists and is readable by the current user.

    Returns True if the file or directory at ``path`` can be read,
    False otherwise.
    """
    absolute = os.path.abspath(path)
    return os.access(absolute, os.R_OK)
constant[
Returns True if provided file or directory exists and can be read with the current user.
Returns False otherwise.
]
return[call[name[os].access, parameter[call[name[os].path.abspath, parameter[name[path]]], name[os].R_OK]]] | keyword[def] identifier[is_readable] ( identifier[path] ):
literal[string]
keyword[return] identifier[os] . identifier[access] ( identifier[os] . identifier[path] . identifier[abspath] ( identifier[path] ), identifier[os] . identifier[R_OK] ) | def is_readable(path):
"""
Returns True if provided file or directory exists and can be read with the current user.
Returns False otherwise.
"""
return os.access(os.path.abspath(path), os.R_OK) |
def srem(self, key, *values):
    """Emulate srem: remove *values* from the set stored at *key*.

    Returns the number of members actually removed; the key is deleted
    once the set becomes empty (mirroring Redis semantics).
    """
    members = self._get_set(key, 'SREM')
    if not members:
        return 0
    size_before = len(members)
    for item in values:
        members.discard(self._encode(item))
    size_after = len(members)
    # Redis drops the key entirely when its set empties out.
    if size_before > 0 and size_after == 0:
        self.delete(key)
    return size_before - size_after
constant[Emulate srem.]
variable[redis_set] assign[=] call[name[self]._get_set, parameter[name[key], constant[SREM]]]
if <ast.UnaryOp object at 0x7da18ede6d40> begin[:]
return[constant[0]]
variable[before_count] assign[=] call[name[len], parameter[name[redis_set]]]
for taget[name[value]] in starred[name[values]] begin[:]
call[name[redis_set].discard, parameter[call[name[self]._encode, parameter[name[value]]]]]
variable[after_count] assign[=] call[name[len], parameter[name[redis_set]]]
if <ast.BoolOp object at 0x7da18ede7d90> begin[:]
call[name[self].delete, parameter[name[key]]]
return[binary_operation[name[before_count] - name[after_count]]] | keyword[def] identifier[srem] ( identifier[self] , identifier[key] ,* identifier[values] ):
literal[string]
identifier[redis_set] = identifier[self] . identifier[_get_set] ( identifier[key] , literal[string] )
keyword[if] keyword[not] identifier[redis_set] :
keyword[return] literal[int]
identifier[before_count] = identifier[len] ( identifier[redis_set] )
keyword[for] identifier[value] keyword[in] identifier[values] :
identifier[redis_set] . identifier[discard] ( identifier[self] . identifier[_encode] ( identifier[value] ))
identifier[after_count] = identifier[len] ( identifier[redis_set] )
keyword[if] identifier[before_count] > literal[int] keyword[and] identifier[len] ( identifier[redis_set] )== literal[int] :
identifier[self] . identifier[delete] ( identifier[key] )
keyword[return] identifier[before_count] - identifier[after_count] | def srem(self, key, *values):
"""Emulate srem."""
redis_set = self._get_set(key, 'SREM')
if not redis_set:
return 0 # depends on [control=['if'], data=[]]
before_count = len(redis_set)
for value in values:
redis_set.discard(self._encode(value)) # depends on [control=['for'], data=['value']]
after_count = len(redis_set)
if before_count > 0 and len(redis_set) == 0:
self.delete(key) # depends on [control=['if'], data=[]]
return before_count - after_count |
def scale_degree_to_semitone(scale_degree):
    r"""Convert a scale degree to semitone.

    Parameters
    ----------
    scale degree : str
        Spelling of a relative scale degree, e.g. 'b3', '7', '#5'

    Returns
    -------
    semitone : int
        Relative semitone of the scale degree, wrapped to a single octave

    Raises
    ------
    InvalidChordException if `scale_degree` is invalid.
    """
    accidental_shift = 0
    if scale_degree.startswith('#'):
        # each sharp raises the pitch by one semitone
        accidental_shift = scale_degree.count('#')
        scale_degree = scale_degree.strip('#')
    elif scale_degree.startswith('b'):
        # each flat lowers the pitch by one semitone
        accidental_shift = -scale_degree.count('b')
        scale_degree = scale_degree.strip('b')
    base_semitone = SCALE_DEGREES.get(scale_degree, None)
    if base_semitone is None:
        raise InvalidChordException(
            "Scale degree improperly formed: {}, expected one of {}."
            .format(scale_degree, list(SCALE_DEGREES.keys())))
    return base_semitone + accidental_shift
constant[Convert a scale degree to semitone.
Parameters
----------
scale degree : str
Spelling of a relative scale degree, e.g. 'b3', '7', '#5'
Returns
-------
semitone : int
Relative semitone of the scale degree, wrapped to a single octave
Raises
------
InvalidChordException if `scale_degree` is invalid.
]
variable[semitone] assign[=] constant[0]
variable[offset] assign[=] constant[0]
if call[name[scale_degree].startswith, parameter[constant[#]]] begin[:]
variable[offset] assign[=] call[name[scale_degree].count, parameter[constant[#]]]
variable[scale_degree] assign[=] call[name[scale_degree].strip, parameter[constant[#]]]
variable[semitone] assign[=] call[name[SCALE_DEGREES].get, parameter[name[scale_degree], constant[None]]]
if compare[name[semitone] is constant[None]] begin[:]
<ast.Raise object at 0x7da18f723f40>
return[binary_operation[name[semitone] + name[offset]]] | keyword[def] identifier[scale_degree_to_semitone] ( identifier[scale_degree] ):
literal[string]
identifier[semitone] = literal[int]
identifier[offset] = literal[int]
keyword[if] identifier[scale_degree] . identifier[startswith] ( literal[string] ):
identifier[offset] = identifier[scale_degree] . identifier[count] ( literal[string] )
identifier[scale_degree] = identifier[scale_degree] . identifier[strip] ( literal[string] )
keyword[elif] identifier[scale_degree] . identifier[startswith] ( literal[string] ):
identifier[offset] =- literal[int] * identifier[scale_degree] . identifier[count] ( literal[string] )
identifier[scale_degree] = identifier[scale_degree] . identifier[strip] ( literal[string] )
identifier[semitone] = identifier[SCALE_DEGREES] . identifier[get] ( identifier[scale_degree] , keyword[None] )
keyword[if] identifier[semitone] keyword[is] keyword[None] :
keyword[raise] identifier[InvalidChordException] (
literal[string]
. identifier[format] ( identifier[scale_degree] , identifier[list] ( identifier[SCALE_DEGREES] . identifier[keys] ())))
keyword[return] identifier[semitone] + identifier[offset] | def scale_degree_to_semitone(scale_degree):
"""Convert a scale degree to semitone.
Parameters
----------
scale degree : str
Spelling of a relative scale degree, e.g. 'b3', '7', '#5'
Returns
-------
semitone : int
Relative semitone of the scale degree, wrapped to a single octave
Raises
------
InvalidChordException if `scale_degree` is invalid.
"""
semitone = 0
offset = 0
if scale_degree.startswith('#'):
offset = scale_degree.count('#')
scale_degree = scale_degree.strip('#') # depends on [control=['if'], data=[]]
elif scale_degree.startswith('b'):
offset = -1 * scale_degree.count('b')
scale_degree = scale_degree.strip('b') # depends on [control=['if'], data=[]]
semitone = SCALE_DEGREES.get(scale_degree, None)
if semitone is None:
raise InvalidChordException('Scale degree improperly formed: {}, expected one of {}.'.format(scale_degree, list(SCALE_DEGREES.keys()))) # depends on [control=['if'], data=[]]
return semitone + offset |
def hierarchical(df, cluster_cols=True, cluster_rows=False, n_col_clusters=False, n_row_clusters=False, row_labels=True, col_labels=True, fcol=None, z_score=0, method='ward', cmap=cm.PuOr_r, return_clusters=False, rdistance_fn=distance.pdist, cdistance_fn=distance.pdist ):
    """
    Hierarchical clustering of samples or proteins

    Perform a hierarchical clustering on a pandas DataFrame and display the
    resulting clustering as a heatmap.

    The axis of clustering can be controlled with `cluster_cols` and
    `cluster_rows`. By default clustering is performed along the X-axis,
    therefore to cluster samples transpose the DataFrame as it is passed,
    using `df.T`.

    Samples are z-scored along the 0-axis (y) by default. To override this
    use the `z_score` param with the axis to `z_score` or alternatively,
    `None`, to turn it off.

    If a `n_col_clusters` or `n_row_clusters` is specified, this defines the
    number of clusters to identify and highlight in the resulting heatmap.
    At *least* this number of clusters will be selected, in some instances
    there will be more if 2 clusters rank equally at the determined cutoff.

    If specified `fcol` will be used to colour the axes for matching samples.

    :param df: Pandas ``DataFrame`` to cluster
    :param cluster_cols: ``bool`` if ``True`` cluster along column axis
    :param cluster_rows: ``bool`` if ``True`` cluster along row axis
    :param n_col_clusters: ``int`` the ideal number of highlighted clusters in cols
    :param n_row_clusters: ``int`` the ideal number of highlighted clusters in rows
    :param row_labels: ``True`` to label rows with every index level, or a list of index-level positions to use
    :param col_labels: ``True`` to label columns with every column level, or a list of column-level positions to use
    :param fcol: ``dict`` of label:colors to be applied along the axes
    :param z_score: ``int`` to specify the axis to Z score or `None` to disable
    :param method: ``str`` describing cluster method, default ward
    :param cmap: matplotlib colourmap for heatmap
    :param return_clusters: ``bool`` return clusters in addition to axis
    :param rdistance_fn: row distance function (default ``scipy.spatial.distance.pdist``)
    :param cdistance_fn: column distance function (default ``scipy.spatial.distance.pdist``)
    :return: matplotlib figure; or (figure, clustered DataFrame, cluster edges) when `return_clusters` is ``True``
    """
    # helper for cleaning up axes by removing ticks, tick labels, frame, etc.
    def clean_axis(ax):
        """Remove ticks, tick labels, and frame from axis"""
        ax.get_xaxis().set_ticks([])
        ax.get_yaxis().set_ticks([])
        # NOTE(review): set_axis_bgcolor was removed in matplotlib >= 2.2
        # (replacement is set_facecolor) -- confirm pinned matplotlib version.
        ax.set_axis_bgcolor('#ffffff')
        for sp in ax.spines.values():
            sp.set_visible(False)

    # _cluster performs z-scoring and linkage, returning the processed data
    # plus row/col linkages, dendrogram dicts and highlighted-cluster edges.
    # NOTE(review): method='ward' is hard-coded here, silently ignoring the
    # `method` parameter -- confirm whether this is intended.
    dfc, row_clusters, row_denD, col_clusters, col_denD, edges = _cluster(df,
        cluster_cols=cluster_cols, cluster_rows=cluster_rows, n_col_clusters=n_col_clusters,
        n_row_clusters=n_row_clusters, z_score=z_score, method='ward',
        rdistance_fn=rdistance_fn, cdistance_fn=cdistance_fn
    )

    # build a colour norm symmetric around zero
    vmin = dfc.min().min()
    vmax = dfc.max().max()
    vmax = max([vmax, abs(vmin)]) # choose larger of vmin and vmax
    vmin = vmax * -1

    # dendrogram single color
    sch.set_link_color_palette(['black'])
    my_norm = mpl.colors.Normalize(vmin=vmin, vmax=vmax)

    # 2x2 layout: dendrogram strips on top/left, heatmap bottom-right
    fig = plt.figure(figsize=(12, 12))
    heatmapGS = gridspec.GridSpec(2, 2, wspace=0.0, hspace=0.0, width_ratios=[0.25, 1], height_ratios=[0.25, 1])

    if cluster_cols:
        # col dendrogram
        col_denAX = fig.add_subplot(heatmapGS[0, 1])
        sch.dendrogram(col_clusters, color_threshold=np.inf)
        clean_axis(col_denAX)

    # left strip split into row dendrogram + optional class colour bar
    rowGSSS = gridspec.GridSpecFromSubplotSpec(1, 2, subplot_spec=heatmapGS[1, 0], wspace=0.0, hspace=0.0, width_ratios=[1, 0.05])
    if cluster_rows:
        # row dendrogram
        row_denAX = fig.add_subplot(rowGSSS[0, 0])
        sch.dendrogram(row_clusters, color_threshold=np.inf, orientation='right')
        clean_axis(row_denAX)

    # row colorbar: per-sample class colours from fcol, keyed on the
    # 'Group' index level
    if fcol and 'Group' in dfc.index.names:
        class_idx = dfc.index.names.index('Group')
        # NOTE(review): class_idx is computed but colours are looked up on
        # index level 0 -- confirm 'Group' is always the first level.
        classcol = [fcol[x] for x in dfc.index.get_level_values(0)[row_denD['leaves']]]
        classrgb = np.array([colorConverter.to_rgb(c) for c in classcol]).reshape(-1, 1, 3)
        row_cbAX = fig.add_subplot(rowGSSS[0, 1])
        row_axi = row_cbAX.imshow(classrgb, interpolation='nearest', aspect='auto', origin='lower')
        clean_axis(row_cbAX)

    # heatmap: data reordered to match the dendrogram leaf order
    heatmapAX = fig.add_subplot(heatmapGS[1, 1])
    axi = heatmapAX.imshow(dfc.iloc[row_denD['leaves'], col_denD['leaves']], interpolation='nearest', aspect='auto', origin='lower'
        , norm=my_norm, cmap=cmap)
    clean_axis(heatmapAX)

    def build_labels(index, ixs):
        """Join the requested index levels into one space-separated label per entry."""
        zstr = zip(*[index.get_level_values(x) for x in ixs])
        return np.array([" ".join([str(t) for t in i]) if type(i) == tuple else str(i) for i in zstr])

    # row labels (only drawn for small matrices, to stay readable)
    if dfc.shape[0] <= 100:
        heatmapAX.set_yticks(range(dfc.shape[0]))
        heatmapAX.yaxis.set_ticks_position('right')
        if row_labels is True:
            # default: use every index level for the label
            row_labels = list(range(len(dfc.index.names)))
        ylabels = build_labels(dfc.index, row_labels)[row_denD['leaves']]
        heatmapAX.set_yticklabels(ylabels)

    # col labels (only drawn for small matrices)
    if dfc.shape[1] <= 100:
        heatmapAX.set_xticks(range(dfc.shape[1]))
        if col_labels is True:
            # default: use every column level for the label
            col_labels = list(range(len(dfc.columns.names)))
        xlabels = build_labels(dfc.columns, col_labels)[col_denD['leaves']]
        xlabelsL = heatmapAX.set_xticklabels(xlabels)
        # rotate labels 90 degrees
        for label in xlabelsL:
            label.set_rotation(90)

    # remove the tick lines
    for l in heatmapAX.get_xticklines() + heatmapAX.get_yticklines():
        l.set_markersize(0)
    heatmapAX.grid('off')

    # draw black separator lines at the highlighted cluster boundaries
    if cluster_cols and n_col_clusters:
        for edge in edges:
            heatmapAX.axvline(edge +0.5, color='k', lw=3)
    if cluster_rows and n_row_clusters:
        for edge in edges:
            heatmapAX.axhline(edge +0.5, color='k', lw=3)

    if return_clusters:
        return fig, dfc.iloc[row_denD['leaves'], col_denD['leaves']], edges
    else:
        return fig
constant[
Hierarchical clustering of samples or proteins
Peform a hiearchical clustering on a pandas DataFrame and display the resulting clustering as a
heatmap.
The axis of clustering can be controlled with `cluster_cols` and `cluster_rows`. By default clustering is performed
along the X-axis, therefore to cluster samples transpose the DataFrame as it is passed, using `df.T`.
Samples are z-scored along the 0-axis (y) by default. To override this use the `z_score` param with the axis to `z_score`
or alternatively, `None`, to turn it off.
If a `n_col_clusters` or `n_row_clusters` is specified, this defines the number of clusters to identify and highlight
in the resulting heatmap. At *least* this number of clusters will be selected, in some instances there will be more
if 2 clusters rank equally at the determined cutoff.
If specified `fcol` will be used to colour the axes for matching samples.
:param df: Pandas ``DataFrame`` to cluster
:param cluster_cols: ``bool`` if ``True`` cluster along column axis
:param cluster_rows: ``bool`` if ``True`` cluster along row axis
:param n_col_clusters: ``int`` the ideal number of highlighted clusters in cols
:param n_row_clusters: ``int`` the ideal number of highlighted clusters in rows
:param fcol: ``dict`` of label:colors to be applied along the axes
:param z_score: ``int`` to specify the axis to Z score or `None` to disable
:param method: ``str`` describing cluster method, default ward
:param cmap: matplotlib colourmap for heatmap
:param return_clusters: ``bool`` return clusters in addition to axis
:return: matplotlib axis, or axis and cluster data
]
def function[clean_axis, parameter[ax]]:
constant[Remove ticks, tick labels, and frame from axis]
call[call[name[ax].get_xaxis, parameter[]].set_ticks, parameter[list[[]]]]
call[call[name[ax].get_yaxis, parameter[]].set_ticks, parameter[list[[]]]]
call[name[ax].set_axis_bgcolor, parameter[constant[#ffffff]]]
for taget[name[sp]] in starred[call[name[ax].spines.values, parameter[]]] begin[:]
call[name[sp].set_visible, parameter[constant[False]]]
<ast.Tuple object at 0x7da20c7955a0> assign[=] call[name[_cluster], parameter[name[df]]]
variable[vmin] assign[=] call[call[name[dfc].min, parameter[]].min, parameter[]]
variable[vmax] assign[=] call[call[name[dfc].max, parameter[]].max, parameter[]]
variable[vmax] assign[=] call[name[max], parameter[list[[<ast.Name object at 0x7da20c794d60>, <ast.Call object at 0x7da20c796770>]]]]
variable[vmin] assign[=] binary_operation[name[vmax] * <ast.UnaryOp object at 0x7da20c7945b0>]
call[name[sch].set_link_color_palette, parameter[list[[<ast.Constant object at 0x7da20c794c70>]]]]
variable[my_norm] assign[=] call[name[mpl].colors.Normalize, parameter[]]
variable[fig] assign[=] call[name[plt].figure, parameter[]]
variable[heatmapGS] assign[=] call[name[gridspec].GridSpec, parameter[constant[2], constant[2]]]
if name[cluster_cols] begin[:]
variable[col_denAX] assign[=] call[name[fig].add_subplot, parameter[call[name[heatmapGS]][tuple[[<ast.Constant object at 0x7da18dc051e0>, <ast.Constant object at 0x7da18dc075b0>]]]]]
call[name[sch].dendrogram, parameter[name[col_clusters]]]
call[name[clean_axis], parameter[name[col_denAX]]]
variable[rowGSSS] assign[=] call[name[gridspec].GridSpecFromSubplotSpec, parameter[constant[1], constant[2]]]
if name[cluster_rows] begin[:]
variable[row_denAX] assign[=] call[name[fig].add_subplot, parameter[call[name[rowGSSS]][tuple[[<ast.Constant object at 0x7da18dc04d30>, <ast.Constant object at 0x7da18dc04430>]]]]]
call[name[sch].dendrogram, parameter[name[row_clusters]]]
call[name[clean_axis], parameter[name[row_denAX]]]
if <ast.BoolOp object at 0x7da18dc041f0> begin[:]
variable[class_idx] assign[=] call[name[dfc].index.names.index, parameter[constant[Group]]]
variable[classcol] assign[=] <ast.ListComp object at 0x7da18dc07190>
variable[classrgb] assign[=] call[call[name[np].array, parameter[<ast.ListComp object at 0x7da18dc04880>]].reshape, parameter[<ast.UnaryOp object at 0x7da18dc05ff0>, constant[1], constant[3]]]
variable[row_cbAX] assign[=] call[name[fig].add_subplot, parameter[call[name[rowGSSS]][tuple[[<ast.Constant object at 0x7da18dc054b0>, <ast.Constant object at 0x7da18dc07e50>]]]]]
variable[row_axi] assign[=] call[name[row_cbAX].imshow, parameter[name[classrgb]]]
call[name[clean_axis], parameter[name[row_cbAX]]]
variable[heatmapAX] assign[=] call[name[fig].add_subplot, parameter[call[name[heatmapGS]][tuple[[<ast.Constant object at 0x7da18dc07f10>, <ast.Constant object at 0x7da18dc05960>]]]]]
variable[axi] assign[=] call[name[heatmapAX].imshow, parameter[call[name[dfc].iloc][tuple[[<ast.Subscript object at 0x7da18dc04340>, <ast.Subscript object at 0x7da18dc065c0>]]]]]
call[name[clean_axis], parameter[name[heatmapAX]]]
def function[build_labels, parameter[index, ixs]]:
variable[zstr] assign[=] call[name[zip], parameter[<ast.Starred object at 0x7da18dc05c60>]]
return[call[name[np].array, parameter[<ast.ListComp object at 0x7da18dc06c80>]]]
if compare[call[name[dfc].shape][constant[0]] less_or_equal[<=] constant[100]] begin[:]
call[name[heatmapAX].set_yticks, parameter[call[name[range], parameter[call[name[dfc].shape][constant[0]]]]]]
call[name[heatmapAX].yaxis.set_ticks_position, parameter[constant[right]]]
if compare[name[row_labels] is constant[True]] begin[:]
variable[row_labels] assign[=] call[name[list], parameter[call[name[range], parameter[call[name[len], parameter[name[dfc].index.names]]]]]]
variable[ylabels] assign[=] call[call[name[build_labels], parameter[name[dfc].index, name[row_labels]]]][call[name[row_denD]][constant[leaves]]]
call[name[heatmapAX].set_yticklabels, parameter[name[ylabels]]]
if compare[call[name[dfc].shape][constant[1]] less_or_equal[<=] constant[100]] begin[:]
call[name[heatmapAX].set_xticks, parameter[call[name[range], parameter[call[name[dfc].shape][constant[1]]]]]]
if compare[name[col_labels] is constant[True]] begin[:]
variable[col_labels] assign[=] call[name[list], parameter[call[name[range], parameter[call[name[len], parameter[name[dfc].columns.names]]]]]]
variable[xlabels] assign[=] call[call[name[build_labels], parameter[name[dfc].columns, name[col_labels]]]][call[name[col_denD]][constant[leaves]]]
variable[xlabelsL] assign[=] call[name[heatmapAX].set_xticklabels, parameter[name[xlabels]]]
for taget[name[label]] in starred[name[xlabelsL]] begin[:]
call[name[label].set_rotation, parameter[constant[90]]]
for taget[name[l]] in starred[binary_operation[call[name[heatmapAX].get_xticklines, parameter[]] + call[name[heatmapAX].get_yticklines, parameter[]]]] begin[:]
call[name[l].set_markersize, parameter[constant[0]]]
call[name[heatmapAX].grid, parameter[constant[off]]]
if <ast.BoolOp object at 0x7da204347b80> begin[:]
for taget[name[edge]] in starred[name[edges]] begin[:]
call[name[heatmapAX].axvline, parameter[binary_operation[name[edge] + constant[0.5]]]]
if <ast.BoolOp object at 0x7da204345b10> begin[:]
for taget[name[edge]] in starred[name[edges]] begin[:]
call[name[heatmapAX].axhline, parameter[binary_operation[name[edge] + constant[0.5]]]]
if name[return_clusters] begin[:]
return[tuple[[<ast.Name object at 0x7da204344310>, <ast.Subscript object at 0x7da204346f80>, <ast.Name object at 0x7da204345cf0>]]] | keyword[def] identifier[hierarchical] ( identifier[df] , identifier[cluster_cols] = keyword[True] , identifier[cluster_rows] = keyword[False] , identifier[n_col_clusters] = keyword[False] , identifier[n_row_clusters] = keyword[False] , identifier[row_labels] = keyword[True] , identifier[col_labels] = keyword[True] , identifier[fcol] = keyword[None] , identifier[z_score] = literal[int] , identifier[method] = literal[string] , identifier[cmap] = identifier[cm] . identifier[PuOr_r] , identifier[return_clusters] = keyword[False] , identifier[rdistance_fn] = identifier[distance] . identifier[pdist] , identifier[cdistance_fn] = identifier[distance] . identifier[pdist] ):
literal[string]
keyword[def] identifier[clean_axis] ( identifier[ax] ):
literal[string]
identifier[ax] . identifier[get_xaxis] (). identifier[set_ticks] ([])
identifier[ax] . identifier[get_yaxis] (). identifier[set_ticks] ([])
identifier[ax] . identifier[set_axis_bgcolor] ( literal[string] )
keyword[for] identifier[sp] keyword[in] identifier[ax] . identifier[spines] . identifier[values] ():
identifier[sp] . identifier[set_visible] ( keyword[False] )
identifier[dfc] , identifier[row_clusters] , identifier[row_denD] , identifier[col_clusters] , identifier[col_denD] , identifier[edges] = identifier[_cluster] ( identifier[df] ,
identifier[cluster_cols] = identifier[cluster_cols] , identifier[cluster_rows] = identifier[cluster_rows] , identifier[n_col_clusters] = identifier[n_col_clusters] ,
identifier[n_row_clusters] = identifier[n_row_clusters] , identifier[z_score] = identifier[z_score] , identifier[method] = literal[string] ,
identifier[rdistance_fn] = identifier[rdistance_fn] , identifier[cdistance_fn] = identifier[cdistance_fn]
)
identifier[vmin] = identifier[dfc] . identifier[min] (). identifier[min] ()
identifier[vmax] = identifier[dfc] . identifier[max] (). identifier[max] ()
identifier[vmax] = identifier[max] ([ identifier[vmax] , identifier[abs] ( identifier[vmin] )])
identifier[vmin] = identifier[vmax] *- literal[int]
identifier[sch] . identifier[set_link_color_palette] ([ literal[string] ])
identifier[my_norm] = identifier[mpl] . identifier[colors] . identifier[Normalize] ( identifier[vmin] = identifier[vmin] , identifier[vmax] = identifier[vmax] )
identifier[fig] = identifier[plt] . identifier[figure] ( identifier[figsize] =( literal[int] , literal[int] ))
identifier[heatmapGS] = identifier[gridspec] . identifier[GridSpec] ( literal[int] , literal[int] , identifier[wspace] = literal[int] , identifier[hspace] = literal[int] , identifier[width_ratios] =[ literal[int] , literal[int] ], identifier[height_ratios] =[ literal[int] , literal[int] ])
keyword[if] identifier[cluster_cols] :
identifier[col_denAX] = identifier[fig] . identifier[add_subplot] ( identifier[heatmapGS] [ literal[int] , literal[int] ])
identifier[sch] . identifier[dendrogram] ( identifier[col_clusters] , identifier[color_threshold] = identifier[np] . identifier[inf] )
identifier[clean_axis] ( identifier[col_denAX] )
identifier[rowGSSS] = identifier[gridspec] . identifier[GridSpecFromSubplotSpec] ( literal[int] , literal[int] , identifier[subplot_spec] = identifier[heatmapGS] [ literal[int] , literal[int] ], identifier[wspace] = literal[int] , identifier[hspace] = literal[int] , identifier[width_ratios] =[ literal[int] , literal[int] ])
keyword[if] identifier[cluster_rows] :
identifier[row_denAX] = identifier[fig] . identifier[add_subplot] ( identifier[rowGSSS] [ literal[int] , literal[int] ])
identifier[sch] . identifier[dendrogram] ( identifier[row_clusters] , identifier[color_threshold] = identifier[np] . identifier[inf] , identifier[orientation] = literal[string] )
identifier[clean_axis] ( identifier[row_denAX] )
keyword[if] identifier[fcol] keyword[and] literal[string] keyword[in] identifier[dfc] . identifier[index] . identifier[names] :
identifier[class_idx] = identifier[dfc] . identifier[index] . identifier[names] . identifier[index] ( literal[string] )
identifier[classcol] =[ identifier[fcol] [ identifier[x] ] keyword[for] identifier[x] keyword[in] identifier[dfc] . identifier[index] . identifier[get_level_values] ( literal[int] )[ identifier[row_denD] [ literal[string] ]]]
identifier[classrgb] = identifier[np] . identifier[array] ([ identifier[colorConverter] . identifier[to_rgb] ( identifier[c] ) keyword[for] identifier[c] keyword[in] identifier[classcol] ]). identifier[reshape] (- literal[int] , literal[int] , literal[int] )
identifier[row_cbAX] = identifier[fig] . identifier[add_subplot] ( identifier[rowGSSS] [ literal[int] , literal[int] ])
identifier[row_axi] = identifier[row_cbAX] . identifier[imshow] ( identifier[classrgb] , identifier[interpolation] = literal[string] , identifier[aspect] = literal[string] , identifier[origin] = literal[string] )
identifier[clean_axis] ( identifier[row_cbAX] )
identifier[heatmapAX] = identifier[fig] . identifier[add_subplot] ( identifier[heatmapGS] [ literal[int] , literal[int] ])
identifier[axi] = identifier[heatmapAX] . identifier[imshow] ( identifier[dfc] . identifier[iloc] [ identifier[row_denD] [ literal[string] ], identifier[col_denD] [ literal[string] ]], identifier[interpolation] = literal[string] , identifier[aspect] = literal[string] , identifier[origin] = literal[string]
, identifier[norm] = identifier[my_norm] , identifier[cmap] = identifier[cmap] )
identifier[clean_axis] ( identifier[heatmapAX] )
keyword[def] identifier[build_labels] ( identifier[index] , identifier[ixs] ):
identifier[zstr] = identifier[zip] (*[ identifier[index] . identifier[get_level_values] ( identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[ixs] ])
keyword[return] identifier[np] . identifier[array] ([ literal[string] . identifier[join] ([ identifier[str] ( identifier[t] ) keyword[for] identifier[t] keyword[in] identifier[i] ]) keyword[if] identifier[type] ( identifier[i] )== identifier[tuple] keyword[else] identifier[str] ( identifier[i] ) keyword[for] identifier[i] keyword[in] identifier[zstr] ])
keyword[if] identifier[dfc] . identifier[shape] [ literal[int] ]<= literal[int] :
identifier[heatmapAX] . identifier[set_yticks] ( identifier[range] ( identifier[dfc] . identifier[shape] [ literal[int] ]))
identifier[heatmapAX] . identifier[yaxis] . identifier[set_ticks_position] ( literal[string] )
keyword[if] identifier[row_labels] keyword[is] keyword[True] :
identifier[row_labels] = identifier[list] ( identifier[range] ( identifier[len] ( identifier[dfc] . identifier[index] . identifier[names] )))
identifier[ylabels] = identifier[build_labels] ( identifier[dfc] . identifier[index] , identifier[row_labels] )[ identifier[row_denD] [ literal[string] ]]
identifier[heatmapAX] . identifier[set_yticklabels] ( identifier[ylabels] )
keyword[if] identifier[dfc] . identifier[shape] [ literal[int] ]<= literal[int] :
identifier[heatmapAX] . identifier[set_xticks] ( identifier[range] ( identifier[dfc] . identifier[shape] [ literal[int] ]))
keyword[if] identifier[col_labels] keyword[is] keyword[True] :
identifier[col_labels] = identifier[list] ( identifier[range] ( identifier[len] ( identifier[dfc] . identifier[columns] . identifier[names] )))
identifier[xlabels] = identifier[build_labels] ( identifier[dfc] . identifier[columns] , identifier[col_labels] )[ identifier[col_denD] [ literal[string] ]]
identifier[xlabelsL] = identifier[heatmapAX] . identifier[set_xticklabels] ( identifier[xlabels] )
keyword[for] identifier[label] keyword[in] identifier[xlabelsL] :
identifier[label] . identifier[set_rotation] ( literal[int] )
keyword[for] identifier[l] keyword[in] identifier[heatmapAX] . identifier[get_xticklines] ()+ identifier[heatmapAX] . identifier[get_yticklines] ():
identifier[l] . identifier[set_markersize] ( literal[int] )
identifier[heatmapAX] . identifier[grid] ( literal[string] )
keyword[if] identifier[cluster_cols] keyword[and] identifier[n_col_clusters] :
keyword[for] identifier[edge] keyword[in] identifier[edges] :
identifier[heatmapAX] . identifier[axvline] ( identifier[edge] + literal[int] , identifier[color] = literal[string] , identifier[lw] = literal[int] )
keyword[if] identifier[cluster_rows] keyword[and] identifier[n_row_clusters] :
keyword[for] identifier[edge] keyword[in] identifier[edges] :
identifier[heatmapAX] . identifier[axhline] ( identifier[edge] + literal[int] , identifier[color] = literal[string] , identifier[lw] = literal[int] )
keyword[if] identifier[return_clusters] :
keyword[return] identifier[fig] , identifier[dfc] . identifier[iloc] [ identifier[row_denD] [ literal[string] ], identifier[col_denD] [ literal[string] ]], identifier[edges]
keyword[else] :
keyword[return] identifier[fig] | def hierarchical(df, cluster_cols=True, cluster_rows=False, n_col_clusters=False, n_row_clusters=False, row_labels=True, col_labels=True, fcol=None, z_score=0, method='ward', cmap=cm.PuOr_r, return_clusters=False, rdistance_fn=distance.pdist, cdistance_fn=distance.pdist):
"""
Hierarchical clustering of samples or proteins
Peform a hiearchical clustering on a pandas DataFrame and display the resulting clustering as a
heatmap.
The axis of clustering can be controlled with `cluster_cols` and `cluster_rows`. By default clustering is performed
along the X-axis, therefore to cluster samples transpose the DataFrame as it is passed, using `df.T`.
Samples are z-scored along the 0-axis (y) by default. To override this use the `z_score` param with the axis to `z_score`
or alternatively, `None`, to turn it off.
If a `n_col_clusters` or `n_row_clusters` is specified, this defines the number of clusters to identify and highlight
in the resulting heatmap. At *least* this number of clusters will be selected, in some instances there will be more
if 2 clusters rank equally at the determined cutoff.
If specified `fcol` will be used to colour the axes for matching samples.
:param df: Pandas ``DataFrame`` to cluster
:param cluster_cols: ``bool`` if ``True`` cluster along column axis
:param cluster_rows: ``bool`` if ``True`` cluster along row axis
:param n_col_clusters: ``int`` the ideal number of highlighted clusters in cols
:param n_row_clusters: ``int`` the ideal number of highlighted clusters in rows
:param fcol: ``dict`` of label:colors to be applied along the axes
:param z_score: ``int`` to specify the axis to Z score or `None` to disable
:param method: ``str`` describing cluster method, default ward
:param cmap: matplotlib colourmap for heatmap
:param return_clusters: ``bool`` return clusters in addition to axis
:return: matplotlib axis, or axis and cluster data
"""
# helper for cleaning up axes by removing ticks, tick labels, frame, etc.
def clean_axis(ax):
"""Remove ticks, tick labels, and frame from axis"""
ax.get_xaxis().set_ticks([])
ax.get_yaxis().set_ticks([])
ax.set_axis_bgcolor('#ffffff')
for sp in ax.spines.values():
sp.set_visible(False) # depends on [control=['for'], data=['sp']]
(dfc, row_clusters, row_denD, col_clusters, col_denD, edges) = _cluster(df, cluster_cols=cluster_cols, cluster_rows=cluster_rows, n_col_clusters=n_col_clusters, n_row_clusters=n_row_clusters, z_score=z_score, method='ward', rdistance_fn=rdistance_fn, cdistance_fn=cdistance_fn)
# make norm
vmin = dfc.min().min()
vmax = dfc.max().max()
vmax = max([vmax, abs(vmin)]) # choose larger of vmin and vmax
vmin = vmax * -1
# dendrogram single color
sch.set_link_color_palette(['black'])
my_norm = mpl.colors.Normalize(vmin=vmin, vmax=vmax)
# heatmap with row names
fig = plt.figure(figsize=(12, 12))
heatmapGS = gridspec.GridSpec(2, 2, wspace=0.0, hspace=0.0, width_ratios=[0.25, 1], height_ratios=[0.25, 1])
if cluster_cols:
# col dendrogram
col_denAX = fig.add_subplot(heatmapGS[0, 1])
sch.dendrogram(col_clusters, color_threshold=np.inf)
clean_axis(col_denAX) # depends on [control=['if'], data=[]]
rowGSSS = gridspec.GridSpecFromSubplotSpec(1, 2, subplot_spec=heatmapGS[1, 0], wspace=0.0, hspace=0.0, width_ratios=[1, 0.05])
if cluster_rows:
# row dendrogram
row_denAX = fig.add_subplot(rowGSSS[0, 0])
sch.dendrogram(row_clusters, color_threshold=np.inf, orientation='right')
clean_axis(row_denAX) # depends on [control=['if'], data=[]]
# row colorbar
if fcol and 'Group' in dfc.index.names:
class_idx = dfc.index.names.index('Group')
classcol = [fcol[x] for x in dfc.index.get_level_values(0)[row_denD['leaves']]]
classrgb = np.array([colorConverter.to_rgb(c) for c in classcol]).reshape(-1, 1, 3)
row_cbAX = fig.add_subplot(rowGSSS[0, 1])
row_axi = row_cbAX.imshow(classrgb, interpolation='nearest', aspect='auto', origin='lower')
clean_axis(row_cbAX) # depends on [control=['if'], data=[]]
# heatmap
heatmapAX = fig.add_subplot(heatmapGS[1, 1])
axi = heatmapAX.imshow(dfc.iloc[row_denD['leaves'], col_denD['leaves']], interpolation='nearest', aspect='auto', origin='lower', norm=my_norm, cmap=cmap)
clean_axis(heatmapAX)
def build_labels(index, ixs):
zstr = zip(*[index.get_level_values(x) for x in ixs])
return np.array([' '.join([str(t) for t in i]) if type(i) == tuple else str(i) for i in zstr])
# row labels
if dfc.shape[0] <= 100:
heatmapAX.set_yticks(range(dfc.shape[0]))
heatmapAX.yaxis.set_ticks_position('right')
if row_labels is True:
row_labels = list(range(len(dfc.index.names))) # depends on [control=['if'], data=['row_labels']]
ylabels = build_labels(dfc.index, row_labels)[row_denD['leaves']]
heatmapAX.set_yticklabels(ylabels) # depends on [control=['if'], data=[]]
# col labels
if dfc.shape[1] <= 100:
heatmapAX.set_xticks(range(dfc.shape[1]))
if col_labels is True:
col_labels = list(range(len(dfc.columns.names))) # depends on [control=['if'], data=['col_labels']]
xlabels = build_labels(dfc.columns, col_labels)[col_denD['leaves']]
xlabelsL = heatmapAX.set_xticklabels(xlabels)
# rotate labels 90 degrees
for label in xlabelsL:
label.set_rotation(90) # depends on [control=['for'], data=['label']] # depends on [control=['if'], data=[]]
# remove the tick lines
for l in heatmapAX.get_xticklines() + heatmapAX.get_yticklines():
l.set_markersize(0) # depends on [control=['for'], data=['l']]
heatmapAX.grid('off')
if cluster_cols and n_col_clusters:
for edge in edges:
heatmapAX.axvline(edge + 0.5, color='k', lw=3) # depends on [control=['for'], data=['edge']] # depends on [control=['if'], data=[]]
if cluster_rows and n_row_clusters:
for edge in edges:
heatmapAX.axhline(edge + 0.5, color='k', lw=3) # depends on [control=['for'], data=['edge']] # depends on [control=['if'], data=[]]
if return_clusters:
return (fig, dfc.iloc[row_denD['leaves'], col_denD['leaves']], edges) # depends on [control=['if'], data=[]]
else:
return fig |
def wrap_httplib_request(request_func):
"""Wrap the httplib request function to trace. Create a new span and update
and close the span in the response later.
"""
def call(self, method, url, body, headers, *args, **kwargs):
_tracer = execution_context.get_opencensus_tracer()
blacklist_hostnames = execution_context.get_opencensus_attr(
'blacklist_hostnames')
dest_url = '{}:{}'.format(self._dns_host, self.port)
if utils.disable_tracing_hostname(dest_url, blacklist_hostnames):
return request_func(self, method, url, body,
headers, *args, **kwargs)
_span = _tracer.start_span()
_span.span_kind = span_module.SpanKind.CLIENT
_span.name = '[httplib]{}'.format(request_func.__name__)
# Add the request url to attributes
_tracer.add_attribute_to_current_span(HTTP_URL, url)
# Add the request method to attributes
_tracer.add_attribute_to_current_span(HTTP_METHOD, method)
# Store the current span id to thread local.
execution_context.set_opencensus_attr(
'httplib/current_span_id', _span.span_id)
try:
headers = headers.copy()
headers.update(_tracer.propagator.to_headers(
_span.context_tracer.span_context))
except Exception: # pragma: NO COVER
pass
return request_func(self, method, url, body, headers, *args, **kwargs)
return call | def function[wrap_httplib_request, parameter[request_func]]:
constant[Wrap the httplib request function to trace. Create a new span and update
and close the span in the response later.
]
def function[call, parameter[self, method, url, body, headers]]:
variable[_tracer] assign[=] call[name[execution_context].get_opencensus_tracer, parameter[]]
variable[blacklist_hostnames] assign[=] call[name[execution_context].get_opencensus_attr, parameter[constant[blacklist_hostnames]]]
variable[dest_url] assign[=] call[constant[{}:{}].format, parameter[name[self]._dns_host, name[self].port]]
if call[name[utils].disable_tracing_hostname, parameter[name[dest_url], name[blacklist_hostnames]]] begin[:]
return[call[name[request_func], parameter[name[self], name[method], name[url], name[body], name[headers], <ast.Starred object at 0x7da20e9575e0>]]]
variable[_span] assign[=] call[name[_tracer].start_span, parameter[]]
name[_span].span_kind assign[=] name[span_module].SpanKind.CLIENT
name[_span].name assign[=] call[constant[[httplib]{}].format, parameter[name[request_func].__name__]]
call[name[_tracer].add_attribute_to_current_span, parameter[name[HTTP_URL], name[url]]]
call[name[_tracer].add_attribute_to_current_span, parameter[name[HTTP_METHOD], name[method]]]
call[name[execution_context].set_opencensus_attr, parameter[constant[httplib/current_span_id], name[_span].span_id]]
<ast.Try object at 0x7da204621630>
return[call[name[request_func], parameter[name[self], name[method], name[url], name[body], name[headers], <ast.Starred object at 0x7da2046205e0>]]]
return[name[call]] | keyword[def] identifier[wrap_httplib_request] ( identifier[request_func] ):
literal[string]
keyword[def] identifier[call] ( identifier[self] , identifier[method] , identifier[url] , identifier[body] , identifier[headers] ,* identifier[args] ,** identifier[kwargs] ):
identifier[_tracer] = identifier[execution_context] . identifier[get_opencensus_tracer] ()
identifier[blacklist_hostnames] = identifier[execution_context] . identifier[get_opencensus_attr] (
literal[string] )
identifier[dest_url] = literal[string] . identifier[format] ( identifier[self] . identifier[_dns_host] , identifier[self] . identifier[port] )
keyword[if] identifier[utils] . identifier[disable_tracing_hostname] ( identifier[dest_url] , identifier[blacklist_hostnames] ):
keyword[return] identifier[request_func] ( identifier[self] , identifier[method] , identifier[url] , identifier[body] ,
identifier[headers] ,* identifier[args] ,** identifier[kwargs] )
identifier[_span] = identifier[_tracer] . identifier[start_span] ()
identifier[_span] . identifier[span_kind] = identifier[span_module] . identifier[SpanKind] . identifier[CLIENT]
identifier[_span] . identifier[name] = literal[string] . identifier[format] ( identifier[request_func] . identifier[__name__] )
identifier[_tracer] . identifier[add_attribute_to_current_span] ( identifier[HTTP_URL] , identifier[url] )
identifier[_tracer] . identifier[add_attribute_to_current_span] ( identifier[HTTP_METHOD] , identifier[method] )
identifier[execution_context] . identifier[set_opencensus_attr] (
literal[string] , identifier[_span] . identifier[span_id] )
keyword[try] :
identifier[headers] = identifier[headers] . identifier[copy] ()
identifier[headers] . identifier[update] ( identifier[_tracer] . identifier[propagator] . identifier[to_headers] (
identifier[_span] . identifier[context_tracer] . identifier[span_context] ))
keyword[except] identifier[Exception] :
keyword[pass]
keyword[return] identifier[request_func] ( identifier[self] , identifier[method] , identifier[url] , identifier[body] , identifier[headers] ,* identifier[args] ,** identifier[kwargs] )
keyword[return] identifier[call] | def wrap_httplib_request(request_func):
"""Wrap the httplib request function to trace. Create a new span and update
and close the span in the response later.
"""
def call(self, method, url, body, headers, *args, **kwargs):
_tracer = execution_context.get_opencensus_tracer()
blacklist_hostnames = execution_context.get_opencensus_attr('blacklist_hostnames')
dest_url = '{}:{}'.format(self._dns_host, self.port)
if utils.disable_tracing_hostname(dest_url, blacklist_hostnames):
return request_func(self, method, url, body, headers, *args, **kwargs) # depends on [control=['if'], data=[]]
_span = _tracer.start_span()
_span.span_kind = span_module.SpanKind.CLIENT
_span.name = '[httplib]{}'.format(request_func.__name__)
# Add the request url to attributes
_tracer.add_attribute_to_current_span(HTTP_URL, url)
# Add the request method to attributes
_tracer.add_attribute_to_current_span(HTTP_METHOD, method)
# Store the current span id to thread local.
execution_context.set_opencensus_attr('httplib/current_span_id', _span.span_id)
try:
headers = headers.copy()
headers.update(_tracer.propagator.to_headers(_span.context_tracer.span_context)) # depends on [control=['try'], data=[]]
except Exception: # pragma: NO COVER
pass # depends on [control=['except'], data=[]]
return request_func(self, method, url, body, headers, *args, **kwargs)
return call |
def eigenvectors_rev(T, k, right=True, ncv=None, mu=None):
r"""Compute eigenvectors of reversible transition matrix.
Parameters
----------
T : (M, M) scipy.sparse matrix
Transition matrix (stochastic matrix)
k : int
Number of eigenvalues to compute
right : bool, optional
If True compute right eigenvectors, left eigenvectors otherwise
ncv : int, optional
The number of Lanczos vectors generated, `ncv` must be greater than k;
it is recommended that ncv > 2*k
mu : (M,) ndarray, optional
Stationary distribution of T
Returns
-------
eigvec : (M, k) ndarray
k-eigenvectors of T
"""
if mu is None:
mu = stationary_distribution(T)
""" symmetrize T """
smu = np.sqrt(mu)
D = diags(smu, 0)
Dinv = diags(1.0/smu, 0)
S = (D.dot(T)).dot(Dinv)
"""Compute eigenvalues, eigenvecs using a solver for
symmetric/hermititan eigenproblems"""
val, eigvec = scipy.sparse.linalg.eigsh(S, k=k, ncv=ncv, which='LM',
return_eigenvectors=True)
"""Sort eigenvectors"""
ind = np.argsort(np.abs(val))[::-1]
eigvec = eigvec[:, ind]
if right:
return eigvec / smu[:, np.newaxis]
else:
return eigvec * smu[:, np.newaxis] | def function[eigenvectors_rev, parameter[T, k, right, ncv, mu]]:
constant[Compute eigenvectors of reversible transition matrix.
Parameters
----------
T : (M, M) scipy.sparse matrix
Transition matrix (stochastic matrix)
k : int
Number of eigenvalues to compute
right : bool, optional
If True compute right eigenvectors, left eigenvectors otherwise
ncv : int, optional
The number of Lanczos vectors generated, `ncv` must be greater than k;
it is recommended that ncv > 2*k
mu : (M,) ndarray, optional
Stationary distribution of T
Returns
-------
eigvec : (M, k) ndarray
k-eigenvectors of T
]
if compare[name[mu] is constant[None]] begin[:]
variable[mu] assign[=] call[name[stationary_distribution], parameter[name[T]]]
constant[ symmetrize T ]
variable[smu] assign[=] call[name[np].sqrt, parameter[name[mu]]]
variable[D] assign[=] call[name[diags], parameter[name[smu], constant[0]]]
variable[Dinv] assign[=] call[name[diags], parameter[binary_operation[constant[1.0] / name[smu]], constant[0]]]
variable[S] assign[=] call[call[name[D].dot, parameter[name[T]]].dot, parameter[name[Dinv]]]
constant[Compute eigenvalues, eigenvecs using a solver for
symmetric/hermititan eigenproblems]
<ast.Tuple object at 0x7da1b26adcf0> assign[=] call[name[scipy].sparse.linalg.eigsh, parameter[name[S]]]
constant[Sort eigenvectors]
variable[ind] assign[=] call[call[name[np].argsort, parameter[call[name[np].abs, parameter[name[val]]]]]][<ast.Slice object at 0x7da1b26ac0d0>]
variable[eigvec] assign[=] call[name[eigvec]][tuple[[<ast.Slice object at 0x7da1b26adbd0>, <ast.Name object at 0x7da1b26ac190>]]]
if name[right] begin[:]
return[binary_operation[name[eigvec] / call[name[smu]][tuple[[<ast.Slice object at 0x7da1b26afbe0>, <ast.Attribute object at 0x7da1b26acdf0>]]]]] | keyword[def] identifier[eigenvectors_rev] ( identifier[T] , identifier[k] , identifier[right] = keyword[True] , identifier[ncv] = keyword[None] , identifier[mu] = keyword[None] ):
literal[string]
keyword[if] identifier[mu] keyword[is] keyword[None] :
identifier[mu] = identifier[stationary_distribution] ( identifier[T] )
literal[string]
identifier[smu] = identifier[np] . identifier[sqrt] ( identifier[mu] )
identifier[D] = identifier[diags] ( identifier[smu] , literal[int] )
identifier[Dinv] = identifier[diags] ( literal[int] / identifier[smu] , literal[int] )
identifier[S] =( identifier[D] . identifier[dot] ( identifier[T] )). identifier[dot] ( identifier[Dinv] )
literal[string]
identifier[val] , identifier[eigvec] = identifier[scipy] . identifier[sparse] . identifier[linalg] . identifier[eigsh] ( identifier[S] , identifier[k] = identifier[k] , identifier[ncv] = identifier[ncv] , identifier[which] = literal[string] ,
identifier[return_eigenvectors] = keyword[True] )
literal[string]
identifier[ind] = identifier[np] . identifier[argsort] ( identifier[np] . identifier[abs] ( identifier[val] ))[::- literal[int] ]
identifier[eigvec] = identifier[eigvec] [:, identifier[ind] ]
keyword[if] identifier[right] :
keyword[return] identifier[eigvec] / identifier[smu] [:, identifier[np] . identifier[newaxis] ]
keyword[else] :
keyword[return] identifier[eigvec] * identifier[smu] [:, identifier[np] . identifier[newaxis] ] | def eigenvectors_rev(T, k, right=True, ncv=None, mu=None):
"""Compute eigenvectors of reversible transition matrix.
Parameters
----------
T : (M, M) scipy.sparse matrix
Transition matrix (stochastic matrix)
k : int
Number of eigenvalues to compute
right : bool, optional
If True compute right eigenvectors, left eigenvectors otherwise
ncv : int, optional
The number of Lanczos vectors generated, `ncv` must be greater than k;
it is recommended that ncv > 2*k
mu : (M,) ndarray, optional
Stationary distribution of T
Returns
-------
eigvec : (M, k) ndarray
k-eigenvectors of T
"""
if mu is None:
mu = stationary_distribution(T) # depends on [control=['if'], data=['mu']]
' symmetrize T '
smu = np.sqrt(mu)
D = diags(smu, 0)
Dinv = diags(1.0 / smu, 0)
S = D.dot(T).dot(Dinv)
'Compute eigenvalues, eigenvecs using a solver for\n symmetric/hermititan eigenproblems'
(val, eigvec) = scipy.sparse.linalg.eigsh(S, k=k, ncv=ncv, which='LM', return_eigenvectors=True)
'Sort eigenvectors'
ind = np.argsort(np.abs(val))[::-1]
eigvec = eigvec[:, ind]
if right:
return eigvec / smu[:, np.newaxis] # depends on [control=['if'], data=[]]
else:
return eigvec * smu[:, np.newaxis] |
def create_pattern(cls, userdata):
"""Create a user data instance with all values the same."""
empty = cls.create_empty(None)
userdata_dict = cls.normalize(empty, userdata)
return Userdata(userdata_dict) | def function[create_pattern, parameter[cls, userdata]]:
constant[Create a user data instance with all values the same.]
variable[empty] assign[=] call[name[cls].create_empty, parameter[constant[None]]]
variable[userdata_dict] assign[=] call[name[cls].normalize, parameter[name[empty], name[userdata]]]
return[call[name[Userdata], parameter[name[userdata_dict]]]] | keyword[def] identifier[create_pattern] ( identifier[cls] , identifier[userdata] ):
literal[string]
identifier[empty] = identifier[cls] . identifier[create_empty] ( keyword[None] )
identifier[userdata_dict] = identifier[cls] . identifier[normalize] ( identifier[empty] , identifier[userdata] )
keyword[return] identifier[Userdata] ( identifier[userdata_dict] ) | def create_pattern(cls, userdata):
"""Create a user data instance with all values the same."""
empty = cls.create_empty(None)
userdata_dict = cls.normalize(empty, userdata)
return Userdata(userdata_dict) |
def get_moods(self):
"""
Return moods defined on the gateway.
Returns a Command.
"""
mood_parent = self._get_mood_parent()
def process_result(result):
return [self.get_mood(mood, mood_parent=mood_parent) for mood in
result]
return Command('get', [ROOT_MOODS, mood_parent],
process_result=process_result) | def function[get_moods, parameter[self]]:
constant[
Return moods defined on the gateway.
Returns a Command.
]
variable[mood_parent] assign[=] call[name[self]._get_mood_parent, parameter[]]
def function[process_result, parameter[result]]:
return[<ast.ListComp object at 0x7da18ede7760>]
return[call[name[Command], parameter[constant[get], list[[<ast.Name object at 0x7da18ede5930>, <ast.Name object at 0x7da18ede6f80>]]]]] | keyword[def] identifier[get_moods] ( identifier[self] ):
literal[string]
identifier[mood_parent] = identifier[self] . identifier[_get_mood_parent] ()
keyword[def] identifier[process_result] ( identifier[result] ):
keyword[return] [ identifier[self] . identifier[get_mood] ( identifier[mood] , identifier[mood_parent] = identifier[mood_parent] ) keyword[for] identifier[mood] keyword[in]
identifier[result] ]
keyword[return] identifier[Command] ( literal[string] ,[ identifier[ROOT_MOODS] , identifier[mood_parent] ],
identifier[process_result] = identifier[process_result] ) | def get_moods(self):
"""
Return moods defined on the gateway.
Returns a Command.
"""
mood_parent = self._get_mood_parent()
def process_result(result):
return [self.get_mood(mood, mood_parent=mood_parent) for mood in result]
return Command('get', [ROOT_MOODS, mood_parent], process_result=process_result) |
def splitstring(string):
    """Split *string* on whitespace, keeping double-quoted phrases intact.

    Quoted phrases (double quotes enclosing word characters and spaces) are
    removed from their original position and appended, still quoted, after
    the remaining unquoted words.

    >>> string = 'apple orange "banana tree" green'
    >>> splitstring(string)
    ['apple', 'orange', 'green', '"banana tree"']
    """
    patt = re.compile(r'"[\w ]+"')
    quoted_items = patt.findall(string)
    if quoted_items:
        # Bug fix: previously only the first quoted phrase was appended even
        # though patt.sub('', ...) stripped every quoted phrase, so any
        # quoted phrase after the first was silently lost.
        return patt.sub('', string).split() + quoted_items
    return string.split()
constant[
>>> string = 'apple orange "banana tree" green'
>>> splitstring(string)
['apple', 'orange', 'green', '"banana tree"']
]
variable[patt] assign[=] call[name[re].compile, parameter[constant["[\w ]+"]]]
if call[name[patt].search, parameter[name[string]]] begin[:]
variable[quoted_item] assign[=] call[call[name[patt].search, parameter[name[string]]].group, parameter[]]
variable[newstring] assign[=] call[name[patt].sub, parameter[constant[], name[string]]]
return[binary_operation[call[name[newstring].split, parameter[]] + list[[<ast.Name object at 0x7da1b033a950>]]]] | keyword[def] identifier[splitstring] ( identifier[string] ):
literal[string]
identifier[patt] = identifier[re] . identifier[compile] ( literal[string] )
keyword[if] identifier[patt] . identifier[search] ( identifier[string] ):
identifier[quoted_item] = identifier[patt] . identifier[search] ( identifier[string] ). identifier[group] ()
identifier[newstring] = identifier[patt] . identifier[sub] ( literal[string] , identifier[string] )
keyword[return] identifier[newstring] . identifier[split] ()+[ identifier[quoted_item] ]
keyword[else] :
keyword[return] identifier[string] . identifier[split] () | def splitstring(string):
"""
>>> string = 'apple orange "banana tree" green'
>>> splitstring(string)
['apple', 'orange', 'green', '"banana tree"']
"""
patt = re.compile('"[\\w ]+"')
if patt.search(string):
quoted_item = patt.search(string).group()
newstring = patt.sub('', string)
return newstring.split() + [quoted_item] # depends on [control=['if'], data=[]]
else:
return string.split() |
def close_all(self):
    """
    Close every editor tab.

    Returns True when all tabs were closed, False when closing was aborted
    because of unsaved (dirty) tabs.
    """
    if not self._try_close_dirty_tabs():
        return False
    # Remove tabs front-to-back until none remain, notifying listeners
    # about each widget that was closed.
    while self.count():
        closed_widget = self.widget(0)
        self.remove_tab(0)
        self.tab_closed.emit(closed_widget)
    return True
return False | def function[close_all, parameter[self]]:
constant[
Closes all editors
]
if call[name[self]._try_close_dirty_tabs, parameter[]] begin[:]
while call[name[self].count, parameter[]] begin[:]
variable[widget] assign[=] call[name[self].widget, parameter[constant[0]]]
call[name[self].remove_tab, parameter[constant[0]]]
call[name[self].tab_closed.emit, parameter[name[widget]]]
return[constant[True]]
return[constant[False]] | keyword[def] identifier[close_all] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[_try_close_dirty_tabs] ():
keyword[while] identifier[self] . identifier[count] ():
identifier[widget] = identifier[self] . identifier[widget] ( literal[int] )
identifier[self] . identifier[remove_tab] ( literal[int] )
identifier[self] . identifier[tab_closed] . identifier[emit] ( identifier[widget] )
keyword[return] keyword[True]
keyword[return] keyword[False] | def close_all(self):
"""
Closes all editors
"""
if self._try_close_dirty_tabs():
while self.count():
widget = self.widget(0)
self.remove_tab(0)
self.tab_closed.emit(widget) # depends on [control=['while'], data=[]]
return True # depends on [control=['if'], data=[]]
return False |
def get_const(protocol_version):
    """Return the const module for the protocol_version."""
    requested = parse_ver(protocol_version)
    # Pick the highest const version not newer than the requested protocol;
    # fall back to the oldest known const module.
    path = 'mysensors.const_14'
    for const_version in sorted(CONST_VERSIONS, reverse=True):
        if requested >= parse_ver(const_version):
            path = CONST_VERSIONS[const_version]
            break
    if path not in LOADED_CONST:
        LOADED_CONST[path] = import_module(path)  # cache the module
    return LOADED_CONST[path]
constant[Return the const module for the protocol_version.]
variable[path] assign[=] call[name[next], parameter[<ast.GeneratorExp object at 0x7da20e956e00>, constant[mysensors.const_14]]]
if compare[name[path] in name[LOADED_CONST]] begin[:]
return[call[name[LOADED_CONST]][name[path]]]
variable[const] assign[=] call[name[import_module], parameter[name[path]]]
call[name[LOADED_CONST]][name[path]] assign[=] name[const]
return[name[const]] | keyword[def] identifier[get_const] ( identifier[protocol_version] ):
literal[string]
identifier[path] = identifier[next] ((
identifier[CONST_VERSIONS] [ identifier[const_version] ]
keyword[for] identifier[const_version] keyword[in] identifier[sorted] ( identifier[CONST_VERSIONS] , identifier[reverse] = keyword[True] )
keyword[if] identifier[parse_ver] ( identifier[protocol_version] )>= identifier[parse_ver] ( identifier[const_version] )
), literal[string] )
keyword[if] identifier[path] keyword[in] identifier[LOADED_CONST] :
keyword[return] identifier[LOADED_CONST] [ identifier[path] ]
identifier[const] = identifier[import_module] ( identifier[path] )
identifier[LOADED_CONST] [ identifier[path] ]= identifier[const]
keyword[return] identifier[const] | def get_const(protocol_version):
"""Return the const module for the protocol_version."""
path = next((CONST_VERSIONS[const_version] for const_version in sorted(CONST_VERSIONS, reverse=True) if parse_ver(protocol_version) >= parse_ver(const_version)), 'mysensors.const_14')
if path in LOADED_CONST:
return LOADED_CONST[path] # depends on [control=['if'], data=['path', 'LOADED_CONST']]
const = import_module(path)
LOADED_CONST[path] = const # Cache the module
return const |
def _resize(self, ratio_x, ratio_y, resampling):
    """Return raster resized by ratio.

    :param ratio_x: horizontal scale factor applied to the current width.
    :param ratio_y: vertical scale factor applied to the current height.
    :param resampling: resampling method forwarded to the window read or
        reprojection (presumably a ``rasterio.enums.Resampling`` value --
        TODO confirm against callers).
    """
    # Round the target size up so a fractional ratio never drops a pixel.
    new_width = int(np.ceil(self.width * ratio_x))
    new_height = int(np.ceil(self.height * ratio_y))
    # Scale pixel size by the inverse ratio so the raster keeps covering
    # the same geographic extent after resizing.
    dest_affine = self.affine * Affine.scale(1 / ratio_x, 1 / ratio_y)
    if self.not_loaded():
        # Data not in memory yet: read the full extent decimated to the
        # target size instead of loading and then resampling.
        window = rasterio.windows.Window(0, 0, self.width, self.height)
        resized_raster = self.get_window(window, xsize=new_width, ysize=new_height, resampling=resampling)
    else:
        # Data already loaded: resample in memory via reprojection.
        resized_raster = self._reproject(new_width, new_height, dest_affine, resampling=resampling)
    return resized_raster
constant[Return raster resized by ratio.]
variable[new_width] assign[=] call[name[int], parameter[call[name[np].ceil, parameter[binary_operation[name[self].width * name[ratio_x]]]]]]
variable[new_height] assign[=] call[name[int], parameter[call[name[np].ceil, parameter[binary_operation[name[self].height * name[ratio_y]]]]]]
variable[dest_affine] assign[=] binary_operation[name[self].affine * call[name[Affine].scale, parameter[binary_operation[constant[1] / name[ratio_x]], binary_operation[constant[1] / name[ratio_y]]]]]
if call[name[self].not_loaded, parameter[]] begin[:]
variable[window] assign[=] call[name[rasterio].windows.Window, parameter[constant[0], constant[0], name[self].width, name[self].height]]
variable[resized_raster] assign[=] call[name[self].get_window, parameter[name[window]]]
return[name[resized_raster]] | keyword[def] identifier[_resize] ( identifier[self] , identifier[ratio_x] , identifier[ratio_y] , identifier[resampling] ):
literal[string]
identifier[new_width] = identifier[int] ( identifier[np] . identifier[ceil] ( identifier[self] . identifier[width] * identifier[ratio_x] ))
identifier[new_height] = identifier[int] ( identifier[np] . identifier[ceil] ( identifier[self] . identifier[height] * identifier[ratio_y] ))
identifier[dest_affine] = identifier[self] . identifier[affine] * identifier[Affine] . identifier[scale] ( literal[int] / identifier[ratio_x] , literal[int] / identifier[ratio_y] )
keyword[if] identifier[self] . identifier[not_loaded] ():
identifier[window] = identifier[rasterio] . identifier[windows] . identifier[Window] ( literal[int] , literal[int] , identifier[self] . identifier[width] , identifier[self] . identifier[height] )
identifier[resized_raster] = identifier[self] . identifier[get_window] ( identifier[window] , identifier[xsize] = identifier[new_width] , identifier[ysize] = identifier[new_height] , identifier[resampling] = identifier[resampling] )
keyword[else] :
identifier[resized_raster] = identifier[self] . identifier[_reproject] ( identifier[new_width] , identifier[new_height] , identifier[dest_affine] , identifier[resampling] = identifier[resampling] )
keyword[return] identifier[resized_raster] | def _resize(self, ratio_x, ratio_y, resampling):
"""Return raster resized by ratio."""
new_width = int(np.ceil(self.width * ratio_x))
new_height = int(np.ceil(self.height * ratio_y))
dest_affine = self.affine * Affine.scale(1 / ratio_x, 1 / ratio_y)
if self.not_loaded():
window = rasterio.windows.Window(0, 0, self.width, self.height)
resized_raster = self.get_window(window, xsize=new_width, ysize=new_height, resampling=resampling) # depends on [control=['if'], data=[]]
else:
resized_raster = self._reproject(new_width, new_height, dest_affine, resampling=resampling)
return resized_raster |
def mediate_transfer(
        state: MediatorTransferState,
        possible_routes: List['RouteState'],
        payer_channel: NettingChannelState,
        channelidentifiers_to_channels: ChannelMap,
        nodeaddresses_to_networkstates: NodeNetworkStateMap,
        pseudo_random_generator: random.Random,
        payer_transfer: LockedTransferSignedState,
        block_number: BlockNumber,
) -> TransitionResult[MediatorTransferState]:
    """ Try a new route or fail back to a refund.
    The mediator can safely try a new route knowing that the tokens from
    payer_transfer will cover the expenses of the mediation. If there is no
    route available that may be used at the moment of the call the mediator may
    send a refund back to the payer, allowing the payer to try a different
    route.
    """
    # Only consider routes whose partner node is currently reachable ...
    reachable_routes = filter_reachable_routes(
        possible_routes,
        nodeaddresses_to_networkstates,
    )
    # ... and that were not already tried for this transfer.
    available_routes = filter_used_routes(
        state.transfers_pair,
        reachable_routes,
    )
    # The payer transfer must have been sent by the partner of payer_channel.
    assert payer_channel.partner_state.address == payer_transfer.balance_proof.sender

    # First attempt: forward the transfer along one of the available routes.
    transfer_pair, mediated_events = forward_transfer_pair(
        payer_transfer,
        available_routes,
        channelidentifiers_to_channels,
        pseudo_random_generator,
        block_number,
    )
    if transfer_pair is None:
        assert not mediated_events

        # No forward route worked; refund backwards on the channel the
        # transfer originally arrived on (the first pair's payer channel,
        # or payer_channel itself when nothing was mediated yet).
        if state.transfers_pair:
            original_pair = state.transfers_pair[0]
            original_channel = get_payer_channel(
                channelidentifiers_to_channels,
                original_pair,
            )
        else:
            original_channel = payer_channel

        if original_channel:
            transfer_pair, mediated_events = backward_transfer_pair(
                original_channel,
                payer_transfer,
                pseudo_random_generator,
                block_number,
            )
        else:
            transfer_pair = None
            mediated_events = list()

    if transfer_pair is None:
        assert not mediated_events
        mediated_events = list()
        # Neither forwarding nor refunding is possible right now: park the
        # transfer and wait (e.g. for a route to become available).
        state.waiting_transfer = WaitingTransferState(payer_transfer)
    else:
        # the list must be ordered from high to low expiration, expiration
        # handling depends on it
        state.transfers_pair.append(transfer_pair)

    return TransitionResult(state, mediated_events)
constant[ Try a new route or fail back to a refund.
The mediator can safely try a new route knowing that the tokens from
payer_transfer will cover the expenses of the mediation. If there is no
route available that may be used at the moment of the call the mediator may
send a refund back to the payer, allowing the payer to try a different
route.
]
variable[reachable_routes] assign[=] call[name[filter_reachable_routes], parameter[name[possible_routes], name[nodeaddresses_to_networkstates]]]
variable[available_routes] assign[=] call[name[filter_used_routes], parameter[name[state].transfers_pair, name[reachable_routes]]]
assert[compare[name[payer_channel].partner_state.address equal[==] name[payer_transfer].balance_proof.sender]]
<ast.Tuple object at 0x7da1b1712050> assign[=] call[name[forward_transfer_pair], parameter[name[payer_transfer], name[available_routes], name[channelidentifiers_to_channels], name[pseudo_random_generator], name[block_number]]]
if compare[name[transfer_pair] is constant[None]] begin[:]
assert[<ast.UnaryOp object at 0x7da1b17136a0>]
if name[state].transfers_pair begin[:]
variable[original_pair] assign[=] call[name[state].transfers_pair][constant[0]]
variable[original_channel] assign[=] call[name[get_payer_channel], parameter[name[channelidentifiers_to_channels], name[original_pair]]]
if name[original_channel] begin[:]
<ast.Tuple object at 0x7da1b1712e90> assign[=] call[name[backward_transfer_pair], parameter[name[original_channel], name[payer_transfer], name[pseudo_random_generator], name[block_number]]]
if compare[name[transfer_pair] is constant[None]] begin[:]
assert[<ast.UnaryOp object at 0x7da1b17135e0>]
variable[mediated_events] assign[=] call[name[list], parameter[]]
name[state].waiting_transfer assign[=] call[name[WaitingTransferState], parameter[name[payer_transfer]]]
return[call[name[TransitionResult], parameter[name[state], name[mediated_events]]]] | keyword[def] identifier[mediate_transfer] (
identifier[state] : identifier[MediatorTransferState] ,
identifier[possible_routes] : identifier[List] [ literal[string] ],
identifier[payer_channel] : identifier[NettingChannelState] ,
identifier[channelidentifiers_to_channels] : identifier[ChannelMap] ,
identifier[nodeaddresses_to_networkstates] : identifier[NodeNetworkStateMap] ,
identifier[pseudo_random_generator] : identifier[random] . identifier[Random] ,
identifier[payer_transfer] : identifier[LockedTransferSignedState] ,
identifier[block_number] : identifier[BlockNumber] ,
)-> identifier[TransitionResult] [ identifier[MediatorTransferState] ]:
literal[string]
identifier[reachable_routes] = identifier[filter_reachable_routes] (
identifier[possible_routes] ,
identifier[nodeaddresses_to_networkstates] ,
)
identifier[available_routes] = identifier[filter_used_routes] (
identifier[state] . identifier[transfers_pair] ,
identifier[reachable_routes] ,
)
keyword[assert] identifier[payer_channel] . identifier[partner_state] . identifier[address] == identifier[payer_transfer] . identifier[balance_proof] . identifier[sender]
identifier[transfer_pair] , identifier[mediated_events] = identifier[forward_transfer_pair] (
identifier[payer_transfer] ,
identifier[available_routes] ,
identifier[channelidentifiers_to_channels] ,
identifier[pseudo_random_generator] ,
identifier[block_number] ,
)
keyword[if] identifier[transfer_pair] keyword[is] keyword[None] :
keyword[assert] keyword[not] identifier[mediated_events]
keyword[if] identifier[state] . identifier[transfers_pair] :
identifier[original_pair] = identifier[state] . identifier[transfers_pair] [ literal[int] ]
identifier[original_channel] = identifier[get_payer_channel] (
identifier[channelidentifiers_to_channels] ,
identifier[original_pair] ,
)
keyword[else] :
identifier[original_channel] = identifier[payer_channel]
keyword[if] identifier[original_channel] :
identifier[transfer_pair] , identifier[mediated_events] = identifier[backward_transfer_pair] (
identifier[original_channel] ,
identifier[payer_transfer] ,
identifier[pseudo_random_generator] ,
identifier[block_number] ,
)
keyword[else] :
identifier[transfer_pair] = keyword[None]
identifier[mediated_events] = identifier[list] ()
keyword[if] identifier[transfer_pair] keyword[is] keyword[None] :
keyword[assert] keyword[not] identifier[mediated_events]
identifier[mediated_events] = identifier[list] ()
identifier[state] . identifier[waiting_transfer] = identifier[WaitingTransferState] ( identifier[payer_transfer] )
keyword[else] :
identifier[state] . identifier[transfers_pair] . identifier[append] ( identifier[transfer_pair] )
keyword[return] identifier[TransitionResult] ( identifier[state] , identifier[mediated_events] ) | def mediate_transfer(state: MediatorTransferState, possible_routes: List['RouteState'], payer_channel: NettingChannelState, channelidentifiers_to_channels: ChannelMap, nodeaddresses_to_networkstates: NodeNetworkStateMap, pseudo_random_generator: random.Random, payer_transfer: LockedTransferSignedState, block_number: BlockNumber) -> TransitionResult[MediatorTransferState]:
""" Try a new route or fail back to a refund.
The mediator can safely try a new route knowing that the tokens from
payer_transfer will cover the expenses of the mediation. If there is no
route available that may be used at the moment of the call the mediator may
send a refund back to the payer, allowing the payer to try a different
route.
"""
reachable_routes = filter_reachable_routes(possible_routes, nodeaddresses_to_networkstates)
available_routes = filter_used_routes(state.transfers_pair, reachable_routes)
assert payer_channel.partner_state.address == payer_transfer.balance_proof.sender
(transfer_pair, mediated_events) = forward_transfer_pair(payer_transfer, available_routes, channelidentifiers_to_channels, pseudo_random_generator, block_number)
if transfer_pair is None:
assert not mediated_events
if state.transfers_pair:
original_pair = state.transfers_pair[0]
original_channel = get_payer_channel(channelidentifiers_to_channels, original_pair) # depends on [control=['if'], data=[]]
else:
original_channel = payer_channel
if original_channel:
(transfer_pair, mediated_events) = backward_transfer_pair(original_channel, payer_transfer, pseudo_random_generator, block_number) # depends on [control=['if'], data=[]]
else:
transfer_pair = None
mediated_events = list() # depends on [control=['if'], data=['transfer_pair']]
if transfer_pair is None:
assert not mediated_events
mediated_events = list()
state.waiting_transfer = WaitingTransferState(payer_transfer) # depends on [control=['if'], data=[]]
else:
# the list must be ordered from high to low expiration, expiration
# handling depends on it
state.transfers_pair.append(transfer_pair)
return TransitionResult(state, mediated_events) |
def _get_network(self, network_info):
"""Send network get request to DCNM.
:param network_info: contains network info to query.
"""
org_name = network_info.get('organizationName', '')
part_name = network_info.get('partitionName', '')
segment_id = network_info['segmentId']
url = self._network_url % (org_name, part_name, segment_id)
return self._send_request('GET', url, '', 'network') | def function[_get_network, parameter[self, network_info]]:
constant[Send network get request to DCNM.
:param network_info: contains network info to query.
]
variable[org_name] assign[=] call[name[network_info].get, parameter[constant[organizationName], constant[]]]
variable[part_name] assign[=] call[name[network_info].get, parameter[constant[partitionName], constant[]]]
variable[segment_id] assign[=] call[name[network_info]][constant[segmentId]]
variable[url] assign[=] binary_operation[name[self]._network_url <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da18dc9b010>, <ast.Name object at 0x7da18dc98280>, <ast.Name object at 0x7da18dc9ab30>]]]
return[call[name[self]._send_request, parameter[constant[GET], name[url], constant[], constant[network]]]] | keyword[def] identifier[_get_network] ( identifier[self] , identifier[network_info] ):
literal[string]
identifier[org_name] = identifier[network_info] . identifier[get] ( literal[string] , literal[string] )
identifier[part_name] = identifier[network_info] . identifier[get] ( literal[string] , literal[string] )
identifier[segment_id] = identifier[network_info] [ literal[string] ]
identifier[url] = identifier[self] . identifier[_network_url] %( identifier[org_name] , identifier[part_name] , identifier[segment_id] )
keyword[return] identifier[self] . identifier[_send_request] ( literal[string] , identifier[url] , literal[string] , literal[string] ) | def _get_network(self, network_info):
"""Send network get request to DCNM.
:param network_info: contains network info to query.
"""
org_name = network_info.get('organizationName', '')
part_name = network_info.get('partitionName', '')
segment_id = network_info['segmentId']
url = self._network_url % (org_name, part_name, segment_id)
return self._send_request('GET', url, '', 'network') |
def _split_area(self, xs, lower, upper):
    """
    Splits area plots at nans and returns x- and y-coordinates for
    each area separated by nans.

    Each contiguous finite segment becomes a closed band polygon
    (x forward then reversed, lower forward then upper reversed), with
    nan separators between bands so the renderer breaks the path there.
    Returns empty lists when there is no finite segment at all.
    """
    # nan sentinel matching the dtype (NaT for datetime axes).
    xnan = np.array([np.datetime64('nat') if xs.dtype.kind == 'M' else np.nan])
    ynan = np.array([np.datetime64('nat') if lower.dtype.kind == 'M' else np.nan])
    # Indices where any of the three series is non-finite: split points.
    # NOTE(review): isfinite appears to be a module-level helper, presumably
    # a NaT-aware wrapper of np.isfinite -- confirm in the enclosing module.
    split = np.where(~isfinite(xs) | ~isfinite(lower) | ~isfinite(upper))[0]
    xvals = np.split(xs, split)
    lower = np.split(lower, split)
    upper = np.split(upper, split)
    band_x, band_y = [], []
    for i, (x, l, u) in enumerate(zip(xvals, lower, upper)):
        if i:
            # Every segment after the first starts with the non-finite
            # element that caused the split; drop it.
            x, l, u = x[1:], l[1:], u[1:]
        if not len(x):
            continue
        # Closed band outline for this segment, followed by a nan separator.
        band_x += [np.append(x, x[::-1]), xnan]
        band_y += [np.append(l, u[::-1]), ynan]
    if len(band_x):
        # Concatenate all bands, dropping the trailing nan separator.
        xs = np.concatenate(band_x[:-1])
        ys = np.concatenate(band_y[:-1])
        return xs, ys
    return [], []
constant[
Splits area plots at nans and returns x- and y-coordinates for
each area separated by nans.
]
variable[xnan] assign[=] call[name[np].array, parameter[list[[<ast.IfExp object at 0x7da2046211e0>]]]]
variable[ynan] assign[=] call[name[np].array, parameter[list[[<ast.IfExp object at 0x7da2046219f0>]]]]
variable[split] assign[=] call[call[name[np].where, parameter[binary_operation[binary_operation[<ast.UnaryOp object at 0x7da2044c35e0> <ast.BitOr object at 0x7da2590d6aa0> <ast.UnaryOp object at 0x7da2044c0220>] <ast.BitOr object at 0x7da2590d6aa0> <ast.UnaryOp object at 0x7da2044c11b0>]]]][constant[0]]
variable[xvals] assign[=] call[name[np].split, parameter[name[xs], name[split]]]
variable[lower] assign[=] call[name[np].split, parameter[name[lower], name[split]]]
variable[upper] assign[=] call[name[np].split, parameter[name[upper], name[split]]]
<ast.Tuple object at 0x7da20c991f30> assign[=] tuple[[<ast.List object at 0x7da20c991030>, <ast.List object at 0x7da20c9900a0>]]
for taget[tuple[[<ast.Name object at 0x7da20c993850>, <ast.Tuple object at 0x7da20c9933d0>]]] in starred[call[name[enumerate], parameter[call[name[zip], parameter[name[xvals], name[lower], name[upper]]]]]] begin[:]
if name[i] begin[:]
<ast.Tuple object at 0x7da20c991390> assign[=] tuple[[<ast.Subscript object at 0x7da20c993ee0>, <ast.Subscript object at 0x7da20c991840>, <ast.Subscript object at 0x7da20c9915d0>]]
if <ast.UnaryOp object at 0x7da1b1b0ff70> begin[:]
continue
<ast.AugAssign object at 0x7da1b1b0c760>
<ast.AugAssign object at 0x7da204622050>
if call[name[len], parameter[name[band_x]]] begin[:]
variable[xs] assign[=] call[name[np].concatenate, parameter[call[name[band_x]][<ast.Slice object at 0x7da204622c80>]]]
variable[ys] assign[=] call[name[np].concatenate, parameter[call[name[band_y]][<ast.Slice object at 0x7da204620ca0>]]]
return[tuple[[<ast.Name object at 0x7da204622470>, <ast.Name object at 0x7da204623b50>]]]
return[tuple[[<ast.List object at 0x7da204620c70>, <ast.List object at 0x7da204621840>]]] | keyword[def] identifier[_split_area] ( identifier[self] , identifier[xs] , identifier[lower] , identifier[upper] ):
literal[string]
identifier[xnan] = identifier[np] . identifier[array] ([ identifier[np] . identifier[datetime64] ( literal[string] ) keyword[if] identifier[xs] . identifier[dtype] . identifier[kind] == literal[string] keyword[else] identifier[np] . identifier[nan] ])
identifier[ynan] = identifier[np] . identifier[array] ([ identifier[np] . identifier[datetime64] ( literal[string] ) keyword[if] identifier[lower] . identifier[dtype] . identifier[kind] == literal[string] keyword[else] identifier[np] . identifier[nan] ])
identifier[split] = identifier[np] . identifier[where] (~ identifier[isfinite] ( identifier[xs] )|~ identifier[isfinite] ( identifier[lower] )|~ identifier[isfinite] ( identifier[upper] ))[ literal[int] ]
identifier[xvals] = identifier[np] . identifier[split] ( identifier[xs] , identifier[split] )
identifier[lower] = identifier[np] . identifier[split] ( identifier[lower] , identifier[split] )
identifier[upper] = identifier[np] . identifier[split] ( identifier[upper] , identifier[split] )
identifier[band_x] , identifier[band_y] =[],[]
keyword[for] identifier[i] ,( identifier[x] , identifier[l] , identifier[u] ) keyword[in] identifier[enumerate] ( identifier[zip] ( identifier[xvals] , identifier[lower] , identifier[upper] )):
keyword[if] identifier[i] :
identifier[x] , identifier[l] , identifier[u] = identifier[x] [ literal[int] :], identifier[l] [ literal[int] :], identifier[u] [ literal[int] :]
keyword[if] keyword[not] identifier[len] ( identifier[x] ):
keyword[continue]
identifier[band_x] +=[ identifier[np] . identifier[append] ( identifier[x] , identifier[x] [::- literal[int] ]), identifier[xnan] ]
identifier[band_y] +=[ identifier[np] . identifier[append] ( identifier[l] , identifier[u] [::- literal[int] ]), identifier[ynan] ]
keyword[if] identifier[len] ( identifier[band_x] ):
identifier[xs] = identifier[np] . identifier[concatenate] ( identifier[band_x] [:- literal[int] ])
identifier[ys] = identifier[np] . identifier[concatenate] ( identifier[band_y] [:- literal[int] ])
keyword[return] identifier[xs] , identifier[ys]
keyword[return] [],[] | def _split_area(self, xs, lower, upper):
"""
Splits area plots at nans and returns x- and y-coordinates for
each area separated by nans.
"""
xnan = np.array([np.datetime64('nat') if xs.dtype.kind == 'M' else np.nan])
ynan = np.array([np.datetime64('nat') if lower.dtype.kind == 'M' else np.nan])
split = np.where(~isfinite(xs) | ~isfinite(lower) | ~isfinite(upper))[0]
xvals = np.split(xs, split)
lower = np.split(lower, split)
upper = np.split(upper, split)
(band_x, band_y) = ([], [])
for (i, (x, l, u)) in enumerate(zip(xvals, lower, upper)):
if i:
(x, l, u) = (x[1:], l[1:], u[1:]) # depends on [control=['if'], data=[]]
if not len(x):
continue # depends on [control=['if'], data=[]]
band_x += [np.append(x, x[::-1]), xnan]
band_y += [np.append(l, u[::-1]), ynan] # depends on [control=['for'], data=[]]
if len(band_x):
xs = np.concatenate(band_x[:-1])
ys = np.concatenate(band_y[:-1])
return (xs, ys) # depends on [control=['if'], data=[]]
return ([], []) |
def update_alert(self, alert):
    '''**Description**
        Update a modified threshold-based alert.
    **Arguments**
        - **alert**: one modified alert object of the same format as those in the list returned by :func:`~SdcClient.get_alerts`.
    **Success Return Value**
        The updated alert.
    **Example**
        `examples/update_alert.py <https://github.com/draios/python-sdc-client/blob/master/examples/update_alert.py>`_
    '''
    # The alert must already exist server-side, i.e. carry its id.
    if 'id' not in alert:
        return [False, "Invalid alert format"]

    endpoint = self.url + '/api/alerts/' + str(alert['id'])
    payload = json.dumps({"alert": alert})
    res = requests.put(endpoint,
                       headers=self.hdrs,
                       data=payload,
                       verify=self.ssl_verify)
    return self._request_result(res)
constant[**Description**
Update a modified threshold-based alert.
**Arguments**
- **alert**: one modified alert object of the same format as those in the list returned by :func:`~SdcClient.get_alerts`.
**Success Return Value**
The updated alert.
**Example**
`examples/update_alert.py <https://github.com/draios/python-sdc-client/blob/master/examples/update_alert.py>`_
]
if compare[constant[id] <ast.NotIn object at 0x7da2590d7190> name[alert]] begin[:]
return[list[[<ast.Constant object at 0x7da18f722dd0>, <ast.Constant object at 0x7da18f722c20>]]]
variable[res] assign[=] call[name[requests].put, parameter[binary_operation[binary_operation[name[self].url + constant[/api/alerts/]] + call[name[str], parameter[call[name[alert]][constant[id]]]]]]]
return[call[name[self]._request_result, parameter[name[res]]]] | keyword[def] identifier[update_alert] ( identifier[self] , identifier[alert] ):
literal[string]
keyword[if] literal[string] keyword[not] keyword[in] identifier[alert] :
keyword[return] [ keyword[False] , literal[string] ]
identifier[res] = identifier[requests] . identifier[put] ( identifier[self] . identifier[url] + literal[string] + identifier[str] ( identifier[alert] [ literal[string] ]), identifier[headers] = identifier[self] . identifier[hdrs] , identifier[data] = identifier[json] . identifier[dumps] ({ literal[string] : identifier[alert] }), identifier[verify] = identifier[self] . identifier[ssl_verify] )
keyword[return] identifier[self] . identifier[_request_result] ( identifier[res] ) | def update_alert(self, alert):
"""**Description**
Update a modified threshold-based alert.
**Arguments**
- **alert**: one modified alert object of the same format as those in the list returned by :func:`~SdcClient.get_alerts`.
**Success Return Value**
The updated alert.
**Example**
`examples/update_alert.py <https://github.com/draios/python-sdc-client/blob/master/examples/update_alert.py>`_
"""
if 'id' not in alert:
return [False, 'Invalid alert format'] # depends on [control=['if'], data=[]]
res = requests.put(self.url + '/api/alerts/' + str(alert['id']), headers=self.hdrs, data=json.dumps({'alert': alert}), verify=self.ssl_verify)
return self._request_result(res) |
def control_group(self, control_group_id, ctrl, shift, alt):
    """Act on a control group, selecting, setting, etc."""
    action = sc_pb.Action()
    select = action.action_ui.control_group
    mod = sc_ui.ActionControlGroup
    # Map the (ctrl, shift, alt) modifier combination to an action; any
    # combination not listed here is unknown and performs nothing.
    actions_by_modifiers = {
        (False, False, False): mod.Recall,
        (True, False, False): mod.Set,
        (False, True, False): mod.Append,
        (False, False, True): mod.SetAndSteal,
        (False, True, True): mod.AppendAndSteal,
    }
    modifiers = (bool(ctrl), bool(shift), bool(alt))
    if modifiers not in actions_by_modifiers:
        return  # unknown combination
    select.action = actions_by_modifiers[modifiers]
    select.control_group_index = control_group_id
    return action
constant[Act on a control group, selecting, setting, etc.]
variable[action] assign[=] call[name[sc_pb].Action, parameter[]]
variable[select] assign[=] name[action].action_ui.control_group
variable[mod] assign[=] name[sc_ui].ActionControlGroup
if <ast.BoolOp object at 0x7da2047eb400> begin[:]
name[select].action assign[=] name[mod].Recall
name[select].control_group_index assign[=] name[control_group_id]
return[name[action]] | keyword[def] identifier[control_group] ( identifier[self] , identifier[control_group_id] , identifier[ctrl] , identifier[shift] , identifier[alt] ):
literal[string]
identifier[action] = identifier[sc_pb] . identifier[Action] ()
identifier[select] = identifier[action] . identifier[action_ui] . identifier[control_group]
identifier[mod] = identifier[sc_ui] . identifier[ActionControlGroup]
keyword[if] keyword[not] identifier[ctrl] keyword[and] keyword[not] identifier[shift] keyword[and] keyword[not] identifier[alt] :
identifier[select] . identifier[action] = identifier[mod] . identifier[Recall]
keyword[elif] identifier[ctrl] keyword[and] keyword[not] identifier[shift] keyword[and] keyword[not] identifier[alt] :
identifier[select] . identifier[action] = identifier[mod] . identifier[Set]
keyword[elif] keyword[not] identifier[ctrl] keyword[and] identifier[shift] keyword[and] keyword[not] identifier[alt] :
identifier[select] . identifier[action] = identifier[mod] . identifier[Append]
keyword[elif] keyword[not] identifier[ctrl] keyword[and] keyword[not] identifier[shift] keyword[and] identifier[alt] :
identifier[select] . identifier[action] = identifier[mod] . identifier[SetAndSteal]
keyword[elif] keyword[not] identifier[ctrl] keyword[and] identifier[shift] keyword[and] identifier[alt] :
identifier[select] . identifier[action] = identifier[mod] . identifier[AppendAndSteal]
keyword[else] :
keyword[return]
identifier[select] . identifier[control_group_index] = identifier[control_group_id]
keyword[return] identifier[action] | def control_group(self, control_group_id, ctrl, shift, alt):
"""Act on a control group, selecting, setting, etc."""
action = sc_pb.Action()
select = action.action_ui.control_group
mod = sc_ui.ActionControlGroup
if not ctrl and (not shift) and (not alt):
select.action = mod.Recall # depends on [control=['if'], data=[]]
elif ctrl and (not shift) and (not alt):
select.action = mod.Set # depends on [control=['if'], data=[]]
elif not ctrl and shift and (not alt):
select.action = mod.Append # depends on [control=['if'], data=[]]
elif not ctrl and (not shift) and alt:
select.action = mod.SetAndSteal # depends on [control=['if'], data=[]]
elif not ctrl and shift and alt:
select.action = mod.AppendAndSteal # depends on [control=['if'], data=[]]
else:
return # unknown
select.control_group_index = control_group_id
return action |
def authorized(remote_app=None):
"""Authorized handler callback."""
if remote_app not in current_oauthclient.handlers:
return abort(404)
state_token = request.args.get('state')
# Verify state parameter
try:
assert state_token
# Checks authenticity and integrity of state and decodes the value.
state = serializer.loads(state_token)
# Verify that state is for this session, app and that next parameter
# have not been modified.
assert state['sid'] == _create_identifier()
assert state['app'] == remote_app
# Store next URL
set_session_next_url(remote_app, state['next'])
except (AssertionError, BadData):
if current_app.config.get('OAUTHCLIENT_STATE_ENABLED', True) or (
not(current_app.debug or current_app.testing)):
abort(403)
try:
handler = current_oauthclient.handlers[remote_app]()
except OAuthException as e:
if e.type == 'invalid_response':
abort(500)
else:
raise
return handler | def function[authorized, parameter[remote_app]]:
constant[Authorized handler callback.]
if compare[name[remote_app] <ast.NotIn object at 0x7da2590d7190> name[current_oauthclient].handlers] begin[:]
return[call[name[abort], parameter[constant[404]]]]
variable[state_token] assign[=] call[name[request].args.get, parameter[constant[state]]]
<ast.Try object at 0x7da1b255ec80>
<ast.Try object at 0x7da1b255e3e0>
return[name[handler]] | keyword[def] identifier[authorized] ( identifier[remote_app] = keyword[None] ):
literal[string]
keyword[if] identifier[remote_app] keyword[not] keyword[in] identifier[current_oauthclient] . identifier[handlers] :
keyword[return] identifier[abort] ( literal[int] )
identifier[state_token] = identifier[request] . identifier[args] . identifier[get] ( literal[string] )
keyword[try] :
keyword[assert] identifier[state_token]
identifier[state] = identifier[serializer] . identifier[loads] ( identifier[state_token] )
keyword[assert] identifier[state] [ literal[string] ]== identifier[_create_identifier] ()
keyword[assert] identifier[state] [ literal[string] ]== identifier[remote_app]
identifier[set_session_next_url] ( identifier[remote_app] , identifier[state] [ literal[string] ])
keyword[except] ( identifier[AssertionError] , identifier[BadData] ):
keyword[if] identifier[current_app] . identifier[config] . identifier[get] ( literal[string] , keyword[True] ) keyword[or] (
keyword[not] ( identifier[current_app] . identifier[debug] keyword[or] identifier[current_app] . identifier[testing] )):
identifier[abort] ( literal[int] )
keyword[try] :
identifier[handler] = identifier[current_oauthclient] . identifier[handlers] [ identifier[remote_app] ]()
keyword[except] identifier[OAuthException] keyword[as] identifier[e] :
keyword[if] identifier[e] . identifier[type] == literal[string] :
identifier[abort] ( literal[int] )
keyword[else] :
keyword[raise]
keyword[return] identifier[handler] | def authorized(remote_app=None):
"""Authorized handler callback."""
if remote_app not in current_oauthclient.handlers:
return abort(404) # depends on [control=['if'], data=[]]
state_token = request.args.get('state')
# Verify state parameter
try:
assert state_token
# Checks authenticity and integrity of state and decodes the value.
state = serializer.loads(state_token)
# Verify that state is for this session, app and that next parameter
# have not been modified.
assert state['sid'] == _create_identifier()
assert state['app'] == remote_app
# Store next URL
set_session_next_url(remote_app, state['next']) # depends on [control=['try'], data=[]]
except (AssertionError, BadData):
if current_app.config.get('OAUTHCLIENT_STATE_ENABLED', True) or not (current_app.debug or current_app.testing):
abort(403) # depends on [control=['if'], data=[]] # depends on [control=['except'], data=[]]
try:
handler = current_oauthclient.handlers[remote_app]() # depends on [control=['try'], data=[]]
except OAuthException as e:
if e.type == 'invalid_response':
abort(500) # depends on [control=['if'], data=[]]
else:
raise # depends on [control=['except'], data=['e']]
return handler |
def avail_images(conn=None, call=None):
'''
Return a list of the server appliances that are on the provider
'''
if call == 'action':
raise SaltCloudSystemExit(
'The avail_images function must be called with '
'-f or --function, or with the --list-images option'
)
if not conn:
conn = get_conn()
ret = {}
for appliance in conn.list_appliances():
ret[appliance['name']] = appliance
return ret | def function[avail_images, parameter[conn, call]]:
constant[
Return a list of the server appliances that are on the provider
]
if compare[name[call] equal[==] constant[action]] begin[:]
<ast.Raise object at 0x7da20c7c9c60>
if <ast.UnaryOp object at 0x7da18fe91720> begin[:]
variable[conn] assign[=] call[name[get_conn], parameter[]]
variable[ret] assign[=] dictionary[[], []]
for taget[name[appliance]] in starred[call[name[conn].list_appliances, parameter[]]] begin[:]
call[name[ret]][call[name[appliance]][constant[name]]] assign[=] name[appliance]
return[name[ret]] | keyword[def] identifier[avail_images] ( identifier[conn] = keyword[None] , identifier[call] = keyword[None] ):
literal[string]
keyword[if] identifier[call] == literal[string] :
keyword[raise] identifier[SaltCloudSystemExit] (
literal[string]
literal[string]
)
keyword[if] keyword[not] identifier[conn] :
identifier[conn] = identifier[get_conn] ()
identifier[ret] ={}
keyword[for] identifier[appliance] keyword[in] identifier[conn] . identifier[list_appliances] ():
identifier[ret] [ identifier[appliance] [ literal[string] ]]= identifier[appliance]
keyword[return] identifier[ret] | def avail_images(conn=None, call=None):
"""
Return a list of the server appliances that are on the provider
"""
if call == 'action':
raise SaltCloudSystemExit('The avail_images function must be called with -f or --function, or with the --list-images option') # depends on [control=['if'], data=[]]
if not conn:
conn = get_conn() # depends on [control=['if'], data=[]]
ret = {}
for appliance in conn.list_appliances():
ret[appliance['name']] = appliance # depends on [control=['for'], data=['appliance']]
return ret |
def delta(d1, d2, opt='d'):
"""Compute difference between given 2 dates in month/day.
"""
delta = 0
if opt == 'm':
while True:
mdays = monthrange(d1.year, d1.month)[1]
d1 += timedelta(days=mdays)
if d1 <= d2:
delta += 1
else:
break
else:
delta = (d2 - d1).days
return delta | def function[delta, parameter[d1, d2, opt]]:
constant[Compute difference between given 2 dates in month/day.
]
variable[delta] assign[=] constant[0]
if compare[name[opt] equal[==] constant[m]] begin[:]
while constant[True] begin[:]
variable[mdays] assign[=] call[call[name[monthrange], parameter[name[d1].year, name[d1].month]]][constant[1]]
<ast.AugAssign object at 0x7da1b06276a0>
if compare[name[d1] less_or_equal[<=] name[d2]] begin[:]
<ast.AugAssign object at 0x7da207f02770>
return[name[delta]] | keyword[def] identifier[delta] ( identifier[d1] , identifier[d2] , identifier[opt] = literal[string] ):
literal[string]
identifier[delta] = literal[int]
keyword[if] identifier[opt] == literal[string] :
keyword[while] keyword[True] :
identifier[mdays] = identifier[monthrange] ( identifier[d1] . identifier[year] , identifier[d1] . identifier[month] )[ literal[int] ]
identifier[d1] += identifier[timedelta] ( identifier[days] = identifier[mdays] )
keyword[if] identifier[d1] <= identifier[d2] :
identifier[delta] += literal[int]
keyword[else] :
keyword[break]
keyword[else] :
identifier[delta] =( identifier[d2] - identifier[d1] ). identifier[days]
keyword[return] identifier[delta] | def delta(d1, d2, opt='d'):
"""Compute difference between given 2 dates in month/day.
"""
delta = 0
if opt == 'm':
while True:
mdays = monthrange(d1.year, d1.month)[1]
d1 += timedelta(days=mdays)
if d1 <= d2:
delta += 1 # depends on [control=['if'], data=[]]
else:
break # depends on [control=['while'], data=[]] # depends on [control=['if'], data=[]]
else:
delta = (d2 - d1).days
return delta |
def _parse_textgroup_wrapper(self, cts_file):
""" Wraps with a Try/Except the textgroup parsing from a cts file
:param cts_file: Path to the CTS File
:type cts_file: str
:return: CtsTextgroupMetadata
"""
try:
return self._parse_textgroup(cts_file)
except Exception as E:
self.logger.error("Error parsing %s ", cts_file)
if self.RAISE_ON_GENERIC_PARSING_ERROR:
raise E | def function[_parse_textgroup_wrapper, parameter[self, cts_file]]:
constant[ Wraps with a Try/Except the textgroup parsing from a cts file
:param cts_file: Path to the CTS File
:type cts_file: str
:return: CtsTextgroupMetadata
]
<ast.Try object at 0x7da20c6aaf20> | keyword[def] identifier[_parse_textgroup_wrapper] ( identifier[self] , identifier[cts_file] ):
literal[string]
keyword[try] :
keyword[return] identifier[self] . identifier[_parse_textgroup] ( identifier[cts_file] )
keyword[except] identifier[Exception] keyword[as] identifier[E] :
identifier[self] . identifier[logger] . identifier[error] ( literal[string] , identifier[cts_file] )
keyword[if] identifier[self] . identifier[RAISE_ON_GENERIC_PARSING_ERROR] :
keyword[raise] identifier[E] | def _parse_textgroup_wrapper(self, cts_file):
""" Wraps with a Try/Except the textgroup parsing from a cts file
:param cts_file: Path to the CTS File
:type cts_file: str
:return: CtsTextgroupMetadata
"""
try:
return self._parse_textgroup(cts_file) # depends on [control=['try'], data=[]]
except Exception as E:
self.logger.error('Error parsing %s ', cts_file)
if self.RAISE_ON_GENERIC_PARSING_ERROR:
raise E # depends on [control=['if'], data=[]] # depends on [control=['except'], data=['E']] |
def _insertGlyph(self, glyph, name, **kwargs):
"""
This is the environment implementation of
:meth:`BaseLayer.__setitem__` and :meth:`BaseFont.__setitem__`.
This must return an instance of a :class:`BaseGlyph` subclass.
**glyph** will be a glyph object with the attributes necessary
for copying as defined in :meth:`BaseGlyph.copy` An environment
must not insert **glyph** directly. Instead the data from
**glyph** should be copied to a new glyph instead. **name**
will be a :ref:`type-string` representing a glyph name. It
will have been normalized with :func:`normalizers.normalizeGlyphName`.
**name** will have been tested to make sure that no glyph with
the same name exists in the layer.
Subclasses may override this method.
"""
if glyph.name is None or (name != glyph.name and glyph.name in self):
glyph = glyph.copy()
glyph.name = name
dest = self.newGlyph(name, clear=kwargs.get("clear", True))
dest.copyData(glyph)
return dest | def function[_insertGlyph, parameter[self, glyph, name]]:
constant[
This is the environment implementation of
:meth:`BaseLayer.__setitem__` and :meth:`BaseFont.__setitem__`.
This must return an instance of a :class:`BaseGlyph` subclass.
**glyph** will be a glyph object with the attributes necessary
for copying as defined in :meth:`BaseGlyph.copy` An environment
must not insert **glyph** directly. Instead the data from
**glyph** should be copied to a new glyph instead. **name**
will be a :ref:`type-string` representing a glyph name. It
will have been normalized with :func:`normalizers.normalizeGlyphName`.
**name** will have been tested to make sure that no glyph with
the same name exists in the layer.
Subclasses may override this method.
]
if <ast.BoolOp object at 0x7da20c76fe20> begin[:]
variable[glyph] assign[=] call[name[glyph].copy, parameter[]]
name[glyph].name assign[=] name[name]
variable[dest] assign[=] call[name[self].newGlyph, parameter[name[name]]]
call[name[dest].copyData, parameter[name[glyph]]]
return[name[dest]] | keyword[def] identifier[_insertGlyph] ( identifier[self] , identifier[glyph] , identifier[name] ,** identifier[kwargs] ):
literal[string]
keyword[if] identifier[glyph] . identifier[name] keyword[is] keyword[None] keyword[or] ( identifier[name] != identifier[glyph] . identifier[name] keyword[and] identifier[glyph] . identifier[name] keyword[in] identifier[self] ):
identifier[glyph] = identifier[glyph] . identifier[copy] ()
identifier[glyph] . identifier[name] = identifier[name]
identifier[dest] = identifier[self] . identifier[newGlyph] ( identifier[name] , identifier[clear] = identifier[kwargs] . identifier[get] ( literal[string] , keyword[True] ))
identifier[dest] . identifier[copyData] ( identifier[glyph] )
keyword[return] identifier[dest] | def _insertGlyph(self, glyph, name, **kwargs):
"""
This is the environment implementation of
:meth:`BaseLayer.__setitem__` and :meth:`BaseFont.__setitem__`.
This must return an instance of a :class:`BaseGlyph` subclass.
**glyph** will be a glyph object with the attributes necessary
for copying as defined in :meth:`BaseGlyph.copy` An environment
must not insert **glyph** directly. Instead the data from
**glyph** should be copied to a new glyph instead. **name**
will be a :ref:`type-string` representing a glyph name. It
will have been normalized with :func:`normalizers.normalizeGlyphName`.
**name** will have been tested to make sure that no glyph with
the same name exists in the layer.
Subclasses may override this method.
"""
if glyph.name is None or (name != glyph.name and glyph.name in self):
glyph = glyph.copy()
glyph.name = name # depends on [control=['if'], data=[]]
dest = self.newGlyph(name, clear=kwargs.get('clear', True))
dest.copyData(glyph)
return dest |
def _prepare_env(self, kwargs):
"""Returns a modifed copy of kwargs['env'], and a copy of kwargs with 'env' removed.
If there is no 'env' field in the kwargs, os.environ.copy() is used.
env['PATH'] is set/modified to contain the Node distribution's bin directory at the front.
:param kwargs: The original kwargs.
:returns: An (env, kwargs) tuple containing the modified env and kwargs copies.
:rtype: (dict, dict)
"""
kwargs = kwargs.copy()
env = kwargs.pop('env', os.environ).copy()
env['PATH'] = create_path_env_var(self.extra_paths, env=env, prepend=True)
return env, kwargs | def function[_prepare_env, parameter[self, kwargs]]:
constant[Returns a modifed copy of kwargs['env'], and a copy of kwargs with 'env' removed.
If there is no 'env' field in the kwargs, os.environ.copy() is used.
env['PATH'] is set/modified to contain the Node distribution's bin directory at the front.
:param kwargs: The original kwargs.
:returns: An (env, kwargs) tuple containing the modified env and kwargs copies.
:rtype: (dict, dict)
]
variable[kwargs] assign[=] call[name[kwargs].copy, parameter[]]
variable[env] assign[=] call[call[name[kwargs].pop, parameter[constant[env], name[os].environ]].copy, parameter[]]
call[name[env]][constant[PATH]] assign[=] call[name[create_path_env_var], parameter[name[self].extra_paths]]
return[tuple[[<ast.Name object at 0x7da1b1eed870>, <ast.Name object at 0x7da1b1eeeb30>]]] | keyword[def] identifier[_prepare_env] ( identifier[self] , identifier[kwargs] ):
literal[string]
identifier[kwargs] = identifier[kwargs] . identifier[copy] ()
identifier[env] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[os] . identifier[environ] ). identifier[copy] ()
identifier[env] [ literal[string] ]= identifier[create_path_env_var] ( identifier[self] . identifier[extra_paths] , identifier[env] = identifier[env] , identifier[prepend] = keyword[True] )
keyword[return] identifier[env] , identifier[kwargs] | def _prepare_env(self, kwargs):
"""Returns a modifed copy of kwargs['env'], and a copy of kwargs with 'env' removed.
If there is no 'env' field in the kwargs, os.environ.copy() is used.
env['PATH'] is set/modified to contain the Node distribution's bin directory at the front.
:param kwargs: The original kwargs.
:returns: An (env, kwargs) tuple containing the modified env and kwargs copies.
:rtype: (dict, dict)
"""
kwargs = kwargs.copy()
env = kwargs.pop('env', os.environ).copy()
env['PATH'] = create_path_env_var(self.extra_paths, env=env, prepend=True)
return (env, kwargs) |
def split_n(string, seps, reg=False):
r"""Split strings into n-dimensional list.
::
from torequests.utils import split_n
ss = '''a b c d e f 1 2 3 4 5 6
a b c d e f 1 2 3 4 5 6
a b c d e f 1 2 3 4 5 6'''
print(split_n(ss, ('\n', ' ', ' ')))
# [[['a', 'b', 'c'], ['d', 'e', 'f'], ['1', '2', '3'], ['4', '5', '6']], [['a', 'b', 'c'], ['d', 'e', 'f'], ['1', '2', '3'], ['4', '5', '6']], [['a', 'b', 'c'], ['d', 'e', 'f'], ['1', '2', '3'], ['4', '5', '6']]]
print(split_n(ss, ['\s+'], reg=1))
# ['a', 'b', 'c', 'd', 'e', 'f', '1', '2', '3', '4', '5', '6', 'a', 'b', 'c', 'd', 'e', 'f', '1', '2', '3', '4', '5', '6', 'a', 'b', 'c', 'd', 'e', 'f', '1', '2', '3', '4', '5', '6']
"""
deep = len(seps)
if not deep:
return string
return [split_n(i, seps[1:]) for i in _re_split_mixin(string, seps[0], reg=reg)] | def function[split_n, parameter[string, seps, reg]]:
constant[Split strings into n-dimensional list.
::
from torequests.utils import split_n
ss = '''a b c d e f 1 2 3 4 5 6
a b c d e f 1 2 3 4 5 6
a b c d e f 1 2 3 4 5 6'''
print(split_n(ss, ('\n', ' ', ' ')))
# [[['a', 'b', 'c'], ['d', 'e', 'f'], ['1', '2', '3'], ['4', '5', '6']], [['a', 'b', 'c'], ['d', 'e', 'f'], ['1', '2', '3'], ['4', '5', '6']], [['a', 'b', 'c'], ['d', 'e', 'f'], ['1', '2', '3'], ['4', '5', '6']]]
print(split_n(ss, ['\s+'], reg=1))
# ['a', 'b', 'c', 'd', 'e', 'f', '1', '2', '3', '4', '5', '6', 'a', 'b', 'c', 'd', 'e', 'f', '1', '2', '3', '4', '5', '6', 'a', 'b', 'c', 'd', 'e', 'f', '1', '2', '3', '4', '5', '6']
]
variable[deep] assign[=] call[name[len], parameter[name[seps]]]
if <ast.UnaryOp object at 0x7da204346a70> begin[:]
return[name[string]]
return[<ast.ListComp object at 0x7da204347fa0>] | keyword[def] identifier[split_n] ( identifier[string] , identifier[seps] , identifier[reg] = keyword[False] ):
literal[string]
identifier[deep] = identifier[len] ( identifier[seps] )
keyword[if] keyword[not] identifier[deep] :
keyword[return] identifier[string]
keyword[return] [ identifier[split_n] ( identifier[i] , identifier[seps] [ literal[int] :]) keyword[for] identifier[i] keyword[in] identifier[_re_split_mixin] ( identifier[string] , identifier[seps] [ literal[int] ], identifier[reg] = identifier[reg] )] | def split_n(string, seps, reg=False):
"""Split strings into n-dimensional list.
::
from torequests.utils import split_n
ss = '''a b c d e f 1 2 3 4 5 6
a b c d e f 1 2 3 4 5 6
a b c d e f 1 2 3 4 5 6'''
print(split_n(ss, ('\\n', ' ', ' ')))
# [[['a', 'b', 'c'], ['d', 'e', 'f'], ['1', '2', '3'], ['4', '5', '6']], [['a', 'b', 'c'], ['d', 'e', 'f'], ['1', '2', '3'], ['4', '5', '6']], [['a', 'b', 'c'], ['d', 'e', 'f'], ['1', '2', '3'], ['4', '5', '6']]]
print(split_n(ss, ['\\s+'], reg=1))
# ['a', 'b', 'c', 'd', 'e', 'f', '1', '2', '3', '4', '5', '6', 'a', 'b', 'c', 'd', 'e', 'f', '1', '2', '3', '4', '5', '6', 'a', 'b', 'c', 'd', 'e', 'f', '1', '2', '3', '4', '5', '6']
"""
deep = len(seps)
if not deep:
return string # depends on [control=['if'], data=[]]
return [split_n(i, seps[1:]) for i in _re_split_mixin(string, seps[0], reg=reg)] |
def nvrtcAddNameExpression(self, prog, name_expression):
"""
Notes the given name expression denoting a __global__ function or
function template instantiation.
"""
code = self._lib.nvrtcAddNameExpression(prog,
c_char_p(encode_str(name_expression)))
self._throw_on_error(code)
return | def function[nvrtcAddNameExpression, parameter[self, prog, name_expression]]:
constant[
Notes the given name expression denoting a __global__ function or
function template instantiation.
]
variable[code] assign[=] call[name[self]._lib.nvrtcAddNameExpression, parameter[name[prog], call[name[c_char_p], parameter[call[name[encode_str], parameter[name[name_expression]]]]]]]
call[name[self]._throw_on_error, parameter[name[code]]]
return[None] | keyword[def] identifier[nvrtcAddNameExpression] ( identifier[self] , identifier[prog] , identifier[name_expression] ):
literal[string]
identifier[code] = identifier[self] . identifier[_lib] . identifier[nvrtcAddNameExpression] ( identifier[prog] ,
identifier[c_char_p] ( identifier[encode_str] ( identifier[name_expression] )))
identifier[self] . identifier[_throw_on_error] ( identifier[code] )
keyword[return] | def nvrtcAddNameExpression(self, prog, name_expression):
"""
Notes the given name expression denoting a __global__ function or
function template instantiation.
"""
code = self._lib.nvrtcAddNameExpression(prog, c_char_p(encode_str(name_expression)))
self._throw_on_error(code)
return |
def unpublish_object(content_type_pk, obj_pk):
"""
Unbuild all views related to a object and then sync to S3.
Accepts primary keys to retrieve a model object that
inherits bakery's BuildableModel class.
"""
ct = ContentType.objects.get_for_id(content_type_pk)
obj = ct.get_object_for_this_type(pk=obj_pk)
try:
# Unbuild the object
logger.info("unpublish_object task has received %s" % obj)
obj.unbuild()
# Run the `publish` management command unless the
# ALLOW_BAKERY_AUTO_PUBLISHING variable is explictly set to False.
if not getattr(settings, 'ALLOW_BAKERY_AUTO_PUBLISHING', True):
logger.info("Not running publish command because \
ALLOW_BAKERY_AUTO_PUBLISHING is False")
else:
management.call_command("publish")
except Exception:
# Log the error if this crashes
logger.error("Task Error: unpublish_object", exc_info=True) | def function[unpublish_object, parameter[content_type_pk, obj_pk]]:
constant[
Unbuild all views related to a object and then sync to S3.
Accepts primary keys to retrieve a model object that
inherits bakery's BuildableModel class.
]
variable[ct] assign[=] call[name[ContentType].objects.get_for_id, parameter[name[content_type_pk]]]
variable[obj] assign[=] call[name[ct].get_object_for_this_type, parameter[]]
<ast.Try object at 0x7da2045676a0> | keyword[def] identifier[unpublish_object] ( identifier[content_type_pk] , identifier[obj_pk] ):
literal[string]
identifier[ct] = identifier[ContentType] . identifier[objects] . identifier[get_for_id] ( identifier[content_type_pk] )
identifier[obj] = identifier[ct] . identifier[get_object_for_this_type] ( identifier[pk] = identifier[obj_pk] )
keyword[try] :
identifier[logger] . identifier[info] ( literal[string] % identifier[obj] )
identifier[obj] . identifier[unbuild] ()
keyword[if] keyword[not] identifier[getattr] ( identifier[settings] , literal[string] , keyword[True] ):
identifier[logger] . identifier[info] ( literal[string] )
keyword[else] :
identifier[management] . identifier[call_command] ( literal[string] )
keyword[except] identifier[Exception] :
identifier[logger] . identifier[error] ( literal[string] , identifier[exc_info] = keyword[True] ) | def unpublish_object(content_type_pk, obj_pk):
"""
Unbuild all views related to a object and then sync to S3.
Accepts primary keys to retrieve a model object that
inherits bakery's BuildableModel class.
"""
ct = ContentType.objects.get_for_id(content_type_pk)
obj = ct.get_object_for_this_type(pk=obj_pk)
try:
# Unbuild the object
logger.info('unpublish_object task has received %s' % obj)
obj.unbuild()
# Run the `publish` management command unless the
# ALLOW_BAKERY_AUTO_PUBLISHING variable is explictly set to False.
if not getattr(settings, 'ALLOW_BAKERY_AUTO_PUBLISHING', True):
logger.info('Not running publish command because ALLOW_BAKERY_AUTO_PUBLISHING is False') # depends on [control=['if'], data=[]]
else:
management.call_command('publish') # depends on [control=['try'], data=[]]
except Exception:
# Log the error if this crashes
logger.error('Task Error: unpublish_object', exc_info=True) # depends on [control=['except'], data=[]] |
def update_vrf_table(self, route_dist, prefix=None, next_hop=None,
route_family=None, route_type=None, tunnel_type=None,
is_withdraw=False, redundancy_mode=None,
pmsi_tunnel_type=None, **kwargs):
"""Update a BGP route in the VRF table identified by `route_dist`
with the given `next_hop`.
If `is_withdraw` is False, which is the default, add a BGP route
to the VRF table identified by `route_dist` with the given
`next_hop`.
If `is_withdraw` is True, remove a BGP route from the VRF table
and the given `next_hop` is ignored.
If `route_family` is VRF_RF_L2_EVPN, `route_type` and `kwargs`
are required to construct EVPN NLRI and `prefix` is ignored.
``redundancy_mode`` specifies a redundancy mode type.
` `pmsi_tunnel_type` specifies the type of the PMSI tunnel attribute
used to encode the multicast tunnel identifier.
This field is advertised only if route_type is
EVPN_MULTICAST_ETAG_ROUTE.
Returns assigned VPN label.
"""
from ryu.services.protocols.bgp.core import BgpCoreError
assert route_dist
if is_withdraw:
gen_lbl = False
next_hop = None
else:
gen_lbl = True
if not (is_valid_ipv4(next_hop) or is_valid_ipv6(next_hop)):
raise BgpCoreError(
desc='Invalid IPv4/IPv6 nexthop: %s' % next_hop)
vrf_table = self._tables.get((route_dist, route_family))
if vrf_table is None:
raise BgpCoreError(
desc='VRF table does not exist: route_dist=%s, '
'route_family=%s' % (route_dist, route_family))
vni = kwargs.get('vni', None)
if route_family == VRF_RF_IPV4:
if not is_valid_ipv4_prefix(prefix):
raise BgpCoreError(desc='Invalid IPv4 prefix: %s' % prefix)
ip, masklen = prefix.split('/')
prefix = IPAddrPrefix(int(masklen), ip)
elif route_family == VRF_RF_IPV6:
if not is_valid_ipv6_prefix(prefix):
raise BgpCoreError(desc='Invalid IPv6 prefix: %s' % prefix)
ip6, masklen = prefix.split('/')
prefix = IP6AddrPrefix(int(masklen), ip6)
elif route_family == VRF_RF_L2_EVPN:
assert route_type
if route_type == EvpnMacIPAdvertisementNLRI.ROUTE_TYPE_NAME:
# MPLS labels will be assigned automatically
kwargs['mpls_labels'] = []
if route_type == EvpnInclusiveMulticastEthernetTagNLRI.ROUTE_TYPE_NAME:
# Inclusive Multicast Ethernet Tag Route does not have "vni",
# omit "vni" from "kwargs" here.
vni = kwargs.pop('vni', None)
subclass = EvpnNLRI._lookup_type_name(route_type)
kwargs['route_dist'] = route_dist
esi = kwargs.get('esi', None)
if esi is not None:
if isinstance(esi, dict):
esi_type = esi.get('type', 0)
esi_class = EvpnEsi._lookup_type(esi_type)
kwargs['esi'] = esi_class.from_jsondict(esi)
else: # isinstance(esi, numbers.Integral)
kwargs['esi'] = EvpnArbitraryEsi(
type_desc.Int9.from_user(esi))
if vni is not None:
# Disable to generate MPLS labels,
# because encapsulation type is not MPLS.
from ryu.services.protocols.bgp.api.prefix import (
TUNNEL_TYPE_VXLAN, TUNNEL_TYPE_NVGRE)
assert tunnel_type in [
None, TUNNEL_TYPE_VXLAN, TUNNEL_TYPE_NVGRE]
gen_lbl = False
prefix = subclass(**kwargs)
else:
raise BgpCoreError(
desc='Unsupported route family %s' % route_family)
# We do not check if we have a path to given prefix, we issue
# withdrawal. Hence multiple withdrawals have not side effect.
return vrf_table.insert_vrf_path(
nlri=prefix, next_hop=next_hop, gen_lbl=gen_lbl,
is_withdraw=is_withdraw, redundancy_mode=redundancy_mode,
vni=vni, tunnel_type=tunnel_type,
pmsi_tunnel_type=pmsi_tunnel_type) | def function[update_vrf_table, parameter[self, route_dist, prefix, next_hop, route_family, route_type, tunnel_type, is_withdraw, redundancy_mode, pmsi_tunnel_type]]:
constant[Update a BGP route in the VRF table identified by `route_dist`
with the given `next_hop`.
If `is_withdraw` is False, which is the default, add a BGP route
to the VRF table identified by `route_dist` with the given
`next_hop`.
If `is_withdraw` is True, remove a BGP route from the VRF table
and the given `next_hop` is ignored.
If `route_family` is VRF_RF_L2_EVPN, `route_type` and `kwargs`
are required to construct EVPN NLRI and `prefix` is ignored.
``redundancy_mode`` specifies a redundancy mode type.
` `pmsi_tunnel_type` specifies the type of the PMSI tunnel attribute
used to encode the multicast tunnel identifier.
This field is advertised only if route_type is
EVPN_MULTICAST_ETAG_ROUTE.
Returns assigned VPN label.
]
from relative_module[ryu.services.protocols.bgp.core] import module[BgpCoreError]
assert[name[route_dist]]
if name[is_withdraw] begin[:]
variable[gen_lbl] assign[=] constant[False]
variable[next_hop] assign[=] constant[None]
variable[vrf_table] assign[=] call[name[self]._tables.get, parameter[tuple[[<ast.Name object at 0x7da1b1a236d0>, <ast.Name object at 0x7da1b1a223b0>]]]]
if compare[name[vrf_table] is constant[None]] begin[:]
<ast.Raise object at 0x7da1b1a22e60>
variable[vni] assign[=] call[name[kwargs].get, parameter[constant[vni], constant[None]]]
if compare[name[route_family] equal[==] name[VRF_RF_IPV4]] begin[:]
if <ast.UnaryOp object at 0x7da1b1a20d60> begin[:]
<ast.Raise object at 0x7da1b1a23df0>
<ast.Tuple object at 0x7da1b1a235e0> assign[=] call[name[prefix].split, parameter[constant[/]]]
variable[prefix] assign[=] call[name[IPAddrPrefix], parameter[call[name[int], parameter[name[masklen]]], name[ip]]]
return[call[name[vrf_table].insert_vrf_path, parameter[]]] | keyword[def] identifier[update_vrf_table] ( identifier[self] , identifier[route_dist] , identifier[prefix] = keyword[None] , identifier[next_hop] = keyword[None] ,
identifier[route_family] = keyword[None] , identifier[route_type] = keyword[None] , identifier[tunnel_type] = keyword[None] ,
identifier[is_withdraw] = keyword[False] , identifier[redundancy_mode] = keyword[None] ,
identifier[pmsi_tunnel_type] = keyword[None] ,** identifier[kwargs] ):
literal[string]
keyword[from] identifier[ryu] . identifier[services] . identifier[protocols] . identifier[bgp] . identifier[core] keyword[import] identifier[BgpCoreError]
keyword[assert] identifier[route_dist]
keyword[if] identifier[is_withdraw] :
identifier[gen_lbl] = keyword[False]
identifier[next_hop] = keyword[None]
keyword[else] :
identifier[gen_lbl] = keyword[True]
keyword[if] keyword[not] ( identifier[is_valid_ipv4] ( identifier[next_hop] ) keyword[or] identifier[is_valid_ipv6] ( identifier[next_hop] )):
keyword[raise] identifier[BgpCoreError] (
identifier[desc] = literal[string] % identifier[next_hop] )
identifier[vrf_table] = identifier[self] . identifier[_tables] . identifier[get] (( identifier[route_dist] , identifier[route_family] ))
keyword[if] identifier[vrf_table] keyword[is] keyword[None] :
keyword[raise] identifier[BgpCoreError] (
identifier[desc] = literal[string]
literal[string] %( identifier[route_dist] , identifier[route_family] ))
identifier[vni] = identifier[kwargs] . identifier[get] ( literal[string] , keyword[None] )
keyword[if] identifier[route_family] == identifier[VRF_RF_IPV4] :
keyword[if] keyword[not] identifier[is_valid_ipv4_prefix] ( identifier[prefix] ):
keyword[raise] identifier[BgpCoreError] ( identifier[desc] = literal[string] % identifier[prefix] )
identifier[ip] , identifier[masklen] = identifier[prefix] . identifier[split] ( literal[string] )
identifier[prefix] = identifier[IPAddrPrefix] ( identifier[int] ( identifier[masklen] ), identifier[ip] )
keyword[elif] identifier[route_family] == identifier[VRF_RF_IPV6] :
keyword[if] keyword[not] identifier[is_valid_ipv6_prefix] ( identifier[prefix] ):
keyword[raise] identifier[BgpCoreError] ( identifier[desc] = literal[string] % identifier[prefix] )
identifier[ip6] , identifier[masklen] = identifier[prefix] . identifier[split] ( literal[string] )
identifier[prefix] = identifier[IP6AddrPrefix] ( identifier[int] ( identifier[masklen] ), identifier[ip6] )
keyword[elif] identifier[route_family] == identifier[VRF_RF_L2_EVPN] :
keyword[assert] identifier[route_type]
keyword[if] identifier[route_type] == identifier[EvpnMacIPAdvertisementNLRI] . identifier[ROUTE_TYPE_NAME] :
identifier[kwargs] [ literal[string] ]=[]
keyword[if] identifier[route_type] == identifier[EvpnInclusiveMulticastEthernetTagNLRI] . identifier[ROUTE_TYPE_NAME] :
identifier[vni] = identifier[kwargs] . identifier[pop] ( literal[string] , keyword[None] )
identifier[subclass] = identifier[EvpnNLRI] . identifier[_lookup_type_name] ( identifier[route_type] )
identifier[kwargs] [ literal[string] ]= identifier[route_dist]
identifier[esi] = identifier[kwargs] . identifier[get] ( literal[string] , keyword[None] )
keyword[if] identifier[esi] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[isinstance] ( identifier[esi] , identifier[dict] ):
identifier[esi_type] = identifier[esi] . identifier[get] ( literal[string] , literal[int] )
identifier[esi_class] = identifier[EvpnEsi] . identifier[_lookup_type] ( identifier[esi_type] )
identifier[kwargs] [ literal[string] ]= identifier[esi_class] . identifier[from_jsondict] ( identifier[esi] )
keyword[else] :
identifier[kwargs] [ literal[string] ]= identifier[EvpnArbitraryEsi] (
identifier[type_desc] . identifier[Int9] . identifier[from_user] ( identifier[esi] ))
keyword[if] identifier[vni] keyword[is] keyword[not] keyword[None] :
keyword[from] identifier[ryu] . identifier[services] . identifier[protocols] . identifier[bgp] . identifier[api] . identifier[prefix] keyword[import] (
identifier[TUNNEL_TYPE_VXLAN] , identifier[TUNNEL_TYPE_NVGRE] )
keyword[assert] identifier[tunnel_type] keyword[in] [
keyword[None] , identifier[TUNNEL_TYPE_VXLAN] , identifier[TUNNEL_TYPE_NVGRE] ]
identifier[gen_lbl] = keyword[False]
identifier[prefix] = identifier[subclass] (** identifier[kwargs] )
keyword[else] :
keyword[raise] identifier[BgpCoreError] (
identifier[desc] = literal[string] % identifier[route_family] )
keyword[return] identifier[vrf_table] . identifier[insert_vrf_path] (
identifier[nlri] = identifier[prefix] , identifier[next_hop] = identifier[next_hop] , identifier[gen_lbl] = identifier[gen_lbl] ,
identifier[is_withdraw] = identifier[is_withdraw] , identifier[redundancy_mode] = identifier[redundancy_mode] ,
identifier[vni] = identifier[vni] , identifier[tunnel_type] = identifier[tunnel_type] ,
identifier[pmsi_tunnel_type] = identifier[pmsi_tunnel_type] ) | def update_vrf_table(self, route_dist, prefix=None, next_hop=None, route_family=None, route_type=None, tunnel_type=None, is_withdraw=False, redundancy_mode=None, pmsi_tunnel_type=None, **kwargs):
"""Update a BGP route in the VRF table identified by `route_dist`
with the given `next_hop`.
If `is_withdraw` is False, which is the default, add a BGP route
to the VRF table identified by `route_dist` with the given
`next_hop`.
If `is_withdraw` is True, remove a BGP route from the VRF table
and the given `next_hop` is ignored.
If `route_family` is VRF_RF_L2_EVPN, `route_type` and `kwargs`
are required to construct EVPN NLRI and `prefix` is ignored.
``redundancy_mode`` specifies a redundancy mode type.
` `pmsi_tunnel_type` specifies the type of the PMSI tunnel attribute
used to encode the multicast tunnel identifier.
This field is advertised only if route_type is
EVPN_MULTICAST_ETAG_ROUTE.
Returns assigned VPN label.
"""
from ryu.services.protocols.bgp.core import BgpCoreError
assert route_dist
if is_withdraw:
gen_lbl = False
next_hop = None # depends on [control=['if'], data=[]]
else:
gen_lbl = True
if not (is_valid_ipv4(next_hop) or is_valid_ipv6(next_hop)):
raise BgpCoreError(desc='Invalid IPv4/IPv6 nexthop: %s' % next_hop) # depends on [control=['if'], data=[]]
vrf_table = self._tables.get((route_dist, route_family))
if vrf_table is None:
raise BgpCoreError(desc='VRF table does not exist: route_dist=%s, route_family=%s' % (route_dist, route_family)) # depends on [control=['if'], data=[]]
vni = kwargs.get('vni', None)
if route_family == VRF_RF_IPV4:
if not is_valid_ipv4_prefix(prefix):
raise BgpCoreError(desc='Invalid IPv4 prefix: %s' % prefix) # depends on [control=['if'], data=[]]
(ip, masklen) = prefix.split('/')
prefix = IPAddrPrefix(int(masklen), ip) # depends on [control=['if'], data=[]]
elif route_family == VRF_RF_IPV6:
if not is_valid_ipv6_prefix(prefix):
raise BgpCoreError(desc='Invalid IPv6 prefix: %s' % prefix) # depends on [control=['if'], data=[]]
(ip6, masklen) = prefix.split('/')
prefix = IP6AddrPrefix(int(masklen), ip6) # depends on [control=['if'], data=[]]
elif route_family == VRF_RF_L2_EVPN:
assert route_type
if route_type == EvpnMacIPAdvertisementNLRI.ROUTE_TYPE_NAME:
# MPLS labels will be assigned automatically
kwargs['mpls_labels'] = [] # depends on [control=['if'], data=[]]
if route_type == EvpnInclusiveMulticastEthernetTagNLRI.ROUTE_TYPE_NAME:
# Inclusive Multicast Ethernet Tag Route does not have "vni",
# omit "vni" from "kwargs" here.
vni = kwargs.pop('vni', None) # depends on [control=['if'], data=[]]
subclass = EvpnNLRI._lookup_type_name(route_type)
kwargs['route_dist'] = route_dist
esi = kwargs.get('esi', None)
if esi is not None:
if isinstance(esi, dict):
esi_type = esi.get('type', 0)
esi_class = EvpnEsi._lookup_type(esi_type)
kwargs['esi'] = esi_class.from_jsondict(esi) # depends on [control=['if'], data=[]]
else: # isinstance(esi, numbers.Integral)
kwargs['esi'] = EvpnArbitraryEsi(type_desc.Int9.from_user(esi)) # depends on [control=['if'], data=['esi']]
if vni is not None:
# Disable to generate MPLS labels,
# because encapsulation type is not MPLS.
from ryu.services.protocols.bgp.api.prefix import TUNNEL_TYPE_VXLAN, TUNNEL_TYPE_NVGRE
assert tunnel_type in [None, TUNNEL_TYPE_VXLAN, TUNNEL_TYPE_NVGRE]
gen_lbl = False # depends on [control=['if'], data=[]]
prefix = subclass(**kwargs) # depends on [control=['if'], data=[]]
else:
raise BgpCoreError(desc='Unsupported route family %s' % route_family)
# We do not check if we have a path to given prefix, we issue
# withdrawal. Hence multiple withdrawals have not side effect.
return vrf_table.insert_vrf_path(nlri=prefix, next_hop=next_hop, gen_lbl=gen_lbl, is_withdraw=is_withdraw, redundancy_mode=redundancy_mode, vni=vni, tunnel_type=tunnel_type, pmsi_tunnel_type=pmsi_tunnel_type) |
def add_unary(self, name, input_name, output_name, mode, alpha = 1.0,
shift = 0, scale = 1.0, epsilon = 1e-6):
"""
Add a Unary layer. Applies the specified function (mode) to all the elements of the input.
Please see the UnaryFunctionLayerParams message in Core ML neural network
protobuf for more information about the operation of this layer.
Prior to the application of the function the input can be scaled and shifted by using the 'scale',
'shift' parameters.
Parameters
----------
name: str
The name of this layer.
input_name: str
The input blob name of this layer.
output_name: str
The output blob name of this layer.
mode: str
Unary function.
Allowed values: 'sqrt', 'rsqrt', 'inverse', 'power', 'exp', 'log', 'abs', threshold'.
alpha: float
constant used in with modes 'power' and 'threshold'.
shift, scale: float
input is modified by scale and shift prior to the application of the unary function.
epsilon: float
small bias to prevent division by zero.
See Also
--------
add_activation
"""
spec = self.spec
nn_spec = self.nn_spec
# Add a new layer
spec_layer = nn_spec.layers.add()
spec_layer.name = name
spec_layer.input.append(input_name)
spec_layer.output.append(output_name)
spec_layer_params = spec_layer.unary
spec_layer_params.epsilon = epsilon
spec_layer_params.alpha = alpha
spec_layer_params.shift = shift
spec_layer_params.scale = scale
if mode == 'sqrt':
spec_layer_params.type = _NeuralNetwork_pb2.UnaryFunctionLayerParams.Operation.Value('SQRT')
elif mode == 'rsqrt':
spec_layer_params.type = _NeuralNetwork_pb2.UnaryFunctionLayerParams.Operation.Value('RSQRT')
elif mode == 'inverse':
spec_layer_params.type = _NeuralNetwork_pb2.UnaryFunctionLayerParams.Operation.Value('INVERSE')
elif mode == 'power':
spec_layer_params.type = _NeuralNetwork_pb2.UnaryFunctionLayerParams.Operation.Value('POWER')
elif mode == 'exp':
spec_layer_params.type = _NeuralNetwork_pb2.UnaryFunctionLayerParams.Operation.Value('EXP')
elif mode == 'log':
spec_layer_params.type = _NeuralNetwork_pb2.UnaryFunctionLayerParams.Operation.Value('LOG')
elif mode == 'abs':
spec_layer_params.type = _NeuralNetwork_pb2.UnaryFunctionLayerParams.Operation.Value('ABS')
elif mode == 'threshold':
spec_layer_params.type = _NeuralNetwork_pb2.UnaryFunctionLayerParams.Operation.Value('THRESHOLD')
else:
raise NotImplementedError('Unknown unary function %s ' % mode) | def function[add_unary, parameter[self, name, input_name, output_name, mode, alpha, shift, scale, epsilon]]:
constant[
Add a Unary layer. Applies the specified function (mode) to all the elements of the input.
Please see the UnaryFunctionLayerParams message in Core ML neural network
protobuf for more information about the operation of this layer.
Prior to the application of the function the input can be scaled and shifted by using the 'scale',
'shift' parameters.
Parameters
----------
name: str
The name of this layer.
input_name: str
The input blob name of this layer.
output_name: str
The output blob name of this layer.
mode: str
Unary function.
Allowed values: 'sqrt', 'rsqrt', 'inverse', 'power', 'exp', 'log', 'abs', threshold'.
alpha: float
constant used in with modes 'power' and 'threshold'.
shift, scale: float
input is modified by scale and shift prior to the application of the unary function.
epsilon: float
small bias to prevent division by zero.
See Also
--------
add_activation
]
variable[spec] assign[=] name[self].spec
variable[nn_spec] assign[=] name[self].nn_spec
variable[spec_layer] assign[=] call[name[nn_spec].layers.add, parameter[]]
name[spec_layer].name assign[=] name[name]
call[name[spec_layer].input.append, parameter[name[input_name]]]
call[name[spec_layer].output.append, parameter[name[output_name]]]
variable[spec_layer_params] assign[=] name[spec_layer].unary
name[spec_layer_params].epsilon assign[=] name[epsilon]
name[spec_layer_params].alpha assign[=] name[alpha]
name[spec_layer_params].shift assign[=] name[shift]
name[spec_layer_params].scale assign[=] name[scale]
if compare[name[mode] equal[==] constant[sqrt]] begin[:]
name[spec_layer_params].type assign[=] call[name[_NeuralNetwork_pb2].UnaryFunctionLayerParams.Operation.Value, parameter[constant[SQRT]]] | keyword[def] identifier[add_unary] ( identifier[self] , identifier[name] , identifier[input_name] , identifier[output_name] , identifier[mode] , identifier[alpha] = literal[int] ,
identifier[shift] = literal[int] , identifier[scale] = literal[int] , identifier[epsilon] = literal[int] ):
literal[string]
identifier[spec] = identifier[self] . identifier[spec]
identifier[nn_spec] = identifier[self] . identifier[nn_spec]
identifier[spec_layer] = identifier[nn_spec] . identifier[layers] . identifier[add] ()
identifier[spec_layer] . identifier[name] = identifier[name]
identifier[spec_layer] . identifier[input] . identifier[append] ( identifier[input_name] )
identifier[spec_layer] . identifier[output] . identifier[append] ( identifier[output_name] )
identifier[spec_layer_params] = identifier[spec_layer] . identifier[unary]
identifier[spec_layer_params] . identifier[epsilon] = identifier[epsilon]
identifier[spec_layer_params] . identifier[alpha] = identifier[alpha]
identifier[spec_layer_params] . identifier[shift] = identifier[shift]
identifier[spec_layer_params] . identifier[scale] = identifier[scale]
keyword[if] identifier[mode] == literal[string] :
identifier[spec_layer_params] . identifier[type] = identifier[_NeuralNetwork_pb2] . identifier[UnaryFunctionLayerParams] . identifier[Operation] . identifier[Value] ( literal[string] )
keyword[elif] identifier[mode] == literal[string] :
identifier[spec_layer_params] . identifier[type] = identifier[_NeuralNetwork_pb2] . identifier[UnaryFunctionLayerParams] . identifier[Operation] . identifier[Value] ( literal[string] )
keyword[elif] identifier[mode] == literal[string] :
identifier[spec_layer_params] . identifier[type] = identifier[_NeuralNetwork_pb2] . identifier[UnaryFunctionLayerParams] . identifier[Operation] . identifier[Value] ( literal[string] )
keyword[elif] identifier[mode] == literal[string] :
identifier[spec_layer_params] . identifier[type] = identifier[_NeuralNetwork_pb2] . identifier[UnaryFunctionLayerParams] . identifier[Operation] . identifier[Value] ( literal[string] )
keyword[elif] identifier[mode] == literal[string] :
identifier[spec_layer_params] . identifier[type] = identifier[_NeuralNetwork_pb2] . identifier[UnaryFunctionLayerParams] . identifier[Operation] . identifier[Value] ( literal[string] )
keyword[elif] identifier[mode] == literal[string] :
identifier[spec_layer_params] . identifier[type] = identifier[_NeuralNetwork_pb2] . identifier[UnaryFunctionLayerParams] . identifier[Operation] . identifier[Value] ( literal[string] )
keyword[elif] identifier[mode] == literal[string] :
identifier[spec_layer_params] . identifier[type] = identifier[_NeuralNetwork_pb2] . identifier[UnaryFunctionLayerParams] . identifier[Operation] . identifier[Value] ( literal[string] )
keyword[elif] identifier[mode] == literal[string] :
identifier[spec_layer_params] . identifier[type] = identifier[_NeuralNetwork_pb2] . identifier[UnaryFunctionLayerParams] . identifier[Operation] . identifier[Value] ( literal[string] )
keyword[else] :
keyword[raise] identifier[NotImplementedError] ( literal[string] % identifier[mode] ) | def add_unary(self, name, input_name, output_name, mode, alpha=1.0, shift=0, scale=1.0, epsilon=1e-06):
"""
Add a Unary layer. Applies the specified function (mode) to all the elements of the input.
Please see the UnaryFunctionLayerParams message in Core ML neural network
protobuf for more information about the operation of this layer.
Prior to the application of the function the input can be scaled and shifted by using the 'scale',
'shift' parameters.
Parameters
----------
name: str
The name of this layer.
input_name: str
The input blob name of this layer.
output_name: str
The output blob name of this layer.
mode: str
Unary function.
Allowed values: 'sqrt', 'rsqrt', 'inverse', 'power', 'exp', 'log', 'abs', threshold'.
alpha: float
constant used in with modes 'power' and 'threshold'.
shift, scale: float
input is modified by scale and shift prior to the application of the unary function.
epsilon: float
small bias to prevent division by zero.
See Also
--------
add_activation
"""
spec = self.spec
nn_spec = self.nn_spec
# Add a new layer
spec_layer = nn_spec.layers.add()
spec_layer.name = name
spec_layer.input.append(input_name)
spec_layer.output.append(output_name)
spec_layer_params = spec_layer.unary
spec_layer_params.epsilon = epsilon
spec_layer_params.alpha = alpha
spec_layer_params.shift = shift
spec_layer_params.scale = scale
if mode == 'sqrt':
spec_layer_params.type = _NeuralNetwork_pb2.UnaryFunctionLayerParams.Operation.Value('SQRT') # depends on [control=['if'], data=[]]
elif mode == 'rsqrt':
spec_layer_params.type = _NeuralNetwork_pb2.UnaryFunctionLayerParams.Operation.Value('RSQRT') # depends on [control=['if'], data=[]]
elif mode == 'inverse':
spec_layer_params.type = _NeuralNetwork_pb2.UnaryFunctionLayerParams.Operation.Value('INVERSE') # depends on [control=['if'], data=[]]
elif mode == 'power':
spec_layer_params.type = _NeuralNetwork_pb2.UnaryFunctionLayerParams.Operation.Value('POWER') # depends on [control=['if'], data=[]]
elif mode == 'exp':
spec_layer_params.type = _NeuralNetwork_pb2.UnaryFunctionLayerParams.Operation.Value('EXP') # depends on [control=['if'], data=[]]
elif mode == 'log':
spec_layer_params.type = _NeuralNetwork_pb2.UnaryFunctionLayerParams.Operation.Value('LOG') # depends on [control=['if'], data=[]]
elif mode == 'abs':
spec_layer_params.type = _NeuralNetwork_pb2.UnaryFunctionLayerParams.Operation.Value('ABS') # depends on [control=['if'], data=[]]
elif mode == 'threshold':
spec_layer_params.type = _NeuralNetwork_pb2.UnaryFunctionLayerParams.Operation.Value('THRESHOLD') # depends on [control=['if'], data=[]]
else:
raise NotImplementedError('Unknown unary function %s ' % mode) |
def cast_to_list(emoticons_list):
"""
Fix list of emoticons with a single name
to a list for easier future iterations,
and cast iterables to list.
"""
emoticons_tuple = []
for emoticons, image in emoticons_list:
if isinstance(emoticons, basestring):
emoticons = [emoticons]
else:
emoticons = list(emoticons)
emoticons_tuple.append((emoticons, image))
return emoticons_tuple | def function[cast_to_list, parameter[emoticons_list]]:
constant[
Fix list of emoticons with a single name
to a list for easier future iterations,
and cast iterables to list.
]
variable[emoticons_tuple] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1b26c7e80>, <ast.Name object at 0x7da1b26c4a30>]]] in starred[name[emoticons_list]] begin[:]
if call[name[isinstance], parameter[name[emoticons], name[basestring]]] begin[:]
variable[emoticons] assign[=] list[[<ast.Name object at 0x7da1b26c5000>]]
call[name[emoticons_tuple].append, parameter[tuple[[<ast.Name object at 0x7da1b26c4c10>, <ast.Name object at 0x7da1b26c44c0>]]]]
return[name[emoticons_tuple]] | keyword[def] identifier[cast_to_list] ( identifier[emoticons_list] ):
literal[string]
identifier[emoticons_tuple] =[]
keyword[for] identifier[emoticons] , identifier[image] keyword[in] identifier[emoticons_list] :
keyword[if] identifier[isinstance] ( identifier[emoticons] , identifier[basestring] ):
identifier[emoticons] =[ identifier[emoticons] ]
keyword[else] :
identifier[emoticons] = identifier[list] ( identifier[emoticons] )
identifier[emoticons_tuple] . identifier[append] (( identifier[emoticons] , identifier[image] ))
keyword[return] identifier[emoticons_tuple] | def cast_to_list(emoticons_list):
"""
Fix list of emoticons with a single name
to a list for easier future iterations,
and cast iterables to list.
"""
emoticons_tuple = []
for (emoticons, image) in emoticons_list:
if isinstance(emoticons, basestring):
emoticons = [emoticons] # depends on [control=['if'], data=[]]
else:
emoticons = list(emoticons)
emoticons_tuple.append((emoticons, image)) # depends on [control=['for'], data=[]]
return emoticons_tuple |
def exists(self, path):
"""
Check if a given path exists on the filesystem.
:type path: str
:param path: the path to look for
:rtype: bool
:return: :obj:`True` if ``path`` exists
"""
_complain_ifclosed(self.closed)
return self.fs.exists(path) | def function[exists, parameter[self, path]]:
constant[
Check if a given path exists on the filesystem.
:type path: str
:param path: the path to look for
:rtype: bool
:return: :obj:`True` if ``path`` exists
]
call[name[_complain_ifclosed], parameter[name[self].closed]]
return[call[name[self].fs.exists, parameter[name[path]]]] | keyword[def] identifier[exists] ( identifier[self] , identifier[path] ):
literal[string]
identifier[_complain_ifclosed] ( identifier[self] . identifier[closed] )
keyword[return] identifier[self] . identifier[fs] . identifier[exists] ( identifier[path] ) | def exists(self, path):
"""
Check if a given path exists on the filesystem.
:type path: str
:param path: the path to look for
:rtype: bool
:return: :obj:`True` if ``path`` exists
"""
_complain_ifclosed(self.closed)
return self.fs.exists(path) |
def transfer_metadata_to_dict(self, meta):
"""transfer str to dict.
output should be like: {'a':1, 'b':2, 'c':3}
"""
dic = {}
arr = meta.strip(' ,').split(',')
for i in arr:
temp = i.split('=')
key = temp[0].strip()
value = temp[1].strip()
dic[key] = value
return dic | def function[transfer_metadata_to_dict, parameter[self, meta]]:
constant[transfer str to dict.
output should be like: {'a':1, 'b':2, 'c':3}
]
variable[dic] assign[=] dictionary[[], []]
variable[arr] assign[=] call[call[name[meta].strip, parameter[constant[ ,]]].split, parameter[constant[,]]]
for taget[name[i]] in starred[name[arr]] begin[:]
variable[temp] assign[=] call[name[i].split, parameter[constant[=]]]
variable[key] assign[=] call[call[name[temp]][constant[0]].strip, parameter[]]
variable[value] assign[=] call[call[name[temp]][constant[1]].strip, parameter[]]
call[name[dic]][name[key]] assign[=] name[value]
return[name[dic]] | keyword[def] identifier[transfer_metadata_to_dict] ( identifier[self] , identifier[meta] ):
literal[string]
identifier[dic] ={}
identifier[arr] = identifier[meta] . identifier[strip] ( literal[string] ). identifier[split] ( literal[string] )
keyword[for] identifier[i] keyword[in] identifier[arr] :
identifier[temp] = identifier[i] . identifier[split] ( literal[string] )
identifier[key] = identifier[temp] [ literal[int] ]. identifier[strip] ()
identifier[value] = identifier[temp] [ literal[int] ]. identifier[strip] ()
identifier[dic] [ identifier[key] ]= identifier[value]
keyword[return] identifier[dic] | def transfer_metadata_to_dict(self, meta):
"""transfer str to dict.
output should be like: {'a':1, 'b':2, 'c':3}
"""
dic = {}
arr = meta.strip(' ,').split(',')
for i in arr:
temp = i.split('=')
key = temp[0].strip()
value = temp[1].strip()
dic[key] = value # depends on [control=['for'], data=['i']]
return dic |
def edit_conf(edit_config=False, load_config=None, **kwargs):
"""
Edit the Andes config file which occurs first in the search path.
Parameters
----------
edit_config : bool
If ``True``, try to open up an editor and edit the config file.
Otherwise returns.
load_config : None or str, optional
Path to the config file, which will be placed to the first in the
search order.
kwargs : dict
Other keyword arguments.
Returns
-------
bool
``True`` is a config file is found and an editor is opened. ``False``
if ``edit_config`` is False.
"""
ret = False
# no `edit-config` supplied
if edit_config == '':
return ret
conf_path = misc.get_config_load_path(load_config)
if conf_path is not None:
logger.info('Editing config file {}'.format(conf_path))
if edit_config is None:
# use the following default editors
if platform.system() == 'Linux':
editor = os.environ.get('EDITOR', 'gedit')
elif platform.system() == 'Darwin':
editor = os.environ.get('EDITOR', 'vim')
elif platform.system() == 'Windows':
editor = 'notepad.exe'
else:
# use `edit_config` as default editor
editor = edit_config
call([editor, conf_path])
ret = True
else:
logger.info('Config file does not exist. Save config with \'andes '
'--save-config\'')
ret = True
return ret | def function[edit_conf, parameter[edit_config, load_config]]:
constant[
Edit the Andes config file which occurs first in the search path.
Parameters
----------
edit_config : bool
If ``True``, try to open up an editor and edit the config file.
Otherwise returns.
load_config : None or str, optional
Path to the config file, which will be placed to the first in the
search order.
kwargs : dict
Other keyword arguments.
Returns
-------
bool
``True`` is a config file is found and an editor is opened. ``False``
if ``edit_config`` is False.
]
variable[ret] assign[=] constant[False]
if compare[name[edit_config] equal[==] constant[]] begin[:]
return[name[ret]]
variable[conf_path] assign[=] call[name[misc].get_config_load_path, parameter[name[load_config]]]
if compare[name[conf_path] is_not constant[None]] begin[:]
call[name[logger].info, parameter[call[constant[Editing config file {}].format, parameter[name[conf_path]]]]]
if compare[name[edit_config] is constant[None]] begin[:]
if compare[call[name[platform].system, parameter[]] equal[==] constant[Linux]] begin[:]
variable[editor] assign[=] call[name[os].environ.get, parameter[constant[EDITOR], constant[gedit]]]
call[name[call], parameter[list[[<ast.Name object at 0x7da20cabfb80>, <ast.Name object at 0x7da20cabed70>]]]]
variable[ret] assign[=] constant[True]
return[name[ret]] | keyword[def] identifier[edit_conf] ( identifier[edit_config] = keyword[False] , identifier[load_config] = keyword[None] ,** identifier[kwargs] ):
literal[string]
identifier[ret] = keyword[False]
keyword[if] identifier[edit_config] == literal[string] :
keyword[return] identifier[ret]
identifier[conf_path] = identifier[misc] . identifier[get_config_load_path] ( identifier[load_config] )
keyword[if] identifier[conf_path] keyword[is] keyword[not] keyword[None] :
identifier[logger] . identifier[info] ( literal[string] . identifier[format] ( identifier[conf_path] ))
keyword[if] identifier[edit_config] keyword[is] keyword[None] :
keyword[if] identifier[platform] . identifier[system] ()== literal[string] :
identifier[editor] = identifier[os] . identifier[environ] . identifier[get] ( literal[string] , literal[string] )
keyword[elif] identifier[platform] . identifier[system] ()== literal[string] :
identifier[editor] = identifier[os] . identifier[environ] . identifier[get] ( literal[string] , literal[string] )
keyword[elif] identifier[platform] . identifier[system] ()== literal[string] :
identifier[editor] = literal[string]
keyword[else] :
identifier[editor] = identifier[edit_config]
identifier[call] ([ identifier[editor] , identifier[conf_path] ])
identifier[ret] = keyword[True]
keyword[else] :
identifier[logger] . identifier[info] ( literal[string]
literal[string] )
identifier[ret] = keyword[True]
keyword[return] identifier[ret] | def edit_conf(edit_config=False, load_config=None, **kwargs):
"""
Edit the Andes config file which occurs first in the search path.
Parameters
----------
edit_config : bool
If ``True``, try to open up an editor and edit the config file.
Otherwise returns.
load_config : None or str, optional
Path to the config file, which will be placed to the first in the
search order.
kwargs : dict
Other keyword arguments.
Returns
-------
bool
``True`` is a config file is found and an editor is opened. ``False``
if ``edit_config`` is False.
"""
ret = False
# no `edit-config` supplied
if edit_config == '':
return ret # depends on [control=['if'], data=[]]
conf_path = misc.get_config_load_path(load_config)
if conf_path is not None:
logger.info('Editing config file {}'.format(conf_path))
if edit_config is None:
# use the following default editors
if platform.system() == 'Linux':
editor = os.environ.get('EDITOR', 'gedit') # depends on [control=['if'], data=[]]
elif platform.system() == 'Darwin':
editor = os.environ.get('EDITOR', 'vim') # depends on [control=['if'], data=[]]
elif platform.system() == 'Windows':
editor = 'notepad.exe' # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
# use `edit_config` as default editor
editor = edit_config
call([editor, conf_path])
ret = True # depends on [control=['if'], data=['conf_path']]
else:
logger.info("Config file does not exist. Save config with 'andes --save-config'")
ret = True
return ret |
def p_package_version_1(self, p):
"""package_version : PKG_VERSION LINE"""
try:
if six.PY2:
value = p[2].decode(encoding='utf-8')
else:
value = p[2]
self.builder.set_pkg_vers(self.document, value)
except OrderError:
self.order_error('PackageVersion', 'PackageName', p.lineno(1))
except CardinalityError:
self.more_than_one_error('PackageVersion', p.lineno(1)) | def function[p_package_version_1, parameter[self, p]]:
constant[package_version : PKG_VERSION LINE]
<ast.Try object at 0x7da1b014cdf0> | keyword[def] identifier[p_package_version_1] ( identifier[self] , identifier[p] ):
literal[string]
keyword[try] :
keyword[if] identifier[six] . identifier[PY2] :
identifier[value] = identifier[p] [ literal[int] ]. identifier[decode] ( identifier[encoding] = literal[string] )
keyword[else] :
identifier[value] = identifier[p] [ literal[int] ]
identifier[self] . identifier[builder] . identifier[set_pkg_vers] ( identifier[self] . identifier[document] , identifier[value] )
keyword[except] identifier[OrderError] :
identifier[self] . identifier[order_error] ( literal[string] , literal[string] , identifier[p] . identifier[lineno] ( literal[int] ))
keyword[except] identifier[CardinalityError] :
identifier[self] . identifier[more_than_one_error] ( literal[string] , identifier[p] . identifier[lineno] ( literal[int] )) | def p_package_version_1(self, p):
"""package_version : PKG_VERSION LINE"""
try:
if six.PY2:
value = p[2].decode(encoding='utf-8') # depends on [control=['if'], data=[]]
else:
value = p[2]
self.builder.set_pkg_vers(self.document, value) # depends on [control=['try'], data=[]]
except OrderError:
self.order_error('PackageVersion', 'PackageName', p.lineno(1)) # depends on [control=['except'], data=[]]
except CardinalityError:
self.more_than_one_error('PackageVersion', p.lineno(1)) # depends on [control=['except'], data=[]] |
def add_chart(self, chart, row, col):
"""
Adds a chart to the worksheet at (row, col).
:param xltable.Chart Chart: chart to add to the workbook.
:param int row: Row to add the chart at.
"""
self.__charts.append((chart, (row, col))) | def function[add_chart, parameter[self, chart, row, col]]:
constant[
Adds a chart to the worksheet at (row, col).
:param xltable.Chart Chart: chart to add to the workbook.
:param int row: Row to add the chart at.
]
call[name[self].__charts.append, parameter[tuple[[<ast.Name object at 0x7da1b242bb50>, <ast.Tuple object at 0x7da1b242a5c0>]]]] | keyword[def] identifier[add_chart] ( identifier[self] , identifier[chart] , identifier[row] , identifier[col] ):
literal[string]
identifier[self] . identifier[__charts] . identifier[append] (( identifier[chart] ,( identifier[row] , identifier[col] ))) | def add_chart(self, chart, row, col):
"""
Adds a chart to the worksheet at (row, col).
:param xltable.Chart Chart: chart to add to the workbook.
:param int row: Row to add the chart at.
"""
self.__charts.append((chart, (row, col))) |
def rebuild( self ):
"""
Rebuilds the information for this scene.
"""
self._buildData.clear()
self._dateGrid.clear()
self._dateTimeGrid.clear()
curr_min = self._minimumDate
curr_max = self._maximumDate
self._maximumDate = QDate()
self._minimumDate = QDate()
self.markForRebuild(False)
# rebuilds the month view
if ( self.currentMode() == XCalendarScene.Mode.Month ):
self.rebuildMonth()
elif ( self.currentMode() in (XCalendarScene.Mode.Week,
XCalendarScene.Mode.Day)):
self.rebuildDays()
# rebuild the items in the scene
items = sorted(self.items())
for item in items:
item.setPos(0, 0)
item.hide()
for item in items:
if ( isinstance(item, XCalendarItem) ):
item.rebuild()
if ( curr_min != self._minimumDate or curr_max != self._maximumDate ):
parent = self.parent()
if ( parent and not parent.signalsBlocked() ):
parent.dateRangeChanged.emit(self._minimumDate,
self._maximumDate) | def function[rebuild, parameter[self]]:
constant[
Rebuilds the information for this scene.
]
call[name[self]._buildData.clear, parameter[]]
call[name[self]._dateGrid.clear, parameter[]]
call[name[self]._dateTimeGrid.clear, parameter[]]
variable[curr_min] assign[=] name[self]._minimumDate
variable[curr_max] assign[=] name[self]._maximumDate
name[self]._maximumDate assign[=] call[name[QDate], parameter[]]
name[self]._minimumDate assign[=] call[name[QDate], parameter[]]
call[name[self].markForRebuild, parameter[constant[False]]]
if compare[call[name[self].currentMode, parameter[]] equal[==] name[XCalendarScene].Mode.Month] begin[:]
call[name[self].rebuildMonth, parameter[]]
variable[items] assign[=] call[name[sorted], parameter[call[name[self].items, parameter[]]]]
for taget[name[item]] in starred[name[items]] begin[:]
call[name[item].setPos, parameter[constant[0], constant[0]]]
call[name[item].hide, parameter[]]
for taget[name[item]] in starred[name[items]] begin[:]
if call[name[isinstance], parameter[name[item], name[XCalendarItem]]] begin[:]
call[name[item].rebuild, parameter[]]
if <ast.BoolOp object at 0x7da18f09f8b0> begin[:]
variable[parent] assign[=] call[name[self].parent, parameter[]]
if <ast.BoolOp object at 0x7da2054a4220> begin[:]
call[name[parent].dateRangeChanged.emit, parameter[name[self]._minimumDate, name[self]._maximumDate]] | keyword[def] identifier[rebuild] ( identifier[self] ):
literal[string]
identifier[self] . identifier[_buildData] . identifier[clear] ()
identifier[self] . identifier[_dateGrid] . identifier[clear] ()
identifier[self] . identifier[_dateTimeGrid] . identifier[clear] ()
identifier[curr_min] = identifier[self] . identifier[_minimumDate]
identifier[curr_max] = identifier[self] . identifier[_maximumDate]
identifier[self] . identifier[_maximumDate] = identifier[QDate] ()
identifier[self] . identifier[_minimumDate] = identifier[QDate] ()
identifier[self] . identifier[markForRebuild] ( keyword[False] )
keyword[if] ( identifier[self] . identifier[currentMode] ()== identifier[XCalendarScene] . identifier[Mode] . identifier[Month] ):
identifier[self] . identifier[rebuildMonth] ()
keyword[elif] ( identifier[self] . identifier[currentMode] () keyword[in] ( identifier[XCalendarScene] . identifier[Mode] . identifier[Week] ,
identifier[XCalendarScene] . identifier[Mode] . identifier[Day] )):
identifier[self] . identifier[rebuildDays] ()
identifier[items] = identifier[sorted] ( identifier[self] . identifier[items] ())
keyword[for] identifier[item] keyword[in] identifier[items] :
identifier[item] . identifier[setPos] ( literal[int] , literal[int] )
identifier[item] . identifier[hide] ()
keyword[for] identifier[item] keyword[in] identifier[items] :
keyword[if] ( identifier[isinstance] ( identifier[item] , identifier[XCalendarItem] )):
identifier[item] . identifier[rebuild] ()
keyword[if] ( identifier[curr_min] != identifier[self] . identifier[_minimumDate] keyword[or] identifier[curr_max] != identifier[self] . identifier[_maximumDate] ):
identifier[parent] = identifier[self] . identifier[parent] ()
keyword[if] ( identifier[parent] keyword[and] keyword[not] identifier[parent] . identifier[signalsBlocked] ()):
identifier[parent] . identifier[dateRangeChanged] . identifier[emit] ( identifier[self] . identifier[_minimumDate] ,
identifier[self] . identifier[_maximumDate] ) | def rebuild(self):
"""
Rebuilds the information for this scene.
"""
self._buildData.clear()
self._dateGrid.clear()
self._dateTimeGrid.clear()
curr_min = self._minimumDate
curr_max = self._maximumDate
self._maximumDate = QDate()
self._minimumDate = QDate()
self.markForRebuild(False) # rebuilds the month view
if self.currentMode() == XCalendarScene.Mode.Month:
self.rebuildMonth() # depends on [control=['if'], data=[]]
elif self.currentMode() in (XCalendarScene.Mode.Week, XCalendarScene.Mode.Day):
self.rebuildDays() # depends on [control=['if'], data=[]] # rebuild the items in the scene
items = sorted(self.items())
for item in items:
item.setPos(0, 0)
item.hide() # depends on [control=['for'], data=['item']]
for item in items:
if isinstance(item, XCalendarItem):
item.rebuild() # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['item']]
if curr_min != self._minimumDate or curr_max != self._maximumDate:
parent = self.parent()
if parent and (not parent.signalsBlocked()):
parent.dateRangeChanged.emit(self._minimumDate, self._maximumDate) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] |
def is_valid(self, context, sid):
    """Identify if the given session ID is currently valid.
    Return True if valid, False if explicitly invalid, None if unknown.
    """
    # Fetch only the expiry projection; nothing else is needed here.
    doc = self._Document.find_one(sid, project=('expires', ))
    if doc:
        return not doc._expired
    return None  # No record found: validity is unknown.
constant[Identify if the given session ID is currently valid.
Return True if valid, False if explicitly invalid, None if unknown.
]
variable[record] assign[=] call[name[self]._Document.find_one, parameter[name[sid]]]
if <ast.UnaryOp object at 0x7da207f029b0> begin[:]
return[None]
return[<ast.UnaryOp object at 0x7da207f03fd0>] | keyword[def] identifier[is_valid] ( identifier[self] , identifier[context] , identifier[sid] ):
literal[string]
identifier[record] = identifier[self] . identifier[_Document] . identifier[find_one] ( identifier[sid] , identifier[project] =( literal[string] ,))
keyword[if] keyword[not] identifier[record] :
keyword[return]
keyword[return] keyword[not] identifier[record] . identifier[_expired] | def is_valid(self, context, sid):
"""Identify if the given session ID is currently valid.
Return True if valid, False if explicitly invalid, None if unknown.
"""
record = self._Document.find_one(sid, project=('expires',))
if not record:
return # depends on [control=['if'], data=[]]
return not record._expired |
def find_root(node):
    """Find the top level namespace."""
    # Walk parent links until the file_input (module root) node is reached.
    current = node
    while current.type != syms.file_input:
        current = current.parent
        if not current:
            raise ValueError("root found before file_input node was found.")
    return current
constant[Find the top level namespace.]
while compare[name[node].type not_equal[!=] name[syms].file_input] begin[:]
variable[node] assign[=] name[node].parent
if <ast.UnaryOp object at 0x7da18dc04130> begin[:]
<ast.Raise object at 0x7da18dc06590>
return[name[node]] | keyword[def] identifier[find_root] ( identifier[node] ):
literal[string]
keyword[while] identifier[node] . identifier[type] != identifier[syms] . identifier[file_input] :
identifier[node] = identifier[node] . identifier[parent]
keyword[if] keyword[not] identifier[node] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[return] identifier[node] | def find_root(node):
"""Find the top level namespace."""
# Scamper up to the top level namespace
while node.type != syms.file_input:
node = node.parent
if not node:
raise ValueError('root found before file_input node was found.') # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]]
return node |
def sing(a, b, c=False, name='yetone'):
    """sing a song
    hehe
    :param a: I'm a
    :param b: I'm b
    :param c: I'm c
    :param name: I'm name
    """
    # Build the message first, then emit it in one call.
    message = 'test0.sing: <a: {}, b: {}, c: {}> by {}'.format(a, b, c, name)
    print(message)
constant[sing a song
hehe
:param a: I'm a
:param b: I'm b
:param c: I'm c
:param name: I'm name
]
call[name[print], parameter[call[constant[test0.sing: <a: {}, b: {}, c: {}> by {}].format, parameter[name[a], name[b], name[c], name[name]]]]] | keyword[def] identifier[sing] ( identifier[a] , identifier[b] , identifier[c] = keyword[False] , identifier[name] = literal[string] ):
literal[string]
identifier[print] ( literal[string] . identifier[format] ( identifier[a] , identifier[b] , identifier[c] , identifier[name] )) | def sing(a, b, c=False, name='yetone'):
"""sing a song
hehe
:param a: I'm a
:param b: I'm b
:param c: I'm c
:param name: I'm name
"""
print('test0.sing: <a: {}, b: {}, c: {}> by {}'.format(a, b, c, name)) |
def filters(self):
    """List of filters available for the dataset."""
    # Lazily fetch filters and attributes together on first access.
    if self._filters is None:
        fetched_filters, fetched_attributes = self._fetch_configuration()
        self._filters = fetched_filters
        self._attributes = fetched_attributes
    return self._filters
constant[List of filters available for the dataset.]
if compare[name[self]._filters is constant[None]] begin[:]
<ast.Tuple object at 0x7da1b2649e10> assign[=] call[name[self]._fetch_configuration, parameter[]]
return[name[self]._filters] | keyword[def] identifier[filters] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[_filters] keyword[is] keyword[None] :
identifier[self] . identifier[_filters] , identifier[self] . identifier[_attributes] = identifier[self] . identifier[_fetch_configuration] ()
keyword[return] identifier[self] . identifier[_filters] | def filters(self):
"""List of filters available for the dataset."""
if self._filters is None:
(self._filters, self._attributes) = self._fetch_configuration() # depends on [control=['if'], data=[]]
return self._filters |
def find_conflicts_within_selection_set(
    context: ValidationContext,
    cached_fields_and_fragment_names: Dict,
    compared_fragment_pairs: "PairSet",
    parent_type: Optional[GraphQLNamedType],
    selection_set: SelectionSetNode,
) -> List[Conflict]:
    """Find conflicts within selection set.
    Find all conflicts found "within" a selection set, including those found via
    spreading in fragments.
    Called when visiting each SelectionSet in the GraphQL Document.
    """
    found: List[Conflict] = []
    field_map, fragment_names = get_fields_and_fragment_names(
        context, cached_fields_and_fragment_names, parent_type, selection_set
    )
    # (A) Conflicts between the fields of this selection set itself.
    # Note: this is the *only place* `collect_conflicts_within` is called.
    collect_conflicts_within(
        context,
        found,
        cached_fields_and_fragment_names,
        compared_fragment_pairs,
        field_map,
    )
    if not fragment_names:
        return found
    compared_fragments: Set[str] = set()
    for index, fragment_name in enumerate(fragment_names):
        # (B) Conflicts between these fields and those represented by each
        # spread fragment name found.
        collect_conflicts_between_fields_and_fragment(
            context,
            found,
            cached_fields_and_fragment_names,
            compared_fragments,
            compared_fragment_pairs,
            False,
            field_map,
            fragment_name,
        )
        # (C) Conflicts between this fragment and every *later* fragment in
        # the same selection set, so each unordered pair is visited once and
        # a fragment is never compared against itself.
        for other_fragment_name in fragment_names[index + 1:]:
            collect_conflicts_between_fragments(
                context,
                found,
                cached_fields_and_fragment_names,
                compared_fragment_pairs,
                False,
                fragment_name,
                other_fragment_name,
            )
    return found
constant[Find conflicts within selection set.
Find all conflicts found "within" a selection set, including those found via
spreading in fragments.
Called when visiting each SelectionSet in the GraphQL Document.
]
<ast.AnnAssign object at 0x7da1b22e9f00>
<ast.Tuple object at 0x7da1b22ea6e0> assign[=] call[name[get_fields_and_fragment_names], parameter[name[context], name[cached_fields_and_fragment_names], name[parent_type], name[selection_set]]]
call[name[collect_conflicts_within], parameter[name[context], name[conflicts], name[cached_fields_and_fragment_names], name[compared_fragment_pairs], name[field_map]]]
if name[fragment_names] begin[:]
<ast.AnnAssign object at 0x7da1b22eab60>
for taget[tuple[[<ast.Name object at 0x7da1b22e8880>, <ast.Name object at 0x7da1b22e9e70>]]] in starred[call[name[enumerate], parameter[name[fragment_names]]]] begin[:]
call[name[collect_conflicts_between_fields_and_fragment], parameter[name[context], name[conflicts], name[cached_fields_and_fragment_names], name[compared_fragments], name[compared_fragment_pairs], constant[False], name[field_map], name[fragment_name]]]
for taget[name[other_fragment_name]] in starred[call[name[fragment_names]][<ast.Slice object at 0x7da1b22ebe80>]] begin[:]
call[name[collect_conflicts_between_fragments], parameter[name[context], name[conflicts], name[cached_fields_and_fragment_names], name[compared_fragment_pairs], constant[False], name[fragment_name], name[other_fragment_name]]]
return[name[conflicts]] | keyword[def] identifier[find_conflicts_within_selection_set] (
identifier[context] : identifier[ValidationContext] ,
identifier[cached_fields_and_fragment_names] : identifier[Dict] ,
identifier[compared_fragment_pairs] : literal[string] ,
identifier[parent_type] : identifier[Optional] [ identifier[GraphQLNamedType] ],
identifier[selection_set] : identifier[SelectionSetNode] ,
)-> identifier[List] [ identifier[Conflict] ]:
literal[string]
identifier[conflicts] : identifier[List] [ identifier[Conflict] ]=[]
identifier[field_map] , identifier[fragment_names] = identifier[get_fields_and_fragment_names] (
identifier[context] , identifier[cached_fields_and_fragment_names] , identifier[parent_type] , identifier[selection_set]
)
identifier[collect_conflicts_within] (
identifier[context] ,
identifier[conflicts] ,
identifier[cached_fields_and_fragment_names] ,
identifier[compared_fragment_pairs] ,
identifier[field_map] ,
)
keyword[if] identifier[fragment_names] :
identifier[compared_fragments] : identifier[Set] [ identifier[str] ]= identifier[set] ()
keyword[for] identifier[i] , identifier[fragment_name] keyword[in] identifier[enumerate] ( identifier[fragment_names] ):
identifier[collect_conflicts_between_fields_and_fragment] (
identifier[context] ,
identifier[conflicts] ,
identifier[cached_fields_and_fragment_names] ,
identifier[compared_fragments] ,
identifier[compared_fragment_pairs] ,
keyword[False] ,
identifier[field_map] ,
identifier[fragment_name] ,
)
keyword[for] identifier[other_fragment_name] keyword[in] identifier[fragment_names] [ identifier[i] + literal[int] :]:
identifier[collect_conflicts_between_fragments] (
identifier[context] ,
identifier[conflicts] ,
identifier[cached_fields_and_fragment_names] ,
identifier[compared_fragment_pairs] ,
keyword[False] ,
identifier[fragment_name] ,
identifier[other_fragment_name] ,
)
keyword[return] identifier[conflicts] | def find_conflicts_within_selection_set(context: ValidationContext, cached_fields_and_fragment_names: Dict, compared_fragment_pairs: 'PairSet', parent_type: Optional[GraphQLNamedType], selection_set: SelectionSetNode) -> List[Conflict]:
"""Find conflicts within selection set.
Find all conflicts found "within" a selection set, including those found via
spreading in fragments.
Called when visiting each SelectionSet in the GraphQL Document.
"""
conflicts: List[Conflict] = []
(field_map, fragment_names) = get_fields_and_fragment_names(context, cached_fields_and_fragment_names, parent_type, selection_set)
# (A) Find all conflicts "within" the fields of this selection set.
# Note: this is the *only place* `collect_conflicts_within` is called.
collect_conflicts_within(context, conflicts, cached_fields_and_fragment_names, compared_fragment_pairs, field_map)
if fragment_names:
compared_fragments: Set[str] = set()
# (B) Then collect conflicts between these fields and those represented by each
# spread fragment name found.
for (i, fragment_name) in enumerate(fragment_names):
collect_conflicts_between_fields_and_fragment(context, conflicts, cached_fields_and_fragment_names, compared_fragments, compared_fragment_pairs, False, field_map, fragment_name)
# (C) Then compare this fragment with all other fragments found in this
# selection set to collect conflicts within fragments spread together.
# This compares each item in the list of fragment names to every other
# item in that same list (except for itself).
for other_fragment_name in fragment_names[i + 1:]:
collect_conflicts_between_fragments(context, conflicts, cached_fields_and_fragment_names, compared_fragment_pairs, False, fragment_name, other_fragment_name) # depends on [control=['for'], data=['other_fragment_name']] # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
return conflicts |
def _get_base_command(self):
"""Returns the base command plus command-line options.
Handles everything up to and including the classpath. The
positional training parameters are added by the
_input_handler_decorator method.
"""
cd_command = ''.join(['cd ', str(self.WorkingDir), ';'])
jvm_command = "java"
jvm_args = self._commandline_join(
[self.Parameters[k] for k in self._jvm_parameters])
cp_args = '-cp "%s" %s' % (self._get_jar_fp(), self.TrainingClass)
command_parts = [cd_command, jvm_command, jvm_args, cp_args]
return self._commandline_join(command_parts).strip() | def function[_get_base_command, parameter[self]]:
constant[Returns the base command plus command-line options.
Handles everything up to and including the classpath. The
positional training parameters are added by the
_input_handler_decorator method.
]
variable[cd_command] assign[=] call[constant[].join, parameter[list[[<ast.Constant object at 0x7da1b0b56c80>, <ast.Call object at 0x7da1b0b57430>, <ast.Constant object at 0x7da1b0b561a0>]]]]
variable[jvm_command] assign[=] constant[java]
variable[jvm_args] assign[=] call[name[self]._commandline_join, parameter[<ast.ListComp object at 0x7da1b0b56380>]]
variable[cp_args] assign[=] binary_operation[constant[-cp "%s" %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Call object at 0x7da1b0b56b00>, <ast.Attribute object at 0x7da1b0b578b0>]]]
variable[command_parts] assign[=] list[[<ast.Name object at 0x7da1b0b54fd0>, <ast.Name object at 0x7da1b0b54760>, <ast.Name object at 0x7da1b0b541f0>, <ast.Name object at 0x7da1b0b545b0>]]
return[call[call[name[self]._commandline_join, parameter[name[command_parts]]].strip, parameter[]]] | keyword[def] identifier[_get_base_command] ( identifier[self] ):
literal[string]
identifier[cd_command] = literal[string] . identifier[join] ([ literal[string] , identifier[str] ( identifier[self] . identifier[WorkingDir] ), literal[string] ])
identifier[jvm_command] = literal[string]
identifier[jvm_args] = identifier[self] . identifier[_commandline_join] (
[ identifier[self] . identifier[Parameters] [ identifier[k] ] keyword[for] identifier[k] keyword[in] identifier[self] . identifier[_jvm_parameters] ])
identifier[cp_args] = literal[string] %( identifier[self] . identifier[_get_jar_fp] (), identifier[self] . identifier[TrainingClass] )
identifier[command_parts] =[ identifier[cd_command] , identifier[jvm_command] , identifier[jvm_args] , identifier[cp_args] ]
keyword[return] identifier[self] . identifier[_commandline_join] ( identifier[command_parts] ). identifier[strip] () | def _get_base_command(self):
"""Returns the base command plus command-line options.
Handles everything up to and including the classpath. The
positional training parameters are added by the
_input_handler_decorator method.
"""
cd_command = ''.join(['cd ', str(self.WorkingDir), ';'])
jvm_command = 'java'
jvm_args = self._commandline_join([self.Parameters[k] for k in self._jvm_parameters])
cp_args = '-cp "%s" %s' % (self._get_jar_fp(), self.TrainingClass)
command_parts = [cd_command, jvm_command, jvm_args, cp_args]
return self._commandline_join(command_parts).strip() |
def filter_items(self, items, navigation_type=None):
    """Filters sitetree item's children if hidden and by navigation type.
    NB: We do not apply any filters to sitetree in admin app.
    :param list items:
    :param str|unicode navigation_type: sitetree, breadcrumbs, menu
    :rtype: list
    """
    # Admin app always sees the full, unfiltered tree.
    if self.current_app_is_admin():
        return items
    context = self.current_page_context
    check_access = self.check_access
    # Attribute like ``insitetree``/``inmenu`` hides items per nav type;
    # items lacking the attribute are considered visible.
    nav_attr = 'in%s' % navigation_type
    return [
        item for item in items
        if not item.hidden
        and check_access(item, context)
        and getattr(item, nav_attr, True)
    ]
constant[Filters sitetree item's children if hidden and by navigation type.
NB: We do not apply any filters to sitetree in admin app.
:param list items:
:param str|unicode navigation_type: sitetree, breadcrumbs, menu
:rtype: list
]
if call[name[self].current_app_is_admin, parameter[]] begin[:]
return[name[items]]
variable[items_filtered] assign[=] list[[]]
variable[context] assign[=] name[self].current_page_context
variable[check_access] assign[=] name[self].check_access
for taget[name[item]] in starred[name[items]] begin[:]
if name[item].hidden begin[:]
continue
if <ast.UnaryOp object at 0x7da20c7c8160> begin[:]
continue
if <ast.UnaryOp object at 0x7da20c7ca440> begin[:]
continue
call[name[items_filtered].append, parameter[name[item]]]
return[name[items_filtered]] | keyword[def] identifier[filter_items] ( identifier[self] , identifier[items] , identifier[navigation_type] = keyword[None] ):
literal[string]
keyword[if] identifier[self] . identifier[current_app_is_admin] ():
keyword[return] identifier[items]
identifier[items_filtered] =[]
identifier[context] = identifier[self] . identifier[current_page_context]
identifier[check_access] = identifier[self] . identifier[check_access]
keyword[for] identifier[item] keyword[in] identifier[items] :
keyword[if] identifier[item] . identifier[hidden] :
keyword[continue]
keyword[if] keyword[not] identifier[check_access] ( identifier[item] , identifier[context] ):
keyword[continue]
keyword[if] keyword[not] identifier[getattr] ( identifier[item] , literal[string] % identifier[navigation_type] , keyword[True] ):
keyword[continue]
identifier[items_filtered] . identifier[append] ( identifier[item] )
keyword[return] identifier[items_filtered] | def filter_items(self, items, navigation_type=None):
"""Filters sitetree item's children if hidden and by navigation type.
NB: We do not apply any filters to sitetree in admin app.
:param list items:
:param str|unicode navigation_type: sitetree, breadcrumbs, menu
:rtype: list
"""
if self.current_app_is_admin():
return items # depends on [control=['if'], data=[]]
items_filtered = []
context = self.current_page_context
check_access = self.check_access
for item in items:
if item.hidden:
continue # depends on [control=['if'], data=[]]
if not check_access(item, context):
continue # depends on [control=['if'], data=[]]
if not getattr(item, 'in%s' % navigation_type, True): # Hidden for current nav type
continue # depends on [control=['if'], data=[]]
items_filtered.append(item) # depends on [control=['for'], data=['item']]
return items_filtered |
def get_page(search_text):
    """
    formats the entire search result in a table output
    """
    # Collect all fragments and join once rather than concatenating.
    rows = search_aikif(search_text)
    parts = ['<table class="as-table as-table-zebra as-table-horizontal">']
    for row in rows:
        parts.append('<TR><TD>' + row + '</TD></TR>')
    parts.append('</TABLE>\n\n')
    return ''.join(parts)
constant[
formats the entire search result in a table output
]
variable[lst] assign[=] call[name[search_aikif], parameter[name[search_text]]]
variable[txt] assign[=] constant[<table class="as-table as-table-zebra as-table-horizontal">]
for taget[name[result]] in starred[name[lst]] begin[:]
<ast.AugAssign object at 0x7da18f00e6e0>
<ast.AugAssign object at 0x7da18f00df30>
return[name[txt]] | keyword[def] identifier[get_page] ( identifier[search_text] ):
literal[string]
identifier[lst] = identifier[search_aikif] ( identifier[search_text] )
identifier[txt] = literal[string]
keyword[for] identifier[result] keyword[in] identifier[lst] :
identifier[txt] += literal[string] + identifier[result] + literal[string]
identifier[txt] += literal[string]
keyword[return] identifier[txt] | def get_page(search_text):
"""
formats the entire search result in a table output
"""
lst = search_aikif(search_text)
txt = '<table class="as-table as-table-zebra as-table-horizontal">'
for result in lst:
txt += '<TR><TD>' + result + '</TD></TR>' # depends on [control=['for'], data=['result']]
txt += '</TABLE>\n\n'
return txt |
def getAllElementsOfHirarchy(self):
    """ returns ALL elements of the complete hirarchy as a flat list
    """
    flat = []
    for child in self.getAllElements():
        flat.append(child)
        # Container elements contribute their whole subtree as well.
        if isinstance(child, BaseElement):
            flat.extend(child.getAllElementsOfHirarchy())
    return flat
constant[ returns ALL elements of the complete hirarchy as a flat list
]
variable[allElements] assign[=] list[[]]
for taget[name[element]] in starred[call[name[self].getAllElements, parameter[]]] begin[:]
call[name[allElements].append, parameter[name[element]]]
if call[name[isinstance], parameter[name[element], name[BaseElement]]] begin[:]
call[name[allElements].extend, parameter[call[name[element].getAllElementsOfHirarchy, parameter[]]]]
return[name[allElements]] | keyword[def] identifier[getAllElementsOfHirarchy] ( identifier[self] ):
literal[string]
identifier[allElements] =[]
keyword[for] identifier[element] keyword[in] identifier[self] . identifier[getAllElements] ():
identifier[allElements] . identifier[append] ( identifier[element] )
keyword[if] identifier[isinstance] ( identifier[element] , identifier[BaseElement] ):
identifier[allElements] . identifier[extend] ( identifier[element] . identifier[getAllElementsOfHirarchy] ())
keyword[return] identifier[allElements] | def getAllElementsOfHirarchy(self):
""" returns ALL elements of the complete hirarchy as a flat list
"""
allElements = []
for element in self.getAllElements():
allElements.append(element)
if isinstance(element, BaseElement):
allElements.extend(element.getAllElementsOfHirarchy()) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['element']]
return allElements |
def handle_onchain_secretreveal(
    target_state: TargetTransferState,
    state_change: ContractReceiveSecretReveal,
    channel_state: NettingChannelState,
) -> TransitionResult[TargetTransferState]:
    """ Validates and handles a ContractReceiveSecretReveal state change. """
    if is_valid_secret_reveal(
        state_change=state_change,
        transfer_secrethash=target_state.transfer.lock.secrethash,
        secret=state_change.secret,
    ):
        # Record the revealed secret on the channel and move the target
        # into the on-chain unlock state.
        channel.register_onchain_secret(
            channel_state=channel_state,
            secret=state_change.secret,
            secrethash=state_change.secrethash,
            secret_reveal_block_number=state_change.block_number,
        )
        target_state.state = TargetTransferState.ONCHAIN_UNLOCK
        target_state.secret = state_change.secret
    return TransitionResult(target_state, [])
constant[ Validates and handles a ContractReceiveSecretReveal state change. ]
variable[valid_secret] assign[=] call[name[is_valid_secret_reveal], parameter[]]
if name[valid_secret] begin[:]
call[name[channel].register_onchain_secret, parameter[]]
name[target_state].state assign[=] name[TargetTransferState].ONCHAIN_UNLOCK
name[target_state].secret assign[=] name[state_change].secret
return[call[name[TransitionResult], parameter[name[target_state], call[name[list], parameter[]]]]] | keyword[def] identifier[handle_onchain_secretreveal] (
identifier[target_state] : identifier[TargetTransferState] ,
identifier[state_change] : identifier[ContractReceiveSecretReveal] ,
identifier[channel_state] : identifier[NettingChannelState] ,
)-> identifier[TransitionResult] [ identifier[TargetTransferState] ]:
literal[string]
identifier[valid_secret] = identifier[is_valid_secret_reveal] (
identifier[state_change] = identifier[state_change] ,
identifier[transfer_secrethash] = identifier[target_state] . identifier[transfer] . identifier[lock] . identifier[secrethash] ,
identifier[secret] = identifier[state_change] . identifier[secret] ,
)
keyword[if] identifier[valid_secret] :
identifier[channel] . identifier[register_onchain_secret] (
identifier[channel_state] = identifier[channel_state] ,
identifier[secret] = identifier[state_change] . identifier[secret] ,
identifier[secrethash] = identifier[state_change] . identifier[secrethash] ,
identifier[secret_reveal_block_number] = identifier[state_change] . identifier[block_number] ,
)
identifier[target_state] . identifier[state] = identifier[TargetTransferState] . identifier[ONCHAIN_UNLOCK]
identifier[target_state] . identifier[secret] = identifier[state_change] . identifier[secret]
keyword[return] identifier[TransitionResult] ( identifier[target_state] , identifier[list] ()) | def handle_onchain_secretreveal(target_state: TargetTransferState, state_change: ContractReceiveSecretReveal, channel_state: NettingChannelState) -> TransitionResult[TargetTransferState]:
""" Validates and handles a ContractReceiveSecretReveal state change. """
valid_secret = is_valid_secret_reveal(state_change=state_change, transfer_secrethash=target_state.transfer.lock.secrethash, secret=state_change.secret)
if valid_secret:
channel.register_onchain_secret(channel_state=channel_state, secret=state_change.secret, secrethash=state_change.secrethash, secret_reveal_block_number=state_change.block_number)
target_state.state = TargetTransferState.ONCHAIN_UNLOCK
target_state.secret = state_change.secret # depends on [control=['if'], data=[]]
return TransitionResult(target_state, list()) |
def sethead(self, ref):
    """Set head to a git ref."""
    log.debug('[%s] Setting to ref %s', self.name, ref)
    try:
        resolved = self.repo.rev_parse(ref)
    except gitdb.exc.BadObject:
        # Probably means we don't have it cached yet.
        # So maybe we can fetch it.
        resolved = self.fetchref(ref)
    log.debug('[%s] Setting head to %s', self.name, resolved)
    self.repo.head.reset(resolved, working_tree=True)
    log.debug('[%s] Head object: %s', self.name, self.currenthead)
constant[Set head to a git ref.]
call[name[log].debug, parameter[constant[[%s] Setting to ref %s], name[self].name, name[ref]]]
<ast.Try object at 0x7da207f9b190>
call[name[log].debug, parameter[constant[[%s] Setting head to %s], name[self].name, name[ref]]]
call[name[self].repo.head.reset, parameter[name[ref]]]
call[name[log].debug, parameter[constant[[%s] Head object: %s], name[self].name, name[self].currenthead]] | keyword[def] identifier[sethead] ( identifier[self] , identifier[ref] ):
literal[string]
identifier[log] . identifier[debug] ( literal[string] , identifier[self] . identifier[name] , identifier[ref] )
keyword[try] :
identifier[ref] = identifier[self] . identifier[repo] . identifier[rev_parse] ( identifier[ref] )
keyword[except] identifier[gitdb] . identifier[exc] . identifier[BadObject] :
identifier[ref] = identifier[self] . identifier[fetchref] ( identifier[ref] )
identifier[log] . identifier[debug] ( literal[string] , identifier[self] . identifier[name] , identifier[ref] )
identifier[self] . identifier[repo] . identifier[head] . identifier[reset] ( identifier[ref] , identifier[working_tree] = keyword[True] )
identifier[log] . identifier[debug] ( literal[string] , identifier[self] . identifier[name] , identifier[self] . identifier[currenthead] ) | def sethead(self, ref):
"""Set head to a git ref."""
log.debug('[%s] Setting to ref %s', self.name, ref)
try:
ref = self.repo.rev_parse(ref) # depends on [control=['try'], data=[]]
except gitdb.exc.BadObject:
# Probably means we don't have it cached yet.
# So maybe we can fetch it.
ref = self.fetchref(ref) # depends on [control=['except'], data=[]]
log.debug('[%s] Setting head to %s', self.name, ref)
self.repo.head.reset(ref, working_tree=True)
log.debug('[%s] Head object: %s', self.name, self.currenthead) |
def increment(self, key, value=1):
    """
    Increment the value of an item in the cache.
    :param key: The cache key
    :type key: str
    :param value: The increment value
    :type value: int
    :rtype: int or bool
    """
    # Bug fix: the docstring promises an int/bool result, but the store's
    # return value was previously discarded. Propagate it to the caller.
    return self._store.increment(self.tagged_item_key(key), value)
constant[
Increment the value of an item in the cache.
:param key: The cache key
:type key: str
:param value: The increment value
:type value: int
:rtype: int or bool
]
call[name[self]._store.increment, parameter[call[name[self].tagged_item_key, parameter[name[key]]], name[value]]] | keyword[def] identifier[increment] ( identifier[self] , identifier[key] , identifier[value] = literal[int] ):
literal[string]
identifier[self] . identifier[_store] . identifier[increment] ( identifier[self] . identifier[tagged_item_key] ( identifier[key] ), identifier[value] ) | def increment(self, key, value=1):
"""
Increment the value of an item in the cache.
:param key: The cache key
:type key: str
:param value: The increment value
:type value: int
:rtype: int or bool
"""
self._store.increment(self.tagged_item_key(key), value) |
def get_extra(request):
    """Return information about a module / collection that cannot be cached."""
    settings = get_current_registry().settings
    exports_dirs = settings['exports-directories'].split()
    args = request.matchdict
    if args['page_ident_hash']:
        # A page viewed inside a book: the outer ident hash supplies the
        # book (context) id/version; the page hash supplies the page's.
        context_id, context_version = split_ident_hash(args['ident_hash'])
        try:
            id, version = split_ident_hash(args['page_ident_hash'])
        except IdentHashShortId as e:
            # Short id given: resolve it to the full uuid.
            id = get_uuid(e.id)
            version = e.version
        except IdentHashMissingVersion as e:
            # Ideally we would find the page version
            # that is in the book instead of latest
            id = e.id
            version = get_latest_version(e.id)
    else:
        # Standalone module / collection: no book context.
        context_id = context_version = None
        id, version = split_ident_hash(args['ident_hash'])
    results = {}
    with db_connect() as db_connection:
        with db_connection.cursor() as cursor:
            # Per-request lookups (export types, latest/head versions,
            # publish permission, state, containing books) — the values
            # that cannot be cached with the document itself.
            results['downloads'] = \
                list(get_export_allowable_types(cursor, exports_dirs,
                                                id, version))
            results['isLatest'] = is_latest(id, version)
            results['latestVersion'] = get_latest_version(id)
            results['headVersion'] = get_head_version(id)
            results['canPublish'] = get_module_can_publish(cursor, id)
            results['state'] = get_state(cursor, id, version)
            results['books'] = get_books_containing_page(cursor, id, version,
                                                         context_id,
                                                         context_version)
            formatAuthors(results['books'])
    resp = request.response
    resp.content_type = 'application/json'
    resp.body = json.dumps(results, default=json_serial)
    return resp
constant[Return information about a module / collection that cannot be cached.]
variable[settings] assign[=] call[name[get_current_registry], parameter[]].settings
variable[exports_dirs] assign[=] call[call[name[settings]][constant[exports-directories]].split, parameter[]]
variable[args] assign[=] name[request].matchdict
if call[name[args]][constant[page_ident_hash]] begin[:]
<ast.Tuple object at 0x7da1b18664d0> assign[=] call[name[split_ident_hash], parameter[call[name[args]][constant[ident_hash]]]]
<ast.Try object at 0x7da1b1864b50>
variable[results] assign[=] dictionary[[], []]
with call[name[db_connect], parameter[]] begin[:]
with call[name[db_connection].cursor, parameter[]] begin[:]
call[name[results]][constant[downloads]] assign[=] call[name[list], parameter[call[name[get_export_allowable_types], parameter[name[cursor], name[exports_dirs], name[id], name[version]]]]]
call[name[results]][constant[isLatest]] assign[=] call[name[is_latest], parameter[name[id], name[version]]]
call[name[results]][constant[latestVersion]] assign[=] call[name[get_latest_version], parameter[name[id]]]
call[name[results]][constant[headVersion]] assign[=] call[name[get_head_version], parameter[name[id]]]
call[name[results]][constant[canPublish]] assign[=] call[name[get_module_can_publish], parameter[name[cursor], name[id]]]
call[name[results]][constant[state]] assign[=] call[name[get_state], parameter[name[cursor], name[id], name[version]]]
call[name[results]][constant[books]] assign[=] call[name[get_books_containing_page], parameter[name[cursor], name[id], name[version], name[context_id], name[context_version]]]
call[name[formatAuthors], parameter[call[name[results]][constant[books]]]]
variable[resp] assign[=] name[request].response
name[resp].content_type assign[=] constant[application/json]
name[resp].body assign[=] call[name[json].dumps, parameter[name[results]]]
return[name[resp]] | keyword[def] identifier[get_extra] ( identifier[request] ):
literal[string]
identifier[settings] = identifier[get_current_registry] (). identifier[settings]
identifier[exports_dirs] = identifier[settings] [ literal[string] ]. identifier[split] ()
identifier[args] = identifier[request] . identifier[matchdict]
keyword[if] identifier[args] [ literal[string] ]:
identifier[context_id] , identifier[context_version] = identifier[split_ident_hash] ( identifier[args] [ literal[string] ])
keyword[try] :
identifier[id] , identifier[version] = identifier[split_ident_hash] ( identifier[args] [ literal[string] ])
keyword[except] identifier[IdentHashShortId] keyword[as] identifier[e] :
identifier[id] = identifier[get_uuid] ( identifier[e] . identifier[id] )
identifier[version] = identifier[e] . identifier[version]
keyword[except] identifier[IdentHashMissingVersion] keyword[as] identifier[e] :
identifier[id] = identifier[e] . identifier[id]
identifier[version] = identifier[get_latest_version] ( identifier[e] . identifier[id] )
keyword[else] :
identifier[context_id] = identifier[context_version] = keyword[None]
identifier[id] , identifier[version] = identifier[split_ident_hash] ( identifier[args] [ literal[string] ])
identifier[results] ={}
keyword[with] identifier[db_connect] () keyword[as] identifier[db_connection] :
keyword[with] identifier[db_connection] . identifier[cursor] () keyword[as] identifier[cursor] :
identifier[results] [ literal[string] ]= identifier[list] ( identifier[get_export_allowable_types] ( identifier[cursor] , identifier[exports_dirs] ,
identifier[id] , identifier[version] ))
identifier[results] [ literal[string] ]= identifier[is_latest] ( identifier[id] , identifier[version] )
identifier[results] [ literal[string] ]= identifier[get_latest_version] ( identifier[id] )
identifier[results] [ literal[string] ]= identifier[get_head_version] ( identifier[id] )
identifier[results] [ literal[string] ]= identifier[get_module_can_publish] ( identifier[cursor] , identifier[id] )
identifier[results] [ literal[string] ]= identifier[get_state] ( identifier[cursor] , identifier[id] , identifier[version] )
identifier[results] [ literal[string] ]= identifier[get_books_containing_page] ( identifier[cursor] , identifier[id] , identifier[version] ,
identifier[context_id] ,
identifier[context_version] )
identifier[formatAuthors] ( identifier[results] [ literal[string] ])
identifier[resp] = identifier[request] . identifier[response]
identifier[resp] . identifier[content_type] = literal[string]
identifier[resp] . identifier[body] = identifier[json] . identifier[dumps] ( identifier[results] , identifier[default] = identifier[json_serial] )
keyword[return] identifier[resp] | def get_extra(request):
"""Return information about a module / collection that cannot be cached."""
settings = get_current_registry().settings
exports_dirs = settings['exports-directories'].split()
args = request.matchdict
if args['page_ident_hash']:
(context_id, context_version) = split_ident_hash(args['ident_hash'])
try:
(id, version) = split_ident_hash(args['page_ident_hash']) # depends on [control=['try'], data=[]]
except IdentHashShortId as e:
id = get_uuid(e.id)
version = e.version # depends on [control=['except'], data=['e']]
except IdentHashMissingVersion as e:
# Ideally we would find the page version
# that is in the book instead of latest
id = e.id
version = get_latest_version(e.id) # depends on [control=['except'], data=['e']] # depends on [control=['if'], data=[]]
else:
context_id = context_version = None
(id, version) = split_ident_hash(args['ident_hash'])
results = {}
with db_connect() as db_connection:
with db_connection.cursor() as cursor:
results['downloads'] = list(get_export_allowable_types(cursor, exports_dirs, id, version))
results['isLatest'] = is_latest(id, version)
results['latestVersion'] = get_latest_version(id)
results['headVersion'] = get_head_version(id)
results['canPublish'] = get_module_can_publish(cursor, id)
results['state'] = get_state(cursor, id, version)
results['books'] = get_books_containing_page(cursor, id, version, context_id, context_version)
formatAuthors(results['books']) # depends on [control=['with'], data=['cursor']] # depends on [control=['with'], data=['db_connection']]
resp = request.response
resp.content_type = 'application/json'
resp.body = json.dumps(results, default=json_serial)
return resp |
def remove(cls, name, rc_file='~/.odoorpcrc'):
    """Remove the session identified by `name` from the `rc_file` file:

    .. doctest::
        :options: +SKIP

        >>> import odoorpc
        >>> odoorpc.ODOO.remove('foo')
        True

    *Python 2:*

    :raise: `ValueError` (if the session does not exist)
    :raise: `IOError`

    *Python 3:*

    :raise: `ValueError` (if the session does not exist)
    :raise: `PermissionError`
    :raise: `FileNotFoundError`
    """
    stored = session.get(name, rc_file)
    # Refuse to remove a session that was saved by a different class.
    if stored.get('type') != cls.__name__:
        message = "'{0}' session is not of type '{1}'".format(
            name, cls.__name__)
        raise error.InternalError(message)
    return session.remove(name, rc_file)
constant[Remove the session identified by `name` from the `rc_file` file:
.. doctest::
:options: +SKIP
>>> import odoorpc
>>> odoorpc.ODOO.remove('foo')
True
*Python 2:*
:raise: `ValueError` (if the session does not exist)
:raise: `IOError`
*Python 3:*
:raise: `ValueError` (if the session does not exist)
:raise: `PermissionError`
:raise: `FileNotFoundError`
]
variable[data] assign[=] call[name[session].get, parameter[name[name], name[rc_file]]]
if compare[call[name[data].get, parameter[constant[type]]] not_equal[!=] name[cls].__name__] begin[:]
<ast.Raise object at 0x7da2044c0f10>
return[call[name[session].remove, parameter[name[name], name[rc_file]]]] | keyword[def] identifier[remove] ( identifier[cls] , identifier[name] , identifier[rc_file] = literal[string] ):
literal[string]
identifier[data] = identifier[session] . identifier[get] ( identifier[name] , identifier[rc_file] )
keyword[if] identifier[data] . identifier[get] ( literal[string] )!= identifier[cls] . identifier[__name__] :
keyword[raise] identifier[error] . identifier[InternalError] (
literal[string] . identifier[format] (
identifier[name] , identifier[cls] . identifier[__name__] ))
keyword[return] identifier[session] . identifier[remove] ( identifier[name] , identifier[rc_file] ) | def remove(cls, name, rc_file='~/.odoorpcrc'):
"""Remove the session identified by `name` from the `rc_file` file:
.. doctest::
:options: +SKIP
>>> import odoorpc
>>> odoorpc.ODOO.remove('foo')
True
*Python 2:*
:raise: `ValueError` (if the session does not exist)
:raise: `IOError`
*Python 3:*
:raise: `ValueError` (if the session does not exist)
:raise: `PermissionError`
:raise: `FileNotFoundError`
"""
data = session.get(name, rc_file)
if data.get('type') != cls.__name__:
raise error.InternalError("'{0}' session is not of type '{1}'".format(name, cls.__name__)) # depends on [control=['if'], data=[]]
return session.remove(name, rc_file) |
def update(self, d):
    """Works like regular update, but only actually updates when the new
    value and the old value differ. This is necessary to prevent
    certain infinite loops.

    :arg d: a dictionary
    """
    for key, value in d.items():
        # Only write when the stored value would actually change.
        unchanged = key in self and self[key] == value
        if not unchanged:
            self[key] = value
constant[Works like regular update, but only actually updates when the new
value and the old value differ. This is necessary to prevent
certain infinite loops.
:arg d: a dictionary
]
for taget[tuple[[<ast.Name object at 0x7da1b0c52e30>, <ast.Name object at 0x7da1b0c53670>]]] in starred[call[name[d].items, parameter[]]] begin[:]
if <ast.BoolOp object at 0x7da1b0c52350> begin[:]
call[name[self]][name[k]] assign[=] name[v] | keyword[def] identifier[update] ( identifier[self] , identifier[d] ):
literal[string]
keyword[for] ( identifier[k] , identifier[v] ) keyword[in] identifier[d] . identifier[items] ():
keyword[if] identifier[k] keyword[not] keyword[in] identifier[self] keyword[or] identifier[self] [ identifier[k] ]!= identifier[v] :
identifier[self] [ identifier[k] ]= identifier[v] | def update(self, d):
"""Works like regular update, but only actually updates when the new
value and the old value differ. This is necessary to prevent
certain infinite loops.
:arg d: a dictionary
"""
for (k, v) in d.items():
if k not in self or self[k] != v:
self[k] = v # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] |
def __store_recent_file(self, file):
    """
    Stores given recent file into the settings.

    :param file: File to store.
    :type file: unicode
    """

    LOGGER.debug("> Storing '{0}' file in recent files.".format(file))

    # Reload the stored list, keeping only entries that still exist on disk.
    stored_paths = self.__settings.get_key(
        self.__settings_section, "recentFiles").toStringList()
    recent_files = [foundations.strings.to_string(stored_path)
                    for stored_path in stored_paths
                    if foundations.common.path_exists(stored_path)]

    # Move (or insert) the given file to the front of the list.
    if file in recent_files:
        recent_files.remove(file)
    recent_files.insert(0, file)

    # Keep only the newest entries.
    recent_files = recent_files[:self.__maximum_recent_files]
    self.__settings.set_key(self.__settings_section, "recentFiles", recent_files)
    self.recent_files_changed.emit()
constant[
Stores given recent file into the settings.
:param file: File to store.
:type file: unicode
]
call[name[LOGGER].debug, parameter[call[constant[> Storing '{0}' file in recent files.].format, parameter[name[file]]]]]
variable[recentFiles] assign[=] <ast.ListComp object at 0x7da1b0913f10>
if <ast.UnaryOp object at 0x7da1b0966b60> begin[:]
variable[recentFiles] assign[=] list[[]]
if compare[name[file] in name[recentFiles]] begin[:]
call[name[recentFiles].pop, parameter[call[name[recentFiles].index, parameter[name[file]]]]]
call[name[recentFiles].insert, parameter[constant[0], name[file]]]
<ast.Delete object at 0x7da1b0966860>
variable[recentFiles] assign[=] call[name[self].__settings.set_key, parameter[name[self].__settings_section, constant[recentFiles], name[recentFiles]]]
call[name[self].recent_files_changed.emit, parameter[]] | keyword[def] identifier[__store_recent_file] ( identifier[self] , identifier[file] ):
literal[string]
identifier[LOGGER] . identifier[debug] ( literal[string] . identifier[format] ( identifier[file] ))
identifier[recentFiles] =[ identifier[foundations] . identifier[strings] . identifier[to_string] ( identifier[recentFile] )
keyword[for] identifier[recentFile] keyword[in] identifier[self] . identifier[__settings] . identifier[get_key] ( identifier[self] . identifier[__settings_section] , literal[string] ). identifier[toStringList] ()
keyword[if] identifier[foundations] . identifier[common] . identifier[path_exists] ( identifier[recentFile] )]
keyword[if] keyword[not] identifier[recentFiles] :
identifier[recentFiles] =[]
keyword[if] identifier[file] keyword[in] identifier[recentFiles] :
identifier[recentFiles] . identifier[pop] ( identifier[recentFiles] . identifier[index] ( identifier[file] ))
identifier[recentFiles] . identifier[insert] ( literal[int] , identifier[file] )
keyword[del] identifier[recentFiles] [ identifier[self] . identifier[__maximum_recent_files] :]
identifier[recentFiles] = identifier[self] . identifier[__settings] . identifier[set_key] ( identifier[self] . identifier[__settings_section] , literal[string] , identifier[recentFiles] )
identifier[self] . identifier[recent_files_changed] . identifier[emit] () | def __store_recent_file(self, file):
"""
Stores given recent file into the settings.
:param file: File to store.
:type file: unicode
"""
LOGGER.debug("> Storing '{0}' file in recent files.".format(file))
recentFiles = [foundations.strings.to_string(recentFile) for recentFile in self.__settings.get_key(self.__settings_section, 'recentFiles').toStringList() if foundations.common.path_exists(recentFile)]
if not recentFiles:
recentFiles = [] # depends on [control=['if'], data=[]]
if file in recentFiles:
recentFiles.pop(recentFiles.index(file)) # depends on [control=['if'], data=['file', 'recentFiles']]
recentFiles.insert(0, file)
del recentFiles[self.__maximum_recent_files:]
recentFiles = self.__settings.set_key(self.__settings_section, 'recentFiles', recentFiles)
self.recent_files_changed.emit() |
def _add_to_bulk(self, bulkobj):
    """Add this operation to the _Bulk instance `bulkobj`."""
    # Third positional argument is False — presumably the multi flag,
    # so at most one document is updated; verify against _Bulk.add_update.
    bulkobj.add_update(
        self._filter,
        self._doc,
        False,
        self._upsert,
        collation=self._collation,
    )
constant[Add this operation to the _Bulk instance `bulkobj`.]
call[name[bulkobj].add_update, parameter[name[self]._filter, name[self]._doc, constant[False], name[self]._upsert]] | keyword[def] identifier[_add_to_bulk] ( identifier[self] , identifier[bulkobj] ):
literal[string]
identifier[bulkobj] . identifier[add_update] ( identifier[self] . identifier[_filter] , identifier[self] . identifier[_doc] , keyword[False] , identifier[self] . identifier[_upsert] ,
identifier[collation] = identifier[self] . identifier[_collation] ) | def _add_to_bulk(self, bulkobj):
"""Add this operation to the _Bulk instance `bulkobj`."""
bulkobj.add_update(self._filter, self._doc, False, self._upsert, collation=self._collation) |
def handle_version(self, message_header, message):
    """
    This method will handle the Version message and
    will send a VerAck message when it receives the
    Version message.

    :param message_header: The Version message header
    :param message: The Version message
    """
    log.debug("handle version")

    # Acknowledge the peer's Version message with a VerAck.
    acknowledgement = VerAck()
    log.debug("send VerAck")
    self.send_message(acknowledgement)
    self.verack = True

    # begin!
    self.send_getheaders(self.first_block_hash)
constant[
This method will handle the Version message and
will send a VerAck message when it receives the
Version message.
:param message_header: The Version message header
:param message: The Version message
]
call[name[log].debug, parameter[constant[handle version]]]
variable[verack] assign[=] call[name[VerAck], parameter[]]
call[name[log].debug, parameter[constant[send VerAck]]]
call[name[self].send_message, parameter[name[verack]]]
name[self].verack assign[=] constant[True]
call[name[self].send_getheaders, parameter[name[self].first_block_hash]] | keyword[def] identifier[handle_version] ( identifier[self] , identifier[message_header] , identifier[message] ):
literal[string]
identifier[log] . identifier[debug] ( literal[string] )
identifier[verack] = identifier[VerAck] ()
identifier[log] . identifier[debug] ( literal[string] )
identifier[self] . identifier[send_message] ( identifier[verack] )
identifier[self] . identifier[verack] = keyword[True]
identifier[self] . identifier[send_getheaders] ( identifier[self] . identifier[first_block_hash] ) | def handle_version(self, message_header, message):
"""
This method will handle the Version message and
will send a VerAck message when it receives the
Version message.
:param message_header: The Version message header
:param message: The Version message
"""
log.debug('handle version')
verack = VerAck()
log.debug('send VerAck')
self.send_message(verack)
self.verack = True
# begin!
self.send_getheaders(self.first_block_hash) |
def dust2bed(args):
    """
    %prog dust2bed fastafile

    Use dustmasker to find low-complexity regions (LCRs) in the genome.

    Writes one BED line per DUST interval to `<fastafile>.dust.bed` and
    logs the total number of intervals and bases exported.
    """
    from jcvi.formats.base import read_block

    p = OptionParser(dust2bed.__doc__)
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    fastafile, = args
    interval = fastafile + ".iv"
    if need_update(fastafile, interval):
        # Only re-run dustmasker when the FASTA is newer than its output.
        cmd = "dustmasker -in {0}".format(fastafile)
        sh(cmd, outfile=interval)

    bedfile = fastafile.rsplit(".", 1)[0] + ".dust.bed"
    nlines = 0
    nbases = 0
    # Fix: the original leaked both file handles; close them deterministically.
    fw = must_open(bedfile, "w")
    try:
        with open(interval) as fp:
            for header, block in read_block(fp, ">"):
                header = header.strip(">")
                for b in block:
                    # dustmasker emits intervals as "start - end" pairs.
                    start, end = b.split(" - ")
                    start, end = int(start), int(end)
                    print("\t".join(str(x) for x in (header, start, end)), file=fw)
                    nlines += 1
                    nbases += end - start
    finally:
        fw.close()

    logging.debug("A total of {0} DUST intervals ({1} bp) exported to `{2}`".\
                format(nlines, nbases, bedfile))
constant[
%prog dust2bed fastafile
Use dustmasker to find low-complexity regions (LCRs) in the genome.
]
from relative_module[jcvi.formats.base] import module[read_block]
variable[p] assign[=] call[name[OptionParser], parameter[name[dust2bed].__doc__]]
<ast.Tuple object at 0x7da18f723730> assign[=] call[name[p].parse_args, parameter[name[args]]]
if compare[call[name[len], parameter[name[args]]] not_equal[!=] constant[1]] begin[:]
call[name[sys].exit, parameter[<ast.UnaryOp object at 0x7da18f7227d0>]]
<ast.Tuple object at 0x7da18f722bf0> assign[=] name[args]
variable[interval] assign[=] binary_operation[name[fastafile] + constant[.iv]]
if call[name[need_update], parameter[name[fastafile], name[interval]]] begin[:]
variable[cmd] assign[=] call[constant[dustmasker -in {0}].format, parameter[name[fastafile]]]
call[name[sh], parameter[name[cmd]]]
variable[fp] assign[=] call[name[open], parameter[name[interval]]]
variable[bedfile] assign[=] binary_operation[call[call[name[fastafile].rsplit, parameter[constant[.], constant[1]]]][constant[0]] + constant[.dust.bed]]
variable[fw] assign[=] call[name[must_open], parameter[name[bedfile], constant[w]]]
variable[nlines] assign[=] constant[0]
variable[nbases] assign[=] constant[0]
for taget[tuple[[<ast.Name object at 0x7da18f7225c0>, <ast.Name object at 0x7da18f722d10>]]] in starred[call[name[read_block], parameter[name[fp], constant[>]]]] begin[:]
variable[header] assign[=] call[name[header].strip, parameter[constant[>]]]
for taget[name[b]] in starred[name[block]] begin[:]
<ast.Tuple object at 0x7da18f721120> assign[=] call[name[b].split, parameter[constant[ - ]]]
<ast.Tuple object at 0x7da18f58cf40> assign[=] tuple[[<ast.Call object at 0x7da18f58e230>, <ast.Call object at 0x7da18f58e7a0>]]
call[name[print], parameter[call[constant[ ].join, parameter[<ast.GeneratorExp object at 0x7da18f58dff0>]]]]
<ast.AugAssign object at 0x7da18f58eec0>
<ast.AugAssign object at 0x7da18f58ea70>
call[name[logging].debug, parameter[call[constant[A total of {0} DUST intervals ({1} bp) exported to `{2}`].format, parameter[name[nlines], name[nbases], name[bedfile]]]]] | keyword[def] identifier[dust2bed] ( identifier[args] ):
literal[string]
keyword[from] identifier[jcvi] . identifier[formats] . identifier[base] keyword[import] identifier[read_block]
identifier[p] = identifier[OptionParser] ( identifier[dust2bed] . identifier[__doc__] )
identifier[opts] , identifier[args] = identifier[p] . identifier[parse_args] ( identifier[args] )
keyword[if] identifier[len] ( identifier[args] )!= literal[int] :
identifier[sys] . identifier[exit] ( keyword[not] identifier[p] . identifier[print_help] ())
identifier[fastafile] ,= identifier[args]
identifier[interval] = identifier[fastafile] + literal[string]
keyword[if] identifier[need_update] ( identifier[fastafile] , identifier[interval] ):
identifier[cmd] = literal[string] . identifier[format] ( identifier[fastafile] )
identifier[sh] ( identifier[cmd] , identifier[outfile] = identifier[interval] )
identifier[fp] = identifier[open] ( identifier[interval] )
identifier[bedfile] = identifier[fastafile] . identifier[rsplit] ( literal[string] , literal[int] )[ literal[int] ]+ literal[string]
identifier[fw] = identifier[must_open] ( identifier[bedfile] , literal[string] )
identifier[nlines] = literal[int]
identifier[nbases] = literal[int]
keyword[for] identifier[header] , identifier[block] keyword[in] identifier[read_block] ( identifier[fp] , literal[string] ):
identifier[header] = identifier[header] . identifier[strip] ( literal[string] )
keyword[for] identifier[b] keyword[in] identifier[block] :
identifier[start] , identifier[end] = identifier[b] . identifier[split] ( literal[string] )
identifier[start] , identifier[end] = identifier[int] ( identifier[start] ), identifier[int] ( identifier[end] )
identifier[print] ( literal[string] . identifier[join] ( identifier[str] ( identifier[x] ) keyword[for] identifier[x] keyword[in] ( identifier[header] , identifier[start] , identifier[end] )), identifier[file] = identifier[fw] )
identifier[nlines] += literal[int]
identifier[nbases] += identifier[end] - identifier[start]
identifier[logging] . identifier[debug] ( literal[string] . identifier[format] ( identifier[nlines] , identifier[nbases] , identifier[bedfile] )) | def dust2bed(args):
"""
%prog dust2bed fastafile
Use dustmasker to find low-complexity regions (LCRs) in the genome.
"""
from jcvi.formats.base import read_block
p = OptionParser(dust2bed.__doc__)
(opts, args) = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help()) # depends on [control=['if'], data=[]]
(fastafile,) = args
interval = fastafile + '.iv'
if need_update(fastafile, interval):
cmd = 'dustmasker -in {0}'.format(fastafile)
sh(cmd, outfile=interval) # depends on [control=['if'], data=[]]
fp = open(interval)
bedfile = fastafile.rsplit('.', 1)[0] + '.dust.bed'
fw = must_open(bedfile, 'w')
nlines = 0
nbases = 0
for (header, block) in read_block(fp, '>'):
header = header.strip('>')
for b in block:
(start, end) = b.split(' - ')
(start, end) = (int(start), int(end))
print('\t'.join((str(x) for x in (header, start, end))), file=fw)
nlines += 1
nbases += end - start # depends on [control=['for'], data=['b']] # depends on [control=['for'], data=[]]
logging.debug('A total of {0} DUST intervals ({1} bp) exported to `{2}`'.format(nlines, nbases, bedfile)) |
def arc_data(self):
    """Return the map from filenames to lists of line number pairs."""
    # Idiom fix: build the dict with a comprehension instead of
    # dict([...]) over a throwaway list (flake8-comprehensions C404).
    # Each arc list is sorted so the output is deterministic.
    return {fname: sorted(amap.keys()) for fname, amap in iitems(self.arcs)}
constant[Return the map from filenames to lists of line number pairs.]
return[call[name[dict], parameter[<ast.ListComp object at 0x7da2041db850>]]] | keyword[def] identifier[arc_data] ( identifier[self] ):
literal[string]
keyword[return] identifier[dict] (
[( identifier[f] , identifier[sorted] ( identifier[amap] . identifier[keys] ())) keyword[for] identifier[f] , identifier[amap] keyword[in] identifier[iitems] ( identifier[self] . identifier[arcs] )]
) | def arc_data(self):
"""Return the map from filenames to lists of line number pairs."""
return dict([(f, sorted(amap.keys())) for (f, amap) in iitems(self.arcs)]) |
def _set_sub_prop(container, keys, value):
    """Set a nested value in a dictionary.

    Arguments:
        container (dict):
            A dictionary which may contain other dictionaries as values.
        keys (iterable):
            A sequence of keys to attempt to set the value for. Each item in
            the sequence represents a deeper nesting. The first key is for
            the top level. If there is a dictionary there, the second key
            attempts to get the value within that, and so on.
        value (object): Value to set within the container.

    Examples:
        Set a top-level value (equivalent to ``container['key'] = 'value'``).

        >>> container = {}
        >>> _set_sub_prop(container, ['key'], 'value')
        >>> container
        {'key': 'value'}

        Set a nested value.

        >>> container = {}
        >>> _set_sub_prop(container, ['key', 'subkey'], 'value')
        >>> container
        {'key': {'subkey': 'value'}}

        Replace a nested value.

        >>> container = {'key': {'subkey': 'prev'}}
        >>> _set_sub_prop(container, ['key', 'subkey'], 'new')
        >>> container
        {'key': {'subkey': 'new'}}
    """
    node = container
    # Descend through every key but the last, creating intermediate
    # dictionaries on demand.
    for key in keys[:-1]:
        node = node.setdefault(key, {})
    # Assign the value at the innermost key.
    node[keys[-1]] = value
constant[Set a nested value in a dictionary.
Arguments:
container (dict):
A dictionary which may contain other dictionaries as values.
keys (iterable):
A sequence of keys to attempt to set the value for. Each item in
the sequence represents a deeper nesting. The first key is for
the top level. If there is a dictionary there, the second key
attempts to get the value within that, and so on.
value (object): Value to set within the container.
Examples:
Set a top-level value (equivalent to ``container['key'] = 'value'``).
>>> container = {}
>>> _set_sub_prop(container, ['key'], 'value')
>>> container
{'key': 'value'}
Set a nested value.
>>> container = {}
>>> _set_sub_prop(container, ['key', 'subkey'], 'value')
>>> container
{'key': {'subkey': 'value'}}
Replace a nested value.
>>> container = {'key': {'subkey': 'prev'}}
>>> _set_sub_prop(container, ['key', 'subkey'], 'new')
>>> container
{'key': {'subkey': 'new'}}
]
variable[sub_val] assign[=] name[container]
for taget[name[key]] in starred[call[name[keys]][<ast.Slice object at 0x7da20e956d40>]] begin[:]
if compare[name[key] <ast.NotIn object at 0x7da2590d7190> name[sub_val]] begin[:]
call[name[sub_val]][name[key]] assign[=] dictionary[[], []]
variable[sub_val] assign[=] call[name[sub_val]][name[key]]
call[name[sub_val]][call[name[keys]][<ast.UnaryOp object at 0x7da20e9575e0>]] assign[=] name[value] | keyword[def] identifier[_set_sub_prop] ( identifier[container] , identifier[keys] , identifier[value] ):
literal[string]
identifier[sub_val] = identifier[container]
keyword[for] identifier[key] keyword[in] identifier[keys] [:- literal[int] ]:
keyword[if] identifier[key] keyword[not] keyword[in] identifier[sub_val] :
identifier[sub_val] [ identifier[key] ]={}
identifier[sub_val] = identifier[sub_val] [ identifier[key] ]
identifier[sub_val] [ identifier[keys] [- literal[int] ]]= identifier[value] | def _set_sub_prop(container, keys, value):
"""Set a nested value in a dictionary.
Arguments:
container (dict):
A dictionary which may contain other dictionaries as values.
keys (iterable):
A sequence of keys to attempt to set the value for. Each item in
the sequence represents a deeper nesting. The first key is for
the top level. If there is a dictionary there, the second key
attempts to get the value within that, and so on.
value (object): Value to set within the container.
Examples:
Set a top-level value (equivalent to ``container['key'] = 'value'``).
>>> container = {}
>>> _set_sub_prop(container, ['key'], 'value')
>>> container
{'key': 'value'}
Set a nested value.
>>> container = {}
>>> _set_sub_prop(container, ['key', 'subkey'], 'value')
>>> container
{'key': {'subkey': 'value'}}
Replace a nested value.
>>> container = {'key': {'subkey': 'prev'}}
>>> _set_sub_prop(container, ['key', 'subkey'], 'new')
>>> container
{'key': {'subkey': 'new'}}
"""
sub_val = container
for key in keys[:-1]:
if key not in sub_val:
sub_val[key] = {} # depends on [control=['if'], data=['key', 'sub_val']]
sub_val = sub_val[key] # depends on [control=['for'], data=['key']]
sub_val[keys[-1]] = value |
def update(self):
    """Update the IRQ stats."""
    # Start from a fresh (empty) stats structure.
    stats = self.get_init_value()

    # IRQ plugin only available on GNU/Linux
    if not LINUX:
        return self.stats

    if self.input_method == 'local':
        # Grab the stats
        stats = self.irq.get()
    elif self.input_method == 'snmp':
        # not available
        pass

    # Keep only the five busiest interrupts (highest rate per second).
    top_five = sorted(stats,
                      key=operator.itemgetter('irq_rate'),
                      reverse=True)[:5]

    self.stats = top_five
    return self.stats
constant[Update the IRQ stats.]
variable[stats] assign[=] call[name[self].get_init_value, parameter[]]
if <ast.UnaryOp object at 0x7da18c4cef50> begin[:]
return[name[self].stats]
if compare[name[self].input_method equal[==] constant[local]] begin[:]
variable[stats] assign[=] call[name[self].irq.get, parameter[]]
variable[stats] assign[=] call[call[name[sorted], parameter[name[stats]]]][<ast.Slice object at 0x7da18c4cda50>]
name[self].stats assign[=] name[stats]
return[name[self].stats] | keyword[def] identifier[update] ( identifier[self] ):
literal[string]
identifier[stats] = identifier[self] . identifier[get_init_value] ()
keyword[if] keyword[not] identifier[LINUX] :
keyword[return] identifier[self] . identifier[stats]
keyword[if] identifier[self] . identifier[input_method] == literal[string] :
identifier[stats] = identifier[self] . identifier[irq] . identifier[get] ()
keyword[elif] identifier[self] . identifier[input_method] == literal[string] :
keyword[pass]
identifier[stats] = identifier[sorted] ( identifier[stats] ,
identifier[key] = identifier[operator] . identifier[itemgetter] ( literal[string] ),
identifier[reverse] = keyword[True] )[: literal[int] ]
identifier[self] . identifier[stats] = identifier[stats]
keyword[return] identifier[self] . identifier[stats] | def update(self):
"""Update the IRQ stats."""
# Init new stats
stats = self.get_init_value()
# IRQ plugin only available on GNU/Linux
if not LINUX:
return self.stats # depends on [control=['if'], data=[]]
if self.input_method == 'local':
# Grab the stats
stats = self.irq.get() # depends on [control=['if'], data=[]]
elif self.input_method == 'snmp':
# not available
pass # depends on [control=['if'], data=[]]
# Get the TOP 5 (by rate/s)
stats = sorted(stats, key=operator.itemgetter('irq_rate'), reverse=True)[:5]
# Update the stats
self.stats = stats
return self.stats |
def add_finder_patterns(matrix, is_micro):
    """\
    Adds the finder pattern(s) to the matrix.

    QR Codes get three finder patterns, Micro QR Codes have just one finder
    pattern.

    ISO/IEC 18004:2015(E) -- 6.3.3 Finder pattern (page 16)
    ISO/IEC 18004:2015(E) -- 6.3.4 Separator (page 17)

    :param matrix: The matrix.
    :param bool is_micro: Indicates if the matrix represents a Micro QR Code.
    """
    # Every symbol carries a finder pattern in the upper left corner.
    add_finder_pattern(matrix, 0, 0)
    if is_micro:
        return
    # Full QR Codes get two more: upper right and lower left corners.
    add_finder_pattern(matrix, 0, -7)
    add_finder_pattern(matrix, -7, 0)
constant[ Adds the finder pattern(s) to the matrix.
QR Codes get three finder patterns, Micro QR Codes have just one finder
pattern.
ISO/IEC 18004:2015(E) -- 6.3.3 Finder pattern (page 16)
ISO/IEC 18004:2015(E) -- 6.3.4 Separator (page 17)
:param matrix: The matrix.
:param bool is_micro: Indicates if the matrix represents a Micro QR Code.
]
call[name[add_finder_pattern], parameter[name[matrix], constant[0], constant[0]]]
if <ast.UnaryOp object at 0x7da1b0b29840> begin[:]
call[name[add_finder_pattern], parameter[name[matrix], constant[0], <ast.UnaryOp object at 0x7da2047eb040>]]
call[name[add_finder_pattern], parameter[name[matrix], <ast.UnaryOp object at 0x7da20e962cb0>, constant[0]]] | keyword[def] identifier[add_finder_patterns] ( identifier[matrix] , identifier[is_micro] ):
literal[string]
identifier[add_finder_pattern] ( identifier[matrix] , literal[int] , literal[int] )
keyword[if] keyword[not] identifier[is_micro] :
identifier[add_finder_pattern] ( identifier[matrix] , literal[int] ,- literal[int] )
identifier[add_finder_pattern] ( identifier[matrix] ,- literal[int] , literal[int] ) | def add_finder_patterns(matrix, is_micro):
""" Adds the finder pattern(s) to the matrix.
QR Codes get three finder patterns, Micro QR Codes have just one finder
pattern.
ISO/IEC 18004:2015(E) -- 6.3.3 Finder pattern (page 16)
ISO/IEC 18004:2015(E) -- 6.3.4 Separator (page 17)
:param matrix: The matrix.
:param bool is_micro: Indicates if the matrix represents a Micro QR Code.
"""
add_finder_pattern(matrix, 0, 0) # Upper left corner
if not is_micro:
add_finder_pattern(matrix, 0, -7) # Upper right corner
add_finder_pattern(matrix, -7, 0) # depends on [control=['if'], data=[]] |
def runserver(hostname, port, no_reloader, debugger, no_evalex, threaded, processes):
    """Start a new development server."""
    app = make_app()
    # The CLI exposes negative flags; invert them for run_simple.
    run_simple(
        hostname,
        port,
        app,
        use_reloader=not no_reloader,
        use_debugger=debugger,
        use_evalex=not no_evalex,
        threaded=threaded,
        processes=processes,
    )
constant[Start a new development server.]
variable[app] assign[=] call[name[make_app], parameter[]]
variable[reloader] assign[=] <ast.UnaryOp object at 0x7da204620100>
variable[evalex] assign[=] <ast.UnaryOp object at 0x7da204623760>
call[name[run_simple], parameter[name[hostname], name[port], name[app]]] | keyword[def] identifier[runserver] ( identifier[hostname] , identifier[port] , identifier[no_reloader] , identifier[debugger] , identifier[no_evalex] , identifier[threaded] , identifier[processes] ):
literal[string]
identifier[app] = identifier[make_app] ()
identifier[reloader] = keyword[not] identifier[no_reloader]
identifier[evalex] = keyword[not] identifier[no_evalex]
identifier[run_simple] (
identifier[hostname] ,
identifier[port] ,
identifier[app] ,
identifier[use_reloader] = identifier[reloader] ,
identifier[use_debugger] = identifier[debugger] ,
identifier[use_evalex] = identifier[evalex] ,
identifier[threaded] = identifier[threaded] ,
identifier[processes] = identifier[processes] ,
) | def runserver(hostname, port, no_reloader, debugger, no_evalex, threaded, processes):
"""Start a new development server."""
app = make_app()
reloader = not no_reloader
evalex = not no_evalex
run_simple(hostname, port, app, use_reloader=reloader, use_debugger=debugger, use_evalex=evalex, threaded=threaded, processes=processes) |
def cuid(self):
    """
    Generate a full-length cuid as a string.

    The result is the hardcoded prefix ``c`` followed by a base-36
    millisecond timestamp, a zero-padded per-instance counter block,
    the process fingerprint, and two random blocks.
    """
    # Milliseconds since the epoch, encoded in base 36.
    timestamp_block = _to_base36(int(time.time() * 1000))
    # The counter guards against collisions generated on the same
    # machine within the same millisecond.
    counter_block = _pad(_to_base36(self.counter), BLOCK_SIZE)
    parts = (
        "c",
        timestamp_block,
        counter_block,
        self.fingerprint,
        _random_block(),
        _random_block(),
    )
    return "".join(parts)
constant[
Generate a full-length cuid as a string.
]
variable[identifier] assign[=] constant[c]
variable[millis] assign[=] call[name[int], parameter[binary_operation[call[name[time].time, parameter[]] * constant[1000]]]]
<ast.AugAssign object at 0x7da204565210>
variable[count] assign[=] call[name[_pad], parameter[call[name[_to_base36], parameter[name[self].counter]], name[BLOCK_SIZE]]]
<ast.AugAssign object at 0x7da2045669e0>
<ast.AugAssign object at 0x7da204564220>
<ast.AugAssign object at 0x7da204564340>
<ast.AugAssign object at 0x7da204567fd0>
return[name[identifier]] | keyword[def] identifier[cuid] ( identifier[self] ):
literal[string]
identifier[identifier] = literal[string]
identifier[millis] = identifier[int] ( identifier[time] . identifier[time] ()* literal[int] )
identifier[identifier] += identifier[_to_base36] ( identifier[millis] )
identifier[count] = identifier[_pad] ( identifier[_to_base36] ( identifier[self] . identifier[counter] ), identifier[BLOCK_SIZE] )
identifier[identifier] += identifier[count]
identifier[identifier] += identifier[self] . identifier[fingerprint]
identifier[identifier] += identifier[_random_block] ()
identifier[identifier] += identifier[_random_block] ()
keyword[return] identifier[identifier] | def cuid(self):
"""
Generate a full-length cuid as a string.
"""
# start with a hardcoded lowercase c
identifier = 'c'
# add a timestamp in milliseconds since the epoch, in base 36
millis = int(time.time() * 1000)
identifier += _to_base36(millis)
# use a counter to ensure no collisions on the same machine
# in the same millisecond
count = _pad(_to_base36(self.counter), BLOCK_SIZE)
identifier += count
# add the process fingerprint
identifier += self.fingerprint
# add a couple of random blocks
identifier += _random_block()
identifier += _random_block()
return identifier |
def sigma_points(self, x, P):
    """
    Computes the simplex sigma points for an unscented Kalman filter
    given the mean (x) and covariance (P) of the filter.
    Returns the sigma points.
    Works with both scalar and array inputs:
    sigma_points (5, 9, 2) # mean 5, covariance 9
    sigma_points ([5, 2], 9*eye(2), 2) # means 5 and 2, covariance 9I
    Parameters
    ----------
    x : An array-like object of the means of length n
        Can be a scalar if 1D.
        examples: 1, [1,2], np.array([1,2])
    P : scalar, or np.array
        Covariance of the filter. If scalar, is treated as eye(n)*P.
    Returns
    -------
    sigmas : np.array, of size (n+1, n)
        Two dimensional array of sigma points. Each row contains one
        sigma point in the problem space.
        Ordered by Xi_0, Xi_{1..n}
    """
    if self.n != np.size(x):
        raise ValueError("expected size(x) {}, but size is {}".format(
            self.n, np.size(x)))

    n = self.n

    # Coerce the mean into an (n, 1) column vector.  np.asarray accepts
    # scalars, lists and ndarrays alike, as promised by the docstring
    # (the previous code reshaped the raw argument and crashed on plain
    # Python lists).
    x = np.asarray(x).reshape(-1, 1)

    if np.isscalar(P):
        P = np.eye(n) * P
    else:
        P = np.atleast_2d(P)

    # Matrix square root of the covariance (self.sqrt is configurable,
    # e.g. a Cholesky factorization).
    U = self.sqrt(P)

    lambda_ = n / (n + 1)

    # Build the (n, n+1) simplex vertex matrix Istar one dimension at a
    # time: each pass appends a column of zeros and a new bottom row.
    Istar = np.array([[-1/np.sqrt(2*lambda_), 1/np.sqrt(2*lambda_)]])
    for d in range(2, n+1):
        row = np.ones((1, Istar.shape[1] + 1)) * 1. / np.sqrt(lambda_*d*(d + 1))
        row[0, -1] = -d / np.sqrt(lambda_ * d * (d + 1))
        Istar = np.r_[np.c_[Istar, np.zeros((Istar.shape[0]))], row]

    I = np.sqrt(n)*Istar

    # Scale the unit simplex by the covariance square root and shift it
    # by the mean (subtracting the negated offset adds it).
    scaled_unitary = U.dot(I)
    sigmas = self.subtract(x, -scaled_unitary)
    return sigmas.T
constant[
Computes the implex sigma points for an unscented Kalman filter
given the mean (x) and covariance(P) of the filter.
Returns tuple of the sigma points and weights.
Works with both scalar and array inputs:
sigma_points (5, 9, 2) # mean 5, covariance 9
sigma_points ([5, 2], 9*eye(2), 2) # means 5 and 2, covariance 9I
Parameters
----------
x : An array-like object of the means of length n
Can be a scalar if 1D.
examples: 1, [1,2], np.array([1,2])
P : scalar, or np.array
Covariance of the filter. If scalar, is treated as eye(n)*P.
Returns
-------
sigmas : np.array, of size (n, n+1)
Two dimensional array of sigma points. Each column contains all of
the sigmas for one dimension in the problem space.
Ordered by Xi_0, Xi_{1..n}
]
if compare[name[self].n not_equal[!=] call[name[np].size, parameter[name[x]]]] begin[:]
<ast.Raise object at 0x7da1b26aca60>
variable[n] assign[=] name[self].n
if call[name[np].isscalar, parameter[name[x]]] begin[:]
variable[x] assign[=] call[name[np].asarray, parameter[list[[<ast.Name object at 0x7da1b26ac070>]]]]
variable[x] assign[=] call[name[x].reshape, parameter[<ast.UnaryOp object at 0x7da1b26ae9e0>, constant[1]]]
if call[name[np].isscalar, parameter[name[P]]] begin[:]
variable[P] assign[=] binary_operation[call[name[np].eye, parameter[name[n]]] * name[P]]
variable[U] assign[=] call[name[self].sqrt, parameter[name[P]]]
variable[lambda_] assign[=] binary_operation[name[n] / binary_operation[name[n] + constant[1]]]
variable[Istar] assign[=] call[name[np].array, parameter[list[[<ast.List object at 0x7da1b26adea0>]]]]
for taget[name[d]] in starred[call[name[range], parameter[constant[2], binary_operation[name[n] + constant[1]]]]] begin[:]
variable[row] assign[=] binary_operation[binary_operation[call[name[np].ones, parameter[tuple[[<ast.Constant object at 0x7da1b26ae170>, <ast.BinOp object at 0x7da1b26ad4e0>]]]] * constant[1.0]] / call[name[np].sqrt, parameter[binary_operation[binary_operation[name[lambda_] * name[d]] * binary_operation[name[d] + constant[1]]]]]]
call[name[row]][tuple[[<ast.Constant object at 0x7da1b26ade10>, <ast.UnaryOp object at 0x7da1b26afca0>]]] assign[=] binary_operation[<ast.UnaryOp object at 0x7da1b26ae050> / call[name[np].sqrt, parameter[binary_operation[binary_operation[name[lambda_] * name[d]] * binary_operation[name[d] + constant[1]]]]]]
variable[Istar] assign[=] call[name[np].r_][tuple[[<ast.Subscript object at 0x7da1b26ae8c0>, <ast.Name object at 0x7da1b26ac640>]]]
variable[I] assign[=] binary_operation[call[name[np].sqrt, parameter[name[n]]] * name[Istar]]
variable[scaled_unitary] assign[=] call[name[U].dot, parameter[name[I]]]
variable[sigmas] assign[=] call[name[self].subtract, parameter[name[x], <ast.UnaryOp object at 0x7da1b26af2b0>]]
return[name[sigmas].T] | keyword[def] identifier[sigma_points] ( identifier[self] , identifier[x] , identifier[P] ):
literal[string]
keyword[if] identifier[self] . identifier[n] != identifier[np] . identifier[size] ( identifier[x] ):
keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] (
identifier[self] . identifier[n] , identifier[np] . identifier[size] ( identifier[x] )))
identifier[n] = identifier[self] . identifier[n]
keyword[if] identifier[np] . identifier[isscalar] ( identifier[x] ):
identifier[x] = identifier[np] . identifier[asarray] ([ identifier[x] ])
identifier[x] = identifier[x] . identifier[reshape] (- literal[int] , literal[int] )
keyword[if] identifier[np] . identifier[isscalar] ( identifier[P] ):
identifier[P] = identifier[np] . identifier[eye] ( identifier[n] )* identifier[P]
keyword[else] :
identifier[P] = identifier[np] . identifier[atleast_2d] ( identifier[P] )
identifier[U] = identifier[self] . identifier[sqrt] ( identifier[P] )
identifier[lambda_] = identifier[n] /( identifier[n] + literal[int] )
identifier[Istar] = identifier[np] . identifier[array] ([[- literal[int] / identifier[np] . identifier[sqrt] ( literal[int] * identifier[lambda_] ), literal[int] / identifier[np] . identifier[sqrt] ( literal[int] * identifier[lambda_] )]])
keyword[for] identifier[d] keyword[in] identifier[range] ( literal[int] , identifier[n] + literal[int] ):
identifier[row] = identifier[np] . identifier[ones] (( literal[int] , identifier[Istar] . identifier[shape] [ literal[int] ]+ literal[int] ))* literal[int] / identifier[np] . identifier[sqrt] ( identifier[lambda_] * identifier[d] *( identifier[d] + literal[int] ))
identifier[row] [ literal[int] ,- literal[int] ]=- identifier[d] / identifier[np] . identifier[sqrt] ( identifier[lambda_] * identifier[d] *( identifier[d] + literal[int] ))
identifier[Istar] = identifier[np] . identifier[r_] [ identifier[np] . identifier[c_] [ identifier[Istar] , identifier[np] . identifier[zeros] (( identifier[Istar] . identifier[shape] [ literal[int] ]))], identifier[row] ]
identifier[I] = identifier[np] . identifier[sqrt] ( identifier[n] )* identifier[Istar]
identifier[scaled_unitary] = identifier[U] . identifier[dot] ( identifier[I] )
identifier[sigmas] = identifier[self] . identifier[subtract] ( identifier[x] ,- identifier[scaled_unitary] )
keyword[return] identifier[sigmas] . identifier[T] | def sigma_points(self, x, P):
"""
Computes the implex sigma points for an unscented Kalman filter
given the mean (x) and covariance(P) of the filter.
Returns tuple of the sigma points and weights.
Works with both scalar and array inputs:
sigma_points (5, 9, 2) # mean 5, covariance 9
sigma_points ([5, 2], 9*eye(2), 2) # means 5 and 2, covariance 9I
Parameters
----------
x : An array-like object of the means of length n
Can be a scalar if 1D.
examples: 1, [1,2], np.array([1,2])
P : scalar, or np.array
Covariance of the filter. If scalar, is treated as eye(n)*P.
Returns
-------
sigmas : np.array, of size (n, n+1)
Two dimensional array of sigma points. Each column contains all of
the sigmas for one dimension in the problem space.
Ordered by Xi_0, Xi_{1..n}
"""
if self.n != np.size(x):
raise ValueError('expected size(x) {}, but size is {}'.format(self.n, np.size(x))) # depends on [control=['if'], data=[]]
n = self.n
if np.isscalar(x):
x = np.asarray([x]) # depends on [control=['if'], data=[]]
x = x.reshape(-1, 1)
if np.isscalar(P):
P = np.eye(n) * P # depends on [control=['if'], data=[]]
else:
P = np.atleast_2d(P)
U = self.sqrt(P)
lambda_ = n / (n + 1)
Istar = np.array([[-1 / np.sqrt(2 * lambda_), 1 / np.sqrt(2 * lambda_)]])
for d in range(2, n + 1):
row = np.ones((1, Istar.shape[1] + 1)) * 1.0 / np.sqrt(lambda_ * d * (d + 1))
row[0, -1] = -d / np.sqrt(lambda_ * d * (d + 1))
Istar = np.r_[np.c_[Istar, np.zeros(Istar.shape[0])], row] # depends on [control=['for'], data=['d']]
I = np.sqrt(n) * Istar
scaled_unitary = U.dot(I)
sigmas = self.subtract(x, -scaled_unitary)
return sigmas.T |
def datasetItems(self):
    """
    Returns the items in this scene mapped with their dataset instance.

    :return {<XChartDataset>: <QGraphicsItem>, ..}
    """
    # Only dataset items participate in the mapping; any other graphics
    # items in the scene are ignored.
    return {item.dataset(): item
            for item in self.items()
            if isinstance(item, XChartDatasetItem)}
constant[
Returns the items in this scene mapped with their dataset instance.
:return {<XChartDataset>: <QGraphicsItem>, ..}
]
variable[out] assign[=] dictionary[[], []]
for taget[name[item]] in starred[call[name[self].items, parameter[]]] begin[:]
if call[name[isinstance], parameter[name[item], name[XChartDatasetItem]]] begin[:]
call[name[out]][call[name[item].dataset, parameter[]]] assign[=] name[item]
return[name[out]] | keyword[def] identifier[datasetItems] ( identifier[self] ):
literal[string]
identifier[out] ={}
keyword[for] identifier[item] keyword[in] identifier[self] . identifier[items] ():
keyword[if] identifier[isinstance] ( identifier[item] , identifier[XChartDatasetItem] ):
identifier[out] [ identifier[item] . identifier[dataset] ()]= identifier[item]
keyword[return] identifier[out] | def datasetItems(self):
"""
Returns the items in this scene mapped with their dataset instance.
:return {<XChartDataset>: <QGraphicsItem>, ..}
"""
out = {}
for item in self.items():
if isinstance(item, XChartDatasetItem):
out[item.dataset()] = item # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['item']]
return out |
def init_app(self, app):
    """Flask application initialization."""
    self.init_config(app)
    # Register the REST state object on the application so other
    # components can look it up through app.extensions.
    state = _RecordRESTState(app)
    app.extensions['invenio-records-rest'] = state
constant[Flask application initialization.]
call[name[self].init_config, parameter[name[app]]]
call[name[app].extensions][constant[invenio-records-rest]] assign[=] call[name[_RecordRESTState], parameter[name[app]]] | keyword[def] identifier[init_app] ( identifier[self] , identifier[app] ):
literal[string]
identifier[self] . identifier[init_config] ( identifier[app] )
identifier[app] . identifier[extensions] [ literal[string] ]= identifier[_RecordRESTState] ( identifier[app] ) | def init_app(self, app):
"""Flask application initialization."""
self.init_config(app)
app.extensions['invenio-records-rest'] = _RecordRESTState(app) |
def addButton(
    self,
    fnc,
    states=("On", "Off"),
    c=("w", "w"),
    bc=("dg", "dr"),
    pos=(20, 40),
    size=24,
    font="arial",
    bold=False,
    italic=False,
    alpha=1,
    angle=0,
):
    """Add a button to the renderer window.

    :param fnc: callback invoked when the button is pressed
    :param list states: a list of possible states ['On', 'Off']
    :param c: a list of colors for each state
    :param bc: a list of background colors for each state
    :param pos: 2D position in pixels from left-bottom corner
    :param size: size of button font
    :param str font: font type (arial, courier, times)
    :param bool bold: bold face (False)
    :param bool italic: italic face (False)
    :param float alpha: opacity level
    :param float angle: anticlockwise rotation in degrees

    .. hint:: |buttons| |buttons.py|_
    """
    # Thin wrapper: all the work happens in the addons module.
    return addons.addButton(
        fnc, states, c, bc, pos, size, font, bold, italic, alpha, angle
    )
constant[Add a button to the renderer window.
:param list states: a list of possible states ['On', 'Off']
:param c: a list of colors for each state
:param bc: a list of background colors for each state
:param pos: 2D position in pixels from left-bottom corner
:param size: size of button font
:param str font: font type (arial, courier, times)
:param bool bold: bold face (False)
:param bool italic: italic face (False)
:param float alpha: opacity level
:param float angle: anticlockwise rotation in degrees
.. hint:: |buttons| |buttons.py|_
]
return[call[name[addons].addButton, parameter[name[fnc], name[states], name[c], name[bc], name[pos], name[size], name[font], name[bold], name[italic], name[alpha], name[angle]]]] | keyword[def] identifier[addButton] (
identifier[self] ,
identifier[fnc] ,
identifier[states] =( literal[string] , literal[string] ),
identifier[c] =( literal[string] , literal[string] ),
identifier[bc] =( literal[string] , literal[string] ),
identifier[pos] =( literal[int] , literal[int] ),
identifier[size] = literal[int] ,
identifier[font] = literal[string] ,
identifier[bold] = keyword[False] ,
identifier[italic] = keyword[False] ,
identifier[alpha] = literal[int] ,
identifier[angle] = literal[int] ,
):
literal[string]
keyword[return] identifier[addons] . identifier[addButton] ( identifier[fnc] , identifier[states] , identifier[c] , identifier[bc] , identifier[pos] , identifier[size] , identifier[font] , identifier[bold] , identifier[italic] , identifier[alpha] , identifier[angle] ) | def addButton(self, fnc, states=('On', 'Off'), c=('w', 'w'), bc=('dg', 'dr'), pos=(20, 40), size=24, font='arial', bold=False, italic=False, alpha=1, angle=0):
"""Add a button to the renderer window.
:param list states: a list of possible states ['On', 'Off']
:param c: a list of colors for each state
:param bc: a list of background colors for each state
:param pos: 2D position in pixels from left-bottom corner
:param size: size of button font
:param str font: font type (arial, courier, times)
:param bool bold: bold face (False)
:param bool italic: italic face (False)
:param float alpha: opacity level
:param float angle: anticlockwise rotation in degrees
.. hint:: |buttons| |buttons.py|_
"""
return addons.addButton(fnc, states, c, bc, pos, size, font, bold, italic, alpha, angle) |
def _set_designated_forwarder_hold_time(self, v, load=False):
  """
  Setter method for designated_forwarder_hold_time, mapped from YANG variable /cluster/designated_forwarder_hold_time (uint16)
  If this variable is read-only (config: false) in the
  source YANG file, then _set_designated_forwarder_hold_time is considered as a private
  method. Backends looking to populate this variable should
  do so via calling thisObj._set_designated_forwarder_hold_time() directly.
  """
  # Values carrying their own YANG type converter normalise themselves first.
  if hasattr(v, "_utype"):
    v = v._utype(v)
  try:
    # Wrap the value in the generated YANG type: a uint16 further
    # restricted to the range 1..60 (hold time in seconds).
    t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), restriction_dict={'range': [u'1..60']}), is_leaf=True, yang_name="designated-forwarder-hold-time", rest_name="designated-forwarder-hold-time", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Time in seconds to wait before electing a designated forwarder (Range:<1-60>, default:3)', u'cli-suppress-show-conf-path': None, u'cli-suppress-show-match': None}}, namespace='urn:brocade.com:mgmt:brocade-mct', defining_module='brocade-mct', yang_type='uint16', is_config=True)
  except (TypeError, ValueError):
    # Re-raise with the structured metadata this generated binding uses
    # to report type errors to callers.
    raise ValueError({
      'error-string': """designated_forwarder_hold_time must be of a type compatible with uint16""",
      'defined-type': "uint16",
      'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), restriction_dict={'range': [u'1..60']}), is_leaf=True, yang_name="designated-forwarder-hold-time", rest_name="designated-forwarder-hold-time", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Time in seconds to wait before electing a designated forwarder (Range:<1-60>, default:3)', u'cli-suppress-show-conf-path': None, u'cli-suppress-show-match': None}}, namespace='urn:brocade.com:mgmt:brocade-mct', defining_module='brocade-mct', yang_type='uint16', is_config=True)""",
    })
  # Store the wrapped value and notify the parent container, if any.
  self.__designated_forwarder_hold_time = t
  if hasattr(self, '_set'):
    self._set()
constant[
Setter method for designated_forwarder_hold_time, mapped from YANG variable /cluster/designated_forwarder_hold_time (uint16)
If this variable is read-only (config: false) in the
source YANG file, then _set_designated_forwarder_hold_time is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_designated_forwarder_hold_time() directly.
]
if call[name[hasattr], parameter[name[v], constant[_utype]]] begin[:]
variable[v] assign[=] call[name[v]._utype, parameter[name[v]]]
<ast.Try object at 0x7da18f722890>
name[self].__designated_forwarder_hold_time assign[=] name[t]
if call[name[hasattr], parameter[name[self], constant[_set]]] begin[:]
call[name[self]._set, parameter[]] | keyword[def] identifier[_set_designated_forwarder_hold_time] ( identifier[self] , identifier[v] , identifier[load] = keyword[False] ):
literal[string]
keyword[if] identifier[hasattr] ( identifier[v] , literal[string] ):
identifier[v] = identifier[v] . identifier[_utype] ( identifier[v] )
keyword[try] :
identifier[t] = identifier[YANGDynClass] ( identifier[v] , identifier[base] = identifier[RestrictedClassType] ( identifier[base_type] = identifier[RestrictedClassType] ( identifier[base_type] = identifier[int] , identifier[restriction_dict] ={ literal[string] :[ literal[string] ]}, identifier[int_size] = literal[int] ), identifier[restriction_dict] ={ literal[string] :[ literal[string] ]}), identifier[is_leaf] = keyword[True] , identifier[yang_name] = literal[string] , identifier[rest_name] = literal[string] , identifier[parent] = identifier[self] , identifier[path_helper] = identifier[self] . identifier[_path_helper] , identifier[extmethods] = identifier[self] . identifier[_extmethods] , identifier[register_paths] = keyword[True] , identifier[extensions] ={ literal[string] :{ literal[string] : literal[string] , literal[string] : keyword[None] , literal[string] : keyword[None] }}, identifier[namespace] = literal[string] , identifier[defining_module] = literal[string] , identifier[yang_type] = literal[string] , identifier[is_config] = keyword[True] )
keyword[except] ( identifier[TypeError] , identifier[ValueError] ):
keyword[raise] identifier[ValueError] ({
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
})
identifier[self] . identifier[__designated_forwarder_hold_time] = identifier[t]
keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ):
identifier[self] . identifier[_set] () | def _set_designated_forwarder_hold_time(self, v, load=False):
"""
Setter method for designated_forwarder_hold_time, mapped from YANG variable /cluster/designated_forwarder_hold_time (uint16)
If this variable is read-only (config: false) in the
source YANG file, then _set_designated_forwarder_hold_time is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_designated_forwarder_hold_time() directly.
"""
if hasattr(v, '_utype'):
v = v._utype(v) # depends on [control=['if'], data=[]]
try:
t = YANGDynClass(v, base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']}, int_size=16), restriction_dict={'range': [u'1..60']}), is_leaf=True, yang_name='designated-forwarder-hold-time', rest_name='designated-forwarder-hold-time', parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Time in seconds to wait before electing a designated forwarder (Range:<1-60>, default:3)', u'cli-suppress-show-conf-path': None, u'cli-suppress-show-match': None}}, namespace='urn:brocade.com:mgmt:brocade-mct', defining_module='brocade-mct', yang_type='uint16', is_config=True) # depends on [control=['try'], data=[]]
except (TypeError, ValueError):
raise ValueError({'error-string': 'designated_forwarder_hold_time must be of a type compatible with uint16', 'defined-type': 'uint16', 'generated-type': 'YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={\'range\': [\'0..65535\']},int_size=16), restriction_dict={\'range\': [u\'1..60\']}), is_leaf=True, yang_name="designated-forwarder-hold-time", rest_name="designated-forwarder-hold-time", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u\'tailf-common\': {u\'info\': u\'Time in seconds to wait before electing a designated forwarder (Range:<1-60>, default:3)\', u\'cli-suppress-show-conf-path\': None, u\'cli-suppress-show-match\': None}}, namespace=\'urn:brocade.com:mgmt:brocade-mct\', defining_module=\'brocade-mct\', yang_type=\'uint16\', is_config=True)'}) # depends on [control=['except'], data=[]]
self.__designated_forwarder_hold_time = t
if hasattr(self, '_set'):
self._set() # depends on [control=['if'], data=[]] |
def _query_api(self, params, direct=False):
    """
    Perform a GET request against the API.

    :param params: dict of query parameters; when ``direct`` is true it
        must contain an ``id`` entry naming the resource (it is popped
        from the dict and becomes part of the URL).
    :param direct: when true, address a single resource by its id
        instead of querying the collection endpoint.
    :return: requests.models.Response
    """
    if not direct:
        return get(
            self.URI_BASE + self.RESOURCE_NAME, params=params, headers=self._headers
        )
    # Pop the id so it is not also sent as a query parameter; renamed
    # from `id` to avoid shadowing the builtin.
    resource_id = params.pop("id")
    return get(
        self.URI_BASE + self.RESOURCE_NAME + "/{0}".format(resource_id),
        params=params,
        headers=self._headers,
    )
constant[
:param params: dict
:return: requests.models.Response
]
if <ast.UnaryOp object at 0x7da1b2504bb0> begin[:]
return[call[name[get], parameter[binary_operation[name[self].URI_BASE + name[self].RESOURCE_NAME]]]]
variable[id] assign[=] call[name[params].pop, parameter[constant[id]]]
return[call[name[get], parameter[binary_operation[binary_operation[name[self].URI_BASE + name[self].RESOURCE_NAME] + call[constant[/{0}].format, parameter[name[id]]]]]]] | keyword[def] identifier[_query_api] ( identifier[self] , identifier[params] , identifier[direct] = keyword[False] ):
literal[string]
keyword[if] keyword[not] identifier[direct] :
keyword[return] identifier[get] (
identifier[self] . identifier[URI_BASE] + identifier[self] . identifier[RESOURCE_NAME] , identifier[params] = identifier[params] , identifier[headers] = identifier[self] . identifier[_headers]
)
identifier[id] = identifier[params] . identifier[pop] ( literal[string] )
keyword[return] identifier[get] (
identifier[self] . identifier[URI_BASE] + identifier[self] . identifier[RESOURCE_NAME] + literal[string] . identifier[format] ( identifier[id] ),
identifier[params] = identifier[params] ,
identifier[headers] = identifier[self] . identifier[_headers] ,
) | def _query_api(self, params, direct=False):
"""
:param params: dict
:return: requests.models.Response
"""
if not direct:
return get(self.URI_BASE + self.RESOURCE_NAME, params=params, headers=self._headers) # depends on [control=['if'], data=[]]
id = params.pop('id')
return get(self.URI_BASE + self.RESOURCE_NAME + '/{0}'.format(id), params=params, headers=self._headers) |
def start(context, mip_config, email, priority, dryrun, command, start_with, family):
    """Start a new analysis."""
    mip_cli = MipCli(context.obj['script'])
    # Fall back to configured values when the options were not given.
    email = email or environ_email()
    kwargs = dict(
        config=mip_config or context.obj['mip_config'],
        family=family,
        priority=priority,
        email=email,
        dryrun=dryrun,
        start_with=start_with,
    )
    if command:
        # Only print the assembled command line; do not run it.
        click.echo(' '.join(mip_cli.build_command(**kwargs)))
        return
    try:
        mip_cli(**kwargs)
        if not dryrun:
            # Track the newly started analysis as pending.
            context.obj['store'].add_pending(family, email=email)
    except MipStartError as error:
        click.echo(click.style(error.message, fg='red'))
constant[Start a new analysis.]
variable[mip_cli] assign[=] call[name[MipCli], parameter[call[name[context].obj][constant[script]]]]
variable[mip_config] assign[=] <ast.BoolOp object at 0x7da1b1a77e20>
variable[email] assign[=] <ast.BoolOp object at 0x7da1b1a774c0>
variable[kwargs] assign[=] call[name[dict], parameter[]]
if name[command] begin[:]
variable[mip_command] assign[=] call[name[mip_cli].build_command, parameter[]]
call[name[click].echo, parameter[call[constant[ ].join, parameter[name[mip_command]]]]] | keyword[def] identifier[start] ( identifier[context] , identifier[mip_config] , identifier[email] , identifier[priority] , identifier[dryrun] , identifier[command] , identifier[start_with] , identifier[family] ):
literal[string]
identifier[mip_cli] = identifier[MipCli] ( identifier[context] . identifier[obj] [ literal[string] ])
identifier[mip_config] = identifier[mip_config] keyword[or] identifier[context] . identifier[obj] [ literal[string] ]
identifier[email] = identifier[email] keyword[or] identifier[environ_email] ()
identifier[kwargs] = identifier[dict] ( identifier[config] = identifier[mip_config] , identifier[family] = identifier[family] , identifier[priority] = identifier[priority] , identifier[email] = identifier[email] , identifier[dryrun] = identifier[dryrun] , identifier[start_with] = identifier[start_with] )
keyword[if] identifier[command] :
identifier[mip_command] = identifier[mip_cli] . identifier[build_command] (** identifier[kwargs] )
identifier[click] . identifier[echo] ( literal[string] . identifier[join] ( identifier[mip_command] ))
keyword[else] :
keyword[try] :
identifier[mip_cli] (** identifier[kwargs] )
keyword[if] keyword[not] identifier[dryrun] :
identifier[context] . identifier[obj] [ literal[string] ]. identifier[add_pending] ( identifier[family] , identifier[email] = identifier[email] )
keyword[except] identifier[MipStartError] keyword[as] identifier[error] :
identifier[click] . identifier[echo] ( identifier[click] . identifier[style] ( identifier[error] . identifier[message] , identifier[fg] = literal[string] )) | def start(context, mip_config, email, priority, dryrun, command, start_with, family):
"""Start a new analysis."""
mip_cli = MipCli(context.obj['script'])
mip_config = mip_config or context.obj['mip_config']
email = email or environ_email()
kwargs = dict(config=mip_config, family=family, priority=priority, email=email, dryrun=dryrun, start_with=start_with)
if command:
mip_command = mip_cli.build_command(**kwargs)
click.echo(' '.join(mip_command)) # depends on [control=['if'], data=[]]
else:
try:
mip_cli(**kwargs)
if not dryrun:
context.obj['store'].add_pending(family, email=email) # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except MipStartError as error:
click.echo(click.style(error.message, fg='red')) # depends on [control=['except'], data=['error']] |
def get_first_result(threads):
    """ this blocks, waiting for the first result that returns from a thread

    Polls the threads round-robin, sleeping inside ``join`` between
    checks instead of busy-waiting (the previous implementation spun a
    CPU core at 100% until a thread finished).

    :type threads: list[Thread]
    :raises ValueError: if ``threads`` is empty (previously this hung forever)
    """
    if not threads:
        raise ValueError("no threads to wait for")
    while True:
        for thread in threads:
            # join with a short timeout yields the CPU while we wait.
            thread.join(timeout=0.01)
            if not thread.is_alive():
                return thread.queue.get()
constant[ this blocks, waiting for the first result that returns from a thread
:type threads: list[Thread]
]
while constant[True] begin[:]
for taget[name[thread]] in starred[name[threads]] begin[:]
if <ast.UnaryOp object at 0x7da1b1472590> begin[:]
return[call[name[thread].queue.get, parameter[]]] | keyword[def] identifier[get_first_result] ( identifier[threads] ):
literal[string]
keyword[while] keyword[True] :
keyword[for] identifier[thread] keyword[in] identifier[threads] :
keyword[if] keyword[not] identifier[thread] . identifier[is_alive] ():
keyword[return] identifier[thread] . identifier[queue] . identifier[get] () | def get_first_result(threads):
""" this blocks, waiting for the first result that returns from a thread
:type threads: list[Thread]
"""
while True:
for thread in threads:
if not thread.is_alive():
return thread.queue.get() # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['thread']] # depends on [control=['while'], data=[]] |
def download(self, storagemodel: object, modeldefinition=None):
    """Fetch the blob behind *storagemodel* from Azure storage and merge it in.

    Looks the blob up in ``modeldefinition['container']`` via
    ``modeldefinition['blobservice']``; when it exists, downloads its bytes and
    hands them to ``storagemodel.__mergeblob__``. When it does not exist the
    model is returned untouched.

    :param storagemodel: blob model instance; its ``name`` identifies the blob
    :param modeldefinition: dict carrying the ``blobservice`` client and the
        ``container`` name for this model type
    :raises AzureStorageWrapException: when ``storagemodel.name`` is unset, or
        when the storage service raises during the download
    :return: *storagemodel*, with the downloaded blob merged in when found
    """
    if storagemodel.name is None:
        # Without a name there is nothing to look up in storage.
        raise AzureStorageWrapException(storagemodel, "StorageBlobModel does not contain content nor content settings")

    service = modeldefinition['blobservice']
    target_container = modeldefinition['container']
    target_blob = storagemodel.name
    try:
        if service.exists(target_container, target_blob):
            # download blob content and fold it into the model instance
            fetched = service.get_blob_to_bytes(
                container_name=target_container,
                blob_name=target_blob
            )
            storagemodel.__mergeblob__(fetched)
    except Exception as e:
        msg = 'can not load blob from container {} because {!s}'.format(storagemodel._containername, e)
        raise AzureStorageWrapException(storagemodel, msg=msg)
    return storagemodel
constant[ load blob from storage into StorageBlobModelInstance ]
if compare[name[storagemodel].name is constant[None]] begin[:]
<ast.Raise object at 0x7da18bcc9270>
return[name[storagemodel]] | keyword[def] identifier[download] ( identifier[self] , identifier[storagemodel] : identifier[object] , identifier[modeldefinition] = keyword[None] ):
literal[string]
keyword[if] ( identifier[storagemodel] . identifier[name] keyword[is] keyword[None] ):
keyword[raise] identifier[AzureStorageWrapException] ( identifier[storagemodel] , literal[string] )
keyword[else] :
identifier[container_name] = identifier[modeldefinition] [ literal[string] ]
identifier[blob_name] = identifier[storagemodel] . identifier[name]
keyword[try] :
keyword[if] identifier[modeldefinition] [ literal[string] ]. identifier[exists] ( identifier[container_name] , identifier[blob_name] ):
literal[string]
identifier[blob] = identifier[modeldefinition] [ literal[string] ]. identifier[get_blob_to_bytes] (
identifier[container_name] = identifier[modeldefinition] [ literal[string] ],
identifier[blob_name] = identifier[storagemodel] . identifier[name]
)
identifier[storagemodel] . identifier[__mergeblob__] ( identifier[blob] )
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[msg] = literal[string] . identifier[format] ( identifier[storagemodel] . identifier[_containername] , identifier[e] )
keyword[raise] identifier[AzureStorageWrapException] ( identifier[storagemodel] , identifier[msg] = identifier[msg] )
keyword[return] identifier[storagemodel] | def download(self, storagemodel: object, modeldefinition=None):
""" load blob from storage into StorageBlobModelInstance """
if storagemodel.name is None:
# No content to download
raise AzureStorageWrapException(storagemodel, 'StorageBlobModel does not contain content nor content settings') # depends on [control=['if'], data=[]]
else:
container_name = modeldefinition['container']
blob_name = storagemodel.name
try:
if modeldefinition['blobservice'].exists(container_name, blob_name):
' download blob '
blob = modeldefinition['blobservice'].get_blob_to_bytes(container_name=modeldefinition['container'], blob_name=storagemodel.name)
storagemodel.__mergeblob__(blob) # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except Exception as e:
msg = 'can not load blob from container {} because {!s}'.format(storagemodel._containername, e)
raise AzureStorageWrapException(storagemodel, msg=msg) # depends on [control=['except'], data=['e']]
return storagemodel |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.