code
stringlengths 75
104k
| code_sememe
stringlengths 47
309k
| token_type
stringlengths 215
214k
| code_dependency
stringlengths 75
155k
|
|---|---|---|---|
def _load_data(self, band):
    """From Morrissey+ 2005, with the actual data coming from
    http://www.astro.caltech.edu/~capak/filters/. According to the latter,
    these are in QE units and thus need to be multiplied by the wavelength
    when integrating per-energy.
    """
    # `band` should be 'nuv' or 'fuv'
    filename = 'filter_galex_%s.dat' % band
    frame = bandpass_data_frame(filename, 'wlen resp')
    # Convert quantum-efficiency response to the equal-energy convention.
    frame.resp *= frame.wlen
    return frame
|
def function[_load_data, parameter[self, band]]:
constant[From Morrissey+ 2005, with the actual data coming from
http://www.astro.caltech.edu/~capak/filters/. According to the latter,
these are in QE units and thus need to be multiplied by the wavelength
when integrating per-energy.
]
variable[df] assign[=] call[name[bandpass_data_frame], parameter[binary_operation[binary_operation[constant[filter_galex_] + name[band]] + constant[.dat]], constant[wlen resp]]]
<ast.AugAssign object at 0x7da1b27bace0>
return[name[df]]
|
keyword[def] identifier[_load_data] ( identifier[self] , identifier[band] ):
literal[string]
identifier[df] = identifier[bandpass_data_frame] ( literal[string] + identifier[band] + literal[string] , literal[string] )
identifier[df] . identifier[resp] *= identifier[df] . identifier[wlen]
keyword[return] identifier[df]
|
def _load_data(self, band):
"""From Morrissey+ 2005, with the actual data coming from
http://www.astro.caltech.edu/~capak/filters/. According to the latter,
these are in QE units and thus need to be multiplied by the wavelength
when integrating per-energy.
"""
# `band` should be 'nuv' or 'fuv'
df = bandpass_data_frame('filter_galex_' + band + '.dat', 'wlen resp')
df.resp *= df.wlen # QE -> EE response convention.
return df
|
def _get_parser(description):
    """Build an ArgumentParser with common arguments for both operations."""
    p = argparse.ArgumentParser(description=description)
    # Positional arguments first, then the optional flags.
    p.add_argument('key', help="Camellia key.")
    p.add_argument(
        'input_file', nargs='*',
        help="File(s) to read as input data. If none are "
             "provided, assume STDIN.")
    p.add_argument(
        '-o', '--output_file',
        help="Output file. If not provided, assume STDOUT.")
    p.add_argument(
        '-l', '--keylen', type=int, default=128,
        help="Length of 'key' in bits, must be in one of %s "
             "(default 128)." % camcrypt.ACCEPTABLE_KEY_LENGTHS)
    p.add_argument(
        '-H', '--hexkey', action='store_true',
        help="Treat 'key' as a hex string rather than binary.")
    return p
|
def function[_get_parser, parameter[description]]:
constant[Build an ArgumentParser with common arguments for both operations.]
variable[parser] assign[=] call[name[argparse].ArgumentParser, parameter[]]
call[name[parser].add_argument, parameter[constant[key]]]
call[name[parser].add_argument, parameter[constant[input_file]]]
call[name[parser].add_argument, parameter[constant[-o], constant[--output_file]]]
call[name[parser].add_argument, parameter[constant[-l], constant[--keylen]]]
call[name[parser].add_argument, parameter[constant[-H], constant[--hexkey]]]
return[name[parser]]
|
keyword[def] identifier[_get_parser] ( identifier[description] ):
literal[string]
identifier[parser] = identifier[argparse] . identifier[ArgumentParser] ( identifier[description] = identifier[description] )
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[help] = literal[string] )
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[nargs] = literal[string] ,
identifier[help] = literal[string]
literal[string] )
identifier[parser] . identifier[add_argument] ( literal[string] , literal[string] ,
identifier[help] = literal[string] )
identifier[parser] . identifier[add_argument] ( literal[string] , literal[string] , identifier[type] = identifier[int] , identifier[default] = literal[int] ,
identifier[help] = literal[string]
literal[string] % identifier[camcrypt] . identifier[ACCEPTABLE_KEY_LENGTHS] )
identifier[parser] . identifier[add_argument] ( literal[string] , literal[string] , identifier[action] = literal[string] ,
identifier[help] = literal[string] )
keyword[return] identifier[parser]
|
def _get_parser(description):
"""Build an ArgumentParser with common arguments for both operations."""
parser = argparse.ArgumentParser(description=description)
parser.add_argument('key', help='Camellia key.')
parser.add_argument('input_file', nargs='*', help='File(s) to read as input data. If none are provided, assume STDIN.')
parser.add_argument('-o', '--output_file', help='Output file. If not provided, assume STDOUT.')
parser.add_argument('-l', '--keylen', type=int, default=128, help="Length of 'key' in bits, must be in one of %s (default 128)." % camcrypt.ACCEPTABLE_KEY_LENGTHS)
parser.add_argument('-H', '--hexkey', action='store_true', help="Treat 'key' as a hex string rather than binary.")
return parser
|
def download_extract(url):
    """Download the zip archive at `url` and extract its contents into
    ``env.WEATHER_DATA_PATH``.

    :param url: URL of a zip archive to fetch.
    """
    logger.info("Downloading %s", url)
    request = urllib2.Request(url)
    request.add_header('User-Agent',
                       'caelum/0.1 +https://github.com/nrcharles/caelum')
    opener = urllib2.build_opener()
    # TemporaryFile is deleted automatically when the `with` block exits,
    # so no explicit close/unlink of `local_file` is needed.
    with tempfile.TemporaryFile(suffix='.zip', dir=env.WEATHER_DATA_PATH) \
            as local_file:
        logger.debug('Saving to temporary file %s', local_file.name)
        local_file.write(opener.open(request).read())
        # ZipFile seeks within the still-open temporary file itself; use a
        # context manager so the archive handle is always closed.
        with zipfile.ZipFile(local_file, 'r') as compressed_file:
            logger.debug('Extracting %s', compressed_file)
            compressed_file.extractall(env.WEATHER_DATA_PATH)
|
def function[download_extract, parameter[url]]:
constant[download and extract file.]
call[name[logger].info, parameter[constant[Downloading %s], name[url]]]
variable[request] assign[=] call[name[urllib2].Request, parameter[name[url]]]
call[name[request].add_header, parameter[constant[User-Agent], constant[caelum/0.1 +https://github.com/nrcharles/caelum]]]
variable[opener] assign[=] call[name[urllib2].build_opener, parameter[]]
with call[name[tempfile].TemporaryFile, parameter[]] begin[:]
call[name[logger].debug, parameter[constant[Saving to temporary file %s], name[local_file].name]]
call[name[local_file].write, parameter[call[call[name[opener].open, parameter[name[request]]].read, parameter[]]]]
variable[compressed_file] assign[=] call[name[zipfile].ZipFile, parameter[name[local_file], constant[r]]]
call[name[logger].debug, parameter[constant[Extracting %s], name[compressed_file]]]
call[name[compressed_file].extractall, parameter[name[env].WEATHER_DATA_PATH]]
call[name[local_file].close, parameter[]]
|
keyword[def] identifier[download_extract] ( identifier[url] ):
literal[string]
identifier[logger] . identifier[info] ( literal[string] , identifier[url] )
identifier[request] = identifier[urllib2] . identifier[Request] ( identifier[url] )
identifier[request] . identifier[add_header] ( literal[string] ,
literal[string] )
identifier[opener] = identifier[urllib2] . identifier[build_opener] ()
keyword[with] identifier[tempfile] . identifier[TemporaryFile] ( identifier[suffix] = literal[string] , identifier[dir] = identifier[env] . identifier[WEATHER_DATA_PATH] ) keyword[as] identifier[local_file] :
identifier[logger] . identifier[debug] ( literal[string] , identifier[local_file] . identifier[name] )
identifier[local_file] . identifier[write] ( identifier[opener] . identifier[open] ( identifier[request] ). identifier[read] ())
identifier[compressed_file] = identifier[zipfile] . identifier[ZipFile] ( identifier[local_file] , literal[string] )
identifier[logger] . identifier[debug] ( literal[string] , identifier[compressed_file] )
identifier[compressed_file] . identifier[extractall] ( identifier[env] . identifier[WEATHER_DATA_PATH] )
identifier[local_file] . identifier[close] ()
|
def download_extract(url):
"""download and extract file."""
logger.info('Downloading %s', url)
request = urllib2.Request(url)
request.add_header('User-Agent', 'caelum/0.1 +https://github.com/nrcharles/caelum')
opener = urllib2.build_opener()
with tempfile.TemporaryFile(suffix='.zip', dir=env.WEATHER_DATA_PATH) as local_file:
logger.debug('Saving to temporary file %s', local_file.name)
local_file.write(opener.open(request).read())
compressed_file = zipfile.ZipFile(local_file, 'r')
logger.debug('Extracting %s', compressed_file)
compressed_file.extractall(env.WEATHER_DATA_PATH)
local_file.close() # depends on [control=['with'], data=['local_file']]
|
def _collapse_list_of_sets(self, sets):
'''Input is a list of sets. Merges any intersecting sets in the list'''
found = True
while found:
found = False
to_intersect = None
for i in range(len(sets)):
for j in range(len(sets)):
if i == j:
continue
elif sets[i].intersection(sets[j]):
to_intersect = i, j
break
if to_intersect is not None:
break
if to_intersect is not None:
found = True
sets[i].update(sets[j])
sets.pop(j)
return sets
|
def function[_collapse_list_of_sets, parameter[self, sets]]:
constant[Input is a list of sets. Merges any intersecting sets in the list]
variable[found] assign[=] constant[True]
while name[found] begin[:]
variable[found] assign[=] constant[False]
variable[to_intersect] assign[=] constant[None]
for taget[name[i]] in starred[call[name[range], parameter[call[name[len], parameter[name[sets]]]]]] begin[:]
for taget[name[j]] in starred[call[name[range], parameter[call[name[len], parameter[name[sets]]]]]] begin[:]
if compare[name[i] equal[==] name[j]] begin[:]
continue
if compare[name[to_intersect] is_not constant[None]] begin[:]
break
if compare[name[to_intersect] is_not constant[None]] begin[:]
variable[found] assign[=] constant[True]
call[call[name[sets]][name[i]].update, parameter[call[name[sets]][name[j]]]]
call[name[sets].pop, parameter[name[j]]]
return[name[sets]]
|
keyword[def] identifier[_collapse_list_of_sets] ( identifier[self] , identifier[sets] ):
literal[string]
identifier[found] = keyword[True]
keyword[while] identifier[found] :
identifier[found] = keyword[False]
identifier[to_intersect] = keyword[None]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[sets] )):
keyword[for] identifier[j] keyword[in] identifier[range] ( identifier[len] ( identifier[sets] )):
keyword[if] identifier[i] == identifier[j] :
keyword[continue]
keyword[elif] identifier[sets] [ identifier[i] ]. identifier[intersection] ( identifier[sets] [ identifier[j] ]):
identifier[to_intersect] = identifier[i] , identifier[j]
keyword[break]
keyword[if] identifier[to_intersect] keyword[is] keyword[not] keyword[None] :
keyword[break]
keyword[if] identifier[to_intersect] keyword[is] keyword[not] keyword[None] :
identifier[found] = keyword[True]
identifier[sets] [ identifier[i] ]. identifier[update] ( identifier[sets] [ identifier[j] ])
identifier[sets] . identifier[pop] ( identifier[j] )
keyword[return] identifier[sets]
|
def _collapse_list_of_sets(self, sets):
"""Input is a list of sets. Merges any intersecting sets in the list"""
found = True
while found:
found = False
to_intersect = None
for i in range(len(sets)):
for j in range(len(sets)):
if i == j:
continue # depends on [control=['if'], data=[]]
elif sets[i].intersection(sets[j]):
to_intersect = (i, j)
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['j']]
if to_intersect is not None:
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']]
if to_intersect is not None:
found = True
sets[i].update(sets[j])
sets.pop(j) # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]]
return sets
|
def decompose(self):
    """Recursively destroys the contents of this tree.

    Detaches this node from its parent, then walks every descendant and
    clears its navigation links so the nodes can be garbage-collected
    despite their reference cycles.
    """
    self.extract()
    if not self.contents:
        return
    current = self.contents[0]
    while current is not None:
        # Save the forward link before we clear it so the walk continues.
        next_node = current.next
        if isinstance(current, Tag):
            del current.contents[:]
        # Break every back/forward reference held by this node.
        current.parent = None
        current.previous = None
        current.previousSibling = None
        current.next = None
        current.nextSibling = None
        current = next_node
|
def function[decompose, parameter[self]]:
constant[Recursively destroys the contents of this tree.]
call[name[self].extract, parameter[]]
if compare[call[name[len], parameter[name[self].contents]] equal[==] constant[0]] begin[:]
return[None]
variable[current] assign[=] call[name[self].contents][constant[0]]
while compare[name[current] is_not constant[None]] begin[:]
variable[next] assign[=] name[current].next
if call[name[isinstance], parameter[name[current], name[Tag]]] begin[:]
<ast.Delete object at 0x7da20c7cb790>
name[current].parent assign[=] constant[None]
name[current].previous assign[=] constant[None]
name[current].previousSibling assign[=] constant[None]
name[current].next assign[=] constant[None]
name[current].nextSibling assign[=] constant[None]
variable[current] assign[=] name[next]
|
keyword[def] identifier[decompose] ( identifier[self] ):
literal[string]
identifier[self] . identifier[extract] ()
keyword[if] identifier[len] ( identifier[self] . identifier[contents] )== literal[int] :
keyword[return]
identifier[current] = identifier[self] . identifier[contents] [ literal[int] ]
keyword[while] identifier[current] keyword[is] keyword[not] keyword[None] :
identifier[next] = identifier[current] . identifier[next]
keyword[if] identifier[isinstance] ( identifier[current] , identifier[Tag] ):
keyword[del] identifier[current] . identifier[contents] [:]
identifier[current] . identifier[parent] = keyword[None]
identifier[current] . identifier[previous] = keyword[None]
identifier[current] . identifier[previousSibling] = keyword[None]
identifier[current] . identifier[next] = keyword[None]
identifier[current] . identifier[nextSibling] = keyword[None]
identifier[current] = identifier[next]
|
def decompose(self):
"""Recursively destroys the contents of this tree."""
self.extract()
if len(self.contents) == 0:
return # depends on [control=['if'], data=[]]
current = self.contents[0]
while current is not None:
next = current.next
if isinstance(current, Tag):
del current.contents[:] # depends on [control=['if'], data=[]]
current.parent = None
current.previous = None
current.previousSibling = None
current.next = None
current.nextSibling = None
current = next # depends on [control=['while'], data=['current']]
|
def direct_messages_show(self, id):
    """
    Gets the direct message with the given id.
    https://dev.twitter.com/docs/api/1.1/get/direct_messages/show
    :param str id:
        (*required*) The ID of the direct message.
    :returns:
        A direct message dict.
    """
    params = {}
    set_str_param(params, 'id', id)
    deferred = self._get_api('direct_messages/show.json', params)
    # The endpoint returns a one-element list; unwrap to a single dict.
    deferred.addCallback(lambda messages: messages[0])
    return deferred
|
def function[direct_messages_show, parameter[self, id]]:
constant[
Gets the direct message with the given id.
https://dev.twitter.com/docs/api/1.1/get/direct_messages/show
:param str id:
(*required*) The ID of the direct message.
:returns:
A direct message dict.
]
variable[params] assign[=] dictionary[[], []]
call[name[set_str_param], parameter[name[params], constant[id], name[id]]]
variable[d] assign[=] call[name[self]._get_api, parameter[constant[direct_messages/show.json], name[params]]]
call[name[d].addCallback, parameter[<ast.Lambda object at 0x7da1b168d210>]]
return[name[d]]
|
keyword[def] identifier[direct_messages_show] ( identifier[self] , identifier[id] ):
literal[string]
identifier[params] ={}
identifier[set_str_param] ( identifier[params] , literal[string] , identifier[id] )
identifier[d] = identifier[self] . identifier[_get_api] ( literal[string] , identifier[params] )
identifier[d] . identifier[addCallback] ( keyword[lambda] identifier[dms] : identifier[dms] [ literal[int] ])
keyword[return] identifier[d]
|
def direct_messages_show(self, id):
"""
Gets the direct message with the given id.
https://dev.twitter.com/docs/api/1.1/get/direct_messages/show
:param str id:
(*required*) The ID of the direct message.
:returns:
A direct message dict.
"""
params = {}
set_str_param(params, 'id', id)
d = self._get_api('direct_messages/show.json', params)
d.addCallback(lambda dms: dms[0])
return d
|
def todict_using_struct(self, dict_struct=None, dict_post_processors=None):
    """Serialize this object to a dict shaped by `dict_struct`.

    dict_struct:
        {
            'attrs': ['id', 'created_at'],
            'rels': {
                'merchandise': {
                    'attrs': ['id', 'label']
                }
            }
        }

    :param dict_struct: nested spec of attributes ('attrs') and
        relationships ('rels') to serialize. Falls back to
        ``self._dict_struct_`` when None.
    :param dict_post_processors: optional list of callables; each is
        invoked as ``processor(result, self)`` and its return value
        replaces `result`.
    :returns: a dict of the serialized attributes plus one entry per
        relationship (list, dict, nested dict, or None).
    """
    # It is important to assign the passed kwarg to a differently named variable.
    # A dict is passed by reference and using the same kwarg here results in it
    # getting mutated - causing unforeseen side effects
    dict_struct_to_use = (
        self._dict_struct_ if dict_struct is None
        else dict_struct)
    # NOTE(review): if both dict_struct and self._dict_struct_ are None and
    # _autogenerate_dict_struct_if_none_ is falsy, the elif below calls
    # None.get('attrs') and raises AttributeError — confirm that combination
    # cannot occur.
    if dict_struct_to_use is None and self._autogenerate_dict_struct_if_none_:
        dict_struct_to_use = self.autogenerated_dict_structure()
    elif dict_struct.get("attrs") is None:
        # No explicit 'attrs': autogenerate them, but keep any caller-given
        # 'rels'. A fresh dict is built so the caller's dict isn't mutated.
        dict_struct_to_use = {}
        dict_struct_to_use["attrs"] = self.autogenerated_dict_structure()["attrs"]
        if "rels" in dict_struct:
            dict_struct_to_use["rels"] = dict_struct.get("rels")
    result = self.serialize_attrs(*dict_struct_to_use.get('attrs', []))
    for rel, rel_dict_struct in dict_struct_to_use.get('rels', {}).items():
        rel_obj = getattr(self, rel) if hasattr(self, rel) else None
        if rel_obj is not None:
            if is_list_like(rel_obj):
                # List-valued relationship: recurse per element when the
                # element itself supports struct-based serialization.
                result[rel] = [i.todict_using_struct(dict_struct=rel_dict_struct)
                               if hasattr(i, 'todict_using_struct') else i
                               for i in rel_obj]
            elif is_dict_like(rel_obj):
                # NOTE(review): iteritems() is Python 2 only — this branch
                # would fail under Python 3; confirm target runtime.
                result[rel] = {k: v.todict_using_struct(dict_struct=rel_dict_struct)
                               if hasattr(v, 'todict_using_struct') else v
                               for k, v in rel_obj.iteritems()}
            else:
                # Scalar relationship object.
                result[rel] = rel_obj.todict_using_struct(
                    dict_struct=rel_dict_struct) if hasattr(
                    rel_obj, 'todict_using_struct') else rel_obj
        else:
            result[rel] = None
    if isinstance(dict_post_processors, list):
        for dict_post_processor in dict_post_processors:
            if callable(dict_post_processor):
                result = dict_post_processor(result, self)
    return result
|
def function[todict_using_struct, parameter[self, dict_struct, dict_post_processors]]:
constant[
dict_struct:
{
'attrs': ['id', 'created_at'],
'rels': {
'merchandise': {
'attrs': ['id', 'label']
}
}
}
]
variable[dict_struct_to_use] assign[=] <ast.IfExp object at 0x7da204566aa0>
if <ast.BoolOp object at 0x7da204565420> begin[:]
variable[dict_struct_to_use] assign[=] call[name[self].autogenerated_dict_structure, parameter[]]
variable[result] assign[=] call[name[self].serialize_attrs, parameter[<ast.Starred object at 0x7da2045658a0>]]
for taget[tuple[[<ast.Name object at 0x7da204565180>, <ast.Name object at 0x7da204566020>]]] in starred[call[call[name[dict_struct_to_use].get, parameter[constant[rels], dictionary[[], []]]].items, parameter[]]] begin[:]
variable[rel_obj] assign[=] <ast.IfExp object at 0x7da204565c90>
if compare[name[rel_obj] is_not constant[None]] begin[:]
if call[name[is_list_like], parameter[name[rel_obj]]] begin[:]
call[name[result]][name[rel]] assign[=] <ast.ListComp object at 0x7da1b25d3550>
if call[name[isinstance], parameter[name[dict_post_processors], name[list]]] begin[:]
for taget[name[dict_post_processor]] in starred[name[dict_post_processors]] begin[:]
if call[name[callable], parameter[name[dict_post_processor]]] begin[:]
variable[result] assign[=] call[name[dict_post_processor], parameter[name[result], name[self]]]
return[name[result]]
|
keyword[def] identifier[todict_using_struct] ( identifier[self] , identifier[dict_struct] = keyword[None] , identifier[dict_post_processors] = keyword[None] ):
literal[string]
identifier[dict_struct_to_use] =(
identifier[self] . identifier[_dict_struct_] keyword[if] identifier[dict_struct] keyword[is] keyword[None]
keyword[else] identifier[dict_struct] )
keyword[if] identifier[dict_struct_to_use] keyword[is] keyword[None] keyword[and] identifier[self] . identifier[_autogenerate_dict_struct_if_none_] :
identifier[dict_struct_to_use] = identifier[self] . identifier[autogenerated_dict_structure] ()
keyword[elif] identifier[dict_struct] . identifier[get] ( literal[string] ) keyword[is] keyword[None] :
identifier[dict_struct_to_use] ={}
identifier[dict_struct_to_use] [ literal[string] ]= identifier[self] . identifier[autogenerated_dict_structure] ()[ literal[string] ]
keyword[if] literal[string] keyword[in] identifier[dict_struct] :
identifier[dict_struct_to_use] [ literal[string] ]= identifier[dict_struct] . identifier[get] ( literal[string] )
identifier[result] = identifier[self] . identifier[serialize_attrs] (* identifier[dict_struct_to_use] . identifier[get] ( literal[string] ,[]))
keyword[for] identifier[rel] , identifier[rel_dict_struct] keyword[in] identifier[dict_struct_to_use] . identifier[get] ( literal[string] ,{}). identifier[items] ():
identifier[rel_obj] = identifier[getattr] ( identifier[self] , identifier[rel] ) keyword[if] identifier[hasattr] ( identifier[self] , identifier[rel] ) keyword[else] keyword[None]
keyword[if] identifier[rel_obj] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[is_list_like] ( identifier[rel_obj] ):
identifier[result] [ identifier[rel] ]=[ identifier[i] . identifier[todict_using_struct] ( identifier[dict_struct] = identifier[rel_dict_struct] )
keyword[if] identifier[hasattr] ( identifier[i] , literal[string] ) keyword[else] identifier[i]
keyword[for] identifier[i] keyword[in] identifier[rel_obj] ]
keyword[elif] identifier[is_dict_like] ( identifier[rel_obj] ):
identifier[result] [ identifier[rel] ]={ identifier[k] : identifier[v] . identifier[todict_using_struct] ( identifier[dict_struct] = identifier[rel_dict_struct] )
keyword[if] identifier[hasattr] ( identifier[v] , literal[string] ) keyword[else] identifier[v]
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[rel_obj] . identifier[iteritems] ()}
keyword[else] :
identifier[result] [ identifier[rel] ]= identifier[rel_obj] . identifier[todict_using_struct] (
identifier[dict_struct] = identifier[rel_dict_struct] ) keyword[if] identifier[hasattr] (
identifier[rel_obj] , literal[string] ) keyword[else] identifier[rel_obj]
keyword[else] :
identifier[result] [ identifier[rel] ]= keyword[None]
keyword[if] identifier[isinstance] ( identifier[dict_post_processors] , identifier[list] ):
keyword[for] identifier[dict_post_processor] keyword[in] identifier[dict_post_processors] :
keyword[if] identifier[callable] ( identifier[dict_post_processor] ):
identifier[result] = identifier[dict_post_processor] ( identifier[result] , identifier[self] )
keyword[return] identifier[result]
|
def todict_using_struct(self, dict_struct=None, dict_post_processors=None):
"""
dict_struct:
{
'attrs': ['id', 'created_at'],
'rels': {
'merchandise': {
'attrs': ['id', 'label']
}
}
}
"""
# It is important to assign the passed kwarg to a differently named variable.
# A dict is passed by reference and using the same kwarg here results in it
# getting mutated - causing unforeseen side effects
dict_struct_to_use = self._dict_struct_ if dict_struct is None else dict_struct
if dict_struct_to_use is None and self._autogenerate_dict_struct_if_none_:
dict_struct_to_use = self.autogenerated_dict_structure() # depends on [control=['if'], data=[]]
elif dict_struct.get('attrs') is None:
dict_struct_to_use = {}
dict_struct_to_use['attrs'] = self.autogenerated_dict_structure()['attrs']
if 'rels' in dict_struct:
dict_struct_to_use['rels'] = dict_struct.get('rels') # depends on [control=['if'], data=['dict_struct']] # depends on [control=['if'], data=[]]
result = self.serialize_attrs(*dict_struct_to_use.get('attrs', []))
for (rel, rel_dict_struct) in dict_struct_to_use.get('rels', {}).items():
rel_obj = getattr(self, rel) if hasattr(self, rel) else None
if rel_obj is not None:
if is_list_like(rel_obj):
result[rel] = [i.todict_using_struct(dict_struct=rel_dict_struct) if hasattr(i, 'todict_using_struct') else i for i in rel_obj] # depends on [control=['if'], data=[]]
elif is_dict_like(rel_obj):
result[rel] = {k: v.todict_using_struct(dict_struct=rel_dict_struct) if hasattr(v, 'todict_using_struct') else v for (k, v) in rel_obj.iteritems()} # depends on [control=['if'], data=[]]
else:
result[rel] = rel_obj.todict_using_struct(dict_struct=rel_dict_struct) if hasattr(rel_obj, 'todict_using_struct') else rel_obj # depends on [control=['if'], data=['rel_obj']]
else:
result[rel] = None # depends on [control=['for'], data=[]]
if isinstance(dict_post_processors, list):
for dict_post_processor in dict_post_processors:
if callable(dict_post_processor):
result = dict_post_processor(result, self) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['dict_post_processor']] # depends on [control=['if'], data=[]]
return result
|
def uncurry_nested_dictionary(curried_dict):
    """
    Transform dictionary from (key_a -> key_b -> float) to
    (key_a, key_b) -> float
    """
    # Flatten the two-level mapping into tuple keys in a single pass.
    return {(outer, inner): value
            for outer, inner_dict in curried_dict.items()
            for inner, value in inner_dict.items()}
|
def function[uncurry_nested_dictionary, parameter[curried_dict]]:
constant[
Transform dictionary from (key_a -> key_b -> float) to
(key_a, key_b) -> float
]
variable[result] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da18f721300>, <ast.Name object at 0x7da18f722500>]]] in starred[call[name[curried_dict].items, parameter[]]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da1b1a66b30>, <ast.Name object at 0x7da1b1a64490>]]] in starred[call[name[a_dict].items, parameter[]]] begin[:]
call[name[result]][tuple[[<ast.Name object at 0x7da1b1a66cb0>, <ast.Name object at 0x7da1b1a67fd0>]]] assign[=] name[value]
return[name[result]]
|
keyword[def] identifier[uncurry_nested_dictionary] ( identifier[curried_dict] ):
literal[string]
identifier[result] ={}
keyword[for] identifier[a] , identifier[a_dict] keyword[in] identifier[curried_dict] . identifier[items] ():
keyword[for] identifier[b] , identifier[value] keyword[in] identifier[a_dict] . identifier[items] ():
identifier[result] [( identifier[a] , identifier[b] )]= identifier[value]
keyword[return] identifier[result]
|
def uncurry_nested_dictionary(curried_dict):
"""
Transform dictionary from (key_a -> key_b -> float) to
(key_a, key_b) -> float
"""
result = {}
for (a, a_dict) in curried_dict.items():
for (b, value) in a_dict.items():
result[a, b] = value # depends on [control=['for'], data=[]] # depends on [control=['for'], data=[]]
return result
|
def parquet(self, path):
    """Loads a Parquet file stream, returning the result as a :class:`DataFrame`.
    You can set the following Parquet-specific option(s) for reading Parquet files:
        * ``mergeSchema``: sets whether we should merge schemas collected from all \
            Parquet part-files. This will override ``spark.sql.parquet.mergeSchema``. \
            The default value is specified in ``spark.sql.parquet.mergeSchema``.
    .. note:: Evolving.
    >>> parquet_sdf = spark.readStream.schema(sdf_schema).parquet(tempfile.mkdtemp())
    >>> parquet_sdf.isStreaming
    True
    >>> parquet_sdf.schema == sdf_schema
    True
    """
    # Reject anything that is not a single path string up front.
    if not isinstance(path, basestring):
        raise TypeError("path can be only a single string")
    return self._df(self._jreader.parquet(path))
|
def function[parquet, parameter[self, path]]:
constant[Loads a Parquet file stream, returning the result as a :class:`DataFrame`.
You can set the following Parquet-specific option(s) for reading Parquet files:
* ``mergeSchema``: sets whether we should merge schemas collected from all Parquet part-files. This will override ``spark.sql.parquet.mergeSchema``. The default value is specified in ``spark.sql.parquet.mergeSchema``.
.. note:: Evolving.
>>> parquet_sdf = spark.readStream.schema(sdf_schema).parquet(tempfile.mkdtemp())
>>> parquet_sdf.isStreaming
True
>>> parquet_sdf.schema == sdf_schema
True
]
if call[name[isinstance], parameter[name[path], name[basestring]]] begin[:]
return[call[name[self]._df, parameter[call[name[self]._jreader.parquet, parameter[name[path]]]]]]
|
keyword[def] identifier[parquet] ( identifier[self] , identifier[path] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[path] , identifier[basestring] ):
keyword[return] identifier[self] . identifier[_df] ( identifier[self] . identifier[_jreader] . identifier[parquet] ( identifier[path] ))
keyword[else] :
keyword[raise] identifier[TypeError] ( literal[string] )
|
def parquet(self, path):
"""Loads a Parquet file stream, returning the result as a :class:`DataFrame`.
You can set the following Parquet-specific option(s) for reading Parquet files:
* ``mergeSchema``: sets whether we should merge schemas collected from all Parquet part-files. This will override ``spark.sql.parquet.mergeSchema``. The default value is specified in ``spark.sql.parquet.mergeSchema``.
.. note:: Evolving.
>>> parquet_sdf = spark.readStream.schema(sdf_schema).parquet(tempfile.mkdtemp())
>>> parquet_sdf.isStreaming
True
>>> parquet_sdf.schema == sdf_schema
True
"""
if isinstance(path, basestring):
return self._df(self._jreader.parquet(path)) # depends on [control=['if'], data=[]]
else:
raise TypeError('path can be only a single string')
|
def exit_cleanly(error_number=0):
    """exit_cleanly
    Performs standard error notification and exiting statements as necessary. This
    assures more consistent error handling within the script.

    :param error_number: an int exit status, anything convertible to int,
        or the symbolic name of an ``errno`` constant (e.g. ``'EINVAL'``).
        Unrecognized values exit with status -1 and a generic message.
    """
    default = "An Unknown error has occurred!"
    descriptions = \
        {22: 'An improper input error has occurred. Please see above stmt(s)',
         29: 'An operation failed. Please see above stmt(s)',
         5: 'An IO Error has occurred. Pelase see above stmt(s)'}
    try:
        error_number = int(error_number)
    except (TypeError, ValueError):
        # int() raises ValueError (not TypeError) for non-numeric strings,
        # so symbolic errno names like 'EINVAL' must be resolved here;
        # anything else falls back to the generic -1 status.
        if isinstance(error_number, str) and hasattr(errno, error_number):
            error_number = getattr(errno, error_number)
        else:
            error_number = -1
    if error_number == 0:
        stmt = "No error has been detected!"
    else:
        # Known codes get a specific description; everything else generic.
        stmt = descriptions.get(error_number, default)
    if error_number:
        # On any failure, remind the user of the expected invocation.
        print("""
%s [--opt [option]]
    With opts:
    working_directory - the full path to the working directory
    operating_system - the full name of the operating system lower case
    version - the version of the operating system
All of these options must be supplied, and if one is missing or if there is
no corresponding:
    <working_directory>/*-dist/Docker/<operating_system>/<version>
Directory, then this script will exit cleanly reporting it as an error.
 """ % sys.argv[0])
    print("(%d) %s" % (error_number, stmt), file=sys.stderr)
    sys.exit(error_number)
|
def function[exit_cleanly, parameter[error_number]]:
constant[exit_cleanly
Performs standard error notification and exiting statements as necessary. This
assures more consistent error handling within the script.
]
variable[default] assign[=] constant[An Unknown error has occurred!]
variable[descriptions] assign[=] dictionary[[<ast.Constant object at 0x7da1b17fbc40>, <ast.Constant object at 0x7da1b17f9f60>, <ast.Constant object at 0x7da1b17f98d0>], [<ast.Constant object at 0x7da1b17f8730>, <ast.Constant object at 0x7da1b17fa140>, <ast.Constant object at 0x7da1b17f9f90>]]
<ast.Try object at 0x7da1b17fb550>
if compare[name[error_number] equal[==] constant[0]] begin[:]
variable[stmt] assign[=] constant[No error has been detected!]
if name[error_number] begin[:]
call[name[print], parameter[binary_operation[constant[
%s [--opt [option]]
With opts:
working_directory - the full path to the working directory
operating_system - the full name of the operating system lower case
version - the version of the operating system
All of these options must be supplied, and if one is missing or if there is
no corresponding:
<working_directory>/*-dist/Docker/<operating_system>/<version>
Directory, then this script will exit cleanly reporting it as an error.
] <ast.Mod object at 0x7da2590d6920> call[name[sys].argv][constant[0]]]]]
call[name[print], parameter[binary_operation[constant[(%d) %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b17f93c0>, <ast.Name object at 0x7da1b17f8e80>]]]]]
call[name[sys].exit, parameter[name[error_number]]]
|
keyword[def] identifier[exit_cleanly] ( identifier[error_number] = literal[int] ):
literal[string]
identifier[default] = literal[string]
identifier[descriptions] ={ literal[int] : literal[string] ,
literal[int] : literal[string] ,
literal[int] : literal[string] }
keyword[try] :
identifier[error_number] = identifier[int] ( identifier[error_number] )
keyword[except] identifier[TypeError] :
identifier[stmt] = identifier[default]
identifier[error_number] =- literal[int]
keyword[if] identifier[error_number] == literal[int] :
identifier[stmt] = literal[string]
keyword[elif] keyword[not] identifier[isinstance] ( identifier[error_number] , identifier[int] ) keyword[and] identifier[hasattr] ( identifier[errno] , identifier[error_number] ):
identifier[error_number] = identifier[getattr] ( identifier[errno] , identifier[error_number] )
identifier[stmt] = identifier[descriptions] [ identifier[error_number] ] keyword[if] identifier[error_number] keyword[in] identifier[descriptions] keyword[else] identifier[default]
keyword[elif] identifier[error_number] keyword[in] identifier[descriptions] :
identifier[stmt] = identifier[descriptions] [ identifier[error_number] ]
keyword[else] :
identifier[stmt] = identifier[default]
keyword[if] identifier[error_number] :
identifier[print] ( literal[string] % identifier[sys] . identifier[argv] [ literal[int] ])
identifier[print] ( literal[string] %( identifier[error_number] , identifier[stmt] ), identifier[file] = identifier[sys] . identifier[stderr] )
identifier[sys] . identifier[exit] ( identifier[error_number] )
|
def exit_cleanly(error_number=0):
"""exit_cleanly
Performs standard error notification and exiting statements as necessary. This
assures more consistent error handling within the script.
"""
default = 'An Unknown error has occurred!'
descriptions = {22: 'An improper input error has occurred. Please see above stmt(s)', 29: 'An operation failed. Please see above stmt(s)', 5: 'An IO Error has occurred. Pelase see above stmt(s)'}
try:
error_number = int(error_number) # depends on [control=['try'], data=[]]
except TypeError:
stmt = default
error_number = -1 # depends on [control=['except'], data=[]]
if error_number == 0:
stmt = 'No error has been detected!' # depends on [control=['if'], data=[]]
elif not isinstance(error_number, int) and hasattr(errno, error_number):
error_number = getattr(errno, error_number)
stmt = descriptions[error_number] if error_number in descriptions else default # depends on [control=['if'], data=[]]
elif error_number in descriptions:
stmt = descriptions[error_number] # depends on [control=['if'], data=['error_number', 'descriptions']]
else:
stmt = default
if error_number:
print('\n%s [--opt [option]]\n With opts:\n working_directory - the full path to the working directory\n operating_system - the full name of the operating system lower case\n version - the version of the operating system\n\n All of these options must be supplied, and if one is missing or if there is\n no corresponding:\n <working_directory>/*-dist/Docker/<operating_system>/<version>\n Directory, then this script will exit cleanly reporting it as an error.\n ' % sys.argv[0]) # depends on [control=['if'], data=[]]
print('(%d) %s' % (error_number, stmt), file=sys.stderr)
sys.exit(error_number)
|
def _connect(self):
    """ Connect to the remote if not already connected.

    Thread-safe lazy connect: a fast unlocked check of the
    ``self.connected`` Event, then a re-check under ``self.lock``
    (double-checked locking) so that only one thread performs the
    actual connection work.
    """
    if not self.connected.is_set():
        try:
            # NOTE(review): acquire() sits inside the try, so if it ever
            # raised, the finally would release an unheld lock -- confirm
            # this is acceptable for the lock type used here.
            self.lock.acquire()
            # Another thread may have connected while we were
            # waiting to acquire the lock
            if not self.connected.is_set():
                self._do_connect()
                if self.keepalive:
                    # keepalive is presumably an interval in seconds --
                    # TODO confirm against the transport's API.
                    self._transport.set_keepalive(self.keepalive)
                # Signal waiters only after the transport is fully set up.
                self.connected.set()
        except GerritError:
            # Re-raised unchanged; kept explicit so connection failures
            # surface to the caller (finally still releases the lock).
            raise
        finally:
            self.lock.release()
|
def function[_connect, parameter[self]]:
constant[ Connect to the remote if not already connected. ]
if <ast.UnaryOp object at 0x7da1b1019ab0> begin[:]
<ast.Try object at 0x7da1b1018d60>
|
keyword[def] identifier[_connect] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[connected] . identifier[is_set] ():
keyword[try] :
identifier[self] . identifier[lock] . identifier[acquire] ()
keyword[if] keyword[not] identifier[self] . identifier[connected] . identifier[is_set] ():
identifier[self] . identifier[_do_connect] ()
keyword[if] identifier[self] . identifier[keepalive] :
identifier[self] . identifier[_transport] . identifier[set_keepalive] ( identifier[self] . identifier[keepalive] )
identifier[self] . identifier[connected] . identifier[set] ()
keyword[except] identifier[GerritError] :
keyword[raise]
keyword[finally] :
identifier[self] . identifier[lock] . identifier[release] ()
|
def _connect(self):
""" Connect to the remote if not already connected. """
if not self.connected.is_set():
try:
self.lock.acquire()
# Another thread may have connected while we were
# waiting to acquire the lock
if not self.connected.is_set():
self._do_connect()
if self.keepalive:
self._transport.set_keepalive(self.keepalive) # depends on [control=['if'], data=[]]
self.connected.set() # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except GerritError:
raise # depends on [control=['except'], data=[]]
finally:
self.lock.release() # depends on [control=['if'], data=[]]
|
def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'document') and self.document is not None:
_dict['document'] = self.document._to_dict()
if hasattr(self, 'model_id') and self.model_id is not None:
_dict['model_id'] = self.model_id
if hasattr(self, 'model_version') and self.model_version is not None:
_dict['model_version'] = self.model_version
if hasattr(self, 'tables') and self.tables is not None:
_dict['tables'] = [x._to_dict() for x in self.tables]
return _dict
|
def function[_to_dict, parameter[self]]:
constant[Return a json dictionary representing this model.]
variable[_dict] assign[=] dictionary[[], []]
if <ast.BoolOp object at 0x7da204623070> begin[:]
call[name[_dict]][constant[document]] assign[=] call[name[self].document._to_dict, parameter[]]
if <ast.BoolOp object at 0x7da18bcc8c40> begin[:]
call[name[_dict]][constant[model_id]] assign[=] name[self].model_id
if <ast.BoolOp object at 0x7da18bcc8070> begin[:]
call[name[_dict]][constant[model_version]] assign[=] name[self].model_version
if <ast.BoolOp object at 0x7da18bcc84f0> begin[:]
call[name[_dict]][constant[tables]] assign[=] <ast.ListComp object at 0x7da18fe93c40>
return[name[_dict]]
|
keyword[def] identifier[_to_dict] ( identifier[self] ):
literal[string]
identifier[_dict] ={}
keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ) keyword[and] identifier[self] . identifier[document] keyword[is] keyword[not] keyword[None] :
identifier[_dict] [ literal[string] ]= identifier[self] . identifier[document] . identifier[_to_dict] ()
keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ) keyword[and] identifier[self] . identifier[model_id] keyword[is] keyword[not] keyword[None] :
identifier[_dict] [ literal[string] ]= identifier[self] . identifier[model_id]
keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ) keyword[and] identifier[self] . identifier[model_version] keyword[is] keyword[not] keyword[None] :
identifier[_dict] [ literal[string] ]= identifier[self] . identifier[model_version]
keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ) keyword[and] identifier[self] . identifier[tables] keyword[is] keyword[not] keyword[None] :
identifier[_dict] [ literal[string] ]=[ identifier[x] . identifier[_to_dict] () keyword[for] identifier[x] keyword[in] identifier[self] . identifier[tables] ]
keyword[return] identifier[_dict]
|
def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'document') and self.document is not None:
_dict['document'] = self.document._to_dict() # depends on [control=['if'], data=[]]
if hasattr(self, 'model_id') and self.model_id is not None:
_dict['model_id'] = self.model_id # depends on [control=['if'], data=[]]
if hasattr(self, 'model_version') and self.model_version is not None:
_dict['model_version'] = self.model_version # depends on [control=['if'], data=[]]
if hasattr(self, 'tables') and self.tables is not None:
_dict['tables'] = [x._to_dict() for x in self.tables] # depends on [control=['if'], data=[]]
return _dict
|
def _condition_number(self):
"""Condition number of x; ratio of largest to smallest eigenvalue."""
ev = np.linalg.eig(np.matmul(self.xwins.swapaxes(1, 2), self.xwins))[0]
return np.sqrt(ev.max(axis=1) / ev.min(axis=1))
|
def function[_condition_number, parameter[self]]:
constant[Condition number of x; ratio of largest to smallest eigenvalue.]
variable[ev] assign[=] call[call[name[np].linalg.eig, parameter[call[name[np].matmul, parameter[call[name[self].xwins.swapaxes, parameter[constant[1], constant[2]]], name[self].xwins]]]]][constant[0]]
return[call[name[np].sqrt, parameter[binary_operation[call[name[ev].max, parameter[]] / call[name[ev].min, parameter[]]]]]]
|
keyword[def] identifier[_condition_number] ( identifier[self] ):
literal[string]
identifier[ev] = identifier[np] . identifier[linalg] . identifier[eig] ( identifier[np] . identifier[matmul] ( identifier[self] . identifier[xwins] . identifier[swapaxes] ( literal[int] , literal[int] ), identifier[self] . identifier[xwins] ))[ literal[int] ]
keyword[return] identifier[np] . identifier[sqrt] ( identifier[ev] . identifier[max] ( identifier[axis] = literal[int] )/ identifier[ev] . identifier[min] ( identifier[axis] = literal[int] ))
|
def _condition_number(self):
"""Condition number of x; ratio of largest to smallest eigenvalue."""
ev = np.linalg.eig(np.matmul(self.xwins.swapaxes(1, 2), self.xwins))[0]
return np.sqrt(ev.max(axis=1) / ev.min(axis=1))
|
def make_all_dirs(path, mode=0o777):
    """
    Ensure local dir, with all its parent dirs, are created.
    Unlike os.makedirs(), will not fail if the path already exists.
    """
    # Create in one shot and tolerate the EEXIST race rather than
    # checking first: a pre-check could go stale between the test and
    # the mkdir. (Python 3's exist_ok flag does the same thing, but
    # this form also runs on Python 2.)
    # https://stackoverflow.com/questions/600268/mkdir-p-functionality-in-python
    try:
        os.makedirs(path, mode=mode)
    except OSError as err:
        already_present = err.errno == errno.EEXIST and os.path.isdir(path)
        if not already_present:
            raise
    return path
|
def function[make_all_dirs, parameter[path, mode]]:
constant[
Ensure local dir, with all its parent dirs, are created.
Unlike os.makedirs(), will not fail if the path already exists.
]
<ast.Try object at 0x7da1b04ed420>
return[name[path]]
|
keyword[def] identifier[make_all_dirs] ( identifier[path] , identifier[mode] = literal[int] ):
literal[string]
keyword[try] :
identifier[os] . identifier[makedirs] ( identifier[path] , identifier[mode] = identifier[mode] )
keyword[except] identifier[OSError] keyword[as] identifier[e] :
keyword[if] identifier[e] . identifier[errno] == identifier[errno] . identifier[EEXIST] keyword[and] identifier[os] . identifier[path] . identifier[isdir] ( identifier[path] ):
keyword[pass]
keyword[else] :
keyword[raise]
keyword[return] identifier[path]
|
def make_all_dirs(path, mode=511):
"""
Ensure local dir, with all its parent dirs, are created.
Unlike os.makedirs(), will not fail if the path already exists.
"""
# Avoid races inherent to doing this in two steps (check then create).
# Python 3 has exist_ok but the approach below works for Python 2+3.
# https://stackoverflow.com/questions/600268/mkdir-p-functionality-in-python
try:
os.makedirs(path, mode=mode) # depends on [control=['try'], data=[]]
except OSError as e:
if e.errno == errno.EEXIST and os.path.isdir(path):
pass # depends on [control=['if'], data=[]]
else:
raise # depends on [control=['except'], data=['e']]
return path
|
def SCG(f, gradf, x, optargs=(), maxiters=500, max_f_eval=np.inf, xtol=None, ftol=None, gtol=None):
    """
    Optimisation through Scaled Conjugate Gradients (SCG)
    f: the objective function
    gradf : the gradient function (should return a 1D np.ndarray)
    x : the initial condition
    optargs: extra positional arguments forwarded to both f and gradf
    maxiters: maximum number of iterations of the main loop
    max_f_eval: currently UNUSED -- function evaluations are counted but
        never checked against this limit
    xtol/ftol/gtol: convergence tolerances on step size, objective
        reduction and gradient norm (defaults 1e-6, 1e-6, 1e-5)
    Returns
    x the optimal value for x
    flog : a list of all the objective values
    function_eval number of fn evaluations
    status: string describing convergence status
    """
    if xtol is None:
        xtol = 1e-6
    if ftol is None:
        ftol = 1e-6
    if gtol is None:
        gtol = 1e-5
    # sigma0 scales the finite-difference step used to probe curvature.
    sigma0 = 1.0e-7
    fold = f(x, *optargs) # Initial function value.
    function_eval = 1
    fnow = fold
    gradnew = gradf(x, *optargs) # Initial gradient.
    function_eval += 1
    #if any(np.isnan(gradnew)):
    #    raise UnexpectedInfOrNan, "Gradient contribution resulted in a NaN value"
    # Squared gradient norm, used for the gtol termination test below.
    current_grad = np.dot(gradnew, gradnew)
    gradold = gradnew.copy()
    d = -gradnew # Initial search direction.
    success = True # Force calculation of directional derivs.
    nsuccess = 0 # nsuccess counts number of successes.
    beta = 1.0 # Initial scale parameter.
    betamin = 1.0e-15 # Lower bound on scale.
    betamax = 1.0e15 # Upper bound on scale.
    status = "Not converged"
    flog = [fold]
    iteration = 0
    # Main optimization loop.
    while iteration < maxiters:
        # Calculate first and second directional derivatives.
        if success:
            mu = np.dot(d, gradnew)
            if mu >= 0: # pragma: no cover
                # d is not a descent direction; restart along -gradient.
                d = -gradnew
                mu = np.dot(d, gradnew)
            kappa = np.dot(d, d)
            # Finite-difference estimate of curvature along d.
            sigma = sigma0 / np.sqrt(kappa)
            xplus = x + sigma * d
            gplus = gradf(xplus, *optargs)
            function_eval += 1
            theta = np.dot(d, (gplus - gradnew)) / sigma
        # Increase effective curvature and evaluate step size alpha.
        delta = theta + beta * kappa
        if delta <= 0: # pragma: no cover
            # Hessian estimate not positive definite; force it positive.
            delta = beta * kappa
            beta = beta - theta / kappa
        alpha = -mu / delta
        # Calculate the comparison ratio.
        xnew = x + alpha * d
        fnew = f(xnew, *optargs)
        function_eval += 1
        # Delta compares actual to predicted reduction; >= 0 means the
        # quadratic model was good enough to accept the step.
        Delta = 2.*(fnew - fold) / (alpha * mu)
        if Delta >= 0.:
            success = True
            nsuccess += 1
            x = xnew
            fnow = fnew
        else:
            success = False
            fnow = fold
        # Store relevant variables
        flog.append(fnow) # Current function value
        iteration += 1
        if success:
            # Test for termination
            if (np.abs(fnew - fold) < ftol):
                status = 'converged - relative reduction in objective'
                break
            # return x, flog, function_eval, status
            elif (np.max(np.abs(alpha * d)) < xtol):
                status = 'converged - relative stepsize'
                break
            else:
                # Update variables for new position
                gradold = gradnew
                gradnew = gradf(x, *optargs)
                function_eval += 1
                current_grad = np.dot(gradnew, gradnew)
                fold = fnew
                # If the gradient is zero then we are done.
                if current_grad <= gtol:
                    status = 'converged - relative reduction in gradient'
                    break
                # return x, flog, function_eval, status
        # Adjust beta according to comparison ratio.
        if Delta < 0.25:
            beta = min(4.0 * beta, betamax)
        if Delta > 0.75:
            beta = max(0.25 * beta, betamin)
        # Update search direction using Polak-Ribiere formula, or re-start
        # in direction of negative gradient after nparams steps.
        if nsuccess == x.size:
            d = -gradnew
            beta = 1. # This is not in the original paper
            nsuccess = 0
        elif success:
            Gamma = np.dot(gradold - gradnew, gradnew) / (mu)
            d = Gamma * d - gradnew
    else:
        # If we get here, then we haven't terminated in the given number of
        # iterations.
        status = "maxiter exceeded"
    return x, flog, function_eval, status
|
def function[SCG, parameter[f, gradf, x, optargs, maxiters, max_f_eval, xtol, ftol, gtol]]:
constant[
Optimisation through Scaled Conjugate Gradients (SCG)
f: the objective function
gradf : the gradient function (should return a 1D np.ndarray)
x : the initial condition
Returns
x the optimal value for x
flog : a list of all the objective values
function_eval number of fn evaluations
status: string describing convergence status
]
if compare[name[xtol] is constant[None]] begin[:]
variable[xtol] assign[=] constant[1e-06]
if compare[name[ftol] is constant[None]] begin[:]
variable[ftol] assign[=] constant[1e-06]
if compare[name[gtol] is constant[None]] begin[:]
variable[gtol] assign[=] constant[1e-05]
variable[sigma0] assign[=] constant[1e-07]
variable[fold] assign[=] call[name[f], parameter[name[x], <ast.Starred object at 0x7da1b0ec0e50>]]
variable[function_eval] assign[=] constant[1]
variable[fnow] assign[=] name[fold]
variable[gradnew] assign[=] call[name[gradf], parameter[name[x], <ast.Starred object at 0x7da1b0ec24d0>]]
<ast.AugAssign object at 0x7da1b0ec0f70>
variable[current_grad] assign[=] call[name[np].dot, parameter[name[gradnew], name[gradnew]]]
variable[gradold] assign[=] call[name[gradnew].copy, parameter[]]
variable[d] assign[=] <ast.UnaryOp object at 0x7da1b0ec2800>
variable[success] assign[=] constant[True]
variable[nsuccess] assign[=] constant[0]
variable[beta] assign[=] constant[1.0]
variable[betamin] assign[=] constant[1e-15]
variable[betamax] assign[=] constant[1000000000000000.0]
variable[status] assign[=] constant[Not converged]
variable[flog] assign[=] list[[<ast.Name object at 0x7da1b0ec30d0>]]
variable[iteration] assign[=] constant[0]
while compare[name[iteration] less[<] name[maxiters]] begin[:]
if name[success] begin[:]
variable[mu] assign[=] call[name[np].dot, parameter[name[d], name[gradnew]]]
if compare[name[mu] greater_or_equal[>=] constant[0]] begin[:]
variable[d] assign[=] <ast.UnaryOp object at 0x7da1b0ec3490>
variable[mu] assign[=] call[name[np].dot, parameter[name[d], name[gradnew]]]
variable[kappa] assign[=] call[name[np].dot, parameter[name[d], name[d]]]
variable[sigma] assign[=] binary_operation[name[sigma0] / call[name[np].sqrt, parameter[name[kappa]]]]
variable[xplus] assign[=] binary_operation[name[x] + binary_operation[name[sigma] * name[d]]]
variable[gplus] assign[=] call[name[gradf], parameter[name[xplus], <ast.Starred object at 0x7da1b0ec29e0>]]
<ast.AugAssign object at 0x7da1b0ec0790>
variable[theta] assign[=] binary_operation[call[name[np].dot, parameter[name[d], binary_operation[name[gplus] - name[gradnew]]]] / name[sigma]]
variable[delta] assign[=] binary_operation[name[theta] + binary_operation[name[beta] * name[kappa]]]
if compare[name[delta] less_or_equal[<=] constant[0]] begin[:]
variable[delta] assign[=] binary_operation[name[beta] * name[kappa]]
variable[beta] assign[=] binary_operation[name[beta] - binary_operation[name[theta] / name[kappa]]]
variable[alpha] assign[=] binary_operation[<ast.UnaryOp object at 0x7da1b0ec23b0> / name[delta]]
variable[xnew] assign[=] binary_operation[name[x] + binary_operation[name[alpha] * name[d]]]
variable[fnew] assign[=] call[name[f], parameter[name[xnew], <ast.Starred object at 0x7da1b0ec2080>]]
<ast.AugAssign object at 0x7da1b0ec2b00>
variable[Delta] assign[=] binary_operation[binary_operation[constant[2.0] * binary_operation[name[fnew] - name[fold]]] / binary_operation[name[alpha] * name[mu]]]
if compare[name[Delta] greater_or_equal[>=] constant[0.0]] begin[:]
variable[success] assign[=] constant[True]
<ast.AugAssign object at 0x7da1b0ec2650>
variable[x] assign[=] name[xnew]
variable[fnow] assign[=] name[fnew]
call[name[flog].append, parameter[name[fnow]]]
<ast.AugAssign object at 0x7da1b0ec2d70>
if name[success] begin[:]
if compare[call[name[np].abs, parameter[binary_operation[name[fnew] - name[fold]]]] less[<] name[ftol]] begin[:]
variable[status] assign[=] constant[converged - relative reduction in objective]
break
if compare[name[Delta] less[<] constant[0.25]] begin[:]
variable[beta] assign[=] call[name[min], parameter[binary_operation[constant[4.0] * name[beta]], name[betamax]]]
if compare[name[Delta] greater[>] constant[0.75]] begin[:]
variable[beta] assign[=] call[name[max], parameter[binary_operation[constant[0.25] * name[beta]], name[betamin]]]
if compare[name[nsuccess] equal[==] name[x].size] begin[:]
variable[d] assign[=] <ast.UnaryOp object at 0x7da1b0ecfe80>
variable[beta] assign[=] constant[1.0]
variable[nsuccess] assign[=] constant[0]
return[tuple[[<ast.Name object at 0x7da1b0ecf6a0>, <ast.Name object at 0x7da1b0eceb60>, <ast.Name object at 0x7da1b0eceb90>, <ast.Name object at 0x7da1b0eceda0>]]]
|
keyword[def] identifier[SCG] ( identifier[f] , identifier[gradf] , identifier[x] , identifier[optargs] =(), identifier[maxiters] = literal[int] , identifier[max_f_eval] = identifier[np] . identifier[inf] , identifier[xtol] = keyword[None] , identifier[ftol] = keyword[None] , identifier[gtol] = keyword[None] ):
literal[string]
keyword[if] identifier[xtol] keyword[is] keyword[None] :
identifier[xtol] = literal[int]
keyword[if] identifier[ftol] keyword[is] keyword[None] :
identifier[ftol] = literal[int]
keyword[if] identifier[gtol] keyword[is] keyword[None] :
identifier[gtol] = literal[int]
identifier[sigma0] = literal[int]
identifier[fold] = identifier[f] ( identifier[x] ,* identifier[optargs] )
identifier[function_eval] = literal[int]
identifier[fnow] = identifier[fold]
identifier[gradnew] = identifier[gradf] ( identifier[x] ,* identifier[optargs] )
identifier[function_eval] += literal[int]
identifier[current_grad] = identifier[np] . identifier[dot] ( identifier[gradnew] , identifier[gradnew] )
identifier[gradold] = identifier[gradnew] . identifier[copy] ()
identifier[d] =- identifier[gradnew]
identifier[success] = keyword[True]
identifier[nsuccess] = literal[int]
identifier[beta] = literal[int]
identifier[betamin] = literal[int]
identifier[betamax] = literal[int]
identifier[status] = literal[string]
identifier[flog] =[ identifier[fold] ]
identifier[iteration] = literal[int]
keyword[while] identifier[iteration] < identifier[maxiters] :
keyword[if] identifier[success] :
identifier[mu] = identifier[np] . identifier[dot] ( identifier[d] , identifier[gradnew] )
keyword[if] identifier[mu] >= literal[int] :
identifier[d] =- identifier[gradnew]
identifier[mu] = identifier[np] . identifier[dot] ( identifier[d] , identifier[gradnew] )
identifier[kappa] = identifier[np] . identifier[dot] ( identifier[d] , identifier[d] )
identifier[sigma] = identifier[sigma0] / identifier[np] . identifier[sqrt] ( identifier[kappa] )
identifier[xplus] = identifier[x] + identifier[sigma] * identifier[d]
identifier[gplus] = identifier[gradf] ( identifier[xplus] ,* identifier[optargs] )
identifier[function_eval] += literal[int]
identifier[theta] = identifier[np] . identifier[dot] ( identifier[d] ,( identifier[gplus] - identifier[gradnew] ))/ identifier[sigma]
identifier[delta] = identifier[theta] + identifier[beta] * identifier[kappa]
keyword[if] identifier[delta] <= literal[int] :
identifier[delta] = identifier[beta] * identifier[kappa]
identifier[beta] = identifier[beta] - identifier[theta] / identifier[kappa]
identifier[alpha] =- identifier[mu] / identifier[delta]
identifier[xnew] = identifier[x] + identifier[alpha] * identifier[d]
identifier[fnew] = identifier[f] ( identifier[xnew] ,* identifier[optargs] )
identifier[function_eval] += literal[int]
identifier[Delta] = literal[int] *( identifier[fnew] - identifier[fold] )/( identifier[alpha] * identifier[mu] )
keyword[if] identifier[Delta] >= literal[int] :
identifier[success] = keyword[True]
identifier[nsuccess] += literal[int]
identifier[x] = identifier[xnew]
identifier[fnow] = identifier[fnew]
keyword[else] :
identifier[success] = keyword[False]
identifier[fnow] = identifier[fold]
identifier[flog] . identifier[append] ( identifier[fnow] )
identifier[iteration] += literal[int]
keyword[if] identifier[success] :
keyword[if] ( identifier[np] . identifier[abs] ( identifier[fnew] - identifier[fold] )< identifier[ftol] ):
identifier[status] = literal[string]
keyword[break]
keyword[elif] ( identifier[np] . identifier[max] ( identifier[np] . identifier[abs] ( identifier[alpha] * identifier[d] ))< identifier[xtol] ):
identifier[status] = literal[string]
keyword[break]
keyword[else] :
identifier[gradold] = identifier[gradnew]
identifier[gradnew] = identifier[gradf] ( identifier[x] ,* identifier[optargs] )
identifier[function_eval] += literal[int]
identifier[current_grad] = identifier[np] . identifier[dot] ( identifier[gradnew] , identifier[gradnew] )
identifier[fold] = identifier[fnew]
keyword[if] identifier[current_grad] <= identifier[gtol] :
identifier[status] = literal[string]
keyword[break]
keyword[if] identifier[Delta] < literal[int] :
identifier[beta] = identifier[min] ( literal[int] * identifier[beta] , identifier[betamax] )
keyword[if] identifier[Delta] > literal[int] :
identifier[beta] = identifier[max] ( literal[int] * identifier[beta] , identifier[betamin] )
keyword[if] identifier[nsuccess] == identifier[x] . identifier[size] :
identifier[d] =- identifier[gradnew]
identifier[beta] = literal[int]
identifier[nsuccess] = literal[int]
keyword[elif] identifier[success] :
identifier[Gamma] = identifier[np] . identifier[dot] ( identifier[gradold] - identifier[gradnew] , identifier[gradnew] )/( identifier[mu] )
identifier[d] = identifier[Gamma] * identifier[d] - identifier[gradnew]
keyword[else] :
identifier[status] = literal[string]
keyword[return] identifier[x] , identifier[flog] , identifier[function_eval] , identifier[status]
|
def SCG(f, gradf, x, optargs=(), maxiters=500, max_f_eval=np.inf, xtol=None, ftol=None, gtol=None):
"""
Optimisation through Scaled Conjugate Gradients (SCG)
f: the objective function
gradf : the gradient function (should return a 1D np.ndarray)
x : the initial condition
Returns
x the optimal value for x
flog : a list of all the objective values
function_eval number of fn evaluations
status: string describing convergence status
"""
if xtol is None:
xtol = 1e-06 # depends on [control=['if'], data=['xtol']]
if ftol is None:
ftol = 1e-06 # depends on [control=['if'], data=['ftol']]
if gtol is None:
gtol = 1e-05 # depends on [control=['if'], data=['gtol']]
sigma0 = 1e-07
fold = f(x, *optargs) # Initial function value.
function_eval = 1
fnow = fold
gradnew = gradf(x, *optargs) # Initial gradient.
function_eval += 1
#if any(np.isnan(gradnew)):
# raise UnexpectedInfOrNan, "Gradient contribution resulted in a NaN value"
current_grad = np.dot(gradnew, gradnew)
gradold = gradnew.copy()
d = -gradnew # Initial search direction.
success = True # Force calculation of directional derivs.
nsuccess = 0 # nsuccess counts number of successes.
beta = 1.0 # Initial scale parameter.
betamin = 1e-15 # Lower bound on scale.
betamax = 1000000000000000.0 # Upper bound on scale.
status = 'Not converged'
flog = [fold]
iteration = 0
# Main optimization loop.
while iteration < maxiters:
# Calculate first and second directional derivatives.
if success:
mu = np.dot(d, gradnew)
if mu >= 0: # pragma: no cover
d = -gradnew
mu = np.dot(d, gradnew) # depends on [control=['if'], data=['mu']]
kappa = np.dot(d, d)
sigma = sigma0 / np.sqrt(kappa)
xplus = x + sigma * d
gplus = gradf(xplus, *optargs)
function_eval += 1
theta = np.dot(d, gplus - gradnew) / sigma # depends on [control=['if'], data=[]]
# Increase effective curvature and evaluate step size alpha.
delta = theta + beta * kappa
if delta <= 0: # pragma: no cover
delta = beta * kappa
beta = beta - theta / kappa # depends on [control=['if'], data=['delta']]
alpha = -mu / delta
# Calculate the comparison ratio.
xnew = x + alpha * d
fnew = f(xnew, *optargs)
function_eval += 1
Delta = 2.0 * (fnew - fold) / (alpha * mu)
if Delta >= 0.0:
success = True
nsuccess += 1
x = xnew
fnow = fnew # depends on [control=['if'], data=[]]
else:
success = False
fnow = fold
# Store relevant variables
flog.append(fnow) # Current function value
iteration += 1
if success:
# Test for termination
if np.abs(fnew - fold) < ftol:
status = 'converged - relative reduction in objective'
break # depends on [control=['if'], data=[]]
# return x, flog, function_eval, status
elif np.max(np.abs(alpha * d)) < xtol:
status = 'converged - relative stepsize'
break # depends on [control=['if'], data=[]]
else:
# Update variables for new position
gradold = gradnew
gradnew = gradf(x, *optargs)
function_eval += 1
current_grad = np.dot(gradnew, gradnew)
fold = fnew
# If the gradient is zero then we are done.
if current_grad <= gtol:
status = 'converged - relative reduction in gradient'
break # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
# return x, flog, function_eval, status
# Adjust beta according to comparison ratio.
if Delta < 0.25:
beta = min(4.0 * beta, betamax) # depends on [control=['if'], data=[]]
if Delta > 0.75:
beta = max(0.25 * beta, betamin) # depends on [control=['if'], data=[]]
# Update search direction using Polak-Ribiere formula, or re-start
# in direction of negative gradient after nparams steps.
if nsuccess == x.size:
d = -gradnew
beta = 1.0 # This is not in the original paper
nsuccess = 0 # depends on [control=['if'], data=['nsuccess']]
elif success:
Gamma = np.dot(gradold - gradnew, gradnew) / mu
d = Gamma * d - gradnew # depends on [control=['if'], data=[]] # depends on [control=['while'], data=['iteration']]
else:
# If we get here, then we haven't terminated in the given number of
# iterations.
status = 'maxiter exceeded'
return (x, flog, function_eval, status)
|
def field2choices(self, field, **kwargs):
    """Return the dictionary of OpenAPI field attributes for valid choices definition
    :param Field field: A marshmallow field.
    :rtype: dict
    """
    result = {}
    # Range-style validators expose ``comparable``; when any are present
    # they take precedence over choice validators.
    comparables = [
        v.comparable for v in field.validators if hasattr(v, "comparable")
    ]
    if comparables:
        result["enum"] = comparables
        return result
    choice_sets = [
        OrderedSet(v.choices) for v in field.validators if hasattr(v, "choices")
    ]
    if choice_sets:
        # Intersect every choice set: a value is valid only if all
        # validators accept it.
        result["enum"] = list(functools.reduce(operator.and_, choice_sets))
    return result
|
def function[field2choices, parameter[self, field]]:
constant[Return the dictionary of OpenAPI field attributes for valid choices definition
:param Field field: A marshmallow field.
:rtype: dict
]
variable[attributes] assign[=] dictionary[[], []]
variable[comparable] assign[=] <ast.ListComp object at 0x7da1b17d7190>
if name[comparable] begin[:]
call[name[attributes]][constant[enum]] assign[=] name[comparable]
return[name[attributes]]
|
keyword[def] identifier[field2choices] ( identifier[self] , identifier[field] ,** identifier[kwargs] ):
literal[string]
identifier[attributes] ={}
identifier[comparable] =[
identifier[validator] . identifier[comparable]
keyword[for] identifier[validator] keyword[in] identifier[field] . identifier[validators]
keyword[if] identifier[hasattr] ( identifier[validator] , literal[string] )
]
keyword[if] identifier[comparable] :
identifier[attributes] [ literal[string] ]= identifier[comparable]
keyword[else] :
identifier[choices] =[
identifier[OrderedSet] ( identifier[validator] . identifier[choices] )
keyword[for] identifier[validator] keyword[in] identifier[field] . identifier[validators]
keyword[if] identifier[hasattr] ( identifier[validator] , literal[string] )
]
keyword[if] identifier[choices] :
identifier[attributes] [ literal[string] ]= identifier[list] ( identifier[functools] . identifier[reduce] ( identifier[operator] . identifier[and_] , identifier[choices] ))
keyword[return] identifier[attributes]
|
def field2choices(self, field, **kwargs):
"""Return the dictionary of OpenAPI field attributes for valid choices definition
:param Field field: A marshmallow field.
:rtype: dict
"""
attributes = {}
comparable = [validator.comparable for validator in field.validators if hasattr(validator, 'comparable')]
if comparable:
attributes['enum'] = comparable # depends on [control=['if'], data=[]]
else:
choices = [OrderedSet(validator.choices) for validator in field.validators if hasattr(validator, 'choices')]
if choices:
attributes['enum'] = list(functools.reduce(operator.and_, choices)) # depends on [control=['if'], data=[]]
return attributes
|
def kinks(path, tol=1e-8):
    """returns indices of segments that start on a non-differentiable joint."""
    result = []
    n = len(path)
    for i in range(n):
        # The joint before segment 0 only exists when the path is closed.
        if i == 0 and not path.isclosed():
            continue
        try:
            prev_tangent = path[(i - 1) % n].unit_tangent(1)
            this_tangent = path[i].unit_tangent(0)
            # Dot product of the two unit tangents: 1 means the joint is
            # smooth, anything less indicates an angle between segments.
            cos_angle = (prev_tangent.real * this_tangent.real +
                         prev_tangent.imag * this_tangent.imag)
            tangent_undefined = False
        except ValueError:
            # unit_tangent raises ValueError when no tangent exists;
            # treat that joint as a kink.
            tangent_undefined = True
        if tangent_undefined or abs(cos_angle - 1) > tol:
            result.append(i)
    return result
|
def function[kinks, parameter[path, tol]]:
constant[returns indices of segments that start on a non-differentiable joint.]
variable[kink_list] assign[=] list[[]]
for taget[name[idx]] in starred[call[name[range], parameter[call[name[len], parameter[name[path]]]]]] begin[:]
if <ast.BoolOp object at 0x7da1b2347eb0> begin[:]
continue
<ast.Try object at 0x7da1b2345120>
if <ast.BoolOp object at 0x7da1b2346c80> begin[:]
call[name[kink_list].append, parameter[name[idx]]]
return[name[kink_list]]
|
keyword[def] identifier[kinks] ( identifier[path] , identifier[tol] = literal[int] ):
literal[string]
identifier[kink_list] =[]
keyword[for] identifier[idx] keyword[in] identifier[range] ( identifier[len] ( identifier[path] )):
keyword[if] identifier[idx] == literal[int] keyword[and] keyword[not] identifier[path] . identifier[isclosed] ():
keyword[continue]
keyword[try] :
identifier[u] = identifier[path] [( identifier[idx] - literal[int] )% identifier[len] ( identifier[path] )]. identifier[unit_tangent] ( literal[int] )
identifier[v] = identifier[path] [ identifier[idx] ]. identifier[unit_tangent] ( literal[int] )
identifier[u_dot_v] = identifier[u] . identifier[real] * identifier[v] . identifier[real] + identifier[u] . identifier[imag] * identifier[v] . identifier[imag]
identifier[flag] = keyword[False]
keyword[except] identifier[ValueError] :
identifier[flag] = keyword[True]
keyword[if] identifier[flag] keyword[or] identifier[abs] ( identifier[u_dot_v] - literal[int] )> identifier[tol] :
identifier[kink_list] . identifier[append] ( identifier[idx] )
keyword[return] identifier[kink_list]
|
def kinks(path, tol=1e-08):
    """Return indices of segments whose start point is a non-differentiable joint.

    A joint is smooth when the unit tangent at the end of the previous
    segment and the unit tangent at the start of the current segment point
    in (nearly) the same direction.  Segment 0 of an open path has no
    predecessor and is never reported.  If a unit tangent cannot be
    computed (``ValueError``), the joint is reported as a kink.
    """
    n = len(path)
    found = []
    for idx in range(n):
        # The first segment of an open path has no incoming joint.
        if idx == 0 and not path.isclosed():
            continue
        try:
            incoming = path[(idx - 1) % n].unit_tangent(1)
            outgoing = path[idx].unit_tangent(0)
        except ValueError:
            # Tangent undefined at this joint -> count it as a kink.
            found.append(idx)
            continue
        # Dot product of the two unit tangents; 1 means perfectly smooth.
        cos_angle = incoming.real * outgoing.real + incoming.imag * outgoing.imag
        if abs(cos_angle - 1) > tol:
            found.append(idx)
    return found
|
def _glsa_list_process_output(output):
'''
Process output from glsa_check_list into a dict
Returns a dict containing the glsa id, description, status, and CVEs
'''
ret = dict()
for line in output:
try:
glsa_id, status, desc = line.split(None, 2)
if 'U' in status:
status += ' Not Affected'
elif 'N' in status:
status += ' Might be Affected'
elif 'A' in status:
status += ' Applied (injected)'
if 'CVE' in desc:
desc, cves = desc.rsplit(None, 1)
cves = cves.split(',')
else:
cves = list()
ret[glsa_id] = {'description': desc, 'status': status,
'CVEs': cves}
except ValueError:
pass
return ret
|
def function[_glsa_list_process_output, parameter[output]]:
constant[
Process output from glsa_check_list into a dict
Returns a dict containing the glsa id, description, status, and CVEs
]
variable[ret] assign[=] call[name[dict], parameter[]]
for taget[name[line]] in starred[name[output]] begin[:]
<ast.Try object at 0x7da1b1c286a0>
return[name[ret]]
|
keyword[def] identifier[_glsa_list_process_output] ( identifier[output] ):
literal[string]
identifier[ret] = identifier[dict] ()
keyword[for] identifier[line] keyword[in] identifier[output] :
keyword[try] :
identifier[glsa_id] , identifier[status] , identifier[desc] = identifier[line] . identifier[split] ( keyword[None] , literal[int] )
keyword[if] literal[string] keyword[in] identifier[status] :
identifier[status] += literal[string]
keyword[elif] literal[string] keyword[in] identifier[status] :
identifier[status] += literal[string]
keyword[elif] literal[string] keyword[in] identifier[status] :
identifier[status] += literal[string]
keyword[if] literal[string] keyword[in] identifier[desc] :
identifier[desc] , identifier[cves] = identifier[desc] . identifier[rsplit] ( keyword[None] , literal[int] )
identifier[cves] = identifier[cves] . identifier[split] ( literal[string] )
keyword[else] :
identifier[cves] = identifier[list] ()
identifier[ret] [ identifier[glsa_id] ]={ literal[string] : identifier[desc] , literal[string] : identifier[status] ,
literal[string] : identifier[cves] }
keyword[except] identifier[ValueError] :
keyword[pass]
keyword[return] identifier[ret]
|
def _glsa_list_process_output(output):
    """
    Process output from glsa_check_list into a dict

    Each line is expected as ``<glsa_id> <status> <description>``; the
    status flag (U/N/A) is expanded to a human-readable phrase and any
    trailing comma-separated CVE list is split out of the description.
    Malformed lines are silently skipped.

    Returns a dict mapping glsa id -> {'description', 'status', 'CVEs'}.
    """
    ret = dict()
    for line in output:
        try:
            (glsa_id, status, desc) = line.split(None, 2)
            # Expand the single-letter status flag to a readable suffix.
            if 'U' in status:
                status += ' Not Affected'
            elif 'N' in status:
                status += ' Might be Affected'
            elif 'A' in status:
                status += ' Applied (injected)'
            # When present, the CVE list is the last whitespace-separated
            # field of the description (comma-separated CVE ids).
            if 'CVE' in desc:
                (desc, cves) = desc.rsplit(None, 1)
                cves = cves.split(',')
            else:
                cves = list()
            ret[glsa_id] = {'description': desc, 'status': status, 'CVEs': cves}
        except ValueError:
            # Line did not have the expected number of fields -- skip it.
            pass
    return ret
|
def update_line(self, trace, xdata, ydata, side='left', draw=False,
                update_limits=True):
    """Update the x/y data of a single trace, for a faster redraw.

    The matplotlib line for *trace* receives the new arrays, the trace's
    data range is recorded in the plot configuration, the view limits are
    refreshed unless *update_limits* is false, and the canvas is redrawn
    only when *draw* is true.
    """
    line = self.conf.get_mpl_line(trace)
    line.set_data(xdata, ydata)
    # xdata/ydata are assumed array-like with min()/max() (e.g. numpy arrays).
    bounds = [xdata.min(), xdata.max(), ydata.min(), ydata.max()]
    self.conf.set_trace_datarange(bounds, trace=trace)
    # NOTE(review): `axes` is selected but not used afterwards -- presumably
    # kept for parity with sibling plotting methods; confirm before removing.
    axes = self.axes
    if side == 'right':
        axes = self.get_right_axes()
    if update_limits:
        self.set_viewlimits()
    if draw:
        self.draw()
|
def function[update_line, parameter[self, trace, xdata, ydata, side, draw, update_limits]]:
constant[ update a single trace, for faster redraw ]
variable[x] assign[=] call[name[self].conf.get_mpl_line, parameter[name[trace]]]
call[name[x].set_data, parameter[name[xdata], name[ydata]]]
variable[datarange] assign[=] list[[<ast.Call object at 0x7da2041dad70>, <ast.Call object at 0x7da2041dbac0>, <ast.Call object at 0x7da2041d95a0>, <ast.Call object at 0x7da2041db880>]]
call[name[self].conf.set_trace_datarange, parameter[name[datarange]]]
variable[axes] assign[=] name[self].axes
if compare[name[side] equal[==] constant[right]] begin[:]
variable[axes] assign[=] call[name[self].get_right_axes, parameter[]]
if name[update_limits] begin[:]
call[name[self].set_viewlimits, parameter[]]
if name[draw] begin[:]
call[name[self].draw, parameter[]]
|
keyword[def] identifier[update_line] ( identifier[self] , identifier[trace] , identifier[xdata] , identifier[ydata] , identifier[side] = literal[string] , identifier[draw] = keyword[False] ,
identifier[update_limits] = keyword[True] ):
literal[string]
identifier[x] = identifier[self] . identifier[conf] . identifier[get_mpl_line] ( identifier[trace] )
identifier[x] . identifier[set_data] ( identifier[xdata] , identifier[ydata] )
identifier[datarange] =[ identifier[xdata] . identifier[min] (), identifier[xdata] . identifier[max] (), identifier[ydata] . identifier[min] (), identifier[ydata] . identifier[max] ()]
identifier[self] . identifier[conf] . identifier[set_trace_datarange] ( identifier[datarange] , identifier[trace] = identifier[trace] )
identifier[axes] = identifier[self] . identifier[axes]
keyword[if] identifier[side] == literal[string] :
identifier[axes] = identifier[self] . identifier[get_right_axes] ()
keyword[if] identifier[update_limits] :
identifier[self] . identifier[set_viewlimits] ()
keyword[if] identifier[draw] :
identifier[self] . identifier[draw] ()
|
def update_line(self, trace, xdata, ydata, side='left', draw=False, update_limits=True):
    """Update a single trace's x/y data, for a faster redraw.

    Records the new data range for the trace in the plot configuration,
    optionally refreshes the view limits, and redraws the canvas only
    when ``draw`` is true.
    """
    x = self.conf.get_mpl_line(trace)
    x.set_data(xdata, ydata)
    # xdata/ydata are assumed array-like with min()/max() (e.g. numpy).
    datarange = [xdata.min(), xdata.max(), ydata.min(), ydata.max()]
    self.conf.set_trace_datarange(datarange, trace=trace)
    # NOTE(review): `axes` is chosen but never used below -- confirm intent.
    axes = self.axes
    if side == 'right':
        axes = self.get_right_axes()
    if update_limits:
        self.set_viewlimits()
    if draw:
        self.draw()
|
def transitions(self, return_matrix=True):
    """Returns the routing probabilities for each vertex in the
    graph.

    Parameters
    ----------
    return_matrix : bool (optional, the default is ``True``)
        Specifies whether an :class:`~numpy.ndarray` is returned.
        If ``False``, a dict is returned instead.

    Returns
    -------
    out : a dict or :class:`~numpy.ndarray`
        The transition probabilities for each vertex in the graph.
        If ``out`` is an :class:`~numpy.ndarray`, then
        ``out[v, u]`` returns the probability of a transition from
        vertex ``v`` to vertex ``u``. If ``out`` is a dict
        then ``out_edge[v][u]`` is the probability of moving from
        vertex ``v`` to the vertex ``u``.

    Examples
    --------
    Lets change the routing probabilities:

    >>> import queueing_tool as qt
    >>> import networkx as nx
    >>> g = nx.sedgewick_maze_graph()
    >>> net = qt.QueueNetwork(g)

    Below is an adjacency list for the graph ``g``.

    >>> ans = qt.graph2dict(g, False)
    >>> {k: sorted(v) for k, v in ans.items()}
    ... # doctest: +NORMALIZE_WHITESPACE
    {0: [2, 5, 7],
     1: [7],
     2: [0, 6],
     3: [4, 5],
     4: [3, 5, 6, 7],
     5: [0, 3, 4],
     6: [2, 4],
     7: [0, 1, 4]}

    The default transition matrix is every out edge being equally
    likely:

    >>> net.transitions(False)  # doctest: +ELLIPSIS
    ... # doctest: +NORMALIZE_WHITESPACE
    {0: {2: 0.333..., 5: 0.333..., 7: 0.333...},
     1: {7: 1.0},
     2: {0: 0.5, 6: 0.5},
     3: {4: 0.5, 5: 0.5},
     4: {3: 0.25, 5: 0.25, 6: 0.25, 7: 0.25},
     5: {0: 0.333..., 3: 0.333..., 4: 0.333...},
     6: {2: 0.5, 4: 0.5},
     7: {0: 0.333..., 1: 0.333..., 4: 0.333...}}

    Now we will generate a random routing matrix:

    >>> mat = qt.generate_transition_matrix(g, seed=96)
    >>> net.set_transitions(mat)
    >>> net.transitions(False)  # doctest: +ELLIPSIS
    ... # doctest: +NORMALIZE_WHITESPACE
    {0: {2: 0.112..., 5: 0.466..., 7: 0.420...},
     1: {7: 1.0},
     2: {0: 0.561..., 6: 0.438...},
     3: {4: 0.545..., 5: 0.454...},
     4: {3: 0.374..., 5: 0.381..., 6: 0.026..., 7: 0.217...},
     5: {0: 0.265..., 3: 0.460..., 4: 0.274...},
     6: {2: 0.673..., 4: 0.326...},
     7: {0: 0.033..., 1: 0.336..., 4: 0.630...}}

    What this shows is the following: when an :class:`.Agent` is at
    vertex ``2`` they will transition to vertex ``0`` with
    probability ``0.561`` and route to vertex ``6`` probability
    ``0.438``, when at vertex ``6`` they will transition back to
    vertex ``2`` with probability ``0.673`` and route vertex ``4``
    probability ``0.326``, etc.
    """
    if return_matrix:
        # Dense form: row v holds the probabilities of moving from v to
        # every other vertex (zero where no edge exists).
        probs = np.zeros((self.nV, self.nV))
        for v in self.g.nodes():
            # Out-edges are sorted so the targets line up with the
            # ordering used by self._route_probs[v].
            targets = [edge[1] for edge in sorted(self.g.out_edges(v))]
            probs[v, targets] = self._route_probs[v]
        return probs
    # Nested-dict form: out[v][u] is the probability of moving v -> u.
    out = {}
    for v, v_probs in enumerate(self._route_probs):
        ordered_edges = sorted(self.g.out_edges(v))
        out[v] = {edge[1]: p for edge, p in zip(ordered_edges, v_probs)}
    return out
|
def function[transitions, parameter[self, return_matrix]]:
constant[Returns the routing probabilities for each vertex in the
graph.
Parameters
----------
return_matrix : bool (optional, the default is ``True``)
Specifies whether an :class:`~numpy.ndarray` is returned.
If ``False``, a dict is returned instead.
Returns
-------
out : a dict or :class:`~numpy.ndarray`
The transition probabilities for each vertex in the graph.
If ``out`` is an :class:`~numpy.ndarray`, then
``out[v, u]`` returns the probability of a transition from
vertex ``v`` to vertex ``u``. If ``out`` is a dict
then ``out_edge[v][u]`` is the probability of moving from
vertex ``v`` to the vertex ``u``.
Examples
--------
Lets change the routing probabilities:
>>> import queueing_tool as qt
>>> import networkx as nx
>>> g = nx.sedgewick_maze_graph()
>>> net = qt.QueueNetwork(g)
Below is an adjacency list for the graph ``g``.
>>> ans = qt.graph2dict(g, False)
>>> {k: sorted(v) for k, v in ans.items()}
... # doctest: +NORMALIZE_WHITESPACE
{0: [2, 5, 7],
1: [7],
2: [0, 6],
3: [4, 5],
4: [3, 5, 6, 7],
5: [0, 3, 4],
6: [2, 4],
7: [0, 1, 4]}
The default transition matrix is every out edge being equally
likely:
>>> net.transitions(False) # doctest: +ELLIPSIS
... # doctest: +NORMALIZE_WHITESPACE
{0: {2: 0.333..., 5: 0.333..., 7: 0.333...},
1: {7: 1.0},
2: {0: 0.5, 6: 0.5},
3: {4: 0.5, 5: 0.5},
4: {3: 0.25, 5: 0.25, 6: 0.25, 7: 0.25},
5: {0: 0.333..., 3: 0.333..., 4: 0.333...},
6: {2: 0.5, 4: 0.5},
7: {0: 0.333..., 1: 0.333..., 4: 0.333...}}
Now we will generate a random routing matrix:
>>> mat = qt.generate_transition_matrix(g, seed=96)
>>> net.set_transitions(mat)
>>> net.transitions(False) # doctest: +ELLIPSIS
... # doctest: +NORMALIZE_WHITESPACE
{0: {2: 0.112..., 5: 0.466..., 7: 0.420...},
1: {7: 1.0},
2: {0: 0.561..., 6: 0.438...},
3: {4: 0.545..., 5: 0.454...},
4: {3: 0.374..., 5: 0.381..., 6: 0.026..., 7: 0.217...},
5: {0: 0.265..., 3: 0.460..., 4: 0.274...},
6: {2: 0.673..., 4: 0.326...},
7: {0: 0.033..., 1: 0.336..., 4: 0.630...}}
What this shows is the following: when an :class:`.Agent` is at
vertex ``2`` they will transition to vertex ``0`` with
probability ``0.561`` and route to vertex ``6`` probability
``0.438``, when at vertex ``6`` they will transition back to
vertex ``2`` with probability ``0.673`` and route vertex ``4``
probability ``0.326``, etc.
]
if name[return_matrix] begin[:]
variable[mat] assign[=] call[name[np].zeros, parameter[tuple[[<ast.Attribute object at 0x7da1b0054b50>, <ast.Attribute object at 0x7da1b00565f0>]]]]
for taget[name[v]] in starred[call[name[self].g.nodes, parameter[]]] begin[:]
variable[ind] assign[=] <ast.ListComp object at 0x7da1b0057a00>
call[name[mat]][tuple[[<ast.Name object at 0x7da1b012f280>, <ast.Name object at 0x7da1b012f250>]]] assign[=] call[name[self]._route_probs][name[v]]
return[name[mat]]
|
keyword[def] identifier[transitions] ( identifier[self] , identifier[return_matrix] = keyword[True] ):
literal[string]
keyword[if] identifier[return_matrix] :
identifier[mat] = identifier[np] . identifier[zeros] (( identifier[self] . identifier[nV] , identifier[self] . identifier[nV] ))
keyword[for] identifier[v] keyword[in] identifier[self] . identifier[g] . identifier[nodes] ():
identifier[ind] =[ identifier[e] [ literal[int] ] keyword[for] identifier[e] keyword[in] identifier[sorted] ( identifier[self] . identifier[g] . identifier[out_edges] ( identifier[v] ))]
identifier[mat] [ identifier[v] , identifier[ind] ]= identifier[self] . identifier[_route_probs] [ identifier[v] ]
keyword[else] :
identifier[mat] ={
identifier[k] :{ identifier[e] [ literal[int] ]: identifier[p] keyword[for] identifier[e] , identifier[p] keyword[in] identifier[zip] ( identifier[sorted] ( identifier[self] . identifier[g] . identifier[out_edges] ( identifier[k] )), identifier[value] )}
keyword[for] identifier[k] , identifier[value] keyword[in] identifier[enumerate] ( identifier[self] . identifier[_route_probs] )
}
keyword[return] identifier[mat]
|
def transitions(self, return_matrix=True):
    """Returns the routing probabilities for each vertex in the
    graph.

    Parameters
    ----------
    return_matrix : bool (optional, the default is ``True``)
        Specifies whether an :class:`~numpy.ndarray` is returned.
        If ``False``, a dict is returned instead.

    Returns
    -------
    out : a dict or :class:`~numpy.ndarray`
        The transition probabilities for each vertex in the graph.
        If ``out`` is an :class:`~numpy.ndarray`, then
        ``out[v, u]`` returns the probability of a transition from
        vertex ``v`` to vertex ``u``. If ``out`` is a dict
        then ``out_edge[v][u]`` is the probability of moving from
        vertex ``v`` to the vertex ``u``.

    Examples
    --------
    Lets change the routing probabilities:

    >>> import queueing_tool as qt
    >>> import networkx as nx
    >>> g = nx.sedgewick_maze_graph()
    >>> net = qt.QueueNetwork(g)

    Below is an adjacency list for the graph ``g``.

    >>> ans = qt.graph2dict(g, False)
    >>> {k: sorted(v) for k, v in ans.items()}
    ... # doctest: +NORMALIZE_WHITESPACE
    {0: [2, 5, 7],
     1: [7],
     2: [0, 6],
     3: [4, 5],
     4: [3, 5, 6, 7],
     5: [0, 3, 4],
     6: [2, 4],
     7: [0, 1, 4]}

    The default transition matrix is every out edge being equally
    likely:

    >>> net.transitions(False)  # doctest: +ELLIPSIS
    ... # doctest: +NORMALIZE_WHITESPACE
    {0: {2: 0.333..., 5: 0.333..., 7: 0.333...},
     1: {7: 1.0},
     2: {0: 0.5, 6: 0.5},
     3: {4: 0.5, 5: 0.5},
     4: {3: 0.25, 5: 0.25, 6: 0.25, 7: 0.25},
     5: {0: 0.333..., 3: 0.333..., 4: 0.333...},
     6: {2: 0.5, 4: 0.5},
     7: {0: 0.333..., 1: 0.333..., 4: 0.333...}}

    Now we will generate a random routing matrix:

    >>> mat = qt.generate_transition_matrix(g, seed=96)
    >>> net.set_transitions(mat)
    >>> net.transitions(False)  # doctest: +ELLIPSIS
    ... # doctest: +NORMALIZE_WHITESPACE
    {0: {2: 0.112..., 5: 0.466..., 7: 0.420...},
     1: {7: 1.0},
     2: {0: 0.561..., 6: 0.438...},
     3: {4: 0.545..., 5: 0.454...},
     4: {3: 0.374..., 5: 0.381..., 6: 0.026..., 7: 0.217...},
     5: {0: 0.265..., 3: 0.460..., 4: 0.274...},
     6: {2: 0.673..., 4: 0.326...},
     7: {0: 0.033..., 1: 0.336..., 4: 0.630...}}

    What this shows is the following: when an :class:`.Agent` is at
    vertex ``2`` they will transition to vertex ``0`` with
    probability ``0.561`` and route to vertex ``6`` probability
    ``0.438``, when at vertex ``6`` they will transition back to
    vertex ``2`` with probability ``0.673`` and route vertex ``4``
    probability ``0.326``, etc.
    """
    # Dense matrix form: row v holds the probabilities of moving from
    # vertex v to each other vertex (zero where no edge exists).
    if return_matrix:
        mat = np.zeros((self.nV, self.nV))
        for v in self.g.nodes():
            # Out-edges are sorted so the targets line up with the
            # ordering used by self._route_probs[v].
            ind = [e[1] for e in sorted(self.g.out_edges(v))]
            mat[v, ind] = self._route_probs[v]
    else:
        # Nested-dict form: mat[v][u] is the probability of moving v -> u.
        mat = {k: {e[1]: p for (e, p) in zip(sorted(self.g.out_edges(k)), value)} for (k, value) in enumerate(self._route_probs)}
    return mat
|
def run(self):
    """
    Execute the script.

    Wires up all subscripts (data paths and progress/started/finished
    signals), runs ``self._function()``, records start/end times, and --
    when ``settings['save']`` is set -- saves settings, data, log and
    image to disk.  Signals are disconnected again afterwards so repeated
    runs do not stack slot connections.

    :return: True if the script finished successfully (was not aborted)
    """
    self.log_data.clear()
    # Request that plot axes are refreshed next time self.plot is called.
    self._plot_refresh = True
    self.is_running = True
    self.start_time = datetime.datetime.now()
    self._current_subscript_stage = {
        'current_subscript': None,
        'subscript_exec_count': {},
        'subscript_exec_duration': {}
    }
    # Update the datapath of the subscripts and connect their progress
    # signals to the receive slot.
    for subscript in list(self.scripts.values()):
        subscript.data_path = os.path.join(self.filename(create_if_not_existing=False), self.SUBSCRIPT_DATA_DIR)
        subscript.updateProgress.connect(self._receive_signal)
        subscript.started.connect(lambda: self._set_current_subscript(True))
        subscript.finished.connect(lambda: self._set_current_subscript(False))
        self._current_subscript_stage['subscript_exec_count'].update({subscript.name: 0})
        self._current_subscript_stage['subscript_exec_duration'].update({subscript.name: datetime.timedelta(0)})
    # todo: 170202JG (search for this to find related todos) need to test this:
    # do we need to connect the log functions of the subscript to the mother script?, e.g
    # subscript.log.connect(self.log)
    self.log('starting script {:s} at {:s} on {:s}'.format(self.name, self.start_time.strftime('%H:%M:%S'), self.start_time.strftime('%d/%m/%y')))
    self._abort = False
    # Save settings to disk before the run starts.
    if self.settings['save']:
        self.save_b26()
    self.started.emit()
    self._function()
    self.end_time = datetime.datetime.now()
    self.log('script {:s} finished at {:s} on {:s}'.format(self.name, self.end_time.strftime('%H:%M:%S'), self.end_time.strftime('%d/%m/%y')))
    # Save data, log and image to disk after the run.
    if self.settings['save']:
        self.save_data()
        self.save_log()
        self.save_image_to_disk()
    success = not self._abort
    # Disconnect subscript signals so a subsequent run does not stack slots.
    for subscript in list(self.scripts.values()):
        subscript.started.disconnect()
        subscript.updateProgress.disconnect()
        subscript.finished.disconnect()
    self.is_running = False
    self.finished.emit()
    # BUG FIX: the docstring promises a success boolean, but the original
    # computed `success` and never returned it; return it so callers can
    # detect aborted runs (backward-compatible for callers that ignore it).
    return success
|
def function[run, parameter[self]]:
constant[
executes the script
:return: boolean if execution of script finished succesfully
]
call[name[self].log_data.clear, parameter[]]
name[self]._plot_refresh assign[=] constant[True]
name[self].is_running assign[=] constant[True]
name[self].start_time assign[=] call[name[datetime].datetime.now, parameter[]]
name[self]._current_subscript_stage assign[=] dictionary[[<ast.Constant object at 0x7da1b2473910>, <ast.Constant object at 0x7da1b2472920>, <ast.Constant object at 0x7da1b2472b60>], [<ast.Constant object at 0x7da1b24732e0>, <ast.Dict object at 0x7da1b2473d60>, <ast.Dict object at 0x7da1b2473640>]]
for taget[name[subscript]] in starred[call[name[list], parameter[call[name[self].scripts.values, parameter[]]]]] begin[:]
name[subscript].data_path assign[=] call[name[os].path.join, parameter[call[name[self].filename, parameter[]], name[self].SUBSCRIPT_DATA_DIR]]
call[name[subscript].updateProgress.connect, parameter[name[self]._receive_signal]]
call[name[subscript].started.connect, parameter[<ast.Lambda object at 0x7da1b2473bb0>]]
call[name[subscript].finished.connect, parameter[<ast.Lambda object at 0x7da1b24739a0>]]
call[call[name[self]._current_subscript_stage][constant[subscript_exec_count]].update, parameter[dictionary[[<ast.Attribute object at 0x7da1b24738b0>], [<ast.Constant object at 0x7da1b2472680>]]]]
call[call[name[self]._current_subscript_stage][constant[subscript_exec_duration]].update, parameter[dictionary[[<ast.Attribute object at 0x7da1b2448370>], [<ast.Call object at 0x7da1b244bca0>]]]]
call[name[self].log, parameter[call[constant[starting script {:s} at {:s} on {:s}].format, parameter[name[self].name, call[name[self].start_time.strftime, parameter[constant[%H:%M:%S]]], call[name[self].start_time.strftime, parameter[constant[%d/%m/%y]]]]]]]
name[self]._abort assign[=] constant[False]
if call[name[self].settings][constant[save]] begin[:]
call[name[self].save_b26, parameter[]]
call[name[self].started.emit, parameter[]]
call[name[self]._function, parameter[]]
name[self].end_time assign[=] call[name[datetime].datetime.now, parameter[]]
call[name[self].log, parameter[call[constant[script {:s} finished at {:s} on {:s}].format, parameter[name[self].name, call[name[self].end_time.strftime, parameter[constant[%H:%M:%S]]], call[name[self].end_time.strftime, parameter[constant[%d/%m/%y]]]]]]]
if call[name[self].settings][constant[save]] begin[:]
call[name[self].save_data, parameter[]]
call[name[self].save_log, parameter[]]
call[name[self].save_image_to_disk, parameter[]]
variable[success] assign[=] <ast.UnaryOp object at 0x7da1b2490d00>
for taget[name[subscript]] in starred[call[name[list], parameter[call[name[self].scripts.values, parameter[]]]]] begin[:]
call[name[subscript].started.disconnect, parameter[]]
call[name[subscript].updateProgress.disconnect, parameter[]]
call[name[subscript].finished.disconnect, parameter[]]
name[self].is_running assign[=] constant[False]
call[name[self].finished.emit, parameter[]]
|
keyword[def] identifier[run] ( identifier[self] ):
literal[string]
identifier[self] . identifier[log_data] . identifier[clear] ()
identifier[self] . identifier[_plot_refresh] = keyword[True]
identifier[self] . identifier[is_running] = keyword[True]
identifier[self] . identifier[start_time] = identifier[datetime] . identifier[datetime] . identifier[now] ()
identifier[self] . identifier[_current_subscript_stage] ={
literal[string] : keyword[None] ,
literal[string] :{},
literal[string] :{}
}
keyword[for] identifier[subscript] keyword[in] identifier[list] ( identifier[self] . identifier[scripts] . identifier[values] ()):
identifier[subscript] . identifier[data_path] = identifier[os] . identifier[path] . identifier[join] ( identifier[self] . identifier[filename] ( identifier[create_if_not_existing] = keyword[False] ), identifier[self] . identifier[SUBSCRIPT_DATA_DIR] )
identifier[subscript] . identifier[updateProgress] . identifier[connect] ( identifier[self] . identifier[_receive_signal] )
identifier[subscript] . identifier[started] . identifier[connect] ( keyword[lambda] : identifier[self] . identifier[_set_current_subscript] ( keyword[True] ))
identifier[subscript] . identifier[finished] . identifier[connect] ( keyword[lambda] : identifier[self] . identifier[_set_current_subscript] ( keyword[False] ))
identifier[self] . identifier[_current_subscript_stage] [ literal[string] ]. identifier[update] ({ identifier[subscript] . identifier[name] : literal[int] })
identifier[self] . identifier[_current_subscript_stage] [ literal[string] ]. identifier[update] ({ identifier[subscript] . identifier[name] : identifier[datetime] . identifier[timedelta] ( literal[int] )})
identifier[self] . identifier[log] ( literal[string] . identifier[format] ( identifier[self] . identifier[name] , identifier[self] . identifier[start_time] . identifier[strftime] ( literal[string] ), identifier[self] . identifier[start_time] . identifier[strftime] ( literal[string] )))
identifier[self] . identifier[_abort] = keyword[False]
keyword[if] identifier[self] . identifier[settings] [ literal[string] ]:
identifier[self] . identifier[save_b26] ()
identifier[self] . identifier[started] . identifier[emit] ()
identifier[self] . identifier[_function] ()
identifier[self] . identifier[end_time] = identifier[datetime] . identifier[datetime] . identifier[now] ()
identifier[self] . identifier[log] ( literal[string] . identifier[format] ( identifier[self] . identifier[name] , identifier[self] . identifier[end_time] . identifier[strftime] ( literal[string] ), identifier[self] . identifier[end_time] . identifier[strftime] ( literal[string] )))
keyword[if] identifier[self] . identifier[settings] [ literal[string] ]:
identifier[self] . identifier[save_data] ()
identifier[self] . identifier[save_log] ()
identifier[self] . identifier[save_image_to_disk] ()
identifier[success] = keyword[not] identifier[self] . identifier[_abort]
keyword[for] identifier[subscript] keyword[in] identifier[list] ( identifier[self] . identifier[scripts] . identifier[values] ()):
identifier[subscript] . identifier[started] . identifier[disconnect] ()
identifier[subscript] . identifier[updateProgress] . identifier[disconnect] ()
identifier[subscript] . identifier[finished] . identifier[disconnect] ()
identifier[self] . identifier[is_running] = keyword[False]
identifier[self] . identifier[finished] . identifier[emit] ()
|
def run(self):
    """
    Execute the script: wire up subscripts, run ``self._function()``,
    record start/end times, and optionally save results to disk.

    NOTE(review): the original docstring promised a boolean return, but
    ``success`` is computed below and never returned -- callers currently
    always receive ``None``.  Confirm and fix upstream.
    """
    self.log_data.clear()
    self._plot_refresh = True  # request axes refresh on the next self.plot call
    self.is_running = True
    self.start_time = datetime.datetime.now()
    self._current_subscript_stage = {'current_subscript': None, 'subscript_exec_count': {}, 'subscript_exec_duration': {}}
    # Wire up each subscript: data path, progress/started/finished signals,
    # and zeroed execution counters/durations.
    for subscript in list(self.scripts.values()):
        subscript.data_path = os.path.join(self.filename(create_if_not_existing=False), self.SUBSCRIPT_DATA_DIR)
        subscript.updateProgress.connect(self._receive_signal)
        subscript.started.connect(lambda : self._set_current_subscript(True))
        subscript.finished.connect(lambda : self._set_current_subscript(False))
        self._current_subscript_stage['subscript_exec_count'].update({subscript.name: 0})
        self._current_subscript_stage['subscript_exec_duration'].update({subscript.name: datetime.timedelta(0)})
    # todo: 170202JG (search for this to find related todos) need to test this:
    # do we need to connect the log functions of the subscript to the mother script?, e.g
    # subscript.log.connect(self.log)
    self.log('starting script {:s} at {:s} on {:s}'.format(self.name, self.start_time.strftime('%H:%M:%S'), self.start_time.strftime('%d/%m/%y')))
    self._abort = False
    # Save settings to disk before the run starts.
    if self.settings['save']:
        self.save_b26()
    self.started.emit()
    self._function()
    self.end_time = datetime.datetime.now()
    self.log('script {:s} finished at {:s} on {:s}'.format(self.name, self.end_time.strftime('%H:%M:%S'), self.end_time.strftime('%d/%m/%y')))
    # Save data, log and image to disk after the run.
    if self.settings['save']:
        self.save_data()
        self.save_log()
        self.save_image_to_disk()
    success = not self._abort
    # Disconnect subscript signals so a subsequent run does not stack slots.
    for subscript in list(self.scripts.values()):
        subscript.started.disconnect()
        subscript.updateProgress.disconnect()
        subscript.finished.disconnect()
    self.is_running = False
    self.finished.emit()
|
def cancel(self):
    """Cancel a running :meth:`iterconsume` session.

    The request is a no-op when the channel is not open, and a
    ``KeyError`` from the backend (e.g. unknown consumer tag) is
    deliberately swallowed.
    """
    if not self.channel_open:
        return
    try:
        self.backend.cancel(self.consumer_tag)
    except KeyError:
        pass
|
def function[cancel, parameter[self]]:
constant[Cancel a running :meth:`iterconsume` session.]
if name[self].channel_open begin[:]
<ast.Try object at 0x7da1b0facc70>
|
keyword[def] identifier[cancel] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[channel_open] :
keyword[try] :
identifier[self] . identifier[backend] . identifier[cancel] ( identifier[self] . identifier[consumer_tag] )
keyword[except] identifier[KeyError] :
keyword[pass]
|
def cancel(self):
    """Cancel a running :meth:`iterconsume` session.

    No-op when the channel is not open; a ``KeyError`` raised by the
    backend (e.g. unknown consumer tag) is deliberately ignored.
    """
    if self.channel_open:
        try:
            self.backend.cancel(self.consumer_tag)
        except KeyError:
            pass
|
def float_to_knx2(floatval):
    """Convert a float to a 2 byte KNX float value.

    The value is encoded with 1 sign bit, a 4-bit exponent and an 11-bit
    mantissa of hundredths, and returned as ``[high_byte, low_byte]``.
    Raises ``KNXException`` when the value is outside the representable
    range (-671088.64 .. 670760.96).
    """
    if floatval < -671088.64 or floatval > 670760.96:
        raise KNXException("float {} out of valid range".format(floatval))
    # Work in hundredths -- the resolution of the KNX 2-byte float.
    scaled = floatval * 100
    exponent = 0
    # Find the smallest exponent whose scaled value fits the 11-bit mantissa.
    for exponent in range(0, 15):
        exp = pow(2, exponent)
        if -2048 <= scaled / exp < 2047:
            break
    if scaled < 0:
        sign = 1
        mantissa = int(2048 + scaled / exp)
    else:
        sign = 0
        mantissa = int(scaled / exp)
    high = (sign << 7) + (exponent << 3) + (mantissa >> 8)
    low = mantissa & 0xff
    return [high, low]
|
def function[float_to_knx2, parameter[floatval]]:
constant[Convert a float to a 2 byte KNX float value]
if <ast.BoolOp object at 0x7da18ede7d30> begin[:]
<ast.Raise object at 0x7da18ede6d10>
variable[floatval] assign[=] binary_operation[name[floatval] * constant[100]]
variable[i] assign[=] constant[0]
for taget[name[i]] in starred[call[name[range], parameter[constant[0], constant[15]]]] begin[:]
variable[exp] assign[=] call[name[pow], parameter[constant[2], name[i]]]
if <ast.BoolOp object at 0x7da18ede7490> begin[:]
break
if compare[name[floatval] less[<] constant[0]] begin[:]
variable[sign] assign[=] constant[1]
variable[mantisse] assign[=] call[name[int], parameter[binary_operation[constant[2048] + binary_operation[name[floatval] / name[exp]]]]]
return[list[[<ast.BinOp object at 0x7da18ede4280>, <ast.BinOp object at 0x7da18ede57e0>]]]
|
keyword[def] identifier[float_to_knx2] ( identifier[floatval] ):
literal[string]
keyword[if] identifier[floatval] <- literal[int] keyword[or] identifier[floatval] > literal[int] :
keyword[raise] identifier[KNXException] ( literal[string] . identifier[format] ( identifier[floatval] ))
identifier[floatval] = identifier[floatval] * literal[int]
identifier[i] = literal[int]
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , literal[int] ):
identifier[exp] = identifier[pow] ( literal[int] , identifier[i] )
keyword[if] (( identifier[floatval] / identifier[exp] )>=- literal[int] ) keyword[and] (( identifier[floatval] / identifier[exp] )< literal[int] ):
keyword[break]
keyword[if] identifier[floatval] < literal[int] :
identifier[sign] = literal[int]
identifier[mantisse] = identifier[int] ( literal[int] +( identifier[floatval] / identifier[exp] ))
keyword[else] :
identifier[sign] = literal[int]
identifier[mantisse] = identifier[int] ( identifier[floatval] / identifier[exp] )
keyword[return] [( identifier[sign] << literal[int] )+( identifier[i] << literal[int] )+( identifier[mantisse] >> literal[int] ),
identifier[mantisse] & literal[int] ]
|
def float_to_knx2(floatval):
    """Convert a float to a 2 byte KNX float value.

    Encoding: 1 sign bit, 4-bit exponent, 11-bit mantissa in hundredths;
    returned as ``[high_byte, low_byte]``.  Raises ``KNXException`` for
    values outside -671088.64 .. 670760.96.
    """
    if floatval < -671088.64 or floatval > 670760.96:
        raise KNXException('float {} out of valid range'.format(floatval))
    # Work in hundredths -- the resolution of the KNX 2-byte float.
    floatval = floatval * 100
    i = 0
    # Find the smallest exponent whose scaled value fits the 11-bit mantissa.
    for i in range(0, 15):
        exp = pow(2, i)
        if floatval / exp >= -2048 and floatval / exp < 2047:
            break
    if floatval < 0:
        sign = 1
        # Negative values are stored as an offset from 2048 (two's-complement style).
        mantisse = int(2048 + floatval / exp)
    else:
        sign = 0
        mantisse = int(floatval / exp)
    return [(sign << 7) + (i << 3) + (mantisse >> 8), mantisse & 255]
|
def headers(self):
    """
    Return only the headers as Python object

    Every raw header value of ``self.message`` is run through
    ``decode_header_part`` so the result is a plain dict of decoded
    header values keyed by header name.
    """
    return {name: decode_header_part(raw)
            for name, raw in self.message.items()}
|
def function[headers, parameter[self]]:
constant[
Return only the headers as Python object
]
variable[d] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da1b0719f90>, <ast.Name object at 0x7da1b0719480>]]] in starred[call[name[self].message.items, parameter[]]] begin[:]
call[name[d]][name[k]] assign[=] call[name[decode_header_part], parameter[name[v]]]
return[name[d]]
|
keyword[def] identifier[headers] ( identifier[self] ):
literal[string]
identifier[d] ={}
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[self] . identifier[message] . identifier[items] ():
identifier[d] [ identifier[k] ]= identifier[decode_header_part] ( identifier[v] )
keyword[return] identifier[d]
|
def headers(self):
    """
    Return only the headers as Python object

    Each raw header value from ``self.message`` is decoded via
    ``decode_header_part`` and returned in a plain dict keyed by
    header name.
    """
    d = {}
    for (k, v) in self.message.items():
        d[k] = decode_header_part(v)
    return d
|
def toxml(self, encoding="UTF-8"):
    """ Return the manifest as XML """
    dom = self.todom()
    # WARNING: The XML declaration has to follow the order
    # version-encoding-standalone (standalone being optional), otherwise
    # if it is embedded in an exe the exe will fail to launch!
    # ('application configuration incorrect')
    plain_decl = '<?xml version="1.0" encoding="%s"?>' % encoding
    standalone_decl = \
        '<?xml version="1.0" encoding="%s" standalone="yes"?>' % encoding
    xmlstr = dom.toxml(encoding).replace(plain_decl, standalone_decl)
    dom.unlink()
    return xmlstr
|
def function[toxml, parameter[self, encoding]]:
constant[ Return the manifest as XML ]
variable[domtree] assign[=] call[name[self].todom, parameter[]]
variable[xmlstr] assign[=] call[call[name[domtree].toxml, parameter[name[encoding]]].replace, parameter[binary_operation[constant[<?xml version="1.0" encoding="%s"?>] <ast.Mod object at 0x7da2590d6920> name[encoding]], binary_operation[constant[<?xml version="1.0" encoding="%s" standalone="yes"?>] <ast.Mod object at 0x7da2590d6920> name[encoding]]]]
call[name[domtree].unlink, parameter[]]
return[name[xmlstr]]
|
keyword[def] identifier[toxml] ( identifier[self] , identifier[encoding] = literal[string] ):
literal[string]
identifier[domtree] = identifier[self] . identifier[todom] ()
identifier[xmlstr] = identifier[domtree] . identifier[toxml] ( identifier[encoding] ). identifier[replace] (
literal[string] % identifier[encoding] ,
literal[string] % identifier[encoding] )
identifier[domtree] . identifier[unlink] ()
keyword[return] identifier[xmlstr]
|
def toxml(self, encoding='UTF-8'):
""" Return the manifest as XML """
domtree = self.todom() # WARNING: The XML declaration has to follow the order
# version-encoding-standalone (standalone being optional), otherwise
# if it is embedded in an exe the exe will fail to launch!
# ('application configuration incorrect')
xmlstr = domtree.toxml(encoding).replace('<?xml version="1.0" encoding="%s"?>' % encoding, '<?xml version="1.0" encoding="%s" standalone="yes"?>' % encoding)
domtree.unlink()
return xmlstr
|
def _check_log_scale(base, sides, scales, coord):
    """
    Check the log transforms
    Parameters
    ----------
    base : float or None
        Base of the logarithm in which the ticks will be
        calculated. If ``None``, the base of the log transform
        the scale will be used.
    sides : str (default: bl)
        Sides onto which to draw the marks. Any combination
        chosen from the characters ``btlr``, for *bottom*, *top*,
        *left* or *right* side marks. If ``coord_flip()`` is used,
        these are the sides *after* the flip.
    scales : SimpleNamespace
        ``x`` and ``y`` scales.
    coord : coord
        Coordinate (e.g. coord_cartesian) system of the geom.
    Returns
    -------
    out : tuple
        The bases (base_x, base_y) to use when generating the ticks.
    """
    def is_log(trans):
        # A log transform is recognised by its class name (e.g. log10_trans)
        # together with a `base` attribute.
        return (trans.__class__.__name__.startswith('log') and
                hasattr(trans, 'base'))

    base_x, base_y = base, base
    x_is_log = is_log(scales.x.trans)
    y_is_log = is_log(scales.y.trans)
    if isinstance(coord, coord_flip):
        # After a flip, the x scale is drawn on the y axis and vice-versa
        x_is_log, y_is_log = y_is_log, x_is_log
    if 't' in sides or 'b' in sides:
        if base_x is None:
            base_x = scales.x.trans.base
        if not x_is_log:
            warnings.warn(
                "annotation_logticks for x-axis which does not have "
                "a log scale. The logticks may not make sense.",
                PlotnineWarning)
        elif x_is_log and base_x != scales.x.trans.base:
            warnings.warn(
                "The x-axis is log transformed in base {} ,"
                "but the annotation_logticks are computed in base {}"
                "".format(base_x, scales.x.trans.base),
                PlotnineWarning)
    if 'l' in sides or 'r' in sides:
        if base_y is None:
            base_y = scales.y.trans.base
        if not y_is_log:
            warnings.warn(
                "annotation_logticks for y-axis which does not have "
                "a log scale. The logticks may not make sense.",
                PlotnineWarning)
        # BUG FIX: this branch previously compared base_y against
        # scales.x.trans.base (copy-paste from the x-axis branch); the
        # y-axis must be checked against its own scale's base.
        elif y_is_log and base_y != scales.y.trans.base:
            warnings.warn(
                "The y-axis is log transformed in base {} ,"
                "but the annotation_logticks are computed in base {}"
                "".format(base_y, scales.y.trans.base),
                PlotnineWarning)
    return base_x, base_y
|
def function[_check_log_scale, parameter[base, sides, scales, coord]]:
constant[
Check the log transforms
Parameters
----------
base : float or None
Base of the logarithm in which the ticks will be
calculated. If ``None``, the base of the log transform
the scale will be used.
sides : str (default: bl)
Sides onto which to draw the marks. Any combination
chosen from the characters ``btlr``, for *bottom*, *top*,
*left* or *right* side marks. If ``coord_flip()`` is used,
these are the sides *after* the flip.
scales : SimpleNamespace
``x`` and ``y`` scales.
coord : coord
Coordinate (e.g. coord_cartesian) system of the geom.
Returns
-------
out : tuple
The bases (base_x, base_y) to use when generating the ticks.
]
def function[is_log, parameter[trans]]:
return[<ast.BoolOp object at 0x7da2049617b0>]
<ast.Tuple object at 0x7da204960610> assign[=] tuple[[<ast.Name object at 0x7da204960310>, <ast.Name object at 0x7da204961db0>]]
variable[x_is_log] assign[=] call[name[is_log], parameter[name[scales].x.trans]]
variable[y_is_log] assign[=] call[name[is_log], parameter[name[scales].y.trans]]
if call[name[isinstance], parameter[name[coord], name[coord_flip]]] begin[:]
<ast.Tuple object at 0x7da207f02b90> assign[=] tuple[[<ast.Name object at 0x7da207f01600>, <ast.Name object at 0x7da207f02ad0>]]
if <ast.BoolOp object at 0x7da207f01ed0> begin[:]
if compare[name[base_x] is constant[None]] begin[:]
variable[base_x] assign[=] name[scales].x.trans.base
if <ast.UnaryOp object at 0x7da207f03a60> begin[:]
call[name[warnings].warn, parameter[constant[annotation_logticks for x-axis which does not have a log scale. The logticks may not make sense.], name[PlotnineWarning]]]
if <ast.BoolOp object at 0x7da20e960b80> begin[:]
if compare[name[base_y] is constant[None]] begin[:]
variable[base_y] assign[=] name[scales].y.trans.base
if <ast.UnaryOp object at 0x7da20e961390> begin[:]
call[name[warnings].warn, parameter[constant[annotation_logticks for y-axis which does not have a log scale. The logticks may not make sense.], name[PlotnineWarning]]]
return[tuple[[<ast.Name object at 0x7da20e963b80>, <ast.Name object at 0x7da20e963820>]]]
|
keyword[def] identifier[_check_log_scale] ( identifier[base] , identifier[sides] , identifier[scales] , identifier[coord] ):
literal[string]
keyword[def] identifier[is_log] ( identifier[trans] ):
keyword[return] ( identifier[trans] . identifier[__class__] . identifier[__name__] . identifier[startswith] ( literal[string] ) keyword[and]
identifier[hasattr] ( identifier[trans] , literal[string] ))
identifier[base_x] , identifier[base_y] = identifier[base] , identifier[base]
identifier[x_is_log] = identifier[is_log] ( identifier[scales] . identifier[x] . identifier[trans] )
identifier[y_is_log] = identifier[is_log] ( identifier[scales] . identifier[y] . identifier[trans] )
keyword[if] identifier[isinstance] ( identifier[coord] , identifier[coord_flip] ):
identifier[x_is_log] , identifier[y_is_log] = identifier[y_is_log] , identifier[x_is_log]
keyword[if] literal[string] keyword[in] identifier[sides] keyword[or] literal[string] keyword[in] identifier[sides] :
keyword[if] identifier[base_x] keyword[is] keyword[None] :
identifier[base_x] = identifier[scales] . identifier[x] . identifier[trans] . identifier[base]
keyword[if] keyword[not] identifier[x_is_log] :
identifier[warnings] . identifier[warn] (
literal[string]
literal[string] ,
identifier[PlotnineWarning] )
keyword[elif] identifier[x_is_log] keyword[and] identifier[base_x] != identifier[scales] . identifier[x] . identifier[trans] . identifier[base] :
identifier[warnings] . identifier[warn] (
literal[string]
literal[string]
literal[string] . identifier[format] ( identifier[base_x] , identifier[scales] . identifier[x] . identifier[trans] . identifier[base] ),
identifier[PlotnineWarning] )
keyword[if] literal[string] keyword[in] identifier[sides] keyword[or] literal[string] keyword[in] identifier[sides] :
keyword[if] identifier[base_y] keyword[is] keyword[None] :
identifier[base_y] = identifier[scales] . identifier[y] . identifier[trans] . identifier[base]
keyword[if] keyword[not] identifier[y_is_log] :
identifier[warnings] . identifier[warn] (
literal[string]
literal[string] ,
identifier[PlotnineWarning] )
keyword[elif] identifier[y_is_log] keyword[and] identifier[base_y] != identifier[scales] . identifier[x] . identifier[trans] . identifier[base] :
identifier[warnings] . identifier[warn] (
literal[string]
literal[string]
literal[string] . identifier[format] ( identifier[base_y] , identifier[scales] . identifier[x] . identifier[trans] . identifier[base] ),
identifier[PlotnineWarning] )
keyword[return] identifier[base_x] , identifier[base_y]
|
def _check_log_scale(base, sides, scales, coord):
"""
Check the log transforms
Parameters
----------
base : float or None
Base of the logarithm in which the ticks will be
calculated. If ``None``, the base of the log transform
the scale will be used.
sides : str (default: bl)
Sides onto which to draw the marks. Any combination
chosen from the characters ``btlr``, for *bottom*, *top*,
*left* or *right* side marks. If ``coord_flip()`` is used,
these are the sides *after* the flip.
scales : SimpleNamespace
``x`` and ``y`` scales.
coord : coord
Coordinate (e.g. coord_cartesian) system of the geom.
Returns
-------
out : tuple
The bases (base_x, base_y) to use when generating the ticks.
"""
def is_log(trans):
return trans.__class__.__name__.startswith('log') and hasattr(trans, 'base')
(base_x, base_y) = (base, base)
x_is_log = is_log(scales.x.trans)
y_is_log = is_log(scales.y.trans)
if isinstance(coord, coord_flip):
(x_is_log, y_is_log) = (y_is_log, x_is_log) # depends on [control=['if'], data=[]]
if 't' in sides or 'b' in sides:
if base_x is None:
base_x = scales.x.trans.base # depends on [control=['if'], data=['base_x']]
if not x_is_log:
warnings.warn('annotation_logticks for x-axis which does not have a log scale. The logticks may not make sense.', PlotnineWarning) # depends on [control=['if'], data=[]]
elif x_is_log and base_x != scales.x.trans.base:
warnings.warn('The x-axis is log transformed in base {} ,but the annotation_logticks are computed in base {}'.format(base_x, scales.x.trans.base), PlotnineWarning) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if 'l' in sides or 'r' in sides:
if base_y is None:
base_y = scales.y.trans.base # depends on [control=['if'], data=['base_y']]
if not y_is_log:
warnings.warn('annotation_logticks for y-axis which does not have a log scale. The logticks may not make sense.', PlotnineWarning) # depends on [control=['if'], data=[]]
elif y_is_log and base_y != scales.x.trans.base:
warnings.warn('The y-axis is log transformed in base {} ,but the annotation_logticks are computed in base {}'.format(base_y, scales.x.trans.base), PlotnineWarning) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return (base_x, base_y)
|
def read_namespaced_stateful_set(self, name, namespace, **kwargs):  # noqa: E501
    """read_namespaced_stateful_set  # noqa: E501

    read the specified StatefulSet  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.read_namespaced_stateful_set(name, namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str name: name of the StatefulSet (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param bool exact: Should the export be exact.  Exact export maintains cluster-specific fields like 'Namespace'.
    :param bool export: Should this value be exported.  Export strips fields that a user can not specify.
    :return: V1StatefulSet
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Async callers receive the request thread; sync callers get the
    # unwrapped data from the same underlying implementation.
    if kwargs.get('async_req'):
        return self.read_namespaced_stateful_set_with_http_info(
            name, namespace, **kwargs)  # noqa: E501
    return self.read_namespaced_stateful_set_with_http_info(
        name, namespace, **kwargs)  # noqa: E501
|
def function[read_namespaced_stateful_set, parameter[self, name, namespace]]:
constant[read_namespaced_stateful_set # noqa: E501
read the specified StatefulSet # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_stateful_set(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the StatefulSet (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.
:param bool export: Should this value be exported. Export strips fields that a user can not specify.
:return: V1StatefulSet
If the method is called asynchronously,
returns the request thread.
]
call[name[kwargs]][constant[_return_http_data_only]] assign[=] constant[True]
if call[name[kwargs].get, parameter[constant[async_req]]] begin[:]
return[call[name[self].read_namespaced_stateful_set_with_http_info, parameter[name[name], name[namespace]]]]
|
keyword[def] identifier[read_namespaced_stateful_set] ( identifier[self] , identifier[name] , identifier[namespace] ,** identifier[kwargs] ):
literal[string]
identifier[kwargs] [ literal[string] ]= keyword[True]
keyword[if] identifier[kwargs] . identifier[get] ( literal[string] ):
keyword[return] identifier[self] . identifier[read_namespaced_stateful_set_with_http_info] ( identifier[name] , identifier[namespace] ,** identifier[kwargs] )
keyword[else] :
( identifier[data] )= identifier[self] . identifier[read_namespaced_stateful_set_with_http_info] ( identifier[name] , identifier[namespace] ,** identifier[kwargs] )
keyword[return] identifier[data]
|
def read_namespaced_stateful_set(self, name, namespace, **kwargs): # noqa: E501
"read_namespaced_stateful_set # noqa: E501\n\n read the specified StatefulSet # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.read_namespaced_stateful_set(name, namespace, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param str name: name of the StatefulSet (required)\n :param str namespace: object name and auth scope, such as for teams and projects (required)\n :param str pretty: If 'true', then the output is pretty printed.\n :param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.\n :param bool export: Should this value be exported. Export strips fields that a user can not specify.\n :return: V1StatefulSet\n If the method is called asynchronously,\n returns the request thread.\n "
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_namespaced_stateful_set_with_http_info(name, namespace, **kwargs) # noqa: E501 # depends on [control=['if'], data=[]]
else:
data = self.read_namespaced_stateful_set_with_http_info(name, namespace, **kwargs) # noqa: E501
return data
|
def isOnSiliconList(self, ra_deg, dec_deg, padding_pix=DEFAULT_PADDING):
    """Vectorised counterpart of isOnSilicon(): accepts lists of coordinates.

    Returns a boolean array, one entry per input coordinate, that is True
    where the position falls on a usable science pixel.
    """
    channels, cols, rows = self.getChannelColRowList(ra_deg, dec_deg)
    onSilicon = np.zeros(len(channels), dtype=bool)
    for chan in set(channels):
        # Broken channels and channel numbers above 84 stay False.
        if chan in self.brokenChannels:
            continue
        if chan > 84:
            continue
        sel = (channels == chan)
        onSilicon[sel] = self.colRowIsOnSciencePixelList(
            cols[sel], rows[sel], padding_pix)
    return onSilicon
|
def function[isOnSiliconList, parameter[self, ra_deg, dec_deg, padding_pix]]:
constant[similar to isOnSilicon() but takes lists as input]
<ast.Tuple object at 0x7da1b0a4c310> assign[=] call[name[self].getChannelColRowList, parameter[name[ra_deg], name[dec_deg]]]
variable[out] assign[=] call[name[np].zeros, parameter[call[name[len], parameter[name[ch]]]]]
for taget[name[channel]] in starred[call[name[set], parameter[name[ch]]]] begin[:]
variable[mask] assign[=] compare[name[ch] equal[==] name[channel]]
if compare[name[channel] in name[self].brokenChannels] begin[:]
continue
if compare[name[channel] greater[>] constant[84]] begin[:]
continue
call[name[out]][name[mask]] assign[=] call[name[self].colRowIsOnSciencePixelList, parameter[call[name[col]][name[mask]], call[name[row]][name[mask]], name[padding_pix]]]
return[name[out]]
|
keyword[def] identifier[isOnSiliconList] ( identifier[self] , identifier[ra_deg] , identifier[dec_deg] , identifier[padding_pix] = identifier[DEFAULT_PADDING] ):
literal[string]
identifier[ch] , identifier[col] , identifier[row] = identifier[self] . identifier[getChannelColRowList] ( identifier[ra_deg] , identifier[dec_deg] )
identifier[out] = identifier[np] . identifier[zeros] ( identifier[len] ( identifier[ch] ), identifier[dtype] = identifier[bool] )
keyword[for] identifier[channel] keyword[in] identifier[set] ( identifier[ch] ):
identifier[mask] =( identifier[ch] == identifier[channel] )
keyword[if] identifier[channel] keyword[in] identifier[self] . identifier[brokenChannels] :
keyword[continue]
keyword[if] identifier[channel] > literal[int] :
keyword[continue]
identifier[out] [ identifier[mask] ]= identifier[self] . identifier[colRowIsOnSciencePixelList] ( identifier[col] [ identifier[mask] ], identifier[row] [ identifier[mask] ], identifier[padding_pix] )
keyword[return] identifier[out]
|
def isOnSiliconList(self, ra_deg, dec_deg, padding_pix=DEFAULT_PADDING):
"""similar to isOnSilicon() but takes lists as input"""
(ch, col, row) = self.getChannelColRowList(ra_deg, dec_deg)
out = np.zeros(len(ch), dtype=bool)
for channel in set(ch):
mask = ch == channel
if channel in self.brokenChannels:
continue # depends on [control=['if'], data=[]]
if channel > 84:
continue # depends on [control=['if'], data=[]]
out[mask] = self.colRowIsOnSciencePixelList(col[mask], row[mask], padding_pix) # depends on [control=['for'], data=['channel']]
return out
|
def pretty_xml(data):
    """Return a pretty-formatted, UTF-8 encoded XML byte string.

    Parameters
    ----------
    data : bytes or str
        Raw XML document. Byte input is assumed to be UTF-8 encoded.
        (Generalized: previously only ``bytes`` was accepted.)

    Returns
    -------
    bytes
        The document re-serialised with tab indentation and an
        ``encoding="utf-8"`` XML declaration.
    """
    if isinstance(data, bytes):
        data = data.decode('utf-8')
    parsed_string = minidom.parseString(data)
    return parsed_string.toprettyxml(indent='\t', encoding='utf-8')
|
def function[pretty_xml, parameter[data]]:
constant[Return a pretty formated xml
]
variable[parsed_string] assign[=] call[name[minidom].parseString, parameter[call[name[data].decode, parameter[constant[utf-8]]]]]
return[call[name[parsed_string].toprettyxml, parameter[]]]
|
keyword[def] identifier[pretty_xml] ( identifier[data] ):
literal[string]
identifier[parsed_string] = identifier[minidom] . identifier[parseString] ( identifier[data] . identifier[decode] ( literal[string] ))
keyword[return] identifier[parsed_string] . identifier[toprettyxml] ( identifier[indent] = literal[string] , identifier[encoding] = literal[string] )
|
def pretty_xml(data):
"""Return a pretty formated xml
"""
parsed_string = minidom.parseString(data.decode('utf-8'))
return parsed_string.toprettyxml(indent='\t', encoding='utf-8')
|
def solve_equilibrium_point(self, analyzer1, analyzer2,
                            delu_dict={}, delu_default=0, units="nanometers"):
    """
    Gives the radial size of two particles where equilibrium is reached
    between both particles. NOTE: the solution here is not the same
    as the solution visualized in the plot because solving for r
    requires that both the total surface area and volume of the
    particles are functions of r.
    Args:
        analyzer1 (SurfaceEnergyPlotter): Analyzer associated with the
            first polymorph
        analyzer2 (SurfaceEnergyPlotter): Analyzer associated with the
            second polymorph
        delu_dict (Dict): Dictionary of the chemical potentials to be set as
            constant. Note the key should be a sympy Symbol object of the
            format: Symbol("delu_el") where el is the name of the element.
        delu_default (float): Default value for all unset chemical potentials
        units (str): Can be nanometers or Angstrom
    Returns:
        Particle radius in nm
    """
    # Build the Wulff shape of each polymorph at the given chemical potentials
    wulff_a, wulff_b = (
        analyzer.wulff_from_chempot(delu_dict=delu_dict,
                                    delu_default=delu_default,
                                    symprec=self.symprec)
        for analyzer in (analyzer1, analyzer2)
    )
    # r = -3 * (gamma_1 - gamma_2) / (E_1 - E_2)
    delta_gamma = (wulff_a.weighted_surface_energy
                   - wulff_b.weighted_surface_energy)
    delta_E = (self.bulk_gform(analyzer1.ucell_entry)
               - self.bulk_gform(analyzer2.ucell_entry))
    radius = (-3 * delta_gamma) / delta_E
    if units == "nanometers":
        return radius / 10
    return radius
|
def function[solve_equilibrium_point, parameter[self, analyzer1, analyzer2, delu_dict, delu_default, units]]:
constant[
Gives the radial size of two particles where equilibrium is reached
between both particles. NOTE: the solution here is not the same
as the solution visualized in the plot because solving for r
requires that both the total surface area and volume of the
particles are functions of r.
Args:
analyzer1 (SurfaceEnergyPlotter): Analyzer associated with the
first polymorph
analyzer2 (SurfaceEnergyPlotter): Analyzer associated with the
second polymorph
delu_dict (Dict): Dictionary of the chemical potentials to be set as
constant. Note the key should be a sympy Symbol object of the
format: Symbol("delu_el") where el is the name of the element.
delu_default (float): Default value for all unset chemical potentials
units (str): Can be nanometers or Angstrom
Returns:
Particle radius in nm
]
variable[wulff1] assign[=] call[name[analyzer1].wulff_from_chempot, parameter[]]
variable[wulff2] assign[=] call[name[analyzer2].wulff_from_chempot, parameter[]]
variable[delta_gamma] assign[=] binary_operation[name[wulff1].weighted_surface_energy - name[wulff2].weighted_surface_energy]
variable[delta_E] assign[=] binary_operation[call[name[self].bulk_gform, parameter[name[analyzer1].ucell_entry]] - call[name[self].bulk_gform, parameter[name[analyzer2].ucell_entry]]]
variable[r] assign[=] binary_operation[binary_operation[<ast.UnaryOp object at 0x7da1b1cb6890> * name[delta_gamma]] / name[delta_E]]
return[<ast.IfExp object at 0x7da1b1cb6aa0>]
|
keyword[def] identifier[solve_equilibrium_point] ( identifier[self] , identifier[analyzer1] , identifier[analyzer2] ,
identifier[delu_dict] ={}, identifier[delu_default] = literal[int] , identifier[units] = literal[string] ):
literal[string]
identifier[wulff1] = identifier[analyzer1] . identifier[wulff_from_chempot] ( identifier[delu_dict] = identifier[delu_dict] ,
identifier[delu_default] = identifier[delu_default] ,
identifier[symprec] = identifier[self] . identifier[symprec] )
identifier[wulff2] = identifier[analyzer2] . identifier[wulff_from_chempot] ( identifier[delu_dict] = identifier[delu_dict] ,
identifier[delu_default] = identifier[delu_default] ,
identifier[symprec] = identifier[self] . identifier[symprec] )
identifier[delta_gamma] = identifier[wulff1] . identifier[weighted_surface_energy] - identifier[wulff2] . identifier[weighted_surface_energy]
identifier[delta_E] = identifier[self] . identifier[bulk_gform] ( identifier[analyzer1] . identifier[ucell_entry] )- identifier[self] . identifier[bulk_gform] ( identifier[analyzer2] . identifier[ucell_entry] )
identifier[r] =((- literal[int] * identifier[delta_gamma] )/( identifier[delta_E] ))
keyword[return] identifier[r] / literal[int] keyword[if] identifier[units] == literal[string] keyword[else] identifier[r]
|
def solve_equilibrium_point(self, analyzer1, analyzer2, delu_dict={}, delu_default=0, units='nanometers'):
"""
Gives the radial size of two particles where equilibrium is reached
between both particles. NOTE: the solution here is not the same
as the solution visualized in the plot because solving for r
requires that both the total surface area and volume of the
particles are functions of r.
Args:
analyzer1 (SurfaceEnergyPlotter): Analyzer associated with the
first polymorph
analyzer2 (SurfaceEnergyPlotter): Analyzer associated with the
second polymorph
delu_dict (Dict): Dictionary of the chemical potentials to be set as
constant. Note the key should be a sympy Symbol object of the
format: Symbol("delu_el") where el is the name of the element.
delu_default (float): Default value for all unset chemical potentials
units (str): Can be nanometers or Angstrom
Returns:
Particle radius in nm
"""
# Set up
wulff1 = analyzer1.wulff_from_chempot(delu_dict=delu_dict, delu_default=delu_default, symprec=self.symprec)
wulff2 = analyzer2.wulff_from_chempot(delu_dict=delu_dict, delu_default=delu_default, symprec=self.symprec)
# Now calculate r
delta_gamma = wulff1.weighted_surface_energy - wulff2.weighted_surface_energy
delta_E = self.bulk_gform(analyzer1.ucell_entry) - self.bulk_gform(analyzer2.ucell_entry)
r = -3 * delta_gamma / delta_E
return r / 10 if units == 'nanometers' else r
|
def xfrange(start, stop, step=1, maxSize=-1):
    """
    Returns a generator that yields the frames from start to stop, inclusive.
    In other words it adds or subtracts a frame, as necessary, to return the
    stop value as well, if the stepped range would touch that value.
    Args:
        start (int):
        stop (int):
        step (int): Note that the sign will be ignored
        maxSize (int):
    Returns:
        generator:
    Raises:
        :class:`fileseq.exceptions.MaxSizeException`: if size is exceeded
    """
    # Widen the range by one frame in the direction of travel so the stop
    # value is included; the sign of `step` always follows that direction.
    if start <= stop:
        stop += 1
        step = abs(step)
    else:
        stop -= 1
        step = -abs(step)

    if maxSize >= 0:
        size = lenRange(start, stop, step)
        if size > maxSize:
            raise exceptions.MaxSizeException(
                "Size %d > %s (MAX_FRAME_SIZE)" % (size, maxSize))

    # because an xrange is an odd object all its own, we wrap it in a
    # generator expression to get a proper Generator
    return (frame for frame in xrange(start, stop, step))
|
def function[xfrange, parameter[start, stop, step, maxSize]]:
constant[
Returns a generator that yields the frames from start to stop, inclusive.
In other words it adds or subtracts a frame, as necessary, to return the
stop value as well, if the stepped range would touch that value.
Args:
start (int):
stop (int):
step (int): Note that the sign will be ignored
maxSize (int):
Returns:
generator:
Raises:
:class:`fileseq.exceptions.MaxSizeException`: if size is exceeded
]
if compare[name[start] less_or_equal[<=] name[stop]] begin[:]
<ast.Tuple object at 0x7da204963df0> assign[=] tuple[[<ast.BinOp object at 0x7da204960310>, <ast.Call object at 0x7da204962110>]]
if compare[name[maxSize] greater_or_equal[>=] constant[0]] begin[:]
variable[size] assign[=] call[name[lenRange], parameter[name[start], name[stop], name[step]]]
if compare[name[size] greater[>] name[maxSize]] begin[:]
<ast.Raise object at 0x7da204960160>
return[<ast.GeneratorExp object at 0x7da204960ac0>]
|
keyword[def] identifier[xfrange] ( identifier[start] , identifier[stop] , identifier[step] = literal[int] , identifier[maxSize] =- literal[int] ):
literal[string]
keyword[if] identifier[start] <= identifier[stop] :
identifier[stop] , identifier[step] = identifier[stop] + literal[int] , identifier[abs] ( identifier[step] )
keyword[else] :
identifier[stop] , identifier[step] = identifier[stop] - literal[int] ,- identifier[abs] ( identifier[step] )
keyword[if] identifier[maxSize] >= literal[int] :
identifier[size] = identifier[lenRange] ( identifier[start] , identifier[stop] , identifier[step] )
keyword[if] identifier[size] > identifier[maxSize] :
keyword[raise] identifier[exceptions] . identifier[MaxSizeException] (
literal[string] %( identifier[size] , identifier[maxSize] ))
keyword[return] ( identifier[f] keyword[for] identifier[f] keyword[in] identifier[xrange] ( identifier[start] , identifier[stop] , identifier[step] ))
|
def xfrange(start, stop, step=1, maxSize=-1):
"""
Returns a generator that yields the frames from start to stop, inclusive.
In other words it adds or subtracts a frame, as necessary, to return the
stop value as well, if the stepped range would touch that value.
Args:
start (int):
stop (int):
step (int): Note that the sign will be ignored
maxSize (int):
Returns:
generator:
Raises:
:class:`fileseq.exceptions.MaxSizeException`: if size is exceeded
"""
if start <= stop:
(stop, step) = (stop + 1, abs(step)) # depends on [control=['if'], data=['stop']]
else:
(stop, step) = (stop - 1, -abs(step))
if maxSize >= 0:
size = lenRange(start, stop, step)
if size > maxSize:
raise exceptions.MaxSizeException('Size %d > %s (MAX_FRAME_SIZE)' % (size, maxSize)) # depends on [control=['if'], data=['size', 'maxSize']] # depends on [control=['if'], data=['maxSize']]
# because an xrange is an odd object all its own, we wrap it in a
# generator expression to get a proper Generator
return (f for f in xrange(start, stop, step))
|
def sign(self, value):
    """Return *value* with the separator and its signature appended."""
    separator = want_bytes(self.sep)
    return value + separator + self.get_signature(value)
|
def function[sign, parameter[self, value]]:
constant[Signs the given string.]
return[binary_operation[binary_operation[name[value] + call[name[want_bytes], parameter[name[self].sep]]] + call[name[self].get_signature, parameter[name[value]]]]]
|
keyword[def] identifier[sign] ( identifier[self] , identifier[value] ):
literal[string]
keyword[return] identifier[value] + identifier[want_bytes] ( identifier[self] . identifier[sep] )+ identifier[self] . identifier[get_signature] ( identifier[value] )
|
def sign(self, value):
"""Signs the given string."""
return value + want_bytes(self.sep) + self.get_signature(value)
|
def initialize_hooks(self):
    """
    Create SessionRunHooks for all callbacks, and hook it onto `self.sess` to create `self.hooked_sess`.
    A new trainer may override this method to create multiple groups of hooks,
    which can be useful when the training is not done by a single `train_op`.
    """
    session_hooks = self._callbacks.get_hooks()
    # Reuse the existing session rather than creating a new one.
    creator = ReuseSessionCreator(self.sess)
    self.hooked_sess = tfv1.train.MonitoredSession(
        session_creator=creator, hooks=session_hooks)
|
def function[initialize_hooks, parameter[self]]:
constant[
Create SessionRunHooks for all callbacks, and hook it onto `self.sess` to create `self.hooked_sess`.
A new trainer may override this method to create multiple groups of hooks,
which can be useful when the training is not done by a single `train_op`.
]
variable[hooks] assign[=] call[name[self]._callbacks.get_hooks, parameter[]]
name[self].hooked_sess assign[=] call[name[tfv1].train.MonitoredSession, parameter[]]
|
keyword[def] identifier[initialize_hooks] ( identifier[self] ):
literal[string]
identifier[hooks] = identifier[self] . identifier[_callbacks] . identifier[get_hooks] ()
identifier[self] . identifier[hooked_sess] = identifier[tfv1] . identifier[train] . identifier[MonitoredSession] (
identifier[session_creator] = identifier[ReuseSessionCreator] ( identifier[self] . identifier[sess] ), identifier[hooks] = identifier[hooks] )
|
def initialize_hooks(self):
"""
Create SessionRunHooks for all callbacks, and hook it onto `self.sess` to create `self.hooked_sess`.
A new trainer may override this method to create multiple groups of hooks,
which can be useful when the training is not done by a single `train_op`.
"""
hooks = self._callbacks.get_hooks()
self.hooked_sess = tfv1.train.MonitoredSession(session_creator=ReuseSessionCreator(self.sess), hooks=hooks)
|
def filter_taxa(records, taxids, unclassified=False, discard=False):
    '''
    Selectively include or discard specified taxon IDs from tictax annotated FASTA/Qs
    Filters all children of specified taxon IDs
    Returns subset of input SeqRecords

    NOTE(review): the original docstring stated that taxon IDs 1 and 2 are
    considered unclassified, but the code treats {0, 1} as unclassified --
    confirm which is intended before relying on this.
    '''
    taxids = set(taxids)
    kept_records = []
    ncbi = ete3.NCBITaxa()
    unclassified_taxids = {0, 1}
    for r in records:
        # Annotation format in the description: "<id> <taxid>|<rank>|<sciname>|<lineage>"
        # FIX: the unpacked lineage string previously shadowed (and was then
        # overwritten by) the NCBI-derived lineage set below; it is unused,
        # so bind it to a throwaway name.
        taxid, _rank, _sciname, _lineage_str = \
            r.description.strip().partition(' ')[2].split('|')
        taxid = int(taxid)
        # Rebuild the lineage from the NCBI taxonomy; fall back to {0}
        # (unclassified) when no lineage is known for the taxid.
        lineage = set(ncbi.get_lineage(taxid) or [0])
        intersection = lineage & taxids
        if taxid in unclassified_taxids and unclassified:
            kept_records.append(r)
        elif intersection and not discard:
            kept_records.append(r)
        elif not intersection and discard and taxid not in unclassified_taxids:
            kept_records.append(r)
    return kept_records
|
def function[filter_taxa, parameter[records, taxids, unclassified, discard]]:
constant[
Selectively include or discard specified taxon IDs from tictax annotated FASTA/Qs
Filters all children of specified taxon IDs
Returns subset of input SeqRecords
Taxon IDs of 1 and 2 are considered unclassified
]
variable[taxids] assign[=] call[name[set], parameter[name[taxids]]]
variable[kept_records] assign[=] list[[]]
variable[ncbi] assign[=] call[name[ete3].NCBITaxa, parameter[]]
variable[unclassified_taxids] assign[=] <ast.Set object at 0x7da1b0a73b80>
for taget[name[r]] in starred[name[records]] begin[:]
<ast.Tuple object at 0x7da1b0917910> assign[=] call[call[call[call[name[r].description.strip, parameter[]].partition, parameter[constant[ ]]]][constant[2]].split, parameter[constant[|]]]
variable[taxid] assign[=] call[name[int], parameter[name[taxid]]]
variable[lineage] assign[=] call[name[set], parameter[<ast.BoolOp object at 0x7da1b0916b60>]]
variable[intersection] assign[=] binary_operation[name[lineage] <ast.BitAnd object at 0x7da2590d6b60> name[taxids]]
if <ast.BoolOp object at 0x7da1b0915c00> begin[:]
call[name[kept_records].append, parameter[name[r]]]
return[name[kept_records]]
|
keyword[def] identifier[filter_taxa] ( identifier[records] , identifier[taxids] , identifier[unclassified] = keyword[False] , identifier[discard] = keyword[False] ):
literal[string]
identifier[taxids] = identifier[set] ( identifier[taxids] )
identifier[kept_records] =[]
identifier[ncbi] = identifier[ete3] . identifier[NCBITaxa] ()
identifier[unclassified_taxids] ={ literal[int] , literal[int] }
keyword[for] identifier[r] keyword[in] identifier[records] :
identifier[taxid] , identifier[rank] , identifier[sciname] , identifier[lineage] = identifier[r] . identifier[description] . identifier[strip] (). identifier[partition] ( literal[string] )[ literal[int] ]. identifier[split] ( literal[string] )
identifier[taxid] = identifier[int] ( identifier[taxid] )
identifier[lineage] = identifier[set] ( identifier[ncbi] . identifier[get_lineage] ( identifier[taxid] ) keyword[or] [ literal[int] ])
identifier[intersection] = identifier[lineage] & identifier[taxids]
keyword[if] identifier[taxid] keyword[in] identifier[unclassified_taxids] keyword[and] identifier[unclassified] :
identifier[kept_records] . identifier[append] ( identifier[r] )
keyword[elif] identifier[intersection] keyword[and] keyword[not] identifier[discard] :
identifier[kept_records] . identifier[append] ( identifier[r] )
keyword[elif] keyword[not] identifier[intersection] keyword[and] identifier[discard] keyword[and] identifier[taxid] keyword[not] keyword[in] identifier[unclassified_taxids] :
identifier[kept_records] . identifier[append] ( identifier[r] )
keyword[return] identifier[kept_records]
|
def filter_taxa(records, taxids, unclassified=False, discard=False):
"""
Selectively include or discard specified taxon IDs from tictax annotated FASTA/Qs
Filters all children of specified taxon IDs
Returns subset of input SeqRecords
Taxon IDs of 1 and 2 are considered unclassified
"""
taxids = set(taxids)
kept_records = []
ncbi = ete3.NCBITaxa()
unclassified_taxids = {0, 1}
for r in records:
(taxid, rank, sciname, lineage) = r.description.strip().partition(' ')[2].split('|')
taxid = int(taxid)
lineage = set(ncbi.get_lineage(taxid) or [0]) # lineage defined twice?
intersection = lineage & taxids
if taxid in unclassified_taxids and unclassified:
kept_records.append(r) # depends on [control=['if'], data=[]]
elif intersection and (not discard):
kept_records.append(r) # depends on [control=['if'], data=[]]
elif not intersection and discard and (taxid not in unclassified_taxids):
kept_records.append(r) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['r']]
return kept_records
|
def split_floats(op, min_num, value):
    """Split `value`, a list of numbers as a string, to a list of float numbers.
    Also optionally insert a `l` or `L` operation depending on the operation
    and the length of values.
    Example: with op='m' and value='10,20 30,40,' the returned value will be
    ['m', [10.0, 20.0], 'l', [30.0, 40.0]]
    """
    # Pull every numeric token out of the string (empty matches are skipped).
    numbers = [float(tok) for tok in re.findall(r'(-?\d*\.?\d*(?:e[+-]\d+)?)', value) if tok]
    # After the first group, a moveto implicitly becomes a lineto.
    follow_op = {'m': 'l', 'M': 'L'}
    result = []
    for start in range(0, len(numbers), min_num):
        if start > 0:
            op = follow_op.get(op, op)
        result += [op, numbers[start:start + min_num]]
    return result
|
def function[split_floats, parameter[op, min_num, value]]:
constant[Split `value`, a list of numbers as a string, to a list of float numbers.
Also optionally insert a `l` or `L` operation depending on the operation
and the length of values.
Example: with op='m' and value='10,20 30,40,' the returned value will be
['m', [10.0, 20.0], 'l', [30.0, 40.0]]
]
variable[floats] assign[=] <ast.ListComp object at 0x7da18fe90e20>
variable[res] assign[=] list[[]]
for taget[name[i]] in starred[call[name[range], parameter[constant[0], call[name[len], parameter[name[floats]]], name[min_num]]]] begin[:]
if <ast.BoolOp object at 0x7da18fe91420> begin[:]
variable[op] assign[=] <ast.IfExp object at 0x7da18fe90a90>
call[name[res].extend, parameter[list[[<ast.Name object at 0x7da18fe926e0>, <ast.Subscript object at 0x7da18fe93d00>]]]]
return[name[res]]
|
keyword[def] identifier[split_floats] ( identifier[op] , identifier[min_num] , identifier[value] ):
literal[string]
identifier[floats] =[ identifier[float] ( identifier[seq] ) keyword[for] identifier[seq] keyword[in] identifier[re] . identifier[findall] ( literal[string] , identifier[value] ) keyword[if] identifier[seq] ]
identifier[res] =[]
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[len] ( identifier[floats] ), identifier[min_num] ):
keyword[if] identifier[i] > literal[int] keyword[and] identifier[op] keyword[in] { literal[string] , literal[string] }:
identifier[op] = literal[string] keyword[if] identifier[op] == literal[string] keyword[else] literal[string]
identifier[res] . identifier[extend] ([ identifier[op] , identifier[floats] [ identifier[i] : identifier[i] + identifier[min_num] ]])
keyword[return] identifier[res]
|
def split_floats(op, min_num, value):
"""Split `value`, a list of numbers as a string, to a list of float numbers.
Also optionally insert a `l` or `L` operation depending on the operation
and the length of values.
Example: with op='m' and value='10,20 30,40,' the returned value will be
['m', [10.0, 20.0], 'l', [30.0, 40.0]]
"""
floats = [float(seq) for seq in re.findall('(-?\\d*\\.?\\d*(?:e[+-]\\d+)?)', value) if seq]
res = []
for i in range(0, len(floats), min_num):
if i > 0 and op in {'m', 'M'}:
op = 'l' if op == 'm' else 'L' # depends on [control=['if'], data=[]]
res.extend([op, floats[i:i + min_num]]) # depends on [control=['for'], data=['i']]
return res
|
def isglove(filepath):
    """ Get the first word vector in a GloVE file and return its dimensionality or False if not a vector
    >>> isglove(os.path.join(DATA_PATH, 'cats_and_dogs.txt'))
    False
    """
    # Only the first two lines are needed to classify the file.
    with ensure_open(filepath, 'r') as f:
        header_line = f.readline()
        vector_line = f.readline()
    try:
        # word2vec-style header "<num_vectors> <num_dim>": trust it if present.
        num_vectors, num_dim = header_line.split()
        return int(num_dim)
    except (ValueError, TypeError):
        # No parseable header; fall through and inspect the first vector line.
        pass
    # Drop the leading token (the word itself); the rest should be components.
    vector = vector_line.split()[1:]
    if len(vector) % 10:
        # NOTE(review): these prints look like leftover debugging output.
        print(vector)
        print(len(vector) % 10)
        # Heuristic: GloVe dimensionalities are assumed to be multiples of 10.
        return False
    try:
        vector = np.array([float(x) for x in vector])
    except (ValueError, TypeError):
        # Non-numeric tokens => not a vector file.
        return False
    # Heuristic sanity bound: real GloVe components are small in magnitude.
    if np.all(np.abs(vector) < 12.):
        return len(vector)
    return False
|
def function[isglove, parameter[filepath]]:
constant[ Get the first word vector in a GloVE file and return its dimensionality or False if not a vector
>>> isglove(os.path.join(DATA_PATH, 'cats_and_dogs.txt'))
False
]
with call[name[ensure_open], parameter[name[filepath], constant[r]]] begin[:]
variable[header_line] assign[=] call[name[f].readline, parameter[]]
variable[vector_line] assign[=] call[name[f].readline, parameter[]]
<ast.Try object at 0x7da18f09edd0>
variable[vector] assign[=] call[call[name[vector_line].split, parameter[]]][<ast.Slice object at 0x7da2054a6d70>]
if binary_operation[call[name[len], parameter[name[vector]]] <ast.Mod object at 0x7da2590d6920> constant[10]] begin[:]
call[name[print], parameter[name[vector]]]
call[name[print], parameter[binary_operation[call[name[len], parameter[name[vector]]] <ast.Mod object at 0x7da2590d6920> constant[10]]]]
return[constant[False]]
<ast.Try object at 0x7da2054a78e0>
if call[name[np].all, parameter[compare[call[name[np].abs, parameter[name[vector]]] less[<] constant[12.0]]]] begin[:]
return[call[name[len], parameter[name[vector]]]]
return[constant[False]]
|
keyword[def] identifier[isglove] ( identifier[filepath] ):
literal[string]
keyword[with] identifier[ensure_open] ( identifier[filepath] , literal[string] ) keyword[as] identifier[f] :
identifier[header_line] = identifier[f] . identifier[readline] ()
identifier[vector_line] = identifier[f] . identifier[readline] ()
keyword[try] :
identifier[num_vectors] , identifier[num_dim] = identifier[header_line] . identifier[split] ()
keyword[return] identifier[int] ( identifier[num_dim] )
keyword[except] ( identifier[ValueError] , identifier[TypeError] ):
keyword[pass]
identifier[vector] = identifier[vector_line] . identifier[split] ()[ literal[int] :]
keyword[if] identifier[len] ( identifier[vector] )% literal[int] :
identifier[print] ( identifier[vector] )
identifier[print] ( identifier[len] ( identifier[vector] )% literal[int] )
keyword[return] keyword[False]
keyword[try] :
identifier[vector] = identifier[np] . identifier[array] ([ identifier[float] ( identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[vector] ])
keyword[except] ( identifier[ValueError] , identifier[TypeError] ):
keyword[return] keyword[False]
keyword[if] identifier[np] . identifier[all] ( identifier[np] . identifier[abs] ( identifier[vector] )< literal[int] ):
keyword[return] identifier[len] ( identifier[vector] )
keyword[return] keyword[False]
|
def isglove(filepath):
""" Get the first word vector in a GloVE file and return its dimensionality or False if not a vector
>>> isglove(os.path.join(DATA_PATH, 'cats_and_dogs.txt'))
False
"""
with ensure_open(filepath, 'r') as f:
header_line = f.readline()
vector_line = f.readline() # depends on [control=['with'], data=['f']]
try:
(num_vectors, num_dim) = header_line.split()
return int(num_dim) # depends on [control=['try'], data=[]]
except (ValueError, TypeError):
pass # depends on [control=['except'], data=[]]
vector = vector_line.split()[1:]
if len(vector) % 10:
print(vector)
print(len(vector) % 10)
return False # depends on [control=['if'], data=[]]
try:
vector = np.array([float(x) for x in vector]) # depends on [control=['try'], data=[]]
except (ValueError, TypeError):
return False # depends on [control=['except'], data=[]]
if np.all(np.abs(vector) < 12.0):
return len(vector) # depends on [control=['if'], data=[]]
return False
|
def select_layout(self, layout=None):
    """Wrapper for ``$ tmux select-layout <layout>``.
    Parameters
    ----------
    layout : str, optional
        string of the layout, 'even-horizontal', 'tiled', etc. Entering
        None (leaving this blank) is same as ``select-layout`` with no
        layout. In recent tmux versions, it picks the most recently
        set layout.
        'even-horizontal'
            Panes are spread out evenly from left to right across the
            window.
        'even-vertical'
            Panes are spread evenly from top to bottom.
        'main-horizontal'
            A large (main) pane is shown at the top of the window and the
            remaining panes are spread from left to right in the leftover
            space at the bottom.
        'main-vertical'
            Similar to main-horizontal but the large pane is placed on the
            left and the others spread from top to bottom along the right.
        'tiled'
            Panes are spread out as evenly as possible over the window in
            both rows and columns.
        'custom'
            custom dimensions (see :term:`tmux(1)` manpages).

    Raises
    ------
    exc.LibTmuxException
        If tmux reports anything on stderr.
    """
    # Target this window explicitly: "-t<session_id>:<window_index>".
    cmd = ['select-layout', '-t%s:%s' % (self.get('session_id'), self.index)]
    if layout:  # tmux allows select-layout without args
        cmd.append(layout)
    proc = self.cmd(*cmd)
    if proc.stderr:
        raise exc.LibTmuxException(proc.stderr)
|
def function[select_layout, parameter[self, layout]]:
constant[Wrapper for ``$ tmux select-layout <layout>``.
Parameters
----------
layout : str, optional
string of the layout, 'even-horizontal', 'tiled', etc. Entering
None (leaving this blank) is same as ``select-layout`` with no
layout. In recent tmux versions, it picks the most recently
set layout.
'even-horizontal'
Panes are spread out evenly from left to right across the
window.
'even-vertical'
Panes are spread evenly from top to bottom.
'main-horizontal'
A large (main) pane is shown at the top of the window and the
remaining panes are spread from left to right in the leftover
space at the bottom.
'main-vertical'
Similar to main-horizontal but the large pane is placed on the
left and the others spread from top to bottom along the right.
'tiled'
Panes are spread out as evenly as possible over the window in
both rows and columns.
'custom'
custom dimensions (see :term:`tmux(1)` manpages).
]
variable[cmd] assign[=] list[[<ast.Constant object at 0x7da1b2345e10>, <ast.BinOp object at 0x7da1b2347cd0>]]
if name[layout] begin[:]
call[name[cmd].append, parameter[name[layout]]]
variable[proc] assign[=] call[name[self].cmd, parameter[<ast.Starred object at 0x7da1b2347a90>]]
if name[proc].stderr begin[:]
<ast.Raise object at 0x7da1b2344220>
|
keyword[def] identifier[select_layout] ( identifier[self] , identifier[layout] = keyword[None] ):
literal[string]
identifier[cmd] =[ literal[string] , literal[string] %( identifier[self] . identifier[get] ( literal[string] ), identifier[self] . identifier[index] )]
keyword[if] identifier[layout] :
identifier[cmd] . identifier[append] ( identifier[layout] )
identifier[proc] = identifier[self] . identifier[cmd] (* identifier[cmd] )
keyword[if] identifier[proc] . identifier[stderr] :
keyword[raise] identifier[exc] . identifier[LibTmuxException] ( identifier[proc] . identifier[stderr] )
|
def select_layout(self, layout=None):
"""Wrapper for ``$ tmux select-layout <layout>``.
Parameters
----------
layout : str, optional
string of the layout, 'even-horizontal', 'tiled', etc. Entering
None (leaving this blank) is same as ``select-layout`` with no
layout. In recent tmux versions, it picks the most recently
set layout.
'even-horizontal'
Panes are spread out evenly from left to right across the
window.
'even-vertical'
Panes are spread evenly from top to bottom.
'main-horizontal'
A large (main) pane is shown at the top of the window and the
remaining panes are spread from left to right in the leftover
space at the bottom.
'main-vertical'
Similar to main-horizontal but the large pane is placed on the
left and the others spread from top to bottom along the right.
'tiled'
Panes are spread out as evenly as possible over the window in
both rows and columns.
'custom'
custom dimensions (see :term:`tmux(1)` manpages).
"""
cmd = ['select-layout', '-t%s:%s' % (self.get('session_id'), self.index)]
if layout: # tmux allows select-layout without args
cmd.append(layout) # depends on [control=['if'], data=[]]
proc = self.cmd(*cmd)
if proc.stderr:
raise exc.LibTmuxException(proc.stderr) # depends on [control=['if'], data=[]]
|
def _compare_by_version(path1, path2):
    """Returns the current/latest learned path.
    Checks if given paths are from same source/peer and then compares their
    version number to determine which path is received later. If paths are from
    different source/peer return None.
    """
    if path1.source != path2.source:
        # Different sources/peers: version numbers are not comparable.
        return None
    # Higher source version number means the path was received later;
    # on a tie the second path wins, matching the original ordering.
    return path1 if path1.source_version_num > path2.source_version_num else path2
|
def function[_compare_by_version, parameter[path1, path2]]:
constant[Returns the current/latest learned path.
Checks if given paths are from same source/peer and then compares their
version number to determine which path is received later. If paths are from
different source/peer return None.
]
if compare[name[path1].source equal[==] name[path2].source] begin[:]
if compare[name[path1].source_version_num greater[>] name[path2].source_version_num] begin[:]
return[name[path1]]
return[constant[None]]
|
keyword[def] identifier[_compare_by_version] ( identifier[path1] , identifier[path2] ):
literal[string]
keyword[if] identifier[path1] . identifier[source] == identifier[path2] . identifier[source] :
keyword[if] identifier[path1] . identifier[source_version_num] > identifier[path2] . identifier[source_version_num] :
keyword[return] identifier[path1]
keyword[else] :
keyword[return] identifier[path2]
keyword[return] keyword[None]
|
def _compare_by_version(path1, path2):
"""Returns the current/latest learned path.
Checks if given paths are from same source/peer and then compares their
version number to determine which path is received later. If paths are from
different source/peer return None.
"""
if path1.source == path2.source:
if path1.source_version_num > path2.source_version_num:
return path1 # depends on [control=['if'], data=[]]
else:
return path2 # depends on [control=['if'], data=[]]
return None
|
def _call(self, f, out):
    """Implement ``self(vf, out)``.

    Multiplies each component of the stored vector field by ``f`` in place
    into the matching component of ``out``, then compensates for any
    difference between the range weight and the domain weight of that
    component (weights are assumed scalar per component -- TODO confirm).
    """
    # NOTE: ``self.__ran_weights`` is name-mangled; this attribute access
    # only resolves inside the defining class.
    for vfi, oi, ran_wi, dom_wi in zip(self.vecfield, out,
                                       self.__ran_weights, self.weights):
        # Write the component-wise product directly into the output slot.
        vfi.multiply(f, out=oi)
        if not np.isclose(ran_wi, dom_wi):
            # Rescale only when the weights actually differ.
            oi *= dom_wi / ran_wi
|
def function[_call, parameter[self, f, out]]:
constant[Implement ``self(vf, out)``.]
for taget[tuple[[<ast.Name object at 0x7da1b1e5c400>, <ast.Name object at 0x7da1b1e5c640>, <ast.Name object at 0x7da1b1e5e5c0>, <ast.Name object at 0x7da1b1e5dc30>]]] in starred[call[name[zip], parameter[name[self].vecfield, name[out], name[self].__ran_weights, name[self].weights]]] begin[:]
call[name[vfi].multiply, parameter[name[f]]]
if <ast.UnaryOp object at 0x7da1b20b4250> begin[:]
<ast.AugAssign object at 0x7da1b20b4430>
|
keyword[def] identifier[_call] ( identifier[self] , identifier[f] , identifier[out] ):
literal[string]
keyword[for] identifier[vfi] , identifier[oi] , identifier[ran_wi] , identifier[dom_wi] keyword[in] identifier[zip] ( identifier[self] . identifier[vecfield] , identifier[out] ,
identifier[self] . identifier[__ran_weights] , identifier[self] . identifier[weights] ):
identifier[vfi] . identifier[multiply] ( identifier[f] , identifier[out] = identifier[oi] )
keyword[if] keyword[not] identifier[np] . identifier[isclose] ( identifier[ran_wi] , identifier[dom_wi] ):
identifier[oi] *= identifier[dom_wi] / identifier[ran_wi]
|
def _call(self, f, out):
"""Implement ``self(vf, out)``."""
for (vfi, oi, ran_wi, dom_wi) in zip(self.vecfield, out, self.__ran_weights, self.weights):
vfi.multiply(f, out=oi)
if not np.isclose(ran_wi, dom_wi):
oi *= dom_wi / ran_wi # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
|
def raise_check_result(self):
    """Raise ACTIVE CHECK RESULT entry
    Example : "ACTIVE HOST CHECK: server;DOWN;HARD;1;I don't know what to say..."
    :return: None
    """
    if not self.__class__.log_active_checks:
        return
    # Map host state to log severity; any other state is plain info.
    severity_by_state = {'DOWN': 'error', 'UNREACHABLE': 'warning'}
    log_level = severity_by_state.get(self.state, 'info')
    message = 'ACTIVE HOST CHECK: %s;%s;%d;%s' % (self.get_name(), self.state,
                                                  self.attempt, self.output)
    self.broks.append(make_monitoring_log(log_level, message))
|
def function[raise_check_result, parameter[self]]:
constant[Raise ACTIVE CHECK RESULT entry
Example : "ACTIVE HOST CHECK: server;DOWN;HARD;1;I don't know what to say..."
:return: None
]
if <ast.UnaryOp object at 0x7da20c7cbf40> begin[:]
return[None]
variable[log_level] assign[=] constant[info]
if compare[name[self].state equal[==] constant[DOWN]] begin[:]
variable[log_level] assign[=] constant[error]
variable[brok] assign[=] call[name[make_monitoring_log], parameter[name[log_level], binary_operation[constant[ACTIVE HOST CHECK: %s;%s;%d;%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Call object at 0x7da207f00400>, <ast.Attribute object at 0x7da207f00f70>, <ast.Attribute object at 0x7da207f026b0>, <ast.Attribute object at 0x7da207f00430>]]]]]
call[name[self].broks.append, parameter[name[brok]]]
|
keyword[def] identifier[raise_check_result] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[__class__] . identifier[log_active_checks] :
keyword[return]
identifier[log_level] = literal[string]
keyword[if] identifier[self] . identifier[state] == literal[string] :
identifier[log_level] = literal[string]
keyword[elif] identifier[self] . identifier[state] == literal[string] :
identifier[log_level] = literal[string]
identifier[brok] = identifier[make_monitoring_log] (
identifier[log_level] , literal[string] %( identifier[self] . identifier[get_name] (), identifier[self] . identifier[state] ,
identifier[self] . identifier[attempt] , identifier[self] . identifier[output] )
)
identifier[self] . identifier[broks] . identifier[append] ( identifier[brok] )
|
def raise_check_result(self):
"""Raise ACTIVE CHECK RESULT entry
Example : "ACTIVE HOST CHECK: server;DOWN;HARD;1;I don't know what to say..."
:return: None
"""
if not self.__class__.log_active_checks:
return # depends on [control=['if'], data=[]]
log_level = 'info'
if self.state == 'DOWN':
log_level = 'error' # depends on [control=['if'], data=[]]
elif self.state == 'UNREACHABLE':
log_level = 'warning' # depends on [control=['if'], data=[]]
brok = make_monitoring_log(log_level, 'ACTIVE HOST CHECK: %s;%s;%d;%s' % (self.get_name(), self.state, self.attempt, self.output))
self.broks.append(brok)
|
def _rescale(ar):
    """Shift and rescale array ``ar`` to the interval [-1, 1].

    NaNs are ignored when locating the extremes (``nanmax``/``nanmin``).
    If ``ar`` is constant the denominator is zero and the result contains
    inf/nan, matching the previous behaviour.
    """
    # Renamed from ``max``/``min``: the original shadowed the builtins.
    hi = np.nanmax(ar)
    lo = np.nanmin(ar)
    midpoint = (hi + lo) / 2.0
    return 2.0 * (ar - midpoint) / (hi - lo)
|
def function[_rescale, parameter[ar]]:
constant[Shift and rescale array ar to the interval [-1, 1]]
variable[max] assign[=] call[name[np].nanmax, parameter[name[ar]]]
variable[min] assign[=] call[name[np].nanmin, parameter[name[ar]]]
variable[midpoint] assign[=] binary_operation[binary_operation[name[max] + name[min]] / constant[2.0]]
return[binary_operation[binary_operation[constant[2.0] * binary_operation[name[ar] - name[midpoint]]] / binary_operation[name[max] - name[min]]]]
|
keyword[def] identifier[_rescale] ( identifier[ar] ):
literal[string]
identifier[max] = identifier[np] . identifier[nanmax] ( identifier[ar] )
identifier[min] = identifier[np] . identifier[nanmin] ( identifier[ar] )
identifier[midpoint] =( identifier[max] + identifier[min] )/ literal[int]
keyword[return] literal[int] *( identifier[ar] - identifier[midpoint] )/( identifier[max] - identifier[min] )
|
def _rescale(ar):
"""Shift and rescale array ar to the interval [-1, 1]"""
max = np.nanmax(ar)
min = np.nanmin(ar)
midpoint = (max + min) / 2.0
return 2.0 * (ar - midpoint) / (max - min)
|
def get_config(self):
    """Return configurations of EpsGreedyQPolicy

    # Returns
        Dict of config: the base policy's config extended with the
        exploration rate ``eps``.
    """
    # Start from the superclass config so base keys are preserved.
    config = super(EpsGreedyQPolicy, self).get_config()
    config['eps'] = self.eps
    return config
|
def function[get_config, parameter[self]]:
constant[Return configurations of EpsGreedyQPolicy
# Returns
Dict of config
]
variable[config] assign[=] call[call[name[super], parameter[name[EpsGreedyQPolicy], name[self]]].get_config, parameter[]]
call[name[config]][constant[eps]] assign[=] name[self].eps
return[name[config]]
|
keyword[def] identifier[get_config] ( identifier[self] ):
literal[string]
identifier[config] = identifier[super] ( identifier[EpsGreedyQPolicy] , identifier[self] ). identifier[get_config] ()
identifier[config] [ literal[string] ]= identifier[self] . identifier[eps]
keyword[return] identifier[config]
|
def get_config(self):
"""Return configurations of EpsGreedyQPolicy
# Returns
Dict of config
"""
config = super(EpsGreedyQPolicy, self).get_config()
config['eps'] = self.eps
return config
|
def on_copy_remote(self, pair):
    """Called when the remote resource should be copied to local."""
    # Direction marker "<" means remote -> local.
    self._log_action("copy", pair.local_classification, "<", pair.remote)
|
def function[on_copy_remote, parameter[self, pair]]:
constant[Called when the remote resource should be copied to local.]
variable[status] assign[=] name[pair].local_classification
call[name[self]._log_action, parameter[constant[copy], name[status], constant[<], name[pair].remote]]
|
keyword[def] identifier[on_copy_remote] ( identifier[self] , identifier[pair] ):
literal[string]
identifier[status] = identifier[pair] . identifier[local_classification]
identifier[self] . identifier[_log_action] ( literal[string] , identifier[status] , literal[string] , identifier[pair] . identifier[remote] )
|
def on_copy_remote(self, pair):
"""Called when the remote resource should be copied to local."""
status = pair.local_classification
self._log_action('copy', status, '<', pair.remote)
|
def _convert_to_boolean(self, value):
    """Return a boolean value translating from other types if necessary.
    """
    # Lookup is case-insensitive against the recognised boolean spellings.
    lowered = value.lower()
    if lowered in self.BOOLEAN_STATES:
        return self.BOOLEAN_STATES[lowered]
    raise ValueError('Not a boolean: %s' % value)
|
def function[_convert_to_boolean, parameter[self, value]]:
constant[Return a boolean value translating from other types if necessary.
]
if compare[call[name[value].lower, parameter[]] <ast.NotIn object at 0x7da2590d7190> name[self].BOOLEAN_STATES] begin[:]
<ast.Raise object at 0x7da2047e8490>
return[call[name[self].BOOLEAN_STATES][call[name[value].lower, parameter[]]]]
|
keyword[def] identifier[_convert_to_boolean] ( identifier[self] , identifier[value] ):
literal[string]
keyword[if] identifier[value] . identifier[lower] () keyword[not] keyword[in] identifier[self] . identifier[BOOLEAN_STATES] :
keyword[raise] identifier[ValueError] ( literal[string] % identifier[value] )
keyword[return] identifier[self] . identifier[BOOLEAN_STATES] [ identifier[value] . identifier[lower] ()]
|
def _convert_to_boolean(self, value):
"""Return a boolean value translating from other types if necessary.
"""
if value.lower() not in self.BOOLEAN_STATES:
raise ValueError('Not a boolean: %s' % value) # depends on [control=['if'], data=[]]
return self.BOOLEAN_STATES[value.lower()]
|
def is_state(self, status):
    """Return True if status match the current service status

    :param status: status to compare ( "o", "c", "w", "u", "x"). Usually comes from config files
    :type status: str
    :return: True if status <=> self.status, otherwise False
    :rtype: bool
    """
    # Exact match on the long state name ("OK", "CRITICAL", ...).
    if status == self.state:
        return True
    # Single-letter shorthand codes map onto the long state names.
    short_codes = {
        'o': u'OK',
        'c': u'CRITICAL',
        'w': u'WARNING',
        'u': u'UNKNOWN',
        'x': u'UNREACHABLE',
    }
    expected = short_codes.get(status)
    return expected is not None and expected == self.state
|
def function[is_state, parameter[self, status]]:
constant[Return True if status match the current service status
:param status: status to compare ( "o", "c", "w", "u", "x"). Usually comes from config files
:type status: str
:return: True if status <=> self.status, otherwise False
:rtype: bool
]
if compare[name[status] equal[==] name[self].state] begin[:]
return[constant[True]]
if <ast.BoolOp object at 0x7da204567370> begin[:]
return[constant[True]]
if <ast.BoolOp object at 0x7da1b0ddf490> begin[:]
return[constant[True]]
if <ast.BoolOp object at 0x7da1b0ddca90> begin[:]
return[constant[True]]
if <ast.BoolOp object at 0x7da1b0ddddb0> begin[:]
return[constant[True]]
if <ast.BoolOp object at 0x7da1b0dde2f0> begin[:]
return[constant[True]]
return[constant[False]]
|
keyword[def] identifier[is_state] ( identifier[self] , identifier[status] ):
literal[string]
keyword[if] identifier[status] == identifier[self] . identifier[state] :
keyword[return] keyword[True]
keyword[if] identifier[status] == literal[string] keyword[and] identifier[self] . identifier[state] == literal[string] :
keyword[return] keyword[True]
keyword[if] identifier[status] == literal[string] keyword[and] identifier[self] . identifier[state] == literal[string] :
keyword[return] keyword[True]
keyword[if] identifier[status] == literal[string] keyword[and] identifier[self] . identifier[state] == literal[string] :
keyword[return] keyword[True]
keyword[if] identifier[status] == literal[string] keyword[and] identifier[self] . identifier[state] == literal[string] :
keyword[return] keyword[True]
keyword[if] identifier[status] == literal[string] keyword[and] identifier[self] . identifier[state] == literal[string] :
keyword[return] keyword[True]
keyword[return] keyword[False]
|
def is_state(self, status):
# pylint: disable=too-many-return-statements
'Return True if status match the current service status\n\n :param status: status to compare ( "o", "c", "w", "u", "x"). Usually comes from config files\n :type status: str\n :return: True if status <=> self.status, otherwise False\n :rtype: bool\n '
if status == self.state:
return True # depends on [control=['if'], data=[]]
# Now low status
if status == 'o' and self.state == u'OK':
return True # depends on [control=['if'], data=[]]
if status == 'c' and self.state == u'CRITICAL':
return True # depends on [control=['if'], data=[]]
if status == 'w' and self.state == u'WARNING':
return True # depends on [control=['if'], data=[]]
if status == 'u' and self.state == u'UNKNOWN':
return True # depends on [control=['if'], data=[]]
if status == 'x' and self.state == u'UNREACHABLE':
return True # depends on [control=['if'], data=[]]
return False
|
def _zerosamestates(self, A):
    """
    zeros out states that should be identical
    REQUIRED ARGUMENTS
    A: the matrix whose entries are to be zeroed.
    """
    for i, j in self.samestates:
        # Zero both symmetric entries for the identical state pair.
        A[i, j] = 0
        A[j, i] = 0
|
def function[_zerosamestates, parameter[self, A]]:
constant[
zeros out states that should be identical
REQUIRED ARGUMENTS
A: the matrix whose entries are to be zeroed.
]
for taget[name[pair]] in starred[name[self].samestates] begin[:]
call[name[A]][tuple[[<ast.Subscript object at 0x7da1b26ac3a0>, <ast.Subscript object at 0x7da20c990100>]]] assign[=] constant[0]
call[name[A]][tuple[[<ast.Subscript object at 0x7da20c9928f0>, <ast.Subscript object at 0x7da20c991ab0>]]] assign[=] constant[0]
|
keyword[def] identifier[_zerosamestates] ( identifier[self] , identifier[A] ):
literal[string]
keyword[for] identifier[pair] keyword[in] identifier[self] . identifier[samestates] :
identifier[A] [ identifier[pair] [ literal[int] ], identifier[pair] [ literal[int] ]]= literal[int]
identifier[A] [ identifier[pair] [ literal[int] ], identifier[pair] [ literal[int] ]]= literal[int]
|
def _zerosamestates(self, A):
"""
zeros out states that should be identical
REQUIRED ARGUMENTS
A: the matrix whose entries are to be zeroed.
"""
for pair in self.samestates:
A[pair[0], pair[1]] = 0
A[pair[1], pair[0]] = 0 # depends on [control=['for'], data=['pair']]
|
def from_uri(cls, uri, socket_timeout=None, auto_decode=False):
    """Construct a synchronous Beanstalk Client from a URI.
    The URI may be of the form beanstalk://host:port or beanstalkd://host:port
    IPv6 literals must be wrapped in brackets as per RFC 2732.
    """
    parsed = six.moves.urllib.parse.urlparse(uri)
    if parsed.scheme.lower() not in ('beanstalk', 'beanstalkd'):
        raise ValueError('Invalid scheme %s' % parsed.scheme)
    netloc = parsed.netloc
    bracketed = re.match(r'^\[([0-9a-fA-F:]+)\](:[0-9]+)?$', netloc)
    if bracketed:
        # Bracketed IPv6 literal, e.g. "[::1]:1234"; the optional port
        # group still carries its leading colon, hence the lstrip below.
        host = bracketed.group(1)
        port = (bracketed.group(2) or ':11300').lstrip(':')
    elif ':' in netloc:
        host, port = netloc.rsplit(':', 1)
    else:
        # No explicit port: use beanstalkd's default.
        host, port = netloc, 11300
    return cls(host, int(port), socket_timeout=socket_timeout, auto_decode=auto_decode)
|
def function[from_uri, parameter[cls, uri, socket_timeout, auto_decode]]:
constant[Construct a synchronous Beanstalk Client from a URI.
The URI may be of the form beanstalk://host:port or beanstalkd://host:port
IPv6 literals must be wrapped in brackets as per RFC 2732.
]
variable[parts] assign[=] call[name[six].moves.urllib.parse.urlparse, parameter[name[uri]]]
if compare[call[name[parts].scheme.lower, parameter[]] <ast.NotIn object at 0x7da2590d7190> tuple[[<ast.Constant object at 0x7da1b25976d0>, <ast.Constant object at 0x7da1b25953c0>]]] begin[:]
<ast.Raise object at 0x7da1b2594e80>
variable[ipv6_md] assign[=] call[name[re].match, parameter[constant[^\[([0-9a-fA-F:]+)\](:[0-9]+)?$], name[parts].netloc]]
if name[ipv6_md] begin[:]
variable[host] assign[=] call[name[ipv6_md].group, parameter[constant[1]]]
variable[port] assign[=] <ast.BoolOp object at 0x7da1b2595c00>
variable[port] assign[=] call[name[port].lstrip, parameter[constant[:]]]
variable[port] assign[=] call[name[int], parameter[name[port]]]
return[call[name[cls], parameter[name[host], name[port]]]]
|
keyword[def] identifier[from_uri] ( identifier[cls] , identifier[uri] , identifier[socket_timeout] = keyword[None] , identifier[auto_decode] = keyword[False] ):
literal[string]
identifier[parts] = identifier[six] . identifier[moves] . identifier[urllib] . identifier[parse] . identifier[urlparse] ( identifier[uri] )
keyword[if] identifier[parts] . identifier[scheme] . identifier[lower] () keyword[not] keyword[in] ( literal[string] , literal[string] ):
keyword[raise] identifier[ValueError] ( literal[string] % identifier[parts] . identifier[scheme] )
identifier[ipv6_md] = identifier[re] . identifier[match] ( literal[string] , identifier[parts] . identifier[netloc] )
keyword[if] identifier[ipv6_md] :
identifier[host] = identifier[ipv6_md] . identifier[group] ( literal[int] )
identifier[port] = identifier[ipv6_md] . identifier[group] ( literal[int] ) keyword[or] literal[string]
identifier[port] = identifier[port] . identifier[lstrip] ( literal[string] )
keyword[elif] literal[string] keyword[in] identifier[parts] . identifier[netloc] :
identifier[host] , identifier[port] = identifier[parts] . identifier[netloc] . identifier[rsplit] ( literal[string] , literal[int] )
keyword[else] :
identifier[host] = identifier[parts] . identifier[netloc]
identifier[port] = literal[int]
identifier[port] = identifier[int] ( identifier[port] )
keyword[return] identifier[cls] ( identifier[host] , identifier[port] , identifier[socket_timeout] = identifier[socket_timeout] , identifier[auto_decode] = identifier[auto_decode] )
|
def from_uri(cls, uri, socket_timeout=None, auto_decode=False):
"""Construct a synchronous Beanstalk Client from a URI.
The URI may be of the form beanstalk://host:port or beanstalkd://host:port
IPv6 literals must be wrapped in brackets as per RFC 2732.
"""
parts = six.moves.urllib.parse.urlparse(uri)
if parts.scheme.lower() not in ('beanstalk', 'beanstalkd'):
raise ValueError('Invalid scheme %s' % parts.scheme) # depends on [control=['if'], data=[]]
ipv6_md = re.match('^\\[([0-9a-fA-F:]+)\\](:[0-9]+)?$', parts.netloc)
if ipv6_md:
host = ipv6_md.group(1)
port = ipv6_md.group(2) or '11300'
port = port.lstrip(':') # depends on [control=['if'], data=[]]
elif ':' in parts.netloc:
(host, port) = parts.netloc.rsplit(':', 1) # depends on [control=['if'], data=[]]
else:
host = parts.netloc
port = 11300
port = int(port)
return cls(host, port, socket_timeout=socket_timeout, auto_decode=auto_decode)
|
def Deserialize(self, reader):
"""
Deserialize full object.
Args:
reader (neo.IO.BinaryReader):
"""
super(Header, self).Deserialize(reader)
if reader.ReadByte() != 0:
raise Exception('Incorrect Header Format')
|
def function[Deserialize, parameter[self, reader]]:
constant[
Deserialize full object.
Args:
reader (neo.IO.BinaryReader):
]
call[call[name[super], parameter[name[Header], name[self]]].Deserialize, parameter[name[reader]]]
if compare[call[name[reader].ReadByte, parameter[]] not_equal[!=] constant[0]] begin[:]
<ast.Raise object at 0x7da204623310>
|
keyword[def] identifier[Deserialize] ( identifier[self] , identifier[reader] ):
literal[string]
identifier[super] ( identifier[Header] , identifier[self] ). identifier[Deserialize] ( identifier[reader] )
keyword[if] identifier[reader] . identifier[ReadByte] ()!= literal[int] :
keyword[raise] identifier[Exception] ( literal[string] )
|
def Deserialize(self, reader):
"""
Deserialize full object.
Args:
reader (neo.IO.BinaryReader):
"""
super(Header, self).Deserialize(reader)
if reader.ReadByte() != 0:
raise Exception('Incorrect Header Format') # depends on [control=['if'], data=[]]
|
def map(self, callback):
"""
Run a map over each of the item.
:param callback: The map function
:type callback: callable
:rtype: Collection
"""
return self.__class__(list(map(callback, self.items)))
|
def function[map, parameter[self, callback]]:
constant[
Run a map over each of the item.
:param callback: The map function
:type callback: callable
:rtype: Collection
]
return[call[name[self].__class__, parameter[call[name[list], parameter[call[name[map], parameter[name[callback], name[self].items]]]]]]]
|
keyword[def] identifier[map] ( identifier[self] , identifier[callback] ):
literal[string]
keyword[return] identifier[self] . identifier[__class__] ( identifier[list] ( identifier[map] ( identifier[callback] , identifier[self] . identifier[items] )))
|
def map(self, callback):
"""
Run a map over each of the item.
:param callback: The map function
:type callback: callable
:rtype: Collection
"""
return self.__class__(list(map(callback, self.items)))
|
def _set_member_vlan(self, v, load=False):
"""
Setter method for member_vlan, mapped from YANG variable /topology_group/member_vlan (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_member_vlan is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_member_vlan() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=member_vlan.member_vlan, is_container='container', presence=False, yang_name="member-vlan", rest_name="member-vlan", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Member VLANs for this topology group', u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-topology-group', defining_module='brocade-topology-group', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """member_vlan must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=member_vlan.member_vlan, is_container='container', presence=False, yang_name="member-vlan", rest_name="member-vlan", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Member VLANs for this topology group', u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-topology-group', defining_module='brocade-topology-group', yang_type='container', is_config=True)""",
})
self.__member_vlan = t
if hasattr(self, '_set'):
self._set()
|
def function[_set_member_vlan, parameter[self, v, load]]:
constant[
Setter method for member_vlan, mapped from YANG variable /topology_group/member_vlan (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_member_vlan is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_member_vlan() directly.
]
if call[name[hasattr], parameter[name[v], constant[_utype]]] begin[:]
variable[v] assign[=] call[name[v]._utype, parameter[name[v]]]
<ast.Try object at 0x7da18f723730>
name[self].__member_vlan assign[=] name[t]
if call[name[hasattr], parameter[name[self], constant[_set]]] begin[:]
call[name[self]._set, parameter[]]
|
keyword[def] identifier[_set_member_vlan] ( identifier[self] , identifier[v] , identifier[load] = keyword[False] ):
literal[string]
keyword[if] identifier[hasattr] ( identifier[v] , literal[string] ):
identifier[v] = identifier[v] . identifier[_utype] ( identifier[v] )
keyword[try] :
identifier[t] = identifier[YANGDynClass] ( identifier[v] , identifier[base] = identifier[member_vlan] . identifier[member_vlan] , identifier[is_container] = literal[string] , identifier[presence] = keyword[False] , identifier[yang_name] = literal[string] , identifier[rest_name] = literal[string] , identifier[parent] = identifier[self] , identifier[path_helper] = identifier[self] . identifier[_path_helper] , identifier[extmethods] = identifier[self] . identifier[_extmethods] , identifier[register_paths] = keyword[True] , identifier[extensions] ={ literal[string] :{ literal[string] : literal[string] , literal[string] : keyword[None] }}, identifier[namespace] = literal[string] , identifier[defining_module] = literal[string] , identifier[yang_type] = literal[string] , identifier[is_config] = keyword[True] )
keyword[except] ( identifier[TypeError] , identifier[ValueError] ):
keyword[raise] identifier[ValueError] ({
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
})
identifier[self] . identifier[__member_vlan] = identifier[t]
keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ):
identifier[self] . identifier[_set] ()
|
def _set_member_vlan(self, v, load=False):
"""
Setter method for member_vlan, mapped from YANG variable /topology_group/member_vlan (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_member_vlan is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_member_vlan() directly.
"""
if hasattr(v, '_utype'):
v = v._utype(v) # depends on [control=['if'], data=[]]
try:
t = YANGDynClass(v, base=member_vlan.member_vlan, is_container='container', presence=False, yang_name='member-vlan', rest_name='member-vlan', parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Member VLANs for this topology group', u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-topology-group', defining_module='brocade-topology-group', yang_type='container', is_config=True) # depends on [control=['try'], data=[]]
except (TypeError, ValueError):
raise ValueError({'error-string': 'member_vlan must be of a type compatible with container', 'defined-type': 'container', 'generated-type': 'YANGDynClass(base=member_vlan.member_vlan, is_container=\'container\', presence=False, yang_name="member-vlan", rest_name="member-vlan", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u\'tailf-common\': {u\'info\': u\'Configure Member VLANs for this topology group\', u\'cli-suppress-no\': None}}, namespace=\'urn:brocade.com:mgmt:brocade-topology-group\', defining_module=\'brocade-topology-group\', yang_type=\'container\', is_config=True)'}) # depends on [control=['except'], data=[]]
self.__member_vlan = t
if hasattr(self, '_set'):
self._set() # depends on [control=['if'], data=[]]
|
def main():
""" Main entry point, expects doctopt arg dict as argd. """
global DEBUG
argd = docopt(USAGESTR, version=VERSIONSTR, script=SCRIPT)
DEBUG = argd['--debug']
width = parse_int(argd['--width'] or DEFAULT_WIDTH) or 1
indent = parse_int(argd['--indent'] or (argd['--INDENT'] or 0))
prepend = ' ' * (indent * 4)
if prepend and argd['--indent']:
# Smart indent, change max width based on indention.
width -= len(prepend)
userprepend = argd['--prepend'] or (argd['--PREPEND'] or '')
prepend = ''.join((prepend, userprepend))
if argd['--prepend']:
# Smart indent, change max width based on prepended text.
width -= len(userprepend)
userappend = argd['--append'] or (argd['--APPEND'] or '')
if argd['--append']:
width -= len(userappend)
if argd['WORDS']:
# Try each argument as a file name.
argd['WORDS'] = (
(try_read_file(w) if len(w) < 256 else w)
for w in argd['WORDS']
)
words = ' '.join((w for w in argd['WORDS'] if w))
else:
# No text/filenames provided, use stdin for input.
words = read_stdin()
block = FormatBlock(words).iter_format_block(
chars=argd['--chars'],
fill=argd['--fill'],
prepend=prepend,
strip_first=argd['--stripfirst'],
append=userappend,
strip_last=argd['--striplast'],
width=width,
newlines=argd['--newlines'],
lstrip=argd['--lstrip'],
)
for i, line in enumerate(block):
if argd['--enumerate']:
# Current line number format supports up to 999 lines before
# messing up. Who would format 1000 lines like this anyway?
print('{: >3}: {}'.format(i + 1, line))
else:
print(line)
return 0
|
def function[main, parameter[]]:
constant[ Main entry point, expects doctopt arg dict as argd. ]
<ast.Global object at 0x7da18bcc9330>
variable[argd] assign[=] call[name[docopt], parameter[name[USAGESTR]]]
variable[DEBUG] assign[=] call[name[argd]][constant[--debug]]
variable[width] assign[=] <ast.BoolOp object at 0x7da18bccbc70>
variable[indent] assign[=] call[name[parse_int], parameter[<ast.BoolOp object at 0x7da18bcc88e0>]]
variable[prepend] assign[=] binary_operation[constant[ ] * binary_operation[name[indent] * constant[4]]]
if <ast.BoolOp object at 0x7da18bcc9510> begin[:]
<ast.AugAssign object at 0x7da18bccb6a0>
variable[userprepend] assign[=] <ast.BoolOp object at 0x7da18bccab60>
variable[prepend] assign[=] call[constant[].join, parameter[tuple[[<ast.Name object at 0x7da18bcc8580>, <ast.Name object at 0x7da18bccb640>]]]]
if call[name[argd]][constant[--prepend]] begin[:]
<ast.AugAssign object at 0x7da18bccaad0>
variable[userappend] assign[=] <ast.BoolOp object at 0x7da18bcc9bd0>
if call[name[argd]][constant[--append]] begin[:]
<ast.AugAssign object at 0x7da18bccb310>
if call[name[argd]][constant[WORDS]] begin[:]
call[name[argd]][constant[WORDS]] assign[=] <ast.GeneratorExp object at 0x7da18bcc8b50>
variable[words] assign[=] call[constant[ ].join, parameter[<ast.GeneratorExp object at 0x7da18bcc93f0>]]
variable[block] assign[=] call[call[name[FormatBlock], parameter[name[words]]].iter_format_block, parameter[]]
for taget[tuple[[<ast.Name object at 0x7da18bcc85b0>, <ast.Name object at 0x7da18bcca140>]]] in starred[call[name[enumerate], parameter[name[block]]]] begin[:]
if call[name[argd]][constant[--enumerate]] begin[:]
call[name[print], parameter[call[constant[{: >3}: {}].format, parameter[binary_operation[name[i] + constant[1]], name[line]]]]]
return[constant[0]]
|
keyword[def] identifier[main] ():
literal[string]
keyword[global] identifier[DEBUG]
identifier[argd] = identifier[docopt] ( identifier[USAGESTR] , identifier[version] = identifier[VERSIONSTR] , identifier[script] = identifier[SCRIPT] )
identifier[DEBUG] = identifier[argd] [ literal[string] ]
identifier[width] = identifier[parse_int] ( identifier[argd] [ literal[string] ] keyword[or] identifier[DEFAULT_WIDTH] ) keyword[or] literal[int]
identifier[indent] = identifier[parse_int] ( identifier[argd] [ literal[string] ] keyword[or] ( identifier[argd] [ literal[string] ] keyword[or] literal[int] ))
identifier[prepend] = literal[string] *( identifier[indent] * literal[int] )
keyword[if] identifier[prepend] keyword[and] identifier[argd] [ literal[string] ]:
identifier[width] -= identifier[len] ( identifier[prepend] )
identifier[userprepend] = identifier[argd] [ literal[string] ] keyword[or] ( identifier[argd] [ literal[string] ] keyword[or] literal[string] )
identifier[prepend] = literal[string] . identifier[join] (( identifier[prepend] , identifier[userprepend] ))
keyword[if] identifier[argd] [ literal[string] ]:
identifier[width] -= identifier[len] ( identifier[userprepend] )
identifier[userappend] = identifier[argd] [ literal[string] ] keyword[or] ( identifier[argd] [ literal[string] ] keyword[or] literal[string] )
keyword[if] identifier[argd] [ literal[string] ]:
identifier[width] -= identifier[len] ( identifier[userappend] )
keyword[if] identifier[argd] [ literal[string] ]:
identifier[argd] [ literal[string] ]=(
( identifier[try_read_file] ( identifier[w] ) keyword[if] identifier[len] ( identifier[w] )< literal[int] keyword[else] identifier[w] )
keyword[for] identifier[w] keyword[in] identifier[argd] [ literal[string] ]
)
identifier[words] = literal[string] . identifier[join] (( identifier[w] keyword[for] identifier[w] keyword[in] identifier[argd] [ literal[string] ] keyword[if] identifier[w] ))
keyword[else] :
identifier[words] = identifier[read_stdin] ()
identifier[block] = identifier[FormatBlock] ( identifier[words] ). identifier[iter_format_block] (
identifier[chars] = identifier[argd] [ literal[string] ],
identifier[fill] = identifier[argd] [ literal[string] ],
identifier[prepend] = identifier[prepend] ,
identifier[strip_first] = identifier[argd] [ literal[string] ],
identifier[append] = identifier[userappend] ,
identifier[strip_last] = identifier[argd] [ literal[string] ],
identifier[width] = identifier[width] ,
identifier[newlines] = identifier[argd] [ literal[string] ],
identifier[lstrip] = identifier[argd] [ literal[string] ],
)
keyword[for] identifier[i] , identifier[line] keyword[in] identifier[enumerate] ( identifier[block] ):
keyword[if] identifier[argd] [ literal[string] ]:
identifier[print] ( literal[string] . identifier[format] ( identifier[i] + literal[int] , identifier[line] ))
keyword[else] :
identifier[print] ( identifier[line] )
keyword[return] literal[int]
|
def main():
""" Main entry point, expects doctopt arg dict as argd. """
global DEBUG
argd = docopt(USAGESTR, version=VERSIONSTR, script=SCRIPT)
DEBUG = argd['--debug']
width = parse_int(argd['--width'] or DEFAULT_WIDTH) or 1
indent = parse_int(argd['--indent'] or (argd['--INDENT'] or 0))
prepend = ' ' * (indent * 4)
if prepend and argd['--indent']:
# Smart indent, change max width based on indention.
width -= len(prepend) # depends on [control=['if'], data=[]]
userprepend = argd['--prepend'] or (argd['--PREPEND'] or '')
prepend = ''.join((prepend, userprepend))
if argd['--prepend']:
# Smart indent, change max width based on prepended text.
width -= len(userprepend) # depends on [control=['if'], data=[]]
userappend = argd['--append'] or (argd['--APPEND'] or '')
if argd['--append']:
width -= len(userappend) # depends on [control=['if'], data=[]]
if argd['WORDS']:
# Try each argument as a file name.
argd['WORDS'] = (try_read_file(w) if len(w) < 256 else w for w in argd['WORDS'])
words = ' '.join((w for w in argd['WORDS'] if w)) # depends on [control=['if'], data=[]]
else:
# No text/filenames provided, use stdin for input.
words = read_stdin()
block = FormatBlock(words).iter_format_block(chars=argd['--chars'], fill=argd['--fill'], prepend=prepend, strip_first=argd['--stripfirst'], append=userappend, strip_last=argd['--striplast'], width=width, newlines=argd['--newlines'], lstrip=argd['--lstrip'])
for (i, line) in enumerate(block):
if argd['--enumerate']:
# Current line number format supports up to 999 lines before
# messing up. Who would format 1000 lines like this anyway?
print('{: >3}: {}'.format(i + 1, line)) # depends on [control=['if'], data=[]]
else:
print(line) # depends on [control=['for'], data=[]]
return 0
|
def work_items(self):
"""An iterable of all of WorkItems in the db.
This includes both WorkItems with and without results.
"""
cur = self._conn.cursor()
rows = cur.execute("SELECT * FROM work_items")
for row in rows:
yield _row_to_work_item(row)
|
def function[work_items, parameter[self]]:
constant[An iterable of all of WorkItems in the db.
This includes both WorkItems with and without results.
]
variable[cur] assign[=] call[name[self]._conn.cursor, parameter[]]
variable[rows] assign[=] call[name[cur].execute, parameter[constant[SELECT * FROM work_items]]]
for taget[name[row]] in starred[name[rows]] begin[:]
<ast.Yield object at 0x7da20c795570>
|
keyword[def] identifier[work_items] ( identifier[self] ):
literal[string]
identifier[cur] = identifier[self] . identifier[_conn] . identifier[cursor] ()
identifier[rows] = identifier[cur] . identifier[execute] ( literal[string] )
keyword[for] identifier[row] keyword[in] identifier[rows] :
keyword[yield] identifier[_row_to_work_item] ( identifier[row] )
|
def work_items(self):
"""An iterable of all of WorkItems in the db.
This includes both WorkItems with and without results.
"""
cur = self._conn.cursor()
rows = cur.execute('SELECT * FROM work_items')
for row in rows:
yield _row_to_work_item(row) # depends on [control=['for'], data=['row']]
|
def splitset(num_trials, skipstep=None):
""" Split-set cross validation
Use half the trials for training, and the other half for testing. Then
repeat the other way round.
Parameters
----------
num_trials : int
Total number of trials
skipstep : int
unused
Returns
-------
gen : generator object
the generator returns tuples (trainset, testset)
"""
split = num_trials // 2
a = list(range(0, split))
b = list(range(split, num_trials))
yield a, b
yield b, a
|
def function[splitset, parameter[num_trials, skipstep]]:
constant[ Split-set cross validation
Use half the trials for training, and the other half for testing. Then
repeat the other way round.
Parameters
----------
num_trials : int
Total number of trials
skipstep : int
unused
Returns
-------
gen : generator object
the generator returns tuples (trainset, testset)
]
variable[split] assign[=] binary_operation[name[num_trials] <ast.FloorDiv object at 0x7da2590d6bc0> constant[2]]
variable[a] assign[=] call[name[list], parameter[call[name[range], parameter[constant[0], name[split]]]]]
variable[b] assign[=] call[name[list], parameter[call[name[range], parameter[name[split], name[num_trials]]]]]
<ast.Yield object at 0x7da1b2664bb0>
<ast.Yield object at 0x7da1b26cca60>
|
keyword[def] identifier[splitset] ( identifier[num_trials] , identifier[skipstep] = keyword[None] ):
literal[string]
identifier[split] = identifier[num_trials] // literal[int]
identifier[a] = identifier[list] ( identifier[range] ( literal[int] , identifier[split] ))
identifier[b] = identifier[list] ( identifier[range] ( identifier[split] , identifier[num_trials] ))
keyword[yield] identifier[a] , identifier[b]
keyword[yield] identifier[b] , identifier[a]
|
def splitset(num_trials, skipstep=None):
""" Split-set cross validation
Use half the trials for training, and the other half for testing. Then
repeat the other way round.
Parameters
----------
num_trials : int
Total number of trials
skipstep : int
unused
Returns
-------
gen : generator object
the generator returns tuples (trainset, testset)
"""
split = num_trials // 2
a = list(range(0, split))
b = list(range(split, num_trials))
yield (a, b)
yield (b, a)
|
def support_false_positive_count(m, m_hat):
"""Count the number of false positive support elements in
m_hat in one triangle, not including the diagonal.
"""
m_nnz, m_hat_nnz, intersection_nnz = _nonzero_intersection(m, m_hat)
return int((m_hat_nnz - intersection_nnz) / 2.0)
|
def function[support_false_positive_count, parameter[m, m_hat]]:
constant[Count the number of false positive support elements in
m_hat in one triangle, not including the diagonal.
]
<ast.Tuple object at 0x7da1b117a410> assign[=] call[name[_nonzero_intersection], parameter[name[m], name[m_hat]]]
return[call[name[int], parameter[binary_operation[binary_operation[name[m_hat_nnz] - name[intersection_nnz]] / constant[2.0]]]]]
|
keyword[def] identifier[support_false_positive_count] ( identifier[m] , identifier[m_hat] ):
literal[string]
identifier[m_nnz] , identifier[m_hat_nnz] , identifier[intersection_nnz] = identifier[_nonzero_intersection] ( identifier[m] , identifier[m_hat] )
keyword[return] identifier[int] (( identifier[m_hat_nnz] - identifier[intersection_nnz] )/ literal[int] )
|
def support_false_positive_count(m, m_hat):
"""Count the number of false positive support elements in
m_hat in one triangle, not including the diagonal.
"""
(m_nnz, m_hat_nnz, intersection_nnz) = _nonzero_intersection(m, m_hat)
return int((m_hat_nnz - intersection_nnz) / 2.0)
|
def getoptS(X, Y, M_E, E):
''' Find Sopt given X, Y
'''
n, r = X.shape
C = np.dot(np.dot(X.T, M_E), Y)
C = C.flatten()
A = np.zeros((r * r, r * r))
for i in range(r):
for j in range(r):
ind = j * r + i
temp = np.dot(
np.dot(X.T, np.dot(X[:, i, None], Y[:, j, None].T) * E), Y)
A[:, ind] = temp.flatten()
S = np.linalg.solve(A, C)
return np.reshape(S, (r, r)).T
|
def function[getoptS, parameter[X, Y, M_E, E]]:
constant[ Find Sopt given X, Y
]
<ast.Tuple object at 0x7da1b1ad17b0> assign[=] name[X].shape
variable[C] assign[=] call[name[np].dot, parameter[call[name[np].dot, parameter[name[X].T, name[M_E]]], name[Y]]]
variable[C] assign[=] call[name[C].flatten, parameter[]]
variable[A] assign[=] call[name[np].zeros, parameter[tuple[[<ast.BinOp object at 0x7da1b1ad2da0>, <ast.BinOp object at 0x7da1b1ad2890>]]]]
for taget[name[i]] in starred[call[name[range], parameter[name[r]]]] begin[:]
for taget[name[j]] in starred[call[name[range], parameter[name[r]]]] begin[:]
variable[ind] assign[=] binary_operation[binary_operation[name[j] * name[r]] + name[i]]
variable[temp] assign[=] call[name[np].dot, parameter[call[name[np].dot, parameter[name[X].T, binary_operation[call[name[np].dot, parameter[call[name[X]][tuple[[<ast.Slice object at 0x7da1b1ad0580>, <ast.Name object at 0x7da1b1ad0d30>, <ast.Constant object at 0x7da1b1ad02b0>]]], call[name[Y]][tuple[[<ast.Slice object at 0x7da1b1ad1780>, <ast.Name object at 0x7da1b1ad05e0>, <ast.Constant object at 0x7da1b1ad37f0>]]].T]] * name[E]]]], name[Y]]]
call[name[A]][tuple[[<ast.Slice object at 0x7da1b1ad3610>, <ast.Name object at 0x7da1b1ad3520>]]] assign[=] call[name[temp].flatten, parameter[]]
variable[S] assign[=] call[name[np].linalg.solve, parameter[name[A], name[C]]]
return[call[name[np].reshape, parameter[name[S], tuple[[<ast.Name object at 0x7da1b1ad23b0>, <ast.Name object at 0x7da1b1ad19c0>]]]].T]
|
keyword[def] identifier[getoptS] ( identifier[X] , identifier[Y] , identifier[M_E] , identifier[E] ):
literal[string]
identifier[n] , identifier[r] = identifier[X] . identifier[shape]
identifier[C] = identifier[np] . identifier[dot] ( identifier[np] . identifier[dot] ( identifier[X] . identifier[T] , identifier[M_E] ), identifier[Y] )
identifier[C] = identifier[C] . identifier[flatten] ()
identifier[A] = identifier[np] . identifier[zeros] (( identifier[r] * identifier[r] , identifier[r] * identifier[r] ))
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[r] ):
keyword[for] identifier[j] keyword[in] identifier[range] ( identifier[r] ):
identifier[ind] = identifier[j] * identifier[r] + identifier[i]
identifier[temp] = identifier[np] . identifier[dot] (
identifier[np] . identifier[dot] ( identifier[X] . identifier[T] , identifier[np] . identifier[dot] ( identifier[X] [:, identifier[i] , keyword[None] ], identifier[Y] [:, identifier[j] , keyword[None] ]. identifier[T] )* identifier[E] ), identifier[Y] )
identifier[A] [:, identifier[ind] ]= identifier[temp] . identifier[flatten] ()
identifier[S] = identifier[np] . identifier[linalg] . identifier[solve] ( identifier[A] , identifier[C] )
keyword[return] identifier[np] . identifier[reshape] ( identifier[S] ,( identifier[r] , identifier[r] )). identifier[T]
|
def getoptS(X, Y, M_E, E):
""" Find Sopt given X, Y
"""
(n, r) = X.shape
C = np.dot(np.dot(X.T, M_E), Y)
C = C.flatten()
A = np.zeros((r * r, r * r))
for i in range(r):
for j in range(r):
ind = j * r + i
temp = np.dot(np.dot(X.T, np.dot(X[:, i, None], Y[:, j, None].T) * E), Y)
A[:, ind] = temp.flatten() # depends on [control=['for'], data=['j']] # depends on [control=['for'], data=['i']]
S = np.linalg.solve(A, C)
return np.reshape(S, (r, r)).T
|
def stats(self):
"""Return a dictionary with a bunch of instance-wide statistics
:rtype: dict
"""
with self._sock_ctx() as socket:
self._send_message('stats', socket)
body = self._receive_data_with_prefix(b'OK', socket)
stats = yaml_load(body)
return stats
|
def function[stats, parameter[self]]:
constant[Return a dictionary with a bunch of instance-wide statistics
:rtype: dict
]
with call[name[self]._sock_ctx, parameter[]] begin[:]
call[name[self]._send_message, parameter[constant[stats], name[socket]]]
variable[body] assign[=] call[name[self]._receive_data_with_prefix, parameter[constant[b'OK'], name[socket]]]
variable[stats] assign[=] call[name[yaml_load], parameter[name[body]]]
return[name[stats]]
|
keyword[def] identifier[stats] ( identifier[self] ):
literal[string]
keyword[with] identifier[self] . identifier[_sock_ctx] () keyword[as] identifier[socket] :
identifier[self] . identifier[_send_message] ( literal[string] , identifier[socket] )
identifier[body] = identifier[self] . identifier[_receive_data_with_prefix] ( literal[string] , identifier[socket] )
identifier[stats] = identifier[yaml_load] ( identifier[body] )
keyword[return] identifier[stats]
|
def stats(self):
"""Return a dictionary with a bunch of instance-wide statistics
:rtype: dict
"""
with self._sock_ctx() as socket:
self._send_message('stats', socket)
body = self._receive_data_with_prefix(b'OK', socket)
stats = yaml_load(body)
return stats # depends on [control=['with'], data=['socket']]
|
def write_batch_report(self, input_directory, parameter):
"""
Collect all of the batch reports and concatenate the results. The report should be :
:param input_directory:
:param parameter: This is the parameter in which to report.
"""
# Check to see if there is an @ in the parameter. If there is split
if '@' in parameter:
parameter_dir = parameter.split('@')[1]
parameter = parameter.split('@')[0]
# --------------------------------------------------#
# we put the batch report one directory up in the tree
# --------------------------------------------------#
batch_report_file = 'batch_report.txt'
batch_report_file = os.path.join(input_directory, batch_report_file)
f = open(batch_report_file, 'w')
w = csv.writer(f, delimiter=',')
#--------------------------------------------------#
# Read in the report from planarrad and pull out the parameter that we want
#--------------------------------------------------#
dir_list = os.listdir(input_directory)
#--------------------------------------------------#
# Sometimes the report isn't generated for some reason.
# this checks to see if the first file in the dir list exists and skips if it doesn't
#--------------------------------------------------#
read_first_file = True
i_iter = 0
while read_first_file:
if os.path.exists(os.path.join(input_directory, os.path.join(dir_list[i_iter], 'report.txt'))):
report = self.read_pr_report(
os.path.join(input_directory, os.path.join(dir_list[i_iter], 'report.txt')))
read_first_file = False
else:
lg.warning('Missing report file in' + dir_list[i_iter])
i_iter += 1
try:
wave_val = report['band_centres']
param_val = report[parameter]
except:
lg.exception('Parameter :: ' + str(parameter) + ' :: Not in report')
wave_str = str(wave_val)
wave_str = wave_str.strip('[').strip(']').replace('\'', '').replace('\\n', '').replace(' ', '').replace(' -,',
'').replace(
',', '\",\"')
f.write(
'\"Sun Azimuth (deg)\",\"Sun Zenith (deg)\",\"Phytoplankton\",\"Scattering X\",\"Scattering Y\",\"CDOM G\",\"CDOM S\",\"Depth (m)\",\"#wave length (nm) ->\",\"' + wave_str + '\"\n')
#--------------------------------------------------#
# Get all of the directories under the batch directories
# The directory names have the IOP parameters in the names
#--------------------------------------------------#
for dir in dir_list:
if os.path.isdir(os.path.abspath(os.path.join(input_directory, dir))):
tmp_str_list = dir.split('_')
#for tmp_str in tmp_str_list:
saa = ''.join(c for c in tmp_str_list[0] if not c.isalpha())
sza = ''.join(c for c in tmp_str_list[1] if not c.isalpha())
p = ''.join(c for c in tmp_str_list[2] if not c.isalpha())
x = ''.join(c for c in tmp_str_list[3] if not c.isalpha())
y = ''.join(c for c in tmp_str_list[4] if not c.isalpha())
g = ''.join(c for c in tmp_str_list[5] if not c.isalpha())
s = ''.join(c for c in tmp_str_list[6] if not c.isalpha())
z = ''.join(c for c in tmp_str_list[7] if not c.isalpha())
#--------------------------------------------------#
# Write the report header and then the values above in the columns
#--------------------------------------------------#
try:
f.write(saa + ',' + sza + ',' + p + ',' + x + ',' + y + ',' + g + ',' + s + ',' + z + ',')
report = self.read_pr_report(os.path.join(input_directory, os.path.join(dir, 'report.txt')))
try:
# check to see if the parameter has the @ parameter. If it does pass to directional calculator
if 'parameter_dir' in locals():
param_val = self.calc_directional_aop(report, parameter, parameter_dir)
else:
param_val = report[parameter]
param_str = str(param_val)
param_str = param_str.strip('[').strip(']').replace('\'', '').replace('\\n', '').replace(' ',
'')
f.write(param_str + '\n')
except:
lg.exception('Parameter :: ' + str(parameter) + ' :: Not in report')
except:
lg.warning('Cannot find a report in directory :: ' + dir)
|
def function[write_batch_report, parameter[self, input_directory, parameter]]:
constant[
Collect all of the batch reports and concatenate the results. The report should be :
:param input_directory:
:param parameter: This is the parameter in which to report.
]
if compare[constant[@] in name[parameter]] begin[:]
variable[parameter_dir] assign[=] call[call[name[parameter].split, parameter[constant[@]]]][constant[1]]
variable[parameter] assign[=] call[call[name[parameter].split, parameter[constant[@]]]][constant[0]]
variable[batch_report_file] assign[=] constant[batch_report.txt]
variable[batch_report_file] assign[=] call[name[os].path.join, parameter[name[input_directory], name[batch_report_file]]]
variable[f] assign[=] call[name[open], parameter[name[batch_report_file], constant[w]]]
variable[w] assign[=] call[name[csv].writer, parameter[name[f]]]
variable[dir_list] assign[=] call[name[os].listdir, parameter[name[input_directory]]]
variable[read_first_file] assign[=] constant[True]
variable[i_iter] assign[=] constant[0]
while name[read_first_file] begin[:]
if call[name[os].path.exists, parameter[call[name[os].path.join, parameter[name[input_directory], call[name[os].path.join, parameter[call[name[dir_list]][name[i_iter]], constant[report.txt]]]]]]] begin[:]
variable[report] assign[=] call[name[self].read_pr_report, parameter[call[name[os].path.join, parameter[name[input_directory], call[name[os].path.join, parameter[call[name[dir_list]][name[i_iter]], constant[report.txt]]]]]]]
variable[read_first_file] assign[=] constant[False]
<ast.Try object at 0x7da1b0121180>
variable[wave_str] assign[=] call[name[str], parameter[name[wave_val]]]
variable[wave_str] assign[=] call[call[call[call[call[call[call[name[wave_str].strip, parameter[constant[[]]].strip, parameter[constant[]]]].replace, parameter[constant['], constant[]]].replace, parameter[constant[\n], constant[]]].replace, parameter[constant[ ], constant[]]].replace, parameter[constant[ -,], constant[]]].replace, parameter[constant[,], constant[","]]]
call[name[f].write, parameter[binary_operation[binary_operation[constant["Sun Azimuth (deg)","Sun Zenith (deg)","Phytoplankton","Scattering X","Scattering Y","CDOM G","CDOM S","Depth (m)","#wave length (nm) ->","] + name[wave_str]] + constant["
]]]]
for taget[name[dir]] in starred[name[dir_list]] begin[:]
if call[name[os].path.isdir, parameter[call[name[os].path.abspath, parameter[call[name[os].path.join, parameter[name[input_directory], name[dir]]]]]]] begin[:]
variable[tmp_str_list] assign[=] call[name[dir].split, parameter[constant[_]]]
variable[saa] assign[=] call[constant[].join, parameter[<ast.GeneratorExp object at 0x7da18f00f2b0>]]
variable[sza] assign[=] call[constant[].join, parameter[<ast.GeneratorExp object at 0x7da18f00d540>]]
variable[p] assign[=] call[constant[].join, parameter[<ast.GeneratorExp object at 0x7da18f00db70>]]
variable[x] assign[=] call[constant[].join, parameter[<ast.GeneratorExp object at 0x7da18f00d720>]]
variable[y] assign[=] call[constant[].join, parameter[<ast.GeneratorExp object at 0x7da18f00e110>]]
variable[g] assign[=] call[constant[].join, parameter[<ast.GeneratorExp object at 0x7da18f00c760>]]
variable[s] assign[=] call[constant[].join, parameter[<ast.GeneratorExp object at 0x7da18f00fa60>]]
variable[z] assign[=] call[constant[].join, parameter[<ast.GeneratorExp object at 0x7da18f00d000>]]
<ast.Try object at 0x7da18f00ca30>
|
keyword[def] identifier[write_batch_report] ( identifier[self] , identifier[input_directory] , identifier[parameter] ):
literal[string]
keyword[if] literal[string] keyword[in] identifier[parameter] :
identifier[parameter_dir] = identifier[parameter] . identifier[split] ( literal[string] )[ literal[int] ]
identifier[parameter] = identifier[parameter] . identifier[split] ( literal[string] )[ literal[int] ]
identifier[batch_report_file] = literal[string]
identifier[batch_report_file] = identifier[os] . identifier[path] . identifier[join] ( identifier[input_directory] , identifier[batch_report_file] )
identifier[f] = identifier[open] ( identifier[batch_report_file] , literal[string] )
identifier[w] = identifier[csv] . identifier[writer] ( identifier[f] , identifier[delimiter] = literal[string] )
identifier[dir_list] = identifier[os] . identifier[listdir] ( identifier[input_directory] )
identifier[read_first_file] = keyword[True]
identifier[i_iter] = literal[int]
keyword[while] identifier[read_first_file] :
keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[os] . identifier[path] . identifier[join] ( identifier[input_directory] , identifier[os] . identifier[path] . identifier[join] ( identifier[dir_list] [ identifier[i_iter] ], literal[string] ))):
identifier[report] = identifier[self] . identifier[read_pr_report] (
identifier[os] . identifier[path] . identifier[join] ( identifier[input_directory] , identifier[os] . identifier[path] . identifier[join] ( identifier[dir_list] [ identifier[i_iter] ], literal[string] )))
identifier[read_first_file] = keyword[False]
keyword[else] :
identifier[lg] . identifier[warning] ( literal[string] + identifier[dir_list] [ identifier[i_iter] ])
identifier[i_iter] += literal[int]
keyword[try] :
identifier[wave_val] = identifier[report] [ literal[string] ]
identifier[param_val] = identifier[report] [ identifier[parameter] ]
keyword[except] :
identifier[lg] . identifier[exception] ( literal[string] + identifier[str] ( identifier[parameter] )+ literal[string] )
identifier[wave_str] = identifier[str] ( identifier[wave_val] )
identifier[wave_str] = identifier[wave_str] . identifier[strip] ( literal[string] ). identifier[strip] ( literal[string] ). identifier[replace] ( literal[string] , literal[string] ). identifier[replace] ( literal[string] , literal[string] ). identifier[replace] ( literal[string] , literal[string] ). identifier[replace] ( literal[string] ,
literal[string] ). identifier[replace] (
literal[string] , literal[string] )
identifier[f] . identifier[write] (
literal[string] + identifier[wave_str] + literal[string] )
keyword[for] identifier[dir] keyword[in] identifier[dir_list] :
keyword[if] identifier[os] . identifier[path] . identifier[isdir] ( identifier[os] . identifier[path] . identifier[abspath] ( identifier[os] . identifier[path] . identifier[join] ( identifier[input_directory] , identifier[dir] ))):
identifier[tmp_str_list] = identifier[dir] . identifier[split] ( literal[string] )
identifier[saa] = literal[string] . identifier[join] ( identifier[c] keyword[for] identifier[c] keyword[in] identifier[tmp_str_list] [ literal[int] ] keyword[if] keyword[not] identifier[c] . identifier[isalpha] ())
identifier[sza] = literal[string] . identifier[join] ( identifier[c] keyword[for] identifier[c] keyword[in] identifier[tmp_str_list] [ literal[int] ] keyword[if] keyword[not] identifier[c] . identifier[isalpha] ())
identifier[p] = literal[string] . identifier[join] ( identifier[c] keyword[for] identifier[c] keyword[in] identifier[tmp_str_list] [ literal[int] ] keyword[if] keyword[not] identifier[c] . identifier[isalpha] ())
identifier[x] = literal[string] . identifier[join] ( identifier[c] keyword[for] identifier[c] keyword[in] identifier[tmp_str_list] [ literal[int] ] keyword[if] keyword[not] identifier[c] . identifier[isalpha] ())
identifier[y] = literal[string] . identifier[join] ( identifier[c] keyword[for] identifier[c] keyword[in] identifier[tmp_str_list] [ literal[int] ] keyword[if] keyword[not] identifier[c] . identifier[isalpha] ())
identifier[g] = literal[string] . identifier[join] ( identifier[c] keyword[for] identifier[c] keyword[in] identifier[tmp_str_list] [ literal[int] ] keyword[if] keyword[not] identifier[c] . identifier[isalpha] ())
identifier[s] = literal[string] . identifier[join] ( identifier[c] keyword[for] identifier[c] keyword[in] identifier[tmp_str_list] [ literal[int] ] keyword[if] keyword[not] identifier[c] . identifier[isalpha] ())
identifier[z] = literal[string] . identifier[join] ( identifier[c] keyword[for] identifier[c] keyword[in] identifier[tmp_str_list] [ literal[int] ] keyword[if] keyword[not] identifier[c] . identifier[isalpha] ())
keyword[try] :
identifier[f] . identifier[write] ( identifier[saa] + literal[string] + identifier[sza] + literal[string] + identifier[p] + literal[string] + identifier[x] + literal[string] + identifier[y] + literal[string] + identifier[g] + literal[string] + identifier[s] + literal[string] + identifier[z] + literal[string] )
identifier[report] = identifier[self] . identifier[read_pr_report] ( identifier[os] . identifier[path] . identifier[join] ( identifier[input_directory] , identifier[os] . identifier[path] . identifier[join] ( identifier[dir] , literal[string] )))
keyword[try] :
keyword[if] literal[string] keyword[in] identifier[locals] ():
identifier[param_val] = identifier[self] . identifier[calc_directional_aop] ( identifier[report] , identifier[parameter] , identifier[parameter_dir] )
keyword[else] :
identifier[param_val] = identifier[report] [ identifier[parameter] ]
identifier[param_str] = identifier[str] ( identifier[param_val] )
identifier[param_str] = identifier[param_str] . identifier[strip] ( literal[string] ). identifier[strip] ( literal[string] ). identifier[replace] ( literal[string] , literal[string] ). identifier[replace] ( literal[string] , literal[string] ). identifier[replace] ( literal[string] ,
literal[string] )
identifier[f] . identifier[write] ( identifier[param_str] + literal[string] )
keyword[except] :
identifier[lg] . identifier[exception] ( literal[string] + identifier[str] ( identifier[parameter] )+ literal[string] )
keyword[except] :
identifier[lg] . identifier[warning] ( literal[string] + identifier[dir] )
|
def write_batch_report(self, input_directory, parameter):
"""
Collect all of the batch reports and concatenate the results. The report should be :
:param input_directory:
:param parameter: This is the parameter in which to report.
"""
# Check to see if there is an @ in the parameter. If there is split
if '@' in parameter:
parameter_dir = parameter.split('@')[1]
parameter = parameter.split('@')[0] # depends on [control=['if'], data=['parameter']]
# --------------------------------------------------#
# we put the batch report one directory up in the tree
# --------------------------------------------------#
batch_report_file = 'batch_report.txt'
batch_report_file = os.path.join(input_directory, batch_report_file)
f = open(batch_report_file, 'w')
w = csv.writer(f, delimiter=',')
#--------------------------------------------------#
# Read in the report from planarrad and pull out the parameter that we want
#--------------------------------------------------#
dir_list = os.listdir(input_directory)
#--------------------------------------------------#
# Sometimes the report isn't generated for some reason.
# this checks to see if the first file in the dir list exists and skips if it doesn't
#--------------------------------------------------#
read_first_file = True
i_iter = 0
while read_first_file:
if os.path.exists(os.path.join(input_directory, os.path.join(dir_list[i_iter], 'report.txt'))):
report = self.read_pr_report(os.path.join(input_directory, os.path.join(dir_list[i_iter], 'report.txt')))
read_first_file = False # depends on [control=['if'], data=[]]
else:
lg.warning('Missing report file in' + dir_list[i_iter])
i_iter += 1 # depends on [control=['while'], data=[]]
try:
wave_val = report['band_centres']
param_val = report[parameter] # depends on [control=['try'], data=[]]
except:
lg.exception('Parameter :: ' + str(parameter) + ' :: Not in report') # depends on [control=['except'], data=[]]
wave_str = str(wave_val)
wave_str = wave_str.strip('[').strip(']').replace("'", '').replace('\\n', '').replace(' ', '').replace(' -,', '').replace(',', '","')
f.write('"Sun Azimuth (deg)","Sun Zenith (deg)","Phytoplankton","Scattering X","Scattering Y","CDOM G","CDOM S","Depth (m)","#wave length (nm) ->","' + wave_str + '"\n')
#--------------------------------------------------#
# Get all of the directories under the batch directories
# The directory names have the IOP parameters in the names
#--------------------------------------------------#
for dir in dir_list:
if os.path.isdir(os.path.abspath(os.path.join(input_directory, dir))):
tmp_str_list = dir.split('_')
#for tmp_str in tmp_str_list:
saa = ''.join((c for c in tmp_str_list[0] if not c.isalpha()))
sza = ''.join((c for c in tmp_str_list[1] if not c.isalpha()))
p = ''.join((c for c in tmp_str_list[2] if not c.isalpha()))
x = ''.join((c for c in tmp_str_list[3] if not c.isalpha()))
y = ''.join((c for c in tmp_str_list[4] if not c.isalpha()))
g = ''.join((c for c in tmp_str_list[5] if not c.isalpha()))
s = ''.join((c for c in tmp_str_list[6] if not c.isalpha()))
z = ''.join((c for c in tmp_str_list[7] if not c.isalpha()))
#--------------------------------------------------#
# Write the report header and then the values above in the columns
#--------------------------------------------------#
try:
f.write(saa + ',' + sza + ',' + p + ',' + x + ',' + y + ',' + g + ',' + s + ',' + z + ',')
report = self.read_pr_report(os.path.join(input_directory, os.path.join(dir, 'report.txt')))
try:
# check to see if the parameter has the @ parameter. If it does pass to directional calculator
if 'parameter_dir' in locals():
param_val = self.calc_directional_aop(report, parameter, parameter_dir) # depends on [control=['if'], data=[]]
else:
param_val = report[parameter]
param_str = str(param_val)
param_str = param_str.strip('[').strip(']').replace("'", '').replace('\\n', '').replace(' ', '')
f.write(param_str + '\n') # depends on [control=['try'], data=[]]
except:
lg.exception('Parameter :: ' + str(parameter) + ' :: Not in report') # depends on [control=['except'], data=[]] # depends on [control=['try'], data=[]]
except:
lg.warning('Cannot find a report in directory :: ' + dir) # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['dir']]
|
def get_query_cache_key(compiler):
    """
    Generates a cache key from a SQLCompiler.
    This cache key is specific to the SQL query and its context
    (which database is used). The same query in the same context
    (= the same database) must generate the same cache key.
    :arg compiler: A SQLCompiler that will generate the SQL query
    :type compiler: django.db.models.sql.compiler.SQLCompiler
    :return: A cache key (40-character SHA-1 hex digest)
    :rtype: str
    """
    sql, params = compiler.as_sql()
    check_parameter_types(params)
    # The database alias (compiler.using) is part of the key so the same
    # SQL executed against two different databases is cached separately.
    # %-formatting (not f-strings) is kept deliberately: the surrounding
    # code uses text_type for py2/py3 compatibility.
    cache_key = '%s:%s:%s' % (compiler.using, sql,
                              [text_type(p) for p in params])
    return sha1(cache_key.encode('utf-8')).hexdigest()
|
def function[get_query_cache_key, parameter[compiler]]:
constant[
Generates a cache key from a SQLCompiler.
This cache key is specific to the SQL query and its context
(which database is used). The same query in the same context
(= the same database) must generate the same cache key.
:arg compiler: A SQLCompiler that will generate the SQL query
:type compiler: django.db.models.sql.compiler.SQLCompiler
:return: A cache key
:rtype: int
]
<ast.Tuple object at 0x7da18eb57010> assign[=] call[name[compiler].as_sql, parameter[]]
call[name[check_parameter_types], parameter[name[params]]]
variable[cache_key] assign[=] binary_operation[constant[%s:%s:%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da18eb54a60>, <ast.Name object at 0x7da18eb57910>, <ast.ListComp object at 0x7da18eb57bb0>]]]
return[call[call[name[sha1], parameter[call[name[cache_key].encode, parameter[constant[utf-8]]]]].hexdigest, parameter[]]]
|
keyword[def] identifier[get_query_cache_key] ( identifier[compiler] ):
literal[string]
identifier[sql] , identifier[params] = identifier[compiler] . identifier[as_sql] ()
identifier[check_parameter_types] ( identifier[params] )
identifier[cache_key] = literal[string] %( identifier[compiler] . identifier[using] , identifier[sql] ,
[ identifier[text_type] ( identifier[p] ) keyword[for] identifier[p] keyword[in] identifier[params] ])
keyword[return] identifier[sha1] ( identifier[cache_key] . identifier[encode] ( literal[string] )). identifier[hexdigest] ()
|
def get_query_cache_key(compiler):
"""
Generates a cache key from a SQLCompiler.
This cache key is specific to the SQL query and its context
(which database is used). The same query in the same context
(= the same database) must generate the same cache key.
:arg compiler: A SQLCompiler that will generate the SQL query
:type compiler: django.db.models.sql.compiler.SQLCompiler
:return: A cache key
:rtype: int
"""
(sql, params) = compiler.as_sql()
check_parameter_types(params)
cache_key = '%s:%s:%s' % (compiler.using, sql, [text_type(p) for p in params])
return sha1(cache_key.encode('utf-8')).hexdigest()
|
def path_to_list(pathstr):
    """Convert a search-path string to a list of its non-empty elements.

    :param pathstr: elements joined by the platform path separator
        (``os.pathsep``, e.g. ``:`` on POSIX, ``;`` on Windows)
    :return: list of the non-empty elements, in their original order
    """
    # Filter out empty entries produced by leading/trailing or doubled
    # separators, e.g. "a::b" -> ["a", "b"].  os.pathsep is the idiomatic
    # spelling of the separator (same value as os.path.pathsep).
    return [elem for elem in pathstr.split(os.pathsep) if elem]
|
def function[path_to_list, parameter[pathstr]]:
constant[Conver a path string to a list of path elements.]
return[<ast.ListComp object at 0x7da20c7cb2e0>]
|
keyword[def] identifier[path_to_list] ( identifier[pathstr] ):
literal[string]
keyword[return] [ identifier[elem] keyword[for] identifier[elem] keyword[in] identifier[pathstr] . identifier[split] ( identifier[os] . identifier[path] . identifier[pathsep] ) keyword[if] identifier[elem] ]
|
def path_to_list(pathstr):
"""Conver a path string to a list of path elements."""
return [elem for elem in pathstr.split(os.path.pathsep) if elem]
|
def get(self, objectType, *args, **coolArgs) :
    """Query pyGeno's DB and return the matching objects (Raba magic inside).

    Usage examples:
      * myGenome.get("Gene", name = 'TPST2')
      * myGene.get(Protein, id = 'ENSID...')
      * myGenome.get(Transcript, {'start >' : x, 'end <' : y})
    """
    results = self._makeLoadQuery(objectType, *args, **coolArgs).iterRun()
    # objectType is constant for the whole query, so decide once whether
    # results need to be wrapped together with this object's bag key.
    if issubclass(objectType, pyGenoRabaObjectWrapper) :
        return [objectType(wrapped_object_and_bag = (e, self.bagKey)) for e in results]
    return list(results)
|
def function[get, parameter[self, objectType]]:
constant[Raba Magic inside. This is th function that you use for
querying pyGeno's DB.
Usage examples:
* myGenome.get("Gene", name = 'TPST2')
* myGene.get(Protein, id = 'ENSID...')
* myGenome.get(Transcript, {'start >' : x, 'end <' : y})]
variable[ret] assign[=] list[[]]
for taget[name[e]] in starred[call[call[name[self]._makeLoadQuery, parameter[name[objectType], <ast.Starred object at 0x7da204621c30>]].iterRun, parameter[]]] begin[:]
if call[name[issubclass], parameter[name[objectType], name[pyGenoRabaObjectWrapper]]] begin[:]
call[name[ret].append, parameter[call[name[objectType], parameter[]]]]
return[name[ret]]
|
keyword[def] identifier[get] ( identifier[self] , identifier[objectType] ,* identifier[args] ,** identifier[coolArgs] ):
literal[string]
identifier[ret] =[]
keyword[for] identifier[e] keyword[in] identifier[self] . identifier[_makeLoadQuery] ( identifier[objectType] ,* identifier[args] ,** identifier[coolArgs] ). identifier[iterRun] ():
keyword[if] identifier[issubclass] ( identifier[objectType] , identifier[pyGenoRabaObjectWrapper] ):
identifier[ret] . identifier[append] ( identifier[objectType] ( identifier[wrapped_object_and_bag] =( identifier[e] , identifier[self] . identifier[bagKey] )))
keyword[else] :
identifier[ret] . identifier[append] ( identifier[e] )
keyword[return] identifier[ret]
|
def get(self, objectType, *args, **coolArgs):
"""Raba Magic inside. This is th function that you use for
querying pyGeno's DB.
Usage examples:
* myGenome.get("Gene", name = 'TPST2')
* myGene.get(Protein, id = 'ENSID...')
* myGenome.get(Transcript, {'start >' : x, 'end <' : y})"""
ret = []
for e in self._makeLoadQuery(objectType, *args, **coolArgs).iterRun():
if issubclass(objectType, pyGenoRabaObjectWrapper):
ret.append(objectType(wrapped_object_and_bag=(e, self.bagKey))) # depends on [control=['if'], data=[]]
else:
ret.append(e) # depends on [control=['for'], data=['e']]
return ret
|
def _irregular(singular, plural):
    """
    A convenience function to add appropriate rules to plurals and singular
    for irregular words.
    :param singular: irregular word in singular form
    :param plural: irregular word in plural form
    """
    def caseinsensitive(string):
        return ''.join('[' + char + char.upper() + ']' for char in string)

    # All rules are inserted at index 0; looping in the same order as the
    # original hand-written calls keeps the final list ordering identical.
    rule_specs = (
        (PLURALS, singular, plural),
        (PLURALS, plural, plural),
        (SINGULARS, plural, singular),
    )
    if singular[0].upper() == plural[0].upper():
        # Same leading letter: a single case-insensitive rule per spec,
        # preserving the matched first letter via the backreference.
        for rules, stem, replacement in rule_specs:
            rules.insert(0, (
                r"(?i)({}){}$".format(stem[0], stem[1:]),
                r'\1' + replacement[1:],
            ))
    else:
        # Different leading letters: emit an explicit upper-case and
        # lower-case variant of every rule.
        for rules, stem, replacement in rule_specs:
            for case in (str.upper, str.lower):
                rules.insert(0, (
                    r"{}{}$".format(case(stem[0]), caseinsensitive(stem[1:])),
                    case(replacement[0]) + replacement[1:],
                ))
|
def function[_irregular, parameter[singular, plural]]:
constant[
A convenience function to add appropriate rules to plurals and singular
for irregular words.
:param singular: irregular word in singular form
:param plural: irregular word in plural form
]
def function[caseinsensitive, parameter[string]]:
return[call[constant[].join, parameter[<ast.GeneratorExp object at 0x7da18ede59f0>]]]
if compare[call[call[name[singular]][constant[0]].upper, parameter[]] equal[==] call[call[name[plural]][constant[0]].upper, parameter[]]] begin[:]
call[name[PLURALS].insert, parameter[constant[0], tuple[[<ast.Call object at 0x7da18ede7a90>, <ast.BinOp object at 0x7da18ede5330>]]]]
call[name[PLURALS].insert, parameter[constant[0], tuple[[<ast.Call object at 0x7da18ede6020>, <ast.BinOp object at 0x7da18ede6f80>]]]]
call[name[SINGULARS].insert, parameter[constant[0], tuple[[<ast.Call object at 0x7da18ede7f40>, <ast.BinOp object at 0x7da18ede52d0>]]]]
|
keyword[def] identifier[_irregular] ( identifier[singular] , identifier[plural] ):
literal[string]
keyword[def] identifier[caseinsensitive] ( identifier[string] ):
keyword[return] literal[string] . identifier[join] ( literal[string] + identifier[char] + identifier[char] . identifier[upper] ()+ literal[string] keyword[for] identifier[char] keyword[in] identifier[string] )
keyword[if] identifier[singular] [ literal[int] ]. identifier[upper] ()== identifier[plural] [ literal[int] ]. identifier[upper] ():
identifier[PLURALS] . identifier[insert] ( literal[int] ,(
literal[string] . identifier[format] ( identifier[singular] [ literal[int] ], identifier[singular] [ literal[int] :]),
literal[string] + identifier[plural] [ literal[int] :]
))
identifier[PLURALS] . identifier[insert] ( literal[int] ,(
literal[string] . identifier[format] ( identifier[plural] [ literal[int] ], identifier[plural] [ literal[int] :]),
literal[string] + identifier[plural] [ literal[int] :]
))
identifier[SINGULARS] . identifier[insert] ( literal[int] ,(
literal[string] . identifier[format] ( identifier[plural] [ literal[int] ], identifier[plural] [ literal[int] :]),
literal[string] + identifier[singular] [ literal[int] :]
))
keyword[else] :
identifier[PLURALS] . identifier[insert] ( literal[int] ,(
literal[string] . identifier[format] ( identifier[singular] [ literal[int] ]. identifier[upper] (),
identifier[caseinsensitive] ( identifier[singular] [ literal[int] :])),
identifier[plural] [ literal[int] ]. identifier[upper] ()+ identifier[plural] [ literal[int] :]
))
identifier[PLURALS] . identifier[insert] ( literal[int] ,(
literal[string] . identifier[format] ( identifier[singular] [ literal[int] ]. identifier[lower] (),
identifier[caseinsensitive] ( identifier[singular] [ literal[int] :])),
identifier[plural] [ literal[int] ]. identifier[lower] ()+ identifier[plural] [ literal[int] :]
))
identifier[PLURALS] . identifier[insert] ( literal[int] ,(
literal[string] . identifier[format] ( identifier[plural] [ literal[int] ]. identifier[upper] (), identifier[caseinsensitive] ( identifier[plural] [ literal[int] :])),
identifier[plural] [ literal[int] ]. identifier[upper] ()+ identifier[plural] [ literal[int] :]
))
identifier[PLURALS] . identifier[insert] ( literal[int] ,(
literal[string] . identifier[format] ( identifier[plural] [ literal[int] ]. identifier[lower] (), identifier[caseinsensitive] ( identifier[plural] [ literal[int] :])),
identifier[plural] [ literal[int] ]. identifier[lower] ()+ identifier[plural] [ literal[int] :]
))
identifier[SINGULARS] . identifier[insert] ( literal[int] ,(
literal[string] . identifier[format] ( identifier[plural] [ literal[int] ]. identifier[upper] (), identifier[caseinsensitive] ( identifier[plural] [ literal[int] :])),
identifier[singular] [ literal[int] ]. identifier[upper] ()+ identifier[singular] [ literal[int] :]
))
identifier[SINGULARS] . identifier[insert] ( literal[int] ,(
literal[string] . identifier[format] ( identifier[plural] [ literal[int] ]. identifier[lower] (), identifier[caseinsensitive] ( identifier[plural] [ literal[int] :])),
identifier[singular] [ literal[int] ]. identifier[lower] ()+ identifier[singular] [ literal[int] :]
))
|
def _irregular(singular, plural):
"""
A convenience function to add appropriate rules to plurals and singular
for irregular words.
:param singular: irregular word in singular form
:param plural: irregular word in plural form
"""
def caseinsensitive(string):
return ''.join(('[' + char + char.upper() + ']' for char in string))
if singular[0].upper() == plural[0].upper():
PLURALS.insert(0, ('(?i)({}){}$'.format(singular[0], singular[1:]), '\\1' + plural[1:]))
PLURALS.insert(0, ('(?i)({}){}$'.format(plural[0], plural[1:]), '\\1' + plural[1:]))
SINGULARS.insert(0, ('(?i)({}){}$'.format(plural[0], plural[1:]), '\\1' + singular[1:])) # depends on [control=['if'], data=[]]
else:
PLURALS.insert(0, ('{}{}$'.format(singular[0].upper(), caseinsensitive(singular[1:])), plural[0].upper() + plural[1:]))
PLURALS.insert(0, ('{}{}$'.format(singular[0].lower(), caseinsensitive(singular[1:])), plural[0].lower() + plural[1:]))
PLURALS.insert(0, ('{}{}$'.format(plural[0].upper(), caseinsensitive(plural[1:])), plural[0].upper() + plural[1:]))
PLURALS.insert(0, ('{}{}$'.format(plural[0].lower(), caseinsensitive(plural[1:])), plural[0].lower() + plural[1:]))
SINGULARS.insert(0, ('{}{}$'.format(plural[0].upper(), caseinsensitive(plural[1:])), singular[0].upper() + singular[1:]))
SINGULARS.insert(0, ('{}{}$'.format(plural[0].lower(), caseinsensitive(plural[1:])), singular[0].lower() + singular[1:]))
|
def columnize(array, displaywidth=80, colsep = '  ',
              arrange_vertical=True, ljust=True, lineprefix='',
              opts={}):
    """Return a list of strings as a compact set of columns arranged
    horizontally or vertically.
    For example, for a line width of 4 characters (arranged vertically):
        ['1', '2,', '3', '4'] => '1  3\n2  4\n'
    or arranged horizontally:
        ['1', '2,', '3', '4'] => '1  2\n3  4\n'
    Each column is only as wide as necessary. By default, columns are
    separated by two spaces - one was not legible enough. Set "colsep"
    to adjust the string separate columns. Set `displaywidth' to set
    the line width.
    Normally, consecutive items go down from the top to bottom from
    the left-most column to the right-most. If "arrange_vertical" is
    set false, consecutive items will go across, left to right, top to
    bottom.

    :param array: list or tuple of items; each is stringified with str()
        unless opts['colfmt'] supplies a %-format
    :param displaywidth: maximum output line width in characters
    :param arrange_vertical: fill top-to-bottom if true, else left-to-right
    :param ljust: left-justify cells if true, right-justify otherwise
    :param lineprefix: string prepended to every output line
    :param opts: optional settings dict; when non-empty it overrides the
        keyword parameters above (keys taken from module-level default_opts)
    :return: the formatted text as one string containing newlines
    """
    # NOTE(review): opts={} is a mutable default argument; it is only
    # read here, never mutated, so it is harmless in practice.
    if not isinstance(array, (list, tuple)):
        raise TypeError((
            'array needs to be an instance of a list or a tuple'))
    # Resolve the effective options into o: from opts via the module-level
    # default_opts/get_option when opts is non-empty, otherwise from the
    # plain keyword parameters.
    o = {}
    if len(opts.keys()) > 0:
        for key in default_opts.keys():
            o[key] = get_option(key, opts)
            pass
        if o['arrange_array']:
            # "Array" mode renders the output to look like a Python list
            # literal: bracketed, comma-separated, filled horizontally.
            o['array_prefix'] = '['
            o['lineprefix'] = ' '
            o['linesuffix'] = ",\n"
            o['array_suffix'] = "]\n"
            o['colsep'] = ', '
            o['arrange_vertical'] = False
            pass
        pass
    else:
        o = default_opts.copy()
        o['displaywidth'] = displaywidth
        o['colsep'] = colsep
        o['arrange_vertical'] = arrange_vertical
        o['ljust'] = ljust
        o['lineprefix'] = lineprefix
        pass
    # Leftover from the Ruby original this was ported from:
    # if o['ljust'] is None:
    #     o['ljust'] = !(list.all?{|datum| datum.kind_of?(Numeric)})
    #     pass
    # Stringify every cell, optionally through a caller-supplied %-format.
    if o['colfmt']:
        array = [(o['colfmt'] % i) for i in array]
    else:
        array = [str(i) for i in array]
        pass
    # Some degenerate cases
    size = len(array)
    if 0 == size:
        return "<empty>\n"
    elif size == 1:
        return '%s%s%s\n' % (o['array_prefix'], str(array[0]),
                             o['array_suffix'])
    # Reserve room for the per-line prefix, but never let the usable
    # width drop below 4 characters.
    o['displaywidth'] = max(4, o['displaywidth'] - len(o['lineprefix']))
    if o['arrange_vertical']:
        # Vertical fill: item (row, col) lives at index nrows*col + row.
        array_index = lambda nrows, row, col: nrows*col + row
        # Try every row count from 1 upwards
        for nrows in range(1, size+1):
            ncols = (size+nrows-1) // nrows
            colwidths = []
            # Start negative so the first column adds no separator width.
            totwidth = -len(o['colsep'])
            for col in range(ncols):
                # get max column width for this column
                colwidth = 0
                for row in range(nrows):
                    i = array_index(nrows, row, col)
                    if i >= size: break
                    x = array[i]
                    colwidth = max(colwidth, len(x))
                    pass
                colwidths.append(colwidth)
                totwidth += colwidth + len(o['colsep'])
                if totwidth > o['displaywidth']:
                    break
                pass
            if totwidth <= o['displaywidth']:
                break
            pass
        # The smallest number of rows computed and the
        # max widths for each column has been obtained.
        # Now we just have to format each of the
        # rows.
        s = ''
        for row in range(nrows):
            texts = []
            for col in range(ncols):
                i = row + nrows*col
                if i >= size:
                    x = ""
                else:
                    x = array[i]
                texts.append(x)
            # Drop trailing empty cells so a short last column does not
            # produce padded blanks at the end of the line.
            while texts and not texts[-1]:
                del texts[-1]
            for col in range(len(texts)):
                if o['ljust']:
                    texts[col] = texts[col].ljust(colwidths[col])
                else:
                    texts[col] = texts[col].rjust(colwidths[col])
                    pass
                pass
            s += "%s%s%s" % (o['lineprefix'], str(o['colsep'].join(texts)),
                             o['linesuffix'])
            pass
        return s
    else:
        # Horizontal fill: rows are counted from 1 here, so item
        # (row, col) lives at index ncols*(row-1) + col.
        array_index = lambda ncols, row, col: ncols*(row-1) + col
        # Try every column count from size downwards
        colwidths = []
        for ncols in range(size, 0, -1):
            # Try every row count from 1 upwards
            min_rows = (size+ncols-1) // ncols
            nrows = min_rows -1
            while nrows < size:
                nrows += 1
                rounded_size = nrows * ncols
                colwidths = []
                totwidth = -len(o['colsep'])
                for col in range(ncols):
                    # get max column width for this column
                    colwidth = 0
                    for row in range(1, nrows+1):
                        i = array_index(ncols, row, col)
                        if i >= rounded_size: break
                        elif i < size:
                            x = array[i]
                            colwidth = max(colwidth, len(x))
                            pass
                        pass
                    colwidths.append(colwidth)
                    totwidth += colwidth + len(o['colsep'])
                    if totwidth >= o['displaywidth']:
                        break
                    pass
                # NOTE: i deliberately leaks out of the loops above; it
                # records how far through the rounded grid the scan got.
                if totwidth <= o['displaywidth'] and i >= rounded_size-1:
                    # Found the right nrows and ncols
                    # print "right nrows and ncols"
                    nrows = row
                    break
                elif totwidth >= o['displaywidth']:
                    # print "reduce ncols", ncols
                    # Need to reduce ncols
                    break
                pass
            if totwidth <= o['displaywidth'] and i >= rounded_size-1:
                break
            pass
        # The smallest number of rows computed and the
        # max widths for each column has been obtained.
        # Now we just have to format each of the
        # rows.
        s = ''
        if len(o['array_prefix']) != 0:
            prefix = o['array_prefix']
        else:
            prefix = o['lineprefix']
            pass
        for row in range(1, nrows+1):
            texts = []
            for col in range(ncols):
                i = array_index(ncols, row, col)
                if i >= size:
                    break
                else: x = array[i]
                texts.append(x)
                pass
            for col in range(len(texts)):
                if o['ljust']:
                    texts[col] = texts[col].ljust(colwidths[col])
                else:
                    texts[col] = texts[col].rjust(colwidths[col])
                    pass
                pass
            s += "%s%s%s" % (prefix, str(o['colsep'].join(texts)),
                             o['linesuffix'])
            # Only the very first line carries the array prefix.
            prefix = o['lineprefix']
            pass
        if o['arrange_array']:
            # Swap the trailing column separator of the final line for
            # the closing bracket of the array rendering.
            colsep = o['colsep'].rstrip()
            colsep_pos = -(len(colsep)+1)
            if s[colsep_pos:] == colsep + "\n":
                s = s[:colsep_pos] + o['array_suffix'] + "\n"
                pass
            pass
        else:
            s += o['array_suffix']
            pass
        return s
    pass
|
def function[columnize, parameter[array, displaywidth, colsep, arrange_vertical, ljust, lineprefix, opts]]:
constant[Return a list of strings as a compact set of columns arranged
horizontally or vertically.
For example, for a line width of 4 characters (arranged vertically):
['1', '2,', '3', '4'] => '1 3
2 4
'
or arranged horizontally:
['1', '2,', '3', '4'] => '1 2
3 4
'
Each column is only as wide as necessary. By default, columns are
separated by two spaces - one was not legible enough. Set "colsep"
to adjust the string separate columns. Set `displaywidth' to set
the line width.
Normally, consecutive items go down from the top to bottom from
the left-most column to the right-most. If "arrange_vertical" is
set false, consecutive items will go across, left to right, top to
bottom.]
if <ast.UnaryOp object at 0x7da20c6e7970> begin[:]
<ast.Raise object at 0x7da20c6e70d0>
variable[o] assign[=] dictionary[[], []]
if compare[call[name[len], parameter[call[name[opts].keys, parameter[]]]] greater[>] constant[0]] begin[:]
for taget[name[key]] in starred[call[name[default_opts].keys, parameter[]]] begin[:]
call[name[o]][name[key]] assign[=] call[name[get_option], parameter[name[key], name[opts]]]
pass
if call[name[o]][constant[arrange_array]] begin[:]
call[name[o]][constant[array_prefix]] assign[=] constant[[]
call[name[o]][constant[lineprefix]] assign[=] constant[ ]
call[name[o]][constant[linesuffix]] assign[=] constant[,
]
call[name[o]][constant[array_suffix]] assign[=] constant[]
]
call[name[o]][constant[colsep]] assign[=] constant[, ]
call[name[o]][constant[arrange_vertical]] assign[=] constant[False]
pass
if call[name[o]][constant[colfmt]] begin[:]
variable[array] assign[=] <ast.ListComp object at 0x7da20c6e5420>
variable[size] assign[=] call[name[len], parameter[name[array]]]
if compare[constant[0] equal[==] name[size]] begin[:]
return[constant[<empty>
]]
call[name[o]][constant[displaywidth]] assign[=] call[name[max], parameter[constant[4], binary_operation[call[name[o]][constant[displaywidth]] - call[name[len], parameter[call[name[o]][constant[lineprefix]]]]]]]
if call[name[o]][constant[arrange_vertical]] begin[:]
variable[array_index] assign[=] <ast.Lambda object at 0x7da20c6e4370>
for taget[name[nrows]] in starred[call[name[range], parameter[constant[1], binary_operation[name[size] + constant[1]]]]] begin[:]
variable[ncols] assign[=] binary_operation[binary_operation[binary_operation[name[size] + name[nrows]] - constant[1]] <ast.FloorDiv object at 0x7da2590d6bc0> name[nrows]]
variable[colwidths] assign[=] list[[]]
variable[totwidth] assign[=] <ast.UnaryOp object at 0x7da20c6e6da0>
for taget[name[col]] in starred[call[name[range], parameter[name[ncols]]]] begin[:]
variable[colwidth] assign[=] constant[0]
for taget[name[row]] in starred[call[name[range], parameter[name[nrows]]]] begin[:]
variable[i] assign[=] call[name[array_index], parameter[name[nrows], name[row], name[col]]]
if compare[name[i] greater_or_equal[>=] name[size]] begin[:]
break
variable[x] assign[=] call[name[array]][name[i]]
variable[colwidth] assign[=] call[name[max], parameter[name[colwidth], call[name[len], parameter[name[x]]]]]
pass
call[name[colwidths].append, parameter[name[colwidth]]]
<ast.AugAssign object at 0x7da18fe91390>
if compare[name[totwidth] greater[>] call[name[o]][constant[displaywidth]]] begin[:]
break
pass
if compare[name[totwidth] less_or_equal[<=] call[name[o]][constant[displaywidth]]] begin[:]
break
pass
variable[s] assign[=] constant[]
for taget[name[row]] in starred[call[name[range], parameter[name[nrows]]]] begin[:]
variable[texts] assign[=] list[[]]
for taget[name[col]] in starred[call[name[range], parameter[name[ncols]]]] begin[:]
variable[i] assign[=] binary_operation[name[row] + binary_operation[name[nrows] * name[col]]]
if compare[name[i] greater_or_equal[>=] name[size]] begin[:]
variable[x] assign[=] constant[]
call[name[texts].append, parameter[name[x]]]
while <ast.BoolOp object at 0x7da18fe91f60> begin[:]
<ast.Delete object at 0x7da18fe912d0>
for taget[name[col]] in starred[call[name[range], parameter[call[name[len], parameter[name[texts]]]]]] begin[:]
if call[name[o]][constant[ljust]] begin[:]
call[name[texts]][name[col]] assign[=] call[call[name[texts]][name[col]].ljust, parameter[call[name[colwidths]][name[col]]]]
pass
<ast.AugAssign object at 0x7da18fe920b0>
pass
return[name[s]]
pass
|
keyword[def] identifier[columnize] ( identifier[array] , identifier[displaywidth] = literal[int] , identifier[colsep] = literal[string] ,
identifier[arrange_vertical] = keyword[True] , identifier[ljust] = keyword[True] , identifier[lineprefix] = literal[string] ,
identifier[opts] ={}):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[array] ,( identifier[list] , identifier[tuple] )):
keyword[raise] identifier[TypeError] ((
literal[string] ))
identifier[o] ={}
keyword[if] identifier[len] ( identifier[opts] . identifier[keys] ())> literal[int] :
keyword[for] identifier[key] keyword[in] identifier[default_opts] . identifier[keys] ():
identifier[o] [ identifier[key] ]= identifier[get_option] ( identifier[key] , identifier[opts] )
keyword[pass]
keyword[if] identifier[o] [ literal[string] ]:
identifier[o] [ literal[string] ]= literal[string]
identifier[o] [ literal[string] ]= literal[string]
identifier[o] [ literal[string] ]= literal[string]
identifier[o] [ literal[string] ]= literal[string]
identifier[o] [ literal[string] ]= literal[string]
identifier[o] [ literal[string] ]= keyword[False]
keyword[pass]
keyword[else] :
identifier[o] = identifier[default_opts] . identifier[copy] ()
identifier[o] [ literal[string] ]= identifier[displaywidth]
identifier[o] [ literal[string] ]= identifier[colsep]
identifier[o] [ literal[string] ]= identifier[arrange_vertical]
identifier[o] [ literal[string] ]= identifier[ljust]
identifier[o] [ literal[string] ]= identifier[lineprefix]
keyword[pass]
keyword[if] identifier[o] [ literal[string] ]:
identifier[array] =[( identifier[o] [ literal[string] ]% identifier[i] ) keyword[for] identifier[i] keyword[in] identifier[array] ]
keyword[else] :
identifier[array] =[ identifier[str] ( identifier[i] ) keyword[for] identifier[i] keyword[in] identifier[array] ]
keyword[pass]
identifier[size] = identifier[len] ( identifier[array] )
keyword[if] literal[int] == identifier[size] :
keyword[return] literal[string]
keyword[elif] identifier[size] == literal[int] :
keyword[return] literal[string] %( identifier[o] [ literal[string] ], identifier[str] ( identifier[array] [ literal[int] ]),
identifier[o] [ literal[string] ])
identifier[o] [ literal[string] ]= identifier[max] ( literal[int] , identifier[o] [ literal[string] ]- identifier[len] ( identifier[o] [ literal[string] ]))
keyword[if] identifier[o] [ literal[string] ]:
identifier[array_index] = keyword[lambda] identifier[nrows] , identifier[row] , identifier[col] : identifier[nrows] * identifier[col] + identifier[row]
keyword[for] identifier[nrows] keyword[in] identifier[range] ( literal[int] , identifier[size] + literal[int] ):
identifier[ncols] =( identifier[size] + identifier[nrows] - literal[int] )// identifier[nrows]
identifier[colwidths] =[]
identifier[totwidth] =- identifier[len] ( identifier[o] [ literal[string] ])
keyword[for] identifier[col] keyword[in] identifier[range] ( identifier[ncols] ):
identifier[colwidth] = literal[int]
keyword[for] identifier[row] keyword[in] identifier[range] ( identifier[nrows] ):
identifier[i] = identifier[array_index] ( identifier[nrows] , identifier[row] , identifier[col] )
keyword[if] identifier[i] >= identifier[size] : keyword[break]
identifier[x] = identifier[array] [ identifier[i] ]
identifier[colwidth] = identifier[max] ( identifier[colwidth] , identifier[len] ( identifier[x] ))
keyword[pass]
identifier[colwidths] . identifier[append] ( identifier[colwidth] )
identifier[totwidth] += identifier[colwidth] + identifier[len] ( identifier[o] [ literal[string] ])
keyword[if] identifier[totwidth] > identifier[o] [ literal[string] ]:
keyword[break]
keyword[pass]
keyword[if] identifier[totwidth] <= identifier[o] [ literal[string] ]:
keyword[break]
keyword[pass]
identifier[s] = literal[string]
keyword[for] identifier[row] keyword[in] identifier[range] ( identifier[nrows] ):
identifier[texts] =[]
keyword[for] identifier[col] keyword[in] identifier[range] ( identifier[ncols] ):
identifier[i] = identifier[row] + identifier[nrows] * identifier[col]
keyword[if] identifier[i] >= identifier[size] :
identifier[x] = literal[string]
keyword[else] :
identifier[x] = identifier[array] [ identifier[i] ]
identifier[texts] . identifier[append] ( identifier[x] )
keyword[while] identifier[texts] keyword[and] keyword[not] identifier[texts] [- literal[int] ]:
keyword[del] identifier[texts] [- literal[int] ]
keyword[for] identifier[col] keyword[in] identifier[range] ( identifier[len] ( identifier[texts] )):
keyword[if] identifier[o] [ literal[string] ]:
identifier[texts] [ identifier[col] ]= identifier[texts] [ identifier[col] ]. identifier[ljust] ( identifier[colwidths] [ identifier[col] ])
keyword[else] :
identifier[texts] [ identifier[col] ]= identifier[texts] [ identifier[col] ]. identifier[rjust] ( identifier[colwidths] [ identifier[col] ])
keyword[pass]
keyword[pass]
identifier[s] += literal[string] %( identifier[o] [ literal[string] ], identifier[str] ( identifier[o] [ literal[string] ]. identifier[join] ( identifier[texts] )),
identifier[o] [ literal[string] ])
keyword[pass]
keyword[return] identifier[s]
keyword[else] :
identifier[array_index] = keyword[lambda] identifier[ncols] , identifier[row] , identifier[col] : identifier[ncols] *( identifier[row] - literal[int] )+ identifier[col]
identifier[colwidths] =[]
keyword[for] identifier[ncols] keyword[in] identifier[range] ( identifier[size] , literal[int] ,- literal[int] ):
identifier[min_rows] =( identifier[size] + identifier[ncols] - literal[int] )// identifier[ncols]
identifier[nrows] = identifier[min_rows] - literal[int]
keyword[while] identifier[nrows] < identifier[size] :
identifier[nrows] += literal[int]
identifier[rounded_size] = identifier[nrows] * identifier[ncols]
identifier[colwidths] =[]
identifier[totwidth] =- identifier[len] ( identifier[o] [ literal[string] ])
keyword[for] identifier[col] keyword[in] identifier[range] ( identifier[ncols] ):
identifier[colwidth] = literal[int]
keyword[for] identifier[row] keyword[in] identifier[range] ( literal[int] , identifier[nrows] + literal[int] ):
identifier[i] = identifier[array_index] ( identifier[ncols] , identifier[row] , identifier[col] )
keyword[if] identifier[i] >= identifier[rounded_size] : keyword[break]
keyword[elif] identifier[i] < identifier[size] :
identifier[x] = identifier[array] [ identifier[i] ]
identifier[colwidth] = identifier[max] ( identifier[colwidth] , identifier[len] ( identifier[x] ))
keyword[pass]
keyword[pass]
identifier[colwidths] . identifier[append] ( identifier[colwidth] )
identifier[totwidth] += identifier[colwidth] + identifier[len] ( identifier[o] [ literal[string] ])
keyword[if] identifier[totwidth] >= identifier[o] [ literal[string] ]:
keyword[break]
keyword[pass]
keyword[if] identifier[totwidth] <= identifier[o] [ literal[string] ] keyword[and] identifier[i] >= identifier[rounded_size] - literal[int] :
identifier[nrows] = identifier[row]
keyword[break]
keyword[elif] identifier[totwidth] >= identifier[o] [ literal[string] ]:
keyword[break]
keyword[pass]
keyword[if] identifier[totwidth] <= identifier[o] [ literal[string] ] keyword[and] identifier[i] >= identifier[rounded_size] - literal[int] :
keyword[break]
keyword[pass]
identifier[s] = literal[string]
keyword[if] identifier[len] ( identifier[o] [ literal[string] ])!= literal[int] :
identifier[prefix] = identifier[o] [ literal[string] ]
keyword[else] :
identifier[prefix] = identifier[o] [ literal[string] ]
keyword[pass]
keyword[for] identifier[row] keyword[in] identifier[range] ( literal[int] , identifier[nrows] + literal[int] ):
identifier[texts] =[]
keyword[for] identifier[col] keyword[in] identifier[range] ( identifier[ncols] ):
identifier[i] = identifier[array_index] ( identifier[ncols] , identifier[row] , identifier[col] )
keyword[if] identifier[i] >= identifier[size] :
keyword[break]
keyword[else] : identifier[x] = identifier[array] [ identifier[i] ]
identifier[texts] . identifier[append] ( identifier[x] )
keyword[pass]
keyword[for] identifier[col] keyword[in] identifier[range] ( identifier[len] ( identifier[texts] )):
keyword[if] identifier[o] [ literal[string] ]:
identifier[texts] [ identifier[col] ]= identifier[texts] [ identifier[col] ]. identifier[ljust] ( identifier[colwidths] [ identifier[col] ])
keyword[else] :
identifier[texts] [ identifier[col] ]= identifier[texts] [ identifier[col] ]. identifier[rjust] ( identifier[colwidths] [ identifier[col] ])
keyword[pass]
keyword[pass]
identifier[s] += literal[string] %( identifier[prefix] , identifier[str] ( identifier[o] [ literal[string] ]. identifier[join] ( identifier[texts] )),
identifier[o] [ literal[string] ])
identifier[prefix] = identifier[o] [ literal[string] ]
keyword[pass]
keyword[if] identifier[o] [ literal[string] ]:
identifier[colsep] = identifier[o] [ literal[string] ]. identifier[rstrip] ()
identifier[colsep_pos] =-( identifier[len] ( identifier[colsep] )+ literal[int] )
keyword[if] identifier[s] [ identifier[colsep_pos] :]== identifier[colsep] + literal[string] :
identifier[s] = identifier[s] [: identifier[colsep_pos] ]+ identifier[o] [ literal[string] ]+ literal[string]
keyword[pass]
keyword[pass]
keyword[else] :
identifier[s] += identifier[o] [ literal[string] ]
keyword[pass]
keyword[return] identifier[s]
keyword[pass]
|
def columnize(array, displaywidth=80, colsep=' ', arrange_vertical=True, ljust=True, lineprefix='', opts={}):
"""Return a list of strings as a compact set of columns arranged
horizontally or vertically.
For example, for a line width of 4 characters (arranged vertically):
['1', '2,', '3', '4'] => '1 3
2 4
'
or arranged horizontally:
['1', '2,', '3', '4'] => '1 2
3 4
'
Each column is only as wide as necessary. By default, columns are
separated by two spaces - one was not legible enough. Set "colsep"
to adjust the string separate columns. Set `displaywidth' to set
the line width.
Normally, consecutive items go down from the top to bottom from
the left-most column to the right-most. If "arrange_vertical" is
set false, consecutive items will go across, left to right, top to
bottom."""
if not isinstance(array, (list, tuple)):
raise TypeError('array needs to be an instance of a list or a tuple') # depends on [control=['if'], data=[]]
o = {}
if len(opts.keys()) > 0:
for key in default_opts.keys():
o[key] = get_option(key, opts)
pass # depends on [control=['for'], data=['key']]
if o['arrange_array']:
o['array_prefix'] = '['
o['lineprefix'] = ' '
o['linesuffix'] = ',\n'
o['array_suffix'] = ']\n'
o['colsep'] = ', '
o['arrange_vertical'] = False
pass # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
o = default_opts.copy()
o['displaywidth'] = displaywidth
o['colsep'] = colsep
o['arrange_vertical'] = arrange_vertical
o['ljust'] = ljust
o['lineprefix'] = lineprefix
pass
# if o['ljust'] is None:
# o['ljust'] = !(list.all?{|datum| datum.kind_of?(Numeric)})
# pass
if o['colfmt']:
array = [o['colfmt'] % i for i in array] # depends on [control=['if'], data=[]]
else:
array = [str(i) for i in array]
pass
# Some degenerate cases
size = len(array)
if 0 == size:
return '<empty>\n' # depends on [control=['if'], data=[]]
elif size == 1:
return '%s%s%s\n' % (o['array_prefix'], str(array[0]), o['array_suffix']) # depends on [control=['if'], data=[]]
o['displaywidth'] = max(4, o['displaywidth'] - len(o['lineprefix']))
if o['arrange_vertical']:
array_index = lambda nrows, row, col: nrows * col + row
# Try every row count from 1 upwards
for nrows in range(1, size + 1):
ncols = (size + nrows - 1) // nrows
colwidths = []
totwidth = -len(o['colsep'])
for col in range(ncols):
# get max column width for this column
colwidth = 0
for row in range(nrows):
i = array_index(nrows, row, col)
if i >= size:
break # depends on [control=['if'], data=[]]
x = array[i]
colwidth = max(colwidth, len(x))
pass # depends on [control=['for'], data=['row']]
colwidths.append(colwidth)
totwidth += colwidth + len(o['colsep'])
if totwidth > o['displaywidth']:
break # depends on [control=['if'], data=[]]
pass # depends on [control=['for'], data=['col']]
if totwidth <= o['displaywidth']:
break # depends on [control=['if'], data=[]]
pass # depends on [control=['for'], data=['nrows']]
# The smallest number of rows computed and the
# max widths for each column has been obtained.
# Now we just have to format each of the
# rows.
s = ''
for row in range(nrows):
texts = []
for col in range(ncols):
i = row + nrows * col
if i >= size:
x = '' # depends on [control=['if'], data=[]]
else:
x = array[i]
texts.append(x) # depends on [control=['for'], data=['col']]
while texts and (not texts[-1]):
del texts[-1] # depends on [control=['while'], data=[]]
for col in range(len(texts)):
if o['ljust']:
texts[col] = texts[col].ljust(colwidths[col]) # depends on [control=['if'], data=[]]
else:
texts[col] = texts[col].rjust(colwidths[col])
pass
pass # depends on [control=['for'], data=['col']]
s += '%s%s%s' % (o['lineprefix'], str(o['colsep'].join(texts)), o['linesuffix'])
pass # depends on [control=['for'], data=['row']]
return s # depends on [control=['if'], data=[]]
else:
array_index = lambda ncols, row, col: ncols * (row - 1) + col
# Try every column count from size downwards
colwidths = []
for ncols in range(size, 0, -1):
# Try every row count from 1 upwards
min_rows = (size + ncols - 1) // ncols
nrows = min_rows - 1
while nrows < size:
nrows += 1
rounded_size = nrows * ncols
colwidths = []
totwidth = -len(o['colsep'])
for col in range(ncols):
# get max column width for this column
colwidth = 0
for row in range(1, nrows + 1):
i = array_index(ncols, row, col)
if i >= rounded_size:
break # depends on [control=['if'], data=[]]
elif i < size:
x = array[i]
colwidth = max(colwidth, len(x))
pass # depends on [control=['if'], data=['i']]
pass # depends on [control=['for'], data=['row']]
colwidths.append(colwidth)
totwidth += colwidth + len(o['colsep'])
if totwidth >= o['displaywidth']:
break # depends on [control=['if'], data=[]]
pass # depends on [control=['for'], data=['col']]
if totwidth <= o['displaywidth'] and i >= rounded_size - 1:
# Found the right nrows and ncols
# print "right nrows and ncols"
nrows = row
break # depends on [control=['if'], data=[]]
elif totwidth >= o['displaywidth']:
# print "reduce ncols", ncols
# Need to reduce ncols
break # depends on [control=['if'], data=[]]
pass # depends on [control=['while'], data=['nrows', 'size']]
if totwidth <= o['displaywidth'] and i >= rounded_size - 1:
break # depends on [control=['if'], data=[]]
pass # depends on [control=['for'], data=['ncols']]
# The smallest number of rows computed and the
# max widths for each column has been obtained.
# Now we just have to format each of the
# rows.
s = ''
if len(o['array_prefix']) != 0:
prefix = o['array_prefix'] # depends on [control=['if'], data=[]]
else:
prefix = o['lineprefix']
pass
for row in range(1, nrows + 1):
texts = []
for col in range(ncols):
i = array_index(ncols, row, col)
if i >= size:
break # depends on [control=['if'], data=[]]
else:
x = array[i]
texts.append(x)
pass # depends on [control=['for'], data=['col']]
for col in range(len(texts)):
if o['ljust']:
texts[col] = texts[col].ljust(colwidths[col]) # depends on [control=['if'], data=[]]
else:
texts[col] = texts[col].rjust(colwidths[col])
pass
pass # depends on [control=['for'], data=['col']]
s += '%s%s%s' % (prefix, str(o['colsep'].join(texts)), o['linesuffix'])
prefix = o['lineprefix']
pass # depends on [control=['for'], data=['row']]
if o['arrange_array']:
colsep = o['colsep'].rstrip()
colsep_pos = -(len(colsep) + 1)
if s[colsep_pos:] == colsep + '\n':
s = s[:colsep_pos] + o['array_suffix'] + '\n'
pass # depends on [control=['if'], data=[]]
pass # depends on [control=['if'], data=[]]
else:
s += o['array_suffix']
pass
return s
pass
|
def cancel_job(self, job_id=None, job_name=None):
    """Cancel a running job.

    Args:
        job_id (str, optional): Identifier of job to be canceled.
        job_name (str, optional): Name of job to be canceled.

    Returns:
        dict: JSON response for the job cancel operation.
    """
    # Only forward the identifiers the caller actually supplied.
    params = {
        key: value
        for key, value in (('job_name', job_name), ('job_id', job_id))
        if value is not None
    }
    response = self.rest_client.session.delete(
        self._get_url('jobs_path'), params=params)
    _handle_http_errors(response)
    return response.json()
|
def function[cancel_job, parameter[self, job_id, job_name]]:
constant[Cancel a running job.
Args:
job_id (str, optional): Identifier of job to be canceled.
job_name (str, optional): Name of job to be canceled.
Returns:
dict: JSON response for the job cancel operation.
]
variable[payload] assign[=] dictionary[[], []]
if compare[name[job_name] is_not constant[None]] begin[:]
call[name[payload]][constant[job_name]] assign[=] name[job_name]
if compare[name[job_id] is_not constant[None]] begin[:]
call[name[payload]][constant[job_id]] assign[=] name[job_id]
variable[jobs_url] assign[=] call[name[self]._get_url, parameter[constant[jobs_path]]]
variable[res] assign[=] call[name[self].rest_client.session.delete, parameter[name[jobs_url]]]
call[name[_handle_http_errors], parameter[name[res]]]
return[call[name[res].json, parameter[]]]
|
keyword[def] identifier[cancel_job] ( identifier[self] , identifier[job_id] = keyword[None] , identifier[job_name] = keyword[None] ):
literal[string]
identifier[payload] ={}
keyword[if] identifier[job_name] keyword[is] keyword[not] keyword[None] :
identifier[payload] [ literal[string] ]= identifier[job_name]
keyword[if] identifier[job_id] keyword[is] keyword[not] keyword[None] :
identifier[payload] [ literal[string] ]= identifier[job_id]
identifier[jobs_url] = identifier[self] . identifier[_get_url] ( literal[string] )
identifier[res] = identifier[self] . identifier[rest_client] . identifier[session] . identifier[delete] ( identifier[jobs_url] , identifier[params] = identifier[payload] )
identifier[_handle_http_errors] ( identifier[res] )
keyword[return] identifier[res] . identifier[json] ()
|
def cancel_job(self, job_id=None, job_name=None):
"""Cancel a running job.
Args:
job_id (str, optional): Identifier of job to be canceled.
job_name (str, optional): Name of job to be canceled.
Returns:
dict: JSON response for the job cancel operation.
"""
payload = {}
if job_name is not None:
payload['job_name'] = job_name # depends on [control=['if'], data=['job_name']]
if job_id is not None:
payload['job_id'] = job_id # depends on [control=['if'], data=['job_id']]
jobs_url = self._get_url('jobs_path')
res = self.rest_client.session.delete(jobs_url, params=payload)
_handle_http_errors(res)
return res.json()
|
def network_profiles(self):
    """Get all the AP profiles."""
    results = self._wifi_ctrl.network_profiles(self._raw_obj)
    log = self._logger
    # Skip the per-profile formatting work entirely when INFO is off.
    if log.isEnabledFor(logging.INFO):
        for entry in results:
            log.info("Get profile:")
            log.info("\tssid: %s", entry.ssid)
            log.info("\tauth: %s", entry.auth)
            log.info("\takm: %s", entry.akm)
            log.info("\tcipher: %s", entry.cipher)
    return results
|
def function[network_profiles, parameter[self]]:
constant[Get all the AP profiles.]
variable[profiles] assign[=] call[name[self]._wifi_ctrl.network_profiles, parameter[name[self]._raw_obj]]
if call[name[self]._logger.isEnabledFor, parameter[name[logging].INFO]] begin[:]
for taget[name[profile]] in starred[name[profiles]] begin[:]
call[name[self]._logger.info, parameter[constant[Get profile:]]]
call[name[self]._logger.info, parameter[constant[ ssid: %s], name[profile].ssid]]
call[name[self]._logger.info, parameter[constant[ auth: %s], name[profile].auth]]
call[name[self]._logger.info, parameter[constant[ akm: %s], name[profile].akm]]
call[name[self]._logger.info, parameter[constant[ cipher: %s], name[profile].cipher]]
return[name[profiles]]
|
keyword[def] identifier[network_profiles] ( identifier[self] ):
literal[string]
identifier[profiles] = identifier[self] . identifier[_wifi_ctrl] . identifier[network_profiles] ( identifier[self] . identifier[_raw_obj] )
keyword[if] identifier[self] . identifier[_logger] . identifier[isEnabledFor] ( identifier[logging] . identifier[INFO] ):
keyword[for] identifier[profile] keyword[in] identifier[profiles] :
identifier[self] . identifier[_logger] . identifier[info] ( literal[string] )
identifier[self] . identifier[_logger] . identifier[info] ( literal[string] , identifier[profile] . identifier[ssid] )
identifier[self] . identifier[_logger] . identifier[info] ( literal[string] , identifier[profile] . identifier[auth] )
identifier[self] . identifier[_logger] . identifier[info] ( literal[string] , identifier[profile] . identifier[akm] )
identifier[self] . identifier[_logger] . identifier[info] ( literal[string] , identifier[profile] . identifier[cipher] )
keyword[return] identifier[profiles]
|
def network_profiles(self):
"""Get all the AP profiles."""
profiles = self._wifi_ctrl.network_profiles(self._raw_obj)
if self._logger.isEnabledFor(logging.INFO):
for profile in profiles:
self._logger.info('Get profile:')
self._logger.info('\tssid: %s', profile.ssid)
self._logger.info('\tauth: %s', profile.auth)
self._logger.info('\takm: %s', profile.akm)
self._logger.info('\tcipher: %s', profile.cipher) # depends on [control=['for'], data=['profile']] # depends on [control=['if'], data=[]]
return profiles
|
def parse_string_unsafe(s, system=SI):
    """Attempt to parse a string with ambiguous units and try to make a
    bitmath object out of it.

    This may produce inaccurate results if parsing shell output. For
    example `ls` may say a 2730 Byte file is '2.7K'. 2730 Bytes == 2.73 kB
    ~= 2.666 KiB. See the documentation for all of the important details.

    Note the following caveats:

    * All inputs are assumed to be byte-based (as opposed to bit based)
    * Numerical inputs (those without any units) are assumed to be a
      number of bytes
    * Inputs with single letter units (k, M, G, etc) are assumed to be SI
      units (base-10). Set the `system` parameter to `bitmath.NIST` to
      change this behavior.
    * Inputs with an `i` character following the leading letter (Ki, Mi,
      Gi) are assumed to be NIST units (base 2)
    * Capitalization does not matter

    Raises:
        ValueError: if the input is not a string/number, or the unit
            suffix is not a recognized bitmath unit.
    """
    # Python 3 removed the ``unicode`` builtin; compute the accepted
    # string types locally so this does not raise NameError at call time
    # on Python 3 while still accepting both types on Python 2.
    try:
        string_types = (str, unicode)  # noqa: F821 - Python 2 only
    except NameError:
        string_types = (str,)
    if not isinstance(s, string_types) and \
            not isinstance(s, numbers.Number):
        raise ValueError("parse_string_unsafe only accepts string/number inputs but a %s was given" %
                         type(s))
    ######################################################################
    # Is the input simple to parse? Just a number, or a number
    # masquerading as a string perhaps?

    # Test case: raw number input (easy!)
    if isinstance(s, numbers.Number):
        # It's just a number. Assume bytes
        return Byte(s)

    # Test case: a number pretending to be a string
    try:
        # Can we turn it directly into a number?
        return Byte(float(s))
    except ValueError:
        # Nope, this is not a plain number
        pass

    ######################################################################
    # At this point the input is a string with (apparently) a unit
    # suffix. Split it at the first alphabetic character into the
    # numeric value and the unit.
    for index, char in enumerate(s):
        if char.isalpha():
            break
    else:  # pragma: no cover
        # No alphabetic character at all: nothing we can treat as a unit.
        raise ValueError("No unit detected, can not parse string '%s' into a bitmath object" % s)
    val, unit = s[:index], s[index:]

    # Don't trust anything. We'll make sure the correct 'B' is in place.
    unit = unit.rstrip('Bb') + 'B'

    # At this point we can expect `unit` to be either:
    #
    # - 2 Characters (for SI, ex: kB or GB)
    # - 3 Characters (so NIST, ex: KiB, or GiB)
    #
    # A unit with any other number of chars is not a valid unit
    if len(unit) == 2:
        # SI-shaped unit
        if system == NIST:
            # NIST units requested: ensure the unit begins with a
            # capital letter, then insert the 'i' char (kB -> KiB).
            unit = capitalize_first(unit)
            unit = unit[0] + 'i' + unit[1:]
        else:
            # Default parsing (SI format)
            #
            # Edge-case checking: SI 'thousand' is a lower-case K
            if unit.startswith('K'):
                unit = unit.replace('K', 'k')
            elif not unit.startswith('k'):
                # Otherwise, ensure the first char is capitalized
                unit = capitalize_first(unit)
            if unit[0] not in SI_PREFIXES:
                raise ValueError("The unit %s is not a valid bitmath unit" % unit)
    elif len(unit) == 3:
        # NIST-shaped unit
        unit = capitalize_first(unit)
        if unit[:2] not in NIST_PREFIXES:
            raise ValueError("The unit %s is not a valid bitmath unit" % unit)
    else:
        # This is not a unit we recognize
        raise ValueError("The unit %s is not a valid bitmath unit" % unit)

    # Resolve the normalized unit name to its class. Using .get()
    # instead of indexing turns an unknown-unit KeyError (previously
    # leaked from the NIST branch) into the same ValueError raised
    # everywhere else in this function.
    unit_class = globals().get(unit)
    if unit_class is None:
        raise ValueError("The unit %s is not a valid bitmath unit" % unit)
    return unit_class(float(val))
|
def function[parse_string_unsafe, parameter[s, system]]:
constant[Attempt to parse a string with ambiguous units and try to make a
bitmath object out of it.
This may produce inaccurate results if parsing shell output. For
example `ls` may say a 2730 Byte file is '2.7K'. 2730 Bytes == 2.73 kB
~= 2.666 KiB. See the documentation for all of the important details.
Note the following caveats:
* All inputs are assumed to be byte-based (as opposed to bit based)
* Numerical inputs (those without any units) are assumed to be a
number of bytes
* Inputs with single letter units (k, M, G, etc) are assumed to be SI
units (base-10). Set the `system` parameter to `bitmath.NIST` to
change this behavior.
* Inputs with an `i` character following the leading letter (Ki, Mi,
Gi) are assumed to be NIST units (base 2)
* Capitalization does not matter
]
if <ast.BoolOp object at 0x7da207f02ce0> begin[:]
<ast.Raise object at 0x7da207f02650>
if call[name[isinstance], parameter[name[s], name[numbers].Number]] begin[:]
return[call[name[Byte], parameter[name[s]]]]
if call[name[isinstance], parameter[name[s], tuple[[<ast.Name object at 0x7da207f01120>, <ast.Name object at 0x7da207f03340>]]]] begin[:]
<ast.Try object at 0x7da207f01f60>
<ast.Try object at 0x7da207f01060>
<ast.Tuple object at 0x7da18f00c850> assign[=] tuple[[<ast.Subscript object at 0x7da18f00c130>, <ast.Subscript object at 0x7da18f00f0a0>]]
variable[unit] assign[=] call[name[unit].rstrip, parameter[constant[Bb]]]
<ast.AugAssign object at 0x7da18f00e410>
if compare[call[name[len], parameter[name[unit]]] equal[==] constant[2]] begin[:]
if compare[name[system] equal[==] name[NIST]] begin[:]
variable[unit] assign[=] call[name[capitalize_first], parameter[name[unit]]]
variable[_unit] assign[=] call[name[list], parameter[name[unit]]]
call[name[_unit].insert, parameter[constant[1], constant[i]]]
variable[unit] assign[=] call[constant[].join, parameter[name[_unit]]]
variable[unit_class] assign[=] call[call[name[globals], parameter[]]][name[unit]]
<ast.Try object at 0x7da1b26ae350>
return[call[name[unit_class], parameter[call[name[float], parameter[name[val]]]]]]
|
keyword[def] identifier[parse_string_unsafe] ( identifier[s] , identifier[system] = identifier[SI] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[s] ,( identifier[str] , identifier[unicode] )) keyword[and] keyword[not] identifier[isinstance] ( identifier[s] , identifier[numbers] . identifier[Number] ):
keyword[raise] identifier[ValueError] ( literal[string] %
identifier[type] ( identifier[s] ))
keyword[if] identifier[isinstance] ( identifier[s] , identifier[numbers] . identifier[Number] ):
keyword[return] identifier[Byte] ( identifier[s] )
keyword[if] identifier[isinstance] ( identifier[s] ,( identifier[str] , identifier[unicode] )):
keyword[try] :
keyword[return] identifier[Byte] ( identifier[float] ( identifier[s] ))
keyword[except] identifier[ValueError] :
keyword[pass]
keyword[try] :
identifier[index] = identifier[list] ([ identifier[i] . identifier[isalpha] () keyword[for] identifier[i] keyword[in] identifier[s] ]). identifier[index] ( keyword[True] )
keyword[except] identifier[ValueError] :
keyword[raise] identifier[ValueError] ( literal[string] % identifier[s] )
identifier[val] , identifier[unit] = identifier[s] [: identifier[index] ], identifier[s] [ identifier[index] :]
identifier[unit] = identifier[unit] . identifier[rstrip] ( literal[string] )
identifier[unit] += literal[string]
keyword[if] identifier[len] ( identifier[unit] )== literal[int] :
keyword[if] identifier[system] == identifier[NIST] :
identifier[unit] = identifier[capitalize_first] ( identifier[unit] )
identifier[_unit] = identifier[list] ( identifier[unit] )
identifier[_unit] . identifier[insert] ( literal[int] , literal[string] )
identifier[unit] = literal[string] . identifier[join] ( identifier[_unit] )
identifier[unit_class] = identifier[globals] ()[ identifier[unit] ]
keyword[else] :
keyword[if] identifier[unit] . identifier[startswith] ( literal[string] ):
identifier[unit] = identifier[unit] . identifier[replace] ( literal[string] , literal[string] )
keyword[elif] keyword[not] identifier[unit] . identifier[startswith] ( literal[string] ):
identifier[unit] = identifier[capitalize_first] ( identifier[unit] )
keyword[if] identifier[unit] [ literal[int] ] keyword[in] identifier[SI_PREFIXES] :
identifier[unit_class] = identifier[globals] ()[ identifier[unit] ]
keyword[elif] identifier[len] ( identifier[unit] )== literal[int] :
identifier[unit] = identifier[capitalize_first] ( identifier[unit] )
keyword[if] identifier[unit] [: literal[int] ] keyword[in] identifier[NIST_PREFIXES] :
identifier[unit_class] = identifier[globals] ()[ identifier[unit] ]
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] % identifier[unit] )
keyword[try] :
identifier[unit_class]
keyword[except] identifier[UnboundLocalError] :
keyword[raise] identifier[ValueError] ( literal[string] % identifier[unit] )
keyword[return] identifier[unit_class] ( identifier[float] ( identifier[val] ))
|
def parse_string_unsafe(s, system=SI):
"""Attempt to parse a string with ambiguous units and try to make a
bitmath object out of it.
This may produce inaccurate results if parsing shell output. For
example `ls` may say a 2730 Byte file is '2.7K'. 2730 Bytes == 2.73 kB
~= 2.666 KiB. See the documentation for all of the important details.
Note the following caveats:
* All inputs are assumed to be byte-based (as opposed to bit based)
* Numerical inputs (those without any units) are assumed to be a
number of bytes
* Inputs with single letter units (k, M, G, etc) are assumed to be SI
units (base-10). Set the `system` parameter to `bitmath.NIST` to
change this behavior.
* Inputs with an `i` character following the leading letter (Ki, Mi,
Gi) are assumed to be NIST units (base 2)
* Capitalization does not matter
"""
if not isinstance(s, (str, unicode)) and (not isinstance(s, numbers.Number)):
raise ValueError('parse_string_unsafe only accepts string/number inputs but a %s was given' % type(s)) # depends on [control=['if'], data=[]]
######################################################################
# Is the input simple to parse? Just a number, or a number
# masquerading as a string perhaps?
# Test case: raw number input (easy!)
if isinstance(s, numbers.Number):
# It's just a number. Assume bytes
return Byte(s) # depends on [control=['if'], data=[]]
# Test case: a number pretending to be a string
if isinstance(s, (str, unicode)):
try:
# Can we turn it directly into a number?
return Byte(float(s)) # depends on [control=['try'], data=[]]
except ValueError:
# Nope, this is not a plain number
pass # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
######################################################################
# At this point:
# - the input is also not just a number wrapped in a string
# - nor is is just a plain number type
#
# We need to do some more digging around now to figure out exactly
# what we were given and possibly normalize the input into a
# format we can recognize.
# First we'll separate the number and the unit.
#
# Get the index of the first alphabetic character
try:
index = list([i.isalpha() for i in s]).index(True) # depends on [control=['try'], data=[]]
except ValueError: # pragma: no cover
# If there's no alphabetic characters we won't be able to .index(True)
raise ValueError("No unit detected, can not parse string '%s' into a bitmath object" % s) # depends on [control=['except'], data=[]]
# Split the string into the value and the unit
(val, unit) = (s[:index], s[index:])
# Don't trust anything. We'll make sure the correct 'b' is in place.
unit = unit.rstrip('Bb')
unit += 'B'
# At this point we can expect `unit` to be either:
#
# - 2 Characters (for SI, ex: kB or GB)
# - 3 Caracters (so NIST, ex: KiB, or GiB)
#
# A unit with any other number of chars is not a valid unit
# SI
if len(unit) == 2:
# Has NIST parsing been requested?
if system == NIST:
# NIST units requested. Ensure the unit begins with a
# capital letter and is followed by an 'i' character.
unit = capitalize_first(unit)
# Insert an 'i' char after the first letter
_unit = list(unit)
_unit.insert(1, 'i')
# Collapse the list back into a 3 letter string
unit = ''.join(_unit)
unit_class = globals()[unit] # depends on [control=['if'], data=[]]
else:
# Default parsing (SI format)
#
# Edge-case checking: SI 'thousand' is a lower-case K
if unit.startswith('K'):
unit = unit.replace('K', 'k') # depends on [control=['if'], data=[]]
elif not unit.startswith('k'):
# Otherwise, ensure the first char is capitalized
unit = capitalize_first(unit) # depends on [control=['if'], data=[]]
# This is an SI-type unit
if unit[0] in SI_PREFIXES:
unit_class = globals()[unit] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
# NIST
elif len(unit) == 3:
unit = capitalize_first(unit)
# This is a NIST-type unit
if unit[:2] in NIST_PREFIXES:
unit_class = globals()[unit] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
# This is not a unit we recognize
raise ValueError('The unit %s is not a valid bitmath unit' % unit)
try:
unit_class # depends on [control=['try'], data=[]]
except UnboundLocalError:
raise ValueError('The unit %s is not a valid bitmath unit' % unit) # depends on [control=['except'], data=[]]
return unit_class(float(val))
|
def table_schema(self):
"""
Returns the table schema.
:returns: dict
"""
if self.__dict__.get('_table_schema') is None:
self._table_schema = None
table_schema = {}
for row in self.query_schema():
name, default, dtype = self.db().lexicon.column_info(row)
if isinstance(default, str):
json_matches = re.findall(r"^\'(.*)\'::jsonb$", default)
if len(json_matches) > 0:
default = json.loads(json_matches[0])
if name == self.primary_key:
default = None
table_schema[name] = {'default': default, 'type': dtype}
if len(table_schema):
self._table_schema = table_schema
return self._table_schema
|
def function[table_schema, parameter[self]]:
constant[
Returns the table schema.
:returns: dict
]
if compare[call[name[self].__dict__.get, parameter[constant[_table_schema]]] is constant[None]] begin[:]
name[self]._table_schema assign[=] constant[None]
variable[table_schema] assign[=] dictionary[[], []]
for taget[name[row]] in starred[call[name[self].query_schema, parameter[]]] begin[:]
<ast.Tuple object at 0x7da2045670d0> assign[=] call[call[name[self].db, parameter[]].lexicon.column_info, parameter[name[row]]]
if call[name[isinstance], parameter[name[default], name[str]]] begin[:]
variable[json_matches] assign[=] call[name[re].findall, parameter[constant[^\'(.*)\'::jsonb$], name[default]]]
if compare[call[name[len], parameter[name[json_matches]]] greater[>] constant[0]] begin[:]
variable[default] assign[=] call[name[json].loads, parameter[call[name[json_matches]][constant[0]]]]
if compare[name[name] equal[==] name[self].primary_key] begin[:]
variable[default] assign[=] constant[None]
call[name[table_schema]][name[name]] assign[=] dictionary[[<ast.Constant object at 0x7da204567fa0>, <ast.Constant object at 0x7da2045666b0>], [<ast.Name object at 0x7da2045673a0>, <ast.Name object at 0x7da204564c70>]]
if call[name[len], parameter[name[table_schema]]] begin[:]
name[self]._table_schema assign[=] name[table_schema]
return[name[self]._table_schema]
|
keyword[def] identifier[table_schema] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[__dict__] . identifier[get] ( literal[string] ) keyword[is] keyword[None] :
identifier[self] . identifier[_table_schema] = keyword[None]
identifier[table_schema] ={}
keyword[for] identifier[row] keyword[in] identifier[self] . identifier[query_schema] ():
identifier[name] , identifier[default] , identifier[dtype] = identifier[self] . identifier[db] (). identifier[lexicon] . identifier[column_info] ( identifier[row] )
keyword[if] identifier[isinstance] ( identifier[default] , identifier[str] ):
identifier[json_matches] = identifier[re] . identifier[findall] ( literal[string] , identifier[default] )
keyword[if] identifier[len] ( identifier[json_matches] )> literal[int] :
identifier[default] = identifier[json] . identifier[loads] ( identifier[json_matches] [ literal[int] ])
keyword[if] identifier[name] == identifier[self] . identifier[primary_key] :
identifier[default] = keyword[None]
identifier[table_schema] [ identifier[name] ]={ literal[string] : identifier[default] , literal[string] : identifier[dtype] }
keyword[if] identifier[len] ( identifier[table_schema] ):
identifier[self] . identifier[_table_schema] = identifier[table_schema]
keyword[return] identifier[self] . identifier[_table_schema]
|
def table_schema(self):
"""
Returns the table schema.
:returns: dict
"""
if self.__dict__.get('_table_schema') is None:
self._table_schema = None
table_schema = {}
for row in self.query_schema():
(name, default, dtype) = self.db().lexicon.column_info(row)
if isinstance(default, str):
json_matches = re.findall("^\\'(.*)\\'::jsonb$", default)
if len(json_matches) > 0:
default = json.loads(json_matches[0]) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if name == self.primary_key:
default = None # depends on [control=['if'], data=[]]
table_schema[name] = {'default': default, 'type': dtype} # depends on [control=['for'], data=['row']]
if len(table_schema):
self._table_schema = table_schema # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return self._table_schema
|
def format_seq(self, outstream=None, linewidth=70):
"""
Print a sequence in a readable format.
:param outstream: if `None`, formatted sequence is returned as a
string; otherwise, it is treated as a file-like
object and the formatted sequence is printed to the
outstream
:param linewidth: width for wrapping sequences over multiple lines; set
to 0 for no wrapping
"""
if linewidth == 0 or len(self.seq) <= linewidth:
if outstream is None:
return self.seq
else:
print(self.seq, file=outstream)
return
i = 0
seq = ''
while i < len(self.seq):
if outstream is None:
seq += self.seq[i:i+linewidth] + '\n'
else:
print(self.seq[i:i+linewidth], file=outstream)
i += linewidth
if outstream is None:
return seq
|
def function[format_seq, parameter[self, outstream, linewidth]]:
constant[
Print a sequence in a readable format.
:param outstream: if `None`, formatted sequence is returned as a
string; otherwise, it is treated as a file-like
object and the formatted sequence is printed to the
outstream
:param linewidth: width for wrapping sequences over multiple lines; set
to 0 for no wrapping
]
if <ast.BoolOp object at 0x7da20c992560> begin[:]
if compare[name[outstream] is constant[None]] begin[:]
return[name[self].seq]
variable[i] assign[=] constant[0]
variable[seq] assign[=] constant[]
while compare[name[i] less[<] call[name[len], parameter[name[self].seq]]] begin[:]
if compare[name[outstream] is constant[None]] begin[:]
<ast.AugAssign object at 0x7da20c991390>
<ast.AugAssign object at 0x7da20c993a60>
if compare[name[outstream] is constant[None]] begin[:]
return[name[seq]]
|
keyword[def] identifier[format_seq] ( identifier[self] , identifier[outstream] = keyword[None] , identifier[linewidth] = literal[int] ):
literal[string]
keyword[if] identifier[linewidth] == literal[int] keyword[or] identifier[len] ( identifier[self] . identifier[seq] )<= identifier[linewidth] :
keyword[if] identifier[outstream] keyword[is] keyword[None] :
keyword[return] identifier[self] . identifier[seq]
keyword[else] :
identifier[print] ( identifier[self] . identifier[seq] , identifier[file] = identifier[outstream] )
keyword[return]
identifier[i] = literal[int]
identifier[seq] = literal[string]
keyword[while] identifier[i] < identifier[len] ( identifier[self] . identifier[seq] ):
keyword[if] identifier[outstream] keyword[is] keyword[None] :
identifier[seq] += identifier[self] . identifier[seq] [ identifier[i] : identifier[i] + identifier[linewidth] ]+ literal[string]
keyword[else] :
identifier[print] ( identifier[self] . identifier[seq] [ identifier[i] : identifier[i] + identifier[linewidth] ], identifier[file] = identifier[outstream] )
identifier[i] += identifier[linewidth]
keyword[if] identifier[outstream] keyword[is] keyword[None] :
keyword[return] identifier[seq]
|
def format_seq(self, outstream=None, linewidth=70):
"""
Print a sequence in a readable format.
:param outstream: if `None`, formatted sequence is returned as a
string; otherwise, it is treated as a file-like
object and the formatted sequence is printed to the
outstream
:param linewidth: width for wrapping sequences over multiple lines; set
to 0 for no wrapping
"""
if linewidth == 0 or len(self.seq) <= linewidth:
if outstream is None:
return self.seq # depends on [control=['if'], data=[]]
else:
print(self.seq, file=outstream)
return # depends on [control=['if'], data=[]]
i = 0
seq = ''
while i < len(self.seq):
if outstream is None:
seq += self.seq[i:i + linewidth] + '\n' # depends on [control=['if'], data=[]]
else:
print(self.seq[i:i + linewidth], file=outstream)
i += linewidth # depends on [control=['while'], data=['i']]
if outstream is None:
return seq # depends on [control=['if'], data=[]]
|
def update_state(url, state_obj):
"""Update the state of a given model run. The state object is a Json
representation of the state as created by the SCO-Server.
Throws a ValueError if the resource is unknown or the update state
request failed.
Parameters
----------
url : string
Url to POST model run create model run request
state_obj : Json object
State object serialization as expected by the API.
"""
# POST update run state request
try:
req = urllib2.Request(url)
req.add_header('Content-Type', 'application/json')
response = urllib2.urlopen(req, json.dumps(state_obj))
except urllib2.URLError as ex:
raise ValueError(str(ex))
# Throw exception if resource was unknown or update request failed
if response.code == 400:
raise ValueError(response.message)
elif response.code == 404:
raise ValueError('unknown model run')
|
def function[update_state, parameter[url, state_obj]]:
constant[Update the state of a given model run. The state object is a Json
representation of the state as created by the SCO-Server.
Throws a ValueError if the resource is unknown or the update state
request failed.
Parameters
----------
url : string
Url to POST model run create model run request
state_obj : Json object
State object serialization as expected by the API.
]
<ast.Try object at 0x7da2043465f0>
if compare[name[response].code equal[==] constant[400]] begin[:]
<ast.Raise object at 0x7da18f09f460>
|
keyword[def] identifier[update_state] ( identifier[url] , identifier[state_obj] ):
literal[string]
keyword[try] :
identifier[req] = identifier[urllib2] . identifier[Request] ( identifier[url] )
identifier[req] . identifier[add_header] ( literal[string] , literal[string] )
identifier[response] = identifier[urllib2] . identifier[urlopen] ( identifier[req] , identifier[json] . identifier[dumps] ( identifier[state_obj] ))
keyword[except] identifier[urllib2] . identifier[URLError] keyword[as] identifier[ex] :
keyword[raise] identifier[ValueError] ( identifier[str] ( identifier[ex] ))
keyword[if] identifier[response] . identifier[code] == literal[int] :
keyword[raise] identifier[ValueError] ( identifier[response] . identifier[message] )
keyword[elif] identifier[response] . identifier[code] == literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] )
|
def update_state(url, state_obj):
"""Update the state of a given model run. The state object is a Json
representation of the state as created by the SCO-Server.
Throws a ValueError if the resource is unknown or the update state
request failed.
Parameters
----------
url : string
Url to POST model run create model run request
state_obj : Json object
State object serialization as expected by the API.
"""
# POST update run state request
try:
req = urllib2.Request(url)
req.add_header('Content-Type', 'application/json')
response = urllib2.urlopen(req, json.dumps(state_obj)) # depends on [control=['try'], data=[]]
except urllib2.URLError as ex:
raise ValueError(str(ex)) # depends on [control=['except'], data=['ex']]
# Throw exception if resource was unknown or update request failed
if response.code == 400:
raise ValueError(response.message) # depends on [control=['if'], data=[]]
elif response.code == 404:
raise ValueError('unknown model run') # depends on [control=['if'], data=[]]
|
def sequenceToWord(sequence):
"""
converts a sequence (one-hot) in a reber string
"""
reberString = ''
for i in xrange(len(sequence)):
index = np.where(sequence[i]==1.)[0][0]
reberString += chars[index]
return reberString
|
def function[sequenceToWord, parameter[sequence]]:
constant[
converts a sequence (one-hot) in a reber string
]
variable[reberString] assign[=] constant[]
for taget[name[i]] in starred[call[name[xrange], parameter[call[name[len], parameter[name[sequence]]]]]] begin[:]
variable[index] assign[=] call[call[call[name[np].where, parameter[compare[call[name[sequence]][name[i]] equal[==] constant[1.0]]]]][constant[0]]][constant[0]]
<ast.AugAssign object at 0x7da1b085ef80>
return[name[reberString]]
|
keyword[def] identifier[sequenceToWord] ( identifier[sequence] ):
literal[string]
identifier[reberString] = literal[string]
keyword[for] identifier[i] keyword[in] identifier[xrange] ( identifier[len] ( identifier[sequence] )):
identifier[index] = identifier[np] . identifier[where] ( identifier[sequence] [ identifier[i] ]== literal[int] )[ literal[int] ][ literal[int] ]
identifier[reberString] += identifier[chars] [ identifier[index] ]
keyword[return] identifier[reberString]
|
def sequenceToWord(sequence):
"""
converts a sequence (one-hot) in a reber string
"""
reberString = ''
for i in xrange(len(sequence)):
index = np.where(sequence[i] == 1.0)[0][0]
reberString += chars[index] # depends on [control=['for'], data=['i']]
return reberString
|
def list_datacenters(kwargs=None, call=None):
'''
List all the data centers for this VMware environment
CLI Example:
.. code-block:: bash
salt-cloud -f list_datacenters my-vmware-config
'''
if call != 'function':
raise SaltCloudSystemExit(
'The list_datacenters function must be called with '
'-f or --function.'
)
return {'Datacenters': salt.utils.vmware.list_datacenters(_get_si())}
|
def function[list_datacenters, parameter[kwargs, call]]:
constant[
List all the data centers for this VMware environment
CLI Example:
.. code-block:: bash
salt-cloud -f list_datacenters my-vmware-config
]
if compare[name[call] not_equal[!=] constant[function]] begin[:]
<ast.Raise object at 0x7da18f811240>
return[dictionary[[<ast.Constant object at 0x7da18f811780>], [<ast.Call object at 0x7da18f8135e0>]]]
|
keyword[def] identifier[list_datacenters] ( identifier[kwargs] = keyword[None] , identifier[call] = keyword[None] ):
literal[string]
keyword[if] identifier[call] != literal[string] :
keyword[raise] identifier[SaltCloudSystemExit] (
literal[string]
literal[string]
)
keyword[return] { literal[string] : identifier[salt] . identifier[utils] . identifier[vmware] . identifier[list_datacenters] ( identifier[_get_si] ())}
|
def list_datacenters(kwargs=None, call=None):
"""
List all the data centers for this VMware environment
CLI Example:
.. code-block:: bash
salt-cloud -f list_datacenters my-vmware-config
"""
if call != 'function':
raise SaltCloudSystemExit('The list_datacenters function must be called with -f or --function.') # depends on [control=['if'], data=[]]
return {'Datacenters': salt.utils.vmware.list_datacenters(_get_si())}
|
def oauth_required(f):
"""
decorator to add to a view to require an oauth user
:return: decorated function
"""
@wraps(f)
def decorated_function(*args, **kwargs):
if 'oauth_user_uri' not in session or session['oauth_user_uri'] is None:
return redirect(url_for('.r_oauth_login', next=request.url))
return f(*args,**kwargs)
return decorated_function
|
def function[oauth_required, parameter[f]]:
constant[
decorator to add to a view to require an oauth user
:return: decorated function
]
def function[decorated_function, parameter[]]:
if <ast.BoolOp object at 0x7da18fe918a0> begin[:]
return[call[name[redirect], parameter[call[name[url_for], parameter[constant[.r_oauth_login]]]]]]
return[call[name[f], parameter[<ast.Starred object at 0x7da18fe92b60>]]]
return[name[decorated_function]]
|
keyword[def] identifier[oauth_required] ( identifier[f] ):
literal[string]
@ identifier[wraps] ( identifier[f] )
keyword[def] identifier[decorated_function] (* identifier[args] ,** identifier[kwargs] ):
keyword[if] literal[string] keyword[not] keyword[in] identifier[session] keyword[or] identifier[session] [ literal[string] ] keyword[is] keyword[None] :
keyword[return] identifier[redirect] ( identifier[url_for] ( literal[string] , identifier[next] = identifier[request] . identifier[url] ))
keyword[return] identifier[f] (* identifier[args] ,** identifier[kwargs] )
keyword[return] identifier[decorated_function]
|
def oauth_required(f):
"""
decorator to add to a view to require an oauth user
:return: decorated function
"""
@wraps(f)
def decorated_function(*args, **kwargs):
if 'oauth_user_uri' not in session or session['oauth_user_uri'] is None:
return redirect(url_for('.r_oauth_login', next=request.url)) # depends on [control=['if'], data=[]]
return f(*args, **kwargs)
return decorated_function
|
def load( filename ):
"""
Loads the profile from the inputed filename.
:param filename | <str>
"""
try:
f = open(filename, 'r')
except IOError:
logger.exception('Could not load the file: %s' % filename)
return False
strdata = f.read()
f.close()
return XViewProfile.fromString(strdata)
|
def function[load, parameter[filename]]:
constant[
Loads the profile from the inputed filename.
:param filename | <str>
]
<ast.Try object at 0x7da18bcc8100>
variable[strdata] assign[=] call[name[f].read, parameter[]]
call[name[f].close, parameter[]]
return[call[name[XViewProfile].fromString, parameter[name[strdata]]]]
|
keyword[def] identifier[load] ( identifier[filename] ):
literal[string]
keyword[try] :
identifier[f] = identifier[open] ( identifier[filename] , literal[string] )
keyword[except] identifier[IOError] :
identifier[logger] . identifier[exception] ( literal[string] % identifier[filename] )
keyword[return] keyword[False]
identifier[strdata] = identifier[f] . identifier[read] ()
identifier[f] . identifier[close] ()
keyword[return] identifier[XViewProfile] . identifier[fromString] ( identifier[strdata] )
|
def load(filename):
"""
Loads the profile from the inputed filename.
:param filename | <str>
"""
try:
f = open(filename, 'r') # depends on [control=['try'], data=[]]
except IOError:
logger.exception('Could not load the file: %s' % filename)
return False # depends on [control=['except'], data=[]]
strdata = f.read()
f.close()
return XViewProfile.fromString(strdata)
|
def notify(self, level, value, target=None, ntype=None, rule=None):
"""Notify main reactor about event."""
# Did we see the event before?
if target in self.state and level == self.state[target]:
return False
# Do we see the event first time?
if target not in self.state and level == 'normal' \
and not self.reactor.options['send_initial']:
return False
self.state[target] = level
return self.reactor.notify(level, self, value, target=target, ntype=ntype, rule=rule)
|
def function[notify, parameter[self, level, value, target, ntype, rule]]:
constant[Notify main reactor about event.]
if <ast.BoolOp object at 0x7da1b0e14190> begin[:]
return[constant[False]]
if <ast.BoolOp object at 0x7da1b0e161a0> begin[:]
return[constant[False]]
call[name[self].state][name[target]] assign[=] name[level]
return[call[name[self].reactor.notify, parameter[name[level], name[self], name[value]]]]
|
keyword[def] identifier[notify] ( identifier[self] , identifier[level] , identifier[value] , identifier[target] = keyword[None] , identifier[ntype] = keyword[None] , identifier[rule] = keyword[None] ):
literal[string]
keyword[if] identifier[target] keyword[in] identifier[self] . identifier[state] keyword[and] identifier[level] == identifier[self] . identifier[state] [ identifier[target] ]:
keyword[return] keyword[False]
keyword[if] identifier[target] keyword[not] keyword[in] identifier[self] . identifier[state] keyword[and] identifier[level] == literal[string] keyword[and] keyword[not] identifier[self] . identifier[reactor] . identifier[options] [ literal[string] ]:
keyword[return] keyword[False]
identifier[self] . identifier[state] [ identifier[target] ]= identifier[level]
keyword[return] identifier[self] . identifier[reactor] . identifier[notify] ( identifier[level] , identifier[self] , identifier[value] , identifier[target] = identifier[target] , identifier[ntype] = identifier[ntype] , identifier[rule] = identifier[rule] )
|
def notify(self, level, value, target=None, ntype=None, rule=None):
"""Notify main reactor about event."""
# Did we see the event before?
if target in self.state and level == self.state[target]:
return False # depends on [control=['if'], data=[]]
# Do we see the event first time?
if target not in self.state and level == 'normal' and (not self.reactor.options['send_initial']):
return False # depends on [control=['if'], data=[]]
self.state[target] = level
return self.reactor.notify(level, self, value, target=target, ntype=ntype, rule=rule)
|
def add_object_file(self, obj_file):
"""
Add object file to the jit. object_file can be instance of
:class:ObjectFile or a string representing file system path
"""
if isinstance(obj_file, str):
obj_file = object_file.ObjectFileRef.from_path(obj_file)
ffi.lib.LLVMPY_MCJITAddObjectFile(self, obj_file)
|
def function[add_object_file, parameter[self, obj_file]]:
constant[
Add object file to the jit. object_file can be instance of
:class:ObjectFile or a string representing file system path
]
if call[name[isinstance], parameter[name[obj_file], name[str]]] begin[:]
variable[obj_file] assign[=] call[name[object_file].ObjectFileRef.from_path, parameter[name[obj_file]]]
call[name[ffi].lib.LLVMPY_MCJITAddObjectFile, parameter[name[self], name[obj_file]]]
|
keyword[def] identifier[add_object_file] ( identifier[self] , identifier[obj_file] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[obj_file] , identifier[str] ):
identifier[obj_file] = identifier[object_file] . identifier[ObjectFileRef] . identifier[from_path] ( identifier[obj_file] )
identifier[ffi] . identifier[lib] . identifier[LLVMPY_MCJITAddObjectFile] ( identifier[self] , identifier[obj_file] )
|
def add_object_file(self, obj_file):
"""
Add object file to the jit. object_file can be instance of
:class:ObjectFile or a string representing file system path
"""
if isinstance(obj_file, str):
obj_file = object_file.ObjectFileRef.from_path(obj_file) # depends on [control=['if'], data=[]]
ffi.lib.LLVMPY_MCJITAddObjectFile(self, obj_file)
|
def search_item_by_name_and_folder_name(self, name, folder_name,
token=None):
"""
Return all items with a given name and parent folder name.
:param name: The name of the item to search by.
:type name: string
:param folder_name: The name of the parent folder to search by.
:type folder_name: string
:param token: (optional) A valid token for the user in question.
:type token: None | string
:returns: A list of all items with the given name and parent folder
name.
:rtype: list[dict]
"""
parameters = dict()
parameters['name'] = name
parameters['folderName'] = folder_name
if token:
parameters['token'] = token
response = self.request('midas.item.searchbynameandfoldername',
parameters)
return response['items']
|
def function[search_item_by_name_and_folder_name, parameter[self, name, folder_name, token]]:
constant[
Return all items with a given name and parent folder name.
:param name: The name of the item to search by.
:type name: string
:param folder_name: The name of the parent folder to search by.
:type folder_name: string
:param token: (optional) A valid token for the user in question.
:type token: None | string
:returns: A list of all items with the given name and parent folder
name.
:rtype: list[dict]
]
variable[parameters] assign[=] call[name[dict], parameter[]]
call[name[parameters]][constant[name]] assign[=] name[name]
call[name[parameters]][constant[folderName]] assign[=] name[folder_name]
if name[token] begin[:]
call[name[parameters]][constant[token]] assign[=] name[token]
variable[response] assign[=] call[name[self].request, parameter[constant[midas.item.searchbynameandfoldername], name[parameters]]]
return[call[name[response]][constant[items]]]
|
keyword[def] identifier[search_item_by_name_and_folder_name] ( identifier[self] , identifier[name] , identifier[folder_name] ,
identifier[token] = keyword[None] ):
literal[string]
identifier[parameters] = identifier[dict] ()
identifier[parameters] [ literal[string] ]= identifier[name]
identifier[parameters] [ literal[string] ]= identifier[folder_name]
keyword[if] identifier[token] :
identifier[parameters] [ literal[string] ]= identifier[token]
identifier[response] = identifier[self] . identifier[request] ( literal[string] ,
identifier[parameters] )
keyword[return] identifier[response] [ literal[string] ]
|
def search_item_by_name_and_folder_name(self, name, folder_name, token=None):
"""
Return all items with a given name and parent folder name.
:param name: The name of the item to search by.
:type name: string
:param folder_name: The name of the parent folder to search by.
:type folder_name: string
:param token: (optional) A valid token for the user in question.
:type token: None | string
:returns: A list of all items with the given name and parent folder
name.
:rtype: list[dict]
"""
parameters = dict()
parameters['name'] = name
parameters['folderName'] = folder_name
if token:
parameters['token'] = token # depends on [control=['if'], data=[]]
response = self.request('midas.item.searchbynameandfoldername', parameters)
return response['items']
|
def otsu_segmentation(image, k, mask=None):
"""
Otsu image segmentation
This is a very fast segmentation algorithm good for quick explortation,
but does not return probability maps.
ANTsR function: `thresholdImage(image, 'Otsu', k)`
Arguments
---------
image : ANTsImage
input image
k : integer
integer number of classes. Note that a background class will
be added to this, so the resulting segmentation will
have k+1 unique values.
mask : ANTsImage
segment inside this mask
Returns
-------
ANTsImage
Example
-------
>>> import ants
>>> mni = ants.image_read(ants.get_data('mni'))
>>> seg = mni.otsu_segmentation(k=3) #0=bg,1=csf,2=gm,3=wm
"""
if mask is not None:
image = image.mask_image(mask)
seg = image.threshold_image('Otsu', k)
return seg
|
def function[otsu_segmentation, parameter[image, k, mask]]:
constant[
Otsu image segmentation
This is a very fast segmentation algorithm good for quick explortation,
but does not return probability maps.
ANTsR function: `thresholdImage(image, 'Otsu', k)`
Arguments
---------
image : ANTsImage
input image
k : integer
integer number of classes. Note that a background class will
be added to this, so the resulting segmentation will
have k+1 unique values.
mask : ANTsImage
segment inside this mask
Returns
-------
ANTsImage
Example
-------
>>> import ants
>>> mni = ants.image_read(ants.get_data('mni'))
>>> seg = mni.otsu_segmentation(k=3) #0=bg,1=csf,2=gm,3=wm
]
if compare[name[mask] is_not constant[None]] begin[:]
variable[image] assign[=] call[name[image].mask_image, parameter[name[mask]]]
variable[seg] assign[=] call[name[image].threshold_image, parameter[constant[Otsu], name[k]]]
return[name[seg]]
|
keyword[def] identifier[otsu_segmentation] ( identifier[image] , identifier[k] , identifier[mask] = keyword[None] ):
literal[string]
keyword[if] identifier[mask] keyword[is] keyword[not] keyword[None] :
identifier[image] = identifier[image] . identifier[mask_image] ( identifier[mask] )
identifier[seg] = identifier[image] . identifier[threshold_image] ( literal[string] , identifier[k] )
keyword[return] identifier[seg]
|
def otsu_segmentation(image, k, mask=None):
"""
Otsu image segmentation
This is a very fast segmentation algorithm good for quick explortation,
but does not return probability maps.
ANTsR function: `thresholdImage(image, 'Otsu', k)`
Arguments
---------
image : ANTsImage
input image
k : integer
integer number of classes. Note that a background class will
be added to this, so the resulting segmentation will
have k+1 unique values.
mask : ANTsImage
segment inside this mask
Returns
-------
ANTsImage
Example
-------
>>> import ants
>>> mni = ants.image_read(ants.get_data('mni'))
>>> seg = mni.otsu_segmentation(k=3) #0=bg,1=csf,2=gm,3=wm
"""
if mask is not None:
image = image.mask_image(mask) # depends on [control=['if'], data=['mask']]
seg = image.threshold_image('Otsu', k)
return seg
|
def rax(a, boot, threads,
        fast=False, run_rax=False, run_iq=False, model=False, cluster=False, node=False):
    """
    run raxml on 'a' (alignment) with 'boot' (bootstraps) and 'threads' (threads)
    store all files in raxml_a_b
    1. give every sequence a short identifier
    2. convert fasta to phylip
    3. run raxml
    4. convert ids in raxml tree to original names

    Yields the path of every tree file that was produced.

    a: path to the alignment (fasta)
    boot: number of bootstraps (also used in the output directory name)
    threads: thread count passed through to the tree builders
    fast: also build a quick FastTree tree
    run_iq / run_rax: build an IQ-Tree or RAxML tree (IQ-Tree wins when both
        flags are set, see the if/elif below)
    model: substitution model passed through to the tree builder
    cluster / node: cluster-submission options passed through
    """
    a = os.path.abspath(a)
    a_base = a.rsplit('/', 1)[1]
    # all output goes to <alignment dir>/<alignment name>_rax_boots_<boot>/
    out_dir = '%s/%s_rax_boots_%s' % \
        (a.rsplit('/', 1)[0], a_base.rsplit('.', 1)[0], boot)
    os.system('mkdir -p %s' % (out_dir))
    # work on a symlink of the alignment placed inside the output directory
    os.system('ln -sf %s %s/%s' % (os.path.abspath(a), out_dir, a.rsplit('/', 1)[1]))
    os.chdir(out_dir)
    a_id, a_id_lookup = get_ids(a_base)
    a_id_phylip = convert2phylip(a_id)
    rax_out = '%s.raxml.txt' % (a_id_phylip)
    if fast is True:
        final_fast = '%s.fasttree.tree' % (a_id_lookup.rsplit('.', 2)[0])
        fast_tree = run_fast(a_id, threads, cluster, node)
        # map the short ids back to the original names (step 4 above)
        fix_tree(fast_tree, a_id_lookup, final_fast)
        yield '%s/%s' % (out_dir, final_fast)
    # run IQ-Tree or RAxML (IQ-Tree takes precedence when both are requested)
    if run_iq is True:
        final_iq = '%s.iq.tree' % (a_id_lookup.rsplit('.', 2)[0])
        iq_tree = run_iqtree(a_id_phylip, model, threads, cluster, node)
        fix_tree(iq_tree, a_id_lookup, final_iq)
        yield '%s/%s' % (out_dir, final_iq)
    elif run_rax is True:
        final_rax = '%s.raxml.tree' % (a_id_lookup.rsplit('.', 2)[0])
        rax_tree = run_raxml(rax_out, boot, a_id_phylip, threads, a_id, model, cluster, node)
        fix_tree(rax_tree, a_id_lookup, final_rax)
        yield '%s/%s' % (out_dir, final_rax)
|
def function[rax, parameter[a, boot, threads, fast, run_rax, run_iq, model, cluster, node]]:
constant[
run raxml on 'a' (alignment) with 'boot' (bootstraps) and 'threads' (threads)
store all files in raxml_a_b
1. give every sequence a short identifier
2. convert fasta to phylip
3. run raxml
4. convert ids in raxml tree to original names
]
variable[a] assign[=] call[name[os].path.abspath, parameter[name[a]]]
variable[a_base] assign[=] call[call[name[a].rsplit, parameter[constant[/], constant[1]]]][constant[1]]
variable[out_dir] assign[=] binary_operation[constant[%s/%s_rax_boots_%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Subscript object at 0x7da20c76e350>, <ast.Subscript object at 0x7da20c76e3b0>, <ast.Name object at 0x7da1b2440f10>]]]
call[name[os].system, parameter[binary_operation[constant[mkdir -p %s] <ast.Mod object at 0x7da2590d6920> name[out_dir]]]]
call[name[os].system, parameter[binary_operation[constant[ln -sf %s %s/%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Call object at 0x7da2101f4f70>, <ast.Name object at 0x7da20ed9bd30>, <ast.Subscript object at 0x7da20ed9a8f0>]]]]]
call[name[os].chdir, parameter[name[out_dir]]]
<ast.Tuple object at 0x7da18f722770> assign[=] call[name[get_ids], parameter[name[a_base]]]
variable[a_id_phylip] assign[=] call[name[convert2phylip], parameter[name[a_id]]]
variable[rax_out] assign[=] binary_operation[constant[%s.raxml.txt] <ast.Mod object at 0x7da2590d6920> name[a_id_phylip]]
if compare[name[fast] is constant[True]] begin[:]
variable[final_fast] assign[=] binary_operation[constant[%s.fasttree.tree] <ast.Mod object at 0x7da2590d6920> call[call[name[a_id_lookup].rsplit, parameter[constant[.], constant[2]]]][constant[0]]]
variable[fast_tree] assign[=] call[name[run_fast], parameter[name[a_id], name[threads], name[cluster], name[node]]]
variable[good_fast] assign[=] call[name[fix_tree], parameter[name[fast_tree], name[a_id_lookup], name[final_fast]]]
<ast.Yield object at 0x7da18f7201f0>
if compare[name[run_iq] is constant[True]] begin[:]
variable[final_iq] assign[=] binary_operation[constant[%s.iq.tree] <ast.Mod object at 0x7da2590d6920> call[call[name[a_id_lookup].rsplit, parameter[constant[.], constant[2]]]][constant[0]]]
variable[iq_out] assign[=] binary_operation[constant[%s.iq.out] <ast.Mod object at 0x7da2590d6920> name[a_id_phylip]]
variable[iq_tree] assign[=] call[name[run_iqtree], parameter[name[a_id_phylip], name[model], name[threads], name[cluster], name[node]]]
variable[good_tree] assign[=] call[name[fix_tree], parameter[name[iq_tree], name[a_id_lookup], name[final_iq]]]
<ast.Yield object at 0x7da18f720430>
|
keyword[def] identifier[rax] ( identifier[a] , identifier[boot] , identifier[threads] , identifier[fast] = keyword[False] , identifier[run_rax] = keyword[False] , identifier[run_iq] = keyword[False] , identifier[model] = keyword[False] , identifier[cluster] = keyword[False] , identifier[node] = keyword[False] ):
literal[string]
identifier[a] = identifier[os] . identifier[path] . identifier[abspath] ( identifier[a] )
identifier[a_base] = identifier[a] . identifier[rsplit] ( literal[string] , literal[int] )[ literal[int] ]
identifier[out_dir] = literal[string] %( identifier[a] . identifier[rsplit] ( literal[string] , literal[int] )[ literal[int] ], identifier[a_base] . identifier[rsplit] ( literal[string] , literal[int] )[ literal[int] ], identifier[boot] )
identifier[os] . identifier[system] ( literal[string] %( identifier[out_dir] ))
identifier[os] . identifier[system] ( literal[string] %( identifier[os] . identifier[path] . identifier[abspath] ( identifier[a] ), identifier[out_dir] , identifier[a] . identifier[rsplit] ( literal[string] , literal[int] )[ literal[int] ]))
identifier[os] . identifier[chdir] ( identifier[out_dir] )
identifier[a_id] , identifier[a_id_lookup] = identifier[get_ids] ( identifier[a_base] )
identifier[a_id_phylip] = identifier[convert2phylip] ( identifier[a_id] )
identifier[rax_out] = literal[string] %( identifier[a_id_phylip] )
keyword[if] identifier[fast] keyword[is] keyword[True] :
identifier[final_fast] = literal[string] %( identifier[a_id_lookup] . identifier[rsplit] ( literal[string] , literal[int] )[ literal[int] ])
identifier[fast_tree] = identifier[run_fast] ( identifier[a_id] , identifier[threads] , identifier[cluster] , identifier[node] )
identifier[good_fast] = identifier[fix_tree] ( identifier[fast_tree] , identifier[a_id_lookup] , identifier[final_fast] )
keyword[yield] literal[string] %( identifier[out_dir] , identifier[final_fast] )
keyword[if] identifier[run_iq] keyword[is] keyword[True] :
identifier[final_iq] = literal[string] %( identifier[a_id_lookup] . identifier[rsplit] ( literal[string] , literal[int] )[ literal[int] ])
identifier[iq_out] = literal[string] %( identifier[a_id_phylip] )
identifier[iq_tree] = identifier[run_iqtree] ( identifier[a_id_phylip] , identifier[model] , identifier[threads] , identifier[cluster] , identifier[node] )
identifier[good_tree] = identifier[fix_tree] ( identifier[iq_tree] , identifier[a_id_lookup] , identifier[final_iq] )
keyword[yield] literal[string] %( identifier[out_dir] , identifier[final_iq] )
keyword[elif] identifier[run_rax] keyword[is] keyword[True] :
identifier[final_rax] = literal[string] %( identifier[a_id_lookup] . identifier[rsplit] ( literal[string] , literal[int] )[ literal[int] ])
identifier[rax_tree] = identifier[run_raxml] ( identifier[rax_out] , identifier[boot] , identifier[a_id_phylip] , identifier[threads] , identifier[a_id] , identifier[model] , identifier[cluster] , identifier[node] )
identifier[good_tree] = identifier[fix_tree] ( identifier[rax_tree] , identifier[a_id_lookup] , identifier[final_rax] )
keyword[yield] literal[string] %( identifier[out_dir] , identifier[final_rax] )
|
def rax(a, boot, threads, fast=False, run_rax=False, run_iq=False, model=False, cluster=False, node=False):
    """
    run raxml on 'a' (alignment) with 'boot' (bootstraps) and 'threads' (threads)
    store all files in raxml_a_b
    1. give every sequence a short identifier
    2. convert fasta to phylip
    3. run raxml
    4. convert ids in raxml tree to original names

    Generator: yields the path of every tree file that was produced.
    `fast` adds a FastTree tree; `run_iq`/`run_rax` pick IQ-Tree or RAxML
    (IQ-Tree wins when both flags are set — see the if/elif below).
    `model`, `cluster` and `node` are passed through to the tree builders.
    """
    a = os.path.abspath(a)
    a_base = a.rsplit('/', 1)[1]
    # all output lives in <alignment dir>/<alignment name>_rax_boots_<boot>/
    out_dir = '%s/%s_rax_boots_%s' % (a.rsplit('/', 1)[0], a_base.rsplit('.', 1)[0], boot)
    os.system('mkdir -p %s' % out_dir)
    # work on a symlink of the alignment placed inside the output directory
    os.system('ln -sf %s %s/%s' % (os.path.abspath(a), out_dir, a.rsplit('/', 1)[1]))
    os.chdir(out_dir)
    (a_id, a_id_lookup) = get_ids(a_base)
    a_id_phylip = convert2phylip(a_id)
    rax_out = '%s.raxml.txt' % a_id_phylip
    if fast is True:
        final_fast = '%s.fasttree.tree' % a_id_lookup.rsplit('.', 2)[0]
        fast_tree = run_fast(a_id, threads, cluster, node)
        # fix_tree maps the short ids back to the original names (step 4)
        good_fast = fix_tree(fast_tree, a_id_lookup, final_fast)
        yield ('%s/%s' % (out_dir, final_fast)) # depends on [control=['if'], data=[]]
    # run IQ-Tree or RAxML
    if run_iq is True:
        final_iq = '%s.iq.tree' % a_id_lookup.rsplit('.', 2)[0]
        iq_out = '%s.iq.out' % a_id_phylip
        iq_tree = run_iqtree(a_id_phylip, model, threads, cluster, node)
        good_tree = fix_tree(iq_tree, a_id_lookup, final_iq)
        yield ('%s/%s' % (out_dir, final_iq)) # depends on [control=['if'], data=[]]
    elif run_rax is True:
        final_rax = '%s.raxml.tree' % a_id_lookup.rsplit('.', 2)[0]
        rax_tree = run_raxml(rax_out, boot, a_id_phylip, threads, a_id, model, cluster, node)
        good_tree = fix_tree(rax_tree, a_id_lookup, final_rax)
        yield ('%s/%s' % (out_dir, final_rax)) # depends on [control=['if'], data=[]]
|
def bind_proxy(self, name, proxy):
    """Adds a mask that maps to a given proxy.

    The (name, proxy) pair is pushed to the front of the proxy list, so
    the most recent binding is encountered first when the list is scanned.
    """
    slash_delimited = len(name) > 0 and name[0] == '/' and name[-1] == '/'
    if not slash_delimited:
        raise ValueError(
            "name must start and end with '/': {0}".format(name))
    self._folder_proxys.insert(0, (name, proxy))
|
def function[bind_proxy, parameter[self, name, proxy]]:
constant[Adds a mask that maps to a given proxy.]
if <ast.BoolOp object at 0x7da2043466e0> begin[:]
<ast.Raise object at 0x7da204345300>
call[name[self]._folder_proxys.insert, parameter[constant[0], tuple[[<ast.Name object at 0x7da204344040>, <ast.Name object at 0x7da204345d50>]]]]
|
keyword[def] identifier[bind_proxy] ( identifier[self] , identifier[name] , identifier[proxy] ):
literal[string]
keyword[if] keyword[not] identifier[len] ( identifier[name] ) keyword[or] identifier[name] [ literal[int] ]!= literal[string] keyword[or] identifier[name] [- literal[int] ]!= literal[string] :
keyword[raise] identifier[ValueError] (
literal[string] . identifier[format] ( identifier[name] ))
identifier[self] . identifier[_folder_proxys] . identifier[insert] ( literal[int] ,( identifier[name] , identifier[proxy] ))
|
def bind_proxy(self, name, proxy):
    """Adds a mask that maps to a given proxy.

    The pair is inserted at index 0 of ``self._folder_proxys``, so the most
    recently bound mask is seen first by whatever scans that list.

    :param name: folder mask; must be non-empty and start and end with '/'
    :param proxy: the proxy object to associate with the mask
    :raises ValueError: when ``name`` is empty or not '/'-delimited
    """
    if not len(name) or name[0] != '/' or name[-1] != '/':
        raise ValueError("name must start and end with '/': {0}".format(name)) # depends on [control=['if'], data=[]]
    self._folder_proxys.insert(0, (name, proxy))
|
def _normalize(esfilter):
    """
    Simplify an Elasticsearch bool-filter expression until it reaches a
    fixed point: collapse nested `and` (bool.filter) and `or` (bool.should)
    clauses, drop redundant terms, merge `range` filters on the same field,
    and reduce trivial cases to MATCH_ALL / MATCH_NONE.  Normalized results
    are marked with ``isNormal = True`` so repeated calls return at once.
    NOTE(review): mutates `esfilter` (and its sub-clauses) in place.

    TODO: DO NOT USE Data, WE ARE SPENDING TOO MUCH TIME WRAPPING/UNWRAPPING
    REALLY, WE JUST COLLAPSE CASCADING `and` AND `or` FILTERS
    """
    if esfilter == MATCH_ALL or esfilter == MATCH_NONE or esfilter.isNormal:
        return esfilter
    # Log.note("from: " + convert.value2json(esfilter))
    isDiff = True
    # keep rewriting until a full pass makes no change (fixed point)
    while isDiff:
        isDiff = False
        if esfilter.bool.filter:
            terms = esfilter.bool.filter
            # pairwise scan of the AND-ed terms, replacing redundant ones
            # with MATCH_ALL (a no-op inside a conjunction)
            for (i0, t0), (i1, t1) in itertools.product(
                enumerate(terms), enumerate(terms)
            ):
                if i0 == i1:
                    continue  # SAME, IGNORE
                # TERM FILTER ALREADY ASSUMES EXISTENCE
                with suppress_exception:
                    if (
                        t0.exists.field != None
                        and t0.exists.field == t1.term.items()[0][0]
                    ):
                        terms[i0] = MATCH_ALL
                        continue
                # IDENTICAL CAN BE REMOVED
                with suppress_exception:
                    if t0 == t1:
                        terms[i0] = MATCH_ALL
                        continue
                # MERGE range FILTER WITH SAME FIELD
                if i0 > i1:
                    continue  # SAME, IGNORE
                with suppress_exception:
                    f0, tt0 = t0.range.items()[0]
                    f1, tt1 = t1.range.items()[0]
                    if f0 == f1:
                        set_default(terms[i0].range[literal_field(f1)], tt1)
                        terms[i1] = MATCH_ALL
            # rebuild the conjunction: recurse into sub-clauses and flatten
            # any nested bool.filter into this level
            output = []
            for a in terms:
                if is_container(a):
                    from mo_logs import Log
                    Log.error("and clause is not allowed a list inside a list")
                a_ = _normalize(a)
                if a_ is not a:
                    isDiff = True
                    a = a_
                if a == MATCH_ALL:
                    isDiff = True
                    continue
                if a == MATCH_NONE:
                    # anything AND nothing == nothing
                    return MATCH_NONE
                if a.bool.filter:
                    isDiff = True
                    a.isNormal = None
                    output.extend(a.bool.filter)
                else:
                    a.isNormal = None
                    output.append(a)
            if not output:
                return MATCH_ALL
            elif len(output) == 1:
                # output[0].isNormal = True
                esfilter = output[0]
                break
            elif isDiff:
                esfilter = es_and(output)
            continue
        if esfilter.bool.should:
            # same flattening for the disjunction (OR) branch
            output = []
            for a in esfilter.bool.should:
                a_ = _normalize(a)
                if a_ is not a:
                    isDiff = True
                    a = a_
                if a.bool.should:
                    a.isNormal = None
                    isDiff = True
                    output.extend(a.bool.should)
                else:
                    a.isNormal = None
                    output.append(a)
            if not output:
                return MATCH_NONE
            elif len(output) == 1:
                esfilter = output[0]
                break
            elif isDiff:
                esfilter = wrap(es_or(output))
            continue
        if esfilter.term != None:
            if esfilter.term.keys():
                esfilter.isNormal = True
                return esfilter
            else:
                # an empty term filter constrains nothing
                return MATCH_ALL
        if esfilter.terms:
            for k, v in esfilter.terms.items():
                if len(v) > 0:
                    if OR(vv == None for vv in v):
                        # None in the value list means "field is missing"
                        rest = [vv for vv in v if vv != None]
                        if len(rest) > 0:
                            output = es_or([es_missing(k), {"terms": {k: rest}}])
                        else:
                            output = es_missing(k)
                        output.isNormal = True
                        return output
                    else:
                        esfilter.isNormal = True
                        return esfilter
            # no non-empty value list: nothing can match
            return MATCH_NONE
        if esfilter.bool.must_not:
            _sub = esfilter.bool.must_not
            sub = _normalize(_sub)
            if sub == MATCH_NONE:
                return MATCH_ALL
            elif sub == MATCH_ALL:
                return MATCH_NONE
            elif sub is not _sub:
                sub.isNormal = None
                return wrap({"bool": {"must_not": sub, "isNormal": True}})
            else:
                sub.isNormal = None
    esfilter.isNormal = True
    return esfilter
|
def function[_normalize, parameter[esfilter]]:
constant[
TODO: DO NOT USE Data, WE ARE SPENDING TOO MUCH TIME WRAPPING/UNWRAPPING
REALLY, WE JUST COLLAPSE CASCADING `and` AND `or` FILTERS
]
if <ast.BoolOp object at 0x7da1b0ab5780> begin[:]
return[name[esfilter]]
variable[isDiff] assign[=] constant[True]
while name[isDiff] begin[:]
variable[isDiff] assign[=] constant[False]
if name[esfilter].bool.filter begin[:]
variable[terms] assign[=] name[esfilter].bool.filter
for taget[tuple[[<ast.Tuple object at 0x7da1b0ab5ae0>, <ast.Tuple object at 0x7da1b0ab4850>]]] in starred[call[name[itertools].product, parameter[call[name[enumerate], parameter[name[terms]]], call[name[enumerate], parameter[name[terms]]]]]] begin[:]
if compare[name[i0] equal[==] name[i1]] begin[:]
continue
with name[suppress_exception] begin[:]
if <ast.BoolOp object at 0x7da1b0ab7640> begin[:]
call[name[terms]][name[i0]] assign[=] name[MATCH_ALL]
continue
with name[suppress_exception] begin[:]
if compare[name[t0] equal[==] name[t1]] begin[:]
call[name[terms]][name[i0]] assign[=] name[MATCH_ALL]
continue
if compare[name[i0] greater[>] name[i1]] begin[:]
continue
with name[suppress_exception] begin[:]
<ast.Tuple object at 0x7da1b0a3ce50> assign[=] call[call[name[t0].range.items, parameter[]]][constant[0]]
<ast.Tuple object at 0x7da1b0a3faf0> assign[=] call[call[name[t1].range.items, parameter[]]][constant[0]]
if compare[name[f0] equal[==] name[f1]] begin[:]
call[name[set_default], parameter[call[call[name[terms]][name[i0]].range][call[name[literal_field], parameter[name[f1]]]], name[tt1]]]
call[name[terms]][name[i1]] assign[=] name[MATCH_ALL]
variable[output] assign[=] list[[]]
for taget[name[a]] in starred[name[terms]] begin[:]
if call[name[is_container], parameter[name[a]]] begin[:]
from relative_module[mo_logs] import module[Log]
call[name[Log].error, parameter[constant[and clause is not allowed a list inside a list]]]
variable[a_] assign[=] call[name[_normalize], parameter[name[a]]]
if compare[name[a_] is_not name[a]] begin[:]
variable[isDiff] assign[=] constant[True]
variable[a] assign[=] name[a_]
if compare[name[a] equal[==] name[MATCH_ALL]] begin[:]
variable[isDiff] assign[=] constant[True]
continue
if compare[name[a] equal[==] name[MATCH_NONE]] begin[:]
return[name[MATCH_NONE]]
if name[a].bool.filter begin[:]
variable[isDiff] assign[=] constant[True]
name[a].isNormal assign[=] constant[None]
call[name[output].extend, parameter[name[a].bool.filter]]
if <ast.UnaryOp object at 0x7da1b0a04880> begin[:]
return[name[MATCH_ALL]]
continue
if name[esfilter].bool.should begin[:]
variable[output] assign[=] list[[]]
for taget[name[a]] in starred[name[esfilter].bool.should] begin[:]
variable[a_] assign[=] call[name[_normalize], parameter[name[a]]]
if compare[name[a_] is_not name[a]] begin[:]
variable[isDiff] assign[=] constant[True]
variable[a] assign[=] name[a_]
if name[a].bool.should begin[:]
name[a].isNormal assign[=] constant[None]
variable[isDiff] assign[=] constant[True]
call[name[output].extend, parameter[name[a].bool.should]]
if <ast.UnaryOp object at 0x7da1b0a066e0> begin[:]
return[name[MATCH_NONE]]
continue
if compare[name[esfilter].term not_equal[!=] constant[None]] begin[:]
if call[name[esfilter].term.keys, parameter[]] begin[:]
name[esfilter].isNormal assign[=] constant[True]
return[name[esfilter]]
if name[esfilter].terms begin[:]
for taget[tuple[[<ast.Name object at 0x7da1b0a046d0>, <ast.Name object at 0x7da1b0a046a0>]]] in starred[call[name[esfilter].terms.items, parameter[]]] begin[:]
if compare[call[name[len], parameter[name[v]]] greater[>] constant[0]] begin[:]
if call[name[OR], parameter[<ast.GeneratorExp object at 0x7da1b0a04310>]] begin[:]
variable[rest] assign[=] <ast.ListComp object at 0x7da1b0a042e0>
if compare[call[name[len], parameter[name[rest]]] greater[>] constant[0]] begin[:]
variable[output] assign[=] call[name[es_or], parameter[list[[<ast.Call object at 0x7da1b0a04850>, <ast.Dict object at 0x7da1b0a04fd0>]]]]
name[output].isNormal assign[=] constant[True]
return[name[output]]
return[name[MATCH_NONE]]
if name[esfilter].bool.must_not begin[:]
variable[_sub] assign[=] name[esfilter].bool.must_not
variable[sub] assign[=] call[name[_normalize], parameter[name[_sub]]]
if compare[name[sub] equal[==] name[MATCH_NONE]] begin[:]
return[name[MATCH_ALL]]
name[esfilter].isNormal assign[=] constant[True]
return[name[esfilter]]
|
keyword[def] identifier[_normalize] ( identifier[esfilter] ):
literal[string]
keyword[if] identifier[esfilter] == identifier[MATCH_ALL] keyword[or] identifier[esfilter] == identifier[MATCH_NONE] keyword[or] identifier[esfilter] . identifier[isNormal] :
keyword[return] identifier[esfilter]
identifier[isDiff] = keyword[True]
keyword[while] identifier[isDiff] :
identifier[isDiff] = keyword[False]
keyword[if] identifier[esfilter] . identifier[bool] . identifier[filter] :
identifier[terms] = identifier[esfilter] . identifier[bool] . identifier[filter]
keyword[for] ( identifier[i0] , identifier[t0] ),( identifier[i1] , identifier[t1] ) keyword[in] identifier[itertools] . identifier[product] (
identifier[enumerate] ( identifier[terms] ), identifier[enumerate] ( identifier[terms] )
):
keyword[if] identifier[i0] == identifier[i1] :
keyword[continue]
keyword[with] identifier[suppress_exception] :
keyword[if] (
identifier[t0] . identifier[exists] . identifier[field] != keyword[None]
keyword[and] identifier[t0] . identifier[exists] . identifier[field] == identifier[t1] . identifier[term] . identifier[items] ()[ literal[int] ][ literal[int] ]
):
identifier[terms] [ identifier[i0] ]= identifier[MATCH_ALL]
keyword[continue]
keyword[with] identifier[suppress_exception] :
keyword[if] identifier[t0] == identifier[t1] :
identifier[terms] [ identifier[i0] ]= identifier[MATCH_ALL]
keyword[continue]
keyword[if] identifier[i0] > identifier[i1] :
keyword[continue]
keyword[with] identifier[suppress_exception] :
identifier[f0] , identifier[tt0] = identifier[t0] . identifier[range] . identifier[items] ()[ literal[int] ]
identifier[f1] , identifier[tt1] = identifier[t1] . identifier[range] . identifier[items] ()[ literal[int] ]
keyword[if] identifier[f0] == identifier[f1] :
identifier[set_default] ( identifier[terms] [ identifier[i0] ]. identifier[range] [ identifier[literal_field] ( identifier[f1] )], identifier[tt1] )
identifier[terms] [ identifier[i1] ]= identifier[MATCH_ALL]
identifier[output] =[]
keyword[for] identifier[a] keyword[in] identifier[terms] :
keyword[if] identifier[is_container] ( identifier[a] ):
keyword[from] identifier[mo_logs] keyword[import] identifier[Log]
identifier[Log] . identifier[error] ( literal[string] )
identifier[a_] = identifier[_normalize] ( identifier[a] )
keyword[if] identifier[a_] keyword[is] keyword[not] identifier[a] :
identifier[isDiff] = keyword[True]
identifier[a] = identifier[a_]
keyword[if] identifier[a] == identifier[MATCH_ALL] :
identifier[isDiff] = keyword[True]
keyword[continue]
keyword[if] identifier[a] == identifier[MATCH_NONE] :
keyword[return] identifier[MATCH_NONE]
keyword[if] identifier[a] . identifier[bool] . identifier[filter] :
identifier[isDiff] = keyword[True]
identifier[a] . identifier[isNormal] = keyword[None]
identifier[output] . identifier[extend] ( identifier[a] . identifier[bool] . identifier[filter] )
keyword[else] :
identifier[a] . identifier[isNormal] = keyword[None]
identifier[output] . identifier[append] ( identifier[a] )
keyword[if] keyword[not] identifier[output] :
keyword[return] identifier[MATCH_ALL]
keyword[elif] identifier[len] ( identifier[output] )== literal[int] :
identifier[esfilter] = identifier[output] [ literal[int] ]
keyword[break]
keyword[elif] identifier[isDiff] :
identifier[esfilter] = identifier[es_and] ( identifier[output] )
keyword[continue]
keyword[if] identifier[esfilter] . identifier[bool] . identifier[should] :
identifier[output] =[]
keyword[for] identifier[a] keyword[in] identifier[esfilter] . identifier[bool] . identifier[should] :
identifier[a_] = identifier[_normalize] ( identifier[a] )
keyword[if] identifier[a_] keyword[is] keyword[not] identifier[a] :
identifier[isDiff] = keyword[True]
identifier[a] = identifier[a_]
keyword[if] identifier[a] . identifier[bool] . identifier[should] :
identifier[a] . identifier[isNormal] = keyword[None]
identifier[isDiff] = keyword[True]
identifier[output] . identifier[extend] ( identifier[a] . identifier[bool] . identifier[should] )
keyword[else] :
identifier[a] . identifier[isNormal] = keyword[None]
identifier[output] . identifier[append] ( identifier[a] )
keyword[if] keyword[not] identifier[output] :
keyword[return] identifier[MATCH_NONE]
keyword[elif] identifier[len] ( identifier[output] )== literal[int] :
identifier[esfilter] = identifier[output] [ literal[int] ]
keyword[break]
keyword[elif] identifier[isDiff] :
identifier[esfilter] = identifier[wrap] ( identifier[es_or] ( identifier[output] ))
keyword[continue]
keyword[if] identifier[esfilter] . identifier[term] != keyword[None] :
keyword[if] identifier[esfilter] . identifier[term] . identifier[keys] ():
identifier[esfilter] . identifier[isNormal] = keyword[True]
keyword[return] identifier[esfilter]
keyword[else] :
keyword[return] identifier[MATCH_ALL]
keyword[if] identifier[esfilter] . identifier[terms] :
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[esfilter] . identifier[terms] . identifier[items] ():
keyword[if] identifier[len] ( identifier[v] )> literal[int] :
keyword[if] identifier[OR] ( identifier[vv] == keyword[None] keyword[for] identifier[vv] keyword[in] identifier[v] ):
identifier[rest] =[ identifier[vv] keyword[for] identifier[vv] keyword[in] identifier[v] keyword[if] identifier[vv] != keyword[None] ]
keyword[if] identifier[len] ( identifier[rest] )> literal[int] :
identifier[output] = identifier[es_or] ([ identifier[es_missing] ( identifier[k] ),{ literal[string] :{ identifier[k] : identifier[rest] }}])
keyword[else] :
identifier[output] = identifier[es_missing] ( identifier[k] )
identifier[output] . identifier[isNormal] = keyword[True]
keyword[return] identifier[output]
keyword[else] :
identifier[esfilter] . identifier[isNormal] = keyword[True]
keyword[return] identifier[esfilter]
keyword[return] identifier[MATCH_NONE]
keyword[if] identifier[esfilter] . identifier[bool] . identifier[must_not] :
identifier[_sub] = identifier[esfilter] . identifier[bool] . identifier[must_not]
identifier[sub] = identifier[_normalize] ( identifier[_sub] )
keyword[if] identifier[sub] == identifier[MATCH_NONE] :
keyword[return] identifier[MATCH_ALL]
keyword[elif] identifier[sub] == identifier[MATCH_ALL] :
keyword[return] identifier[MATCH_NONE]
keyword[elif] identifier[sub] keyword[is] keyword[not] identifier[_sub] :
identifier[sub] . identifier[isNormal] = keyword[None]
keyword[return] identifier[wrap] ({ literal[string] :{ literal[string] : identifier[sub] , literal[string] : keyword[True] }})
keyword[else] :
identifier[sub] . identifier[isNormal] = keyword[None]
identifier[esfilter] . identifier[isNormal] = keyword[True]
keyword[return] identifier[esfilter]
|
def _normalize(esfilter):
    """
    Simplify an Elasticsearch bool-filter expression until it reaches a
    fixed point: collapse nested `and` (bool.filter) and `or` (bool.should)
    clauses, drop redundant terms, merge `range` filters on the same field,
    and reduce trivial cases to MATCH_ALL / MATCH_NONE.  Normalized results
    are marked with ``isNormal = True`` so repeated calls return at once.
    NOTE(review): mutates `esfilter` (and its sub-clauses) in place.

    TODO: DO NOT USE Data, WE ARE SPENDING TOO MUCH TIME WRAPPING/UNWRAPPING
    REALLY, WE JUST COLLAPSE CASCADING `and` AND `or` FILTERS
    """
    if esfilter == MATCH_ALL or esfilter == MATCH_NONE or esfilter.isNormal:
        return esfilter # depends on [control=['if'], data=[]]
    # Log.note("from: " + convert.value2json(esfilter))
    isDiff = True
    # keep rewriting until a full pass makes no change (fixed point)
    while isDiff:
        isDiff = False
        if esfilter.bool.filter:
            terms = esfilter.bool.filter
            # pairwise scan of the AND-ed terms; redundant ones are replaced
            # with MATCH_ALL (a no-op inside a conjunction)
            for ((i0, t0), (i1, t1)) in itertools.product(enumerate(terms), enumerate(terms)):
                if i0 == i1:
                    continue # SAME, IGNORE # depends on [control=['if'], data=[]]
                # TERM FILTER ALREADY ASSUMES EXISTENCE
                with suppress_exception:
                    if t0.exists.field != None and t0.exists.field == t1.term.items()[0][0]:
                        terms[i0] = MATCH_ALL
                        continue # depends on [control=['if'], data=[]] # depends on [control=['with'], data=[]]
                # IDENTICAL CAN BE REMOVED
                with suppress_exception:
                    if t0 == t1:
                        terms[i0] = MATCH_ALL
                        continue # depends on [control=['if'], data=[]] # depends on [control=['with'], data=[]]
                # MERGE range FILTER WITH SAME FIELD
                if i0 > i1:
                    continue # SAME, IGNORE # depends on [control=['if'], data=[]]
                with suppress_exception:
                    (f0, tt0) = t0.range.items()[0]
                    (f1, tt1) = t1.range.items()[0]
                    if f0 == f1:
                        set_default(terms[i0].range[literal_field(f1)], tt1)
                        terms[i1] = MATCH_ALL # depends on [control=['if'], data=['f1']] # depends on [control=['with'], data=[]] # depends on [control=['for'], data=[]]
            # rebuild the conjunction: recurse into sub-clauses and flatten
            # any nested bool.filter into this level
            output = []
            for a in terms:
                if is_container(a):
                    from mo_logs import Log
                    Log.error('and clause is not allowed a list inside a list') # depends on [control=['if'], data=[]]
                a_ = _normalize(a)
                if a_ is not a:
                    isDiff = True # depends on [control=['if'], data=[]]
                a = a_
                if a == MATCH_ALL:
                    isDiff = True
                    continue # depends on [control=['if'], data=[]]
                if a == MATCH_NONE:
                    # anything AND nothing == nothing
                    return MATCH_NONE # depends on [control=['if'], data=['MATCH_NONE']]
                if a.bool.filter:
                    isDiff = True
                    a.isNormal = None
                    output.extend(a.bool.filter) # depends on [control=['if'], data=[]]
                else:
                    a.isNormal = None
                    output.append(a) # depends on [control=['for'], data=['a']]
            if not output:
                return MATCH_ALL # depends on [control=['if'], data=[]]
            elif len(output) == 1:
                # output[0].isNormal = True
                esfilter = output[0]
                break # depends on [control=['if'], data=[]]
            elif isDiff:
                esfilter = es_and(output) # depends on [control=['if'], data=[]]
            continue # depends on [control=['if'], data=[]]
        if esfilter.bool.should:
            # same flattening for the disjunction (OR) branch
            output = []
            for a in esfilter.bool.should:
                a_ = _normalize(a)
                if a_ is not a:
                    isDiff = True # depends on [control=['if'], data=[]]
                a = a_
                if a.bool.should:
                    a.isNormal = None
                    isDiff = True
                    output.extend(a.bool.should) # depends on [control=['if'], data=[]]
                else:
                    a.isNormal = None
                    output.append(a) # depends on [control=['for'], data=['a']]
            if not output:
                return MATCH_NONE # depends on [control=['if'], data=[]]
            elif len(output) == 1:
                esfilter = output[0]
                break # depends on [control=['if'], data=[]]
            elif isDiff:
                esfilter = wrap(es_or(output)) # depends on [control=['if'], data=[]]
            continue # depends on [control=['if'], data=[]]
        if esfilter.term != None:
            if esfilter.term.keys():
                esfilter.isNormal = True
                return esfilter # depends on [control=['if'], data=[]]
            else:
                # an empty term filter constrains nothing
                return MATCH_ALL # depends on [control=['if'], data=[]]
        if esfilter.terms:
            for (k, v) in esfilter.terms.items():
                if len(v) > 0:
                    if OR((vv == None for vv in v)):
                        # None in the value list means "field is missing"
                        rest = [vv for vv in v if vv != None]
                        if len(rest) > 0:
                            output = es_or([es_missing(k), {'terms': {k: rest}}]) # depends on [control=['if'], data=[]]
                        else:
                            output = es_missing(k)
                        output.isNormal = True
                        return output # depends on [control=['if'], data=[]]
                    else:
                        esfilter.isNormal = True
                        return esfilter # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
            # no non-empty value list: nothing can match
            return MATCH_NONE # depends on [control=['if'], data=[]]
        if esfilter.bool.must_not:
            _sub = esfilter.bool.must_not
            sub = _normalize(_sub)
            if sub == MATCH_NONE:
                return MATCH_ALL # depends on [control=['if'], data=[]]
            elif sub == MATCH_ALL:
                return MATCH_NONE # depends on [control=['if'], data=[]]
            elif sub is not _sub:
                sub.isNormal = None
                return wrap({'bool': {'must_not': sub, 'isNormal': True}}) # depends on [control=['if'], data=['sub']]
            else:
                sub.isNormal = None # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]]
    esfilter.isNormal = True
    return esfilter
|
def _flip_feature(self, feature, parent_len):
'''Adjust a feature's location when flipping DNA.
:param feature: The feature to flip.
:type feature: coral.Feature
:param parent_len: The length of the sequence to which the feature belongs.
:type parent_len: int
'''
copy = feature.copy()
# Put on the other strand
if copy.strand == 0:
copy.strand = 1
else:
copy.strand = 0
# Adjust locations - guarantee that start is always less than end
copy.start = parent_len - copy.start
copy.stop = parent_len - copy.stop
copy.start, copy.stop = copy.stop, copy.start
return copy
|
def function[_flip_feature, parameter[self, feature, parent_len]]:
constant[Adjust a feature's location when flipping DNA.
:param feature: The feature to flip.
:type feature: coral.Feature
:param parent_len: The length of the sequence to which the feature belongs.
:type parent_len: int
]
variable[copy] assign[=] call[name[feature].copy, parameter[]]
if compare[name[copy].strand equal[==] constant[0]] begin[:]
name[copy].strand assign[=] constant[1]
name[copy].start assign[=] binary_operation[name[parent_len] - name[copy].start]
name[copy].stop assign[=] binary_operation[name[parent_len] - name[copy].stop]
<ast.Tuple object at 0x7da20c76fe80> assign[=] tuple[[<ast.Attribute object at 0x7da20c76c700>, <ast.Attribute object at 0x7da20c76d960>]]
return[name[copy]]
|
keyword[def] identifier[_flip_feature] ( identifier[self] , identifier[feature] , identifier[parent_len] ):
literal[string]
identifier[copy] = identifier[feature] . identifier[copy] ()
keyword[if] identifier[copy] . identifier[strand] == literal[int] :
identifier[copy] . identifier[strand] = literal[int]
keyword[else] :
identifier[copy] . identifier[strand] = literal[int]
identifier[copy] . identifier[start] = identifier[parent_len] - identifier[copy] . identifier[start]
identifier[copy] . identifier[stop] = identifier[parent_len] - identifier[copy] . identifier[stop]
identifier[copy] . identifier[start] , identifier[copy] . identifier[stop] = identifier[copy] . identifier[stop] , identifier[copy] . identifier[start]
keyword[return] identifier[copy]
|
def _flip_feature(self, feature, parent_len):
    """Adjust a feature's location when flipping DNA.

    Returns a strand-flipped copy whose coordinates are mirrored around the
    end of the parent sequence; the input feature is not modified.

    :param feature: The feature to flip.
    :type feature: coral.Feature
    :param parent_len: The length of the sequence to which the feature belongs.
    :type parent_len: int
    :returns: The flipped copy of ``feature``.
    """
    copy = feature.copy()
    # Put on the other strand (0 <-> 1; any non-zero strand becomes 0)
    if copy.strand == 0:
        copy.strand = 1 # depends on [control=['if'], data=[]]
    else:
        copy.strand = 0
    # Adjust locations - guarantee that start is always less than end
    copy.start = parent_len - copy.start
    copy.stop = parent_len - copy.stop
    (copy.start, copy.stop) = (copy.stop, copy.start)
    return copy
|
def focusOutEvent(self, event):
    """
    Processes when this widget loses focus.

    Emits the focus-change signals (unless signals are blocked) before
    deferring to the base-class handler.

    :param event | <QFocusEvent>
    """
    emit_allowed = not self.signalsBlocked()
    if emit_allowed:
        self.focusChanged.emit(False)
        self.focusExited.emit()
    return super(XTextEdit, self).focusOutEvent(event)
|
def function[focusOutEvent, parameter[self, event]]:
constant[
Processes when this widget loses focus.
:param event | <QFocusEvent>
]
if <ast.UnaryOp object at 0x7da18f00fbe0> begin[:]
call[name[self].focusChanged.emit, parameter[constant[False]]]
call[name[self].focusExited.emit, parameter[]]
return[call[call[name[super], parameter[name[XTextEdit], name[self]]].focusOutEvent, parameter[name[event]]]]
|
keyword[def] identifier[focusOutEvent] ( identifier[self] , identifier[event] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[signalsBlocked] ():
identifier[self] . identifier[focusChanged] . identifier[emit] ( keyword[False] )
identifier[self] . identifier[focusExited] . identifier[emit] ()
keyword[return] identifier[super] ( identifier[XTextEdit] , identifier[self] ). identifier[focusOutEvent] ( identifier[event] )
|
def focusOutEvent(self, event):
"""
Processes when this widget loses focus.
:param event | <QFocusEvent>
"""
if not self.signalsBlocked():
self.focusChanged.emit(False)
self.focusExited.emit() # depends on [control=['if'], data=[]]
return super(XTextEdit, self).focusOutEvent(event)
|
def consume(self, tokens):
    '''Have this parameter consume some tokens.

    The consumed value is stored (via ``post_consume``) for later use and
    the remaining tokens are returned for further processing.

    :param tokens: tokens available for consumption.
    :type tokens: list
    :return: the tokens left over after consuming ``self._nargs`` of them.
    :rtype: list
    '''
    # _nargs == -1 means "consume everything that is left".
    n = len(tokens) if self._nargs == -1 else self._nargs
    if n > len(tokens):
        exit('Error: Not enough arguments for "{}".'.format(self._name), True)
    try:
        # Hoist the "is a converter configured?" test out of the loop
        # instead of re-checking it for every token.
        if self._type is None:
            consumed = list(tokens[:n])
        else:
            consumed = [self._type(e) for e in tokens[:n]]
    except ValueError:
        # The exception object was previously bound but never used.
        exit('Error: Invalid type given to "{}", expected {}.'.format(
            self._name, self._type.__name__), True)
    # A single-token parameter stores the bare value, not a 1-element list.
    if n == 1 and self._nargs == 1:
        consumed = consumed[0]
    self.post_consume(consumed)
    return tokens[n:]
|
def function[consume, parameter[self, tokens]]:
constant[Have this parameter consume some tokens.
This stores the consumed value for later use and returns the
modified tokens array for further processing.
]
variable[n] assign[=] <ast.IfExp object at 0x7da2043450f0>
if compare[name[n] greater[>] call[name[len], parameter[name[tokens]]]] begin[:]
call[name[exit], parameter[call[constant[Error: Not enough arguments for "{}".].format, parameter[name[self]._name]], constant[True]]]
<ast.Try object at 0x7da204347880>
if <ast.BoolOp object at 0x7da204347040> begin[:]
variable[consumed] assign[=] call[name[consumed]][constant[0]]
call[name[self].post_consume, parameter[name[consumed]]]
return[call[name[tokens]][<ast.Slice object at 0x7da204345450>]]
|
keyword[def] identifier[consume] ( identifier[self] , identifier[tokens] ):
literal[string]
identifier[n] = identifier[len] ( identifier[tokens] ) keyword[if] identifier[self] . identifier[_nargs] ==- literal[int] keyword[else] identifier[self] . identifier[_nargs]
keyword[if] identifier[n] > identifier[len] ( identifier[tokens] ):
identifier[exit] ( literal[string] . identifier[format] ( identifier[self] . identifier[_name] ), keyword[True] )
keyword[try] :
identifier[consumed] =[ identifier[self] . identifier[_type] ( identifier[e] ) keyword[if] identifier[self] . identifier[_type] keyword[is] keyword[not] keyword[None] keyword[else] identifier[e] keyword[for] identifier[e] keyword[in] identifier[tokens] [: identifier[n] ]]
keyword[except] identifier[ValueError] keyword[as] identifier[e] :
identifier[exit] ( literal[string] . identifier[format] (
identifier[self] . identifier[_name] , identifier[self] . identifier[_type] . identifier[__name__] ), keyword[True] )
keyword[if] identifier[n] == literal[int] keyword[and] identifier[self] . identifier[_nargs] == literal[int] :
identifier[consumed] = identifier[consumed] [ literal[int] ]
identifier[self] . identifier[post_consume] ( identifier[consumed] )
keyword[return] identifier[tokens] [ identifier[n] :]
|
def consume(self, tokens):
"""Have this parameter consume some tokens.
This stores the consumed value for later use and returns the
modified tokens array for further processing.
"""
n = len(tokens) if self._nargs == -1 else self._nargs
if n > len(tokens):
exit('Error: Not enough arguments for "{}".'.format(self._name), True) # depends on [control=['if'], data=[]]
try:
consumed = [self._type(e) if self._type is not None else e for e in tokens[:n]] # depends on [control=['try'], data=[]]
except ValueError as e:
exit('Error: Invalid type given to "{}", expected {}.'.format(self._name, self._type.__name__), True) # depends on [control=['except'], data=[]]
if n == 1 and self._nargs == 1:
consumed = consumed[0] # depends on [control=['if'], data=[]]
self.post_consume(consumed)
return tokens[n:]
|
def get_bytes(self, index, size):
    """
    Extracts several bytes from a bitvector, where the index refers to the byte in a big-endian order

    :param index: the byte index at which to start extracting
    :param size: the number of bytes to extract
    :return: A BV of size ``size * 8``
    """
    # Translate the big-endian byte index into a byte position counted
    # from the least-significant end, then slice the matching bit span.
    byte_pos = self.size() // 8 - 1 - index
    high_bit = byte_pos * 8 + 7
    low_bit = (byte_pos - size + 1) * 8
    return self[high_bit:low_bit]
|
def function[get_bytes, parameter[self, index, size]]:
constant[
Extracts several bytes from a bitvector, where the index refers to the byte in a big-endian order
:param index: the byte index at which to start extracting
:param size: the number of bytes to extract
:return: A BV of size ``size * 8``
]
variable[pos] assign[=] binary_operation[binary_operation[binary_operation[call[name[self].size, parameter[]] <ast.FloorDiv object at 0x7da2590d6bc0> constant[8]] - constant[1]] - name[index]]
return[call[name[self]][<ast.Slice object at 0x7da18dc9a230>]]
|
keyword[def] identifier[get_bytes] ( identifier[self] , identifier[index] , identifier[size] ):
literal[string]
identifier[pos] = identifier[self] . identifier[size] ()// literal[int] - literal[int] - identifier[index]
keyword[return] identifier[self] [ identifier[pos] * literal[int] + literal[int] :( identifier[pos] - identifier[size] + literal[int] )* literal[int] ]
|
def get_bytes(self, index, size):
"""
Extracts several bytes from a bitvector, where the index refers to the byte in a big-endian order
:param index: the byte index at which to start extracting
:param size: the number of bytes to extract
:return: A BV of size ``size * 8``
"""
pos = self.size() // 8 - 1 - index
return self[pos * 8 + 7:(pos - size + 1) * 8]
|
def data2schema(
    _data=None, _force=False, _besteffort=True, _registry=None,
    _factory=None, _buildkwargs=None, **kwargs
):
    """Get the schema able to instantiate input data.

    The default value of the schema will be the data itself.

    Can be used as a decorator:

    ..code-block:: python

        @data2schema
        def example(): pass  # return a function schema

        @data2schema(_registry=myregistry)
        def example(): pass  # return a function schema with specific registry

    ..warning::

        returns a decorating function if _data is None.

    :param _data: data possibly generated by a schema. Required except when
        used as a decorator.
    :param bool _force: if True (False by default), create the data schema
        on the fly if it does not exist.
    :param bool _besteffort: if True (default), find a schema class able to
        validate data class by inheritance.
    :param SchemaRegistry _registry: default registry to use. Global by
        default.
    :param SchemaFactory _factory: default factory to use. Global by default.
    :param dict _buildkwargs: factory builder kwargs.
    :param kwargs: schema class kwargs.
    :return: Schema.
    :rtype: Schema.
    """
    if _data is None:
        # Decorator-with-arguments usage: forward the options that were
        # actually passed. The old code reset every option to its default,
        # silently dropping e.g. a custom _registry.
        return lambda _data: data2schema(
            _data, _force=_force, _besteffort=_besteffort,
            _registry=_registry, _factory=_factory,
            _buildkwargs=_buildkwargs, **kwargs
        )

    result = None

    # Resolve lazy values before inspecting their type.
    fdata = _data() if isinstance(_data, DynamicValue) else _data

    datatype = type(fdata)
    content = getattr(fdata, '__dict__', {})

    if _buildkwargs:
        # Bug fix: was content.udpate(...), which raised AttributeError.
        content.update(_buildkwargs)

    schemacls = datatype2schemacls(
        _datatype=datatype, _registry=_registry, _factory=_factory,
        _force=_force, _besteffort=_besteffort, **content
    )

    if schemacls is not None:
        result = schemacls(default=_data, **kwargs)

        # Mirror any extra attributes of the data onto the schema instance.
        for attrname in dir(_data):
            if not hasattr(schemacls, attrname):
                attr = getattr(_data, attrname)

                if attr is not None:
                    setattr(result, attrname, attr)

    # NOTE(review): unreachable as written (_data is None already returned
    # above); possibly "result is None" alone was intended -- confirm
    # before changing the condition.
    if result is None and _data is None:
        result = AnySchema()

    return result
|
def function[data2schema, parameter[_data, _force, _besteffort, _registry, _factory, _buildkwargs]]:
constant[Get the schema able to instanciate input data.
The default value of schema will be data.
Can be used such as a decorator:
..code-block:: python
@data2schema
def example(): pass # return a function schema
@data2schema(_registry=myregistry)
def example(): pass # return a function schema with specific registry
..warning::
return this function id _data is None.
:param _data: data possibly generated by a schema. Required but in case of
decorator.
:param bool _force: if True (False by default), create the data schema
on the fly if it does not exist.
:param bool _besteffort: if True (default), find a schema class able to
validate data class by inheritance.
:param SchemaRegistry _registry: default registry to use. Global by
default.
:param SchemaFactory factory: default factory to use. Global by default.
:param dict _buildkwargs: factory builder kwargs.
:param kwargs: schema class kwargs.
:return: Schema.
:rtype: Schema.
]
if compare[name[_data] is constant[None]] begin[:]
return[<ast.Lambda object at 0x7da18bcc9660>]
variable[result] assign[=] constant[None]
variable[fdata] assign[=] <ast.IfExp object at 0x7da204347490>
variable[datatype] assign[=] call[name[type], parameter[name[fdata]]]
variable[content] assign[=] call[name[getattr], parameter[name[fdata], constant[__dict__], dictionary[[], []]]]
if name[_buildkwargs] begin[:]
call[name[content].udpate, parameter[name[_buildkwargs]]]
variable[schemacls] assign[=] call[name[datatype2schemacls], parameter[]]
if compare[name[schemacls] is_not constant[None]] begin[:]
variable[result] assign[=] call[name[schemacls], parameter[]]
for taget[name[attrname]] in starred[call[name[dir], parameter[name[_data]]]] begin[:]
if <ast.UnaryOp object at 0x7da18f00c340> begin[:]
variable[attr] assign[=] call[name[getattr], parameter[name[_data], name[attrname]]]
if compare[name[attr] is_not constant[None]] begin[:]
call[name[setattr], parameter[name[result], name[attrname], name[attr]]]
if <ast.BoolOp object at 0x7da204566140> begin[:]
variable[result] assign[=] call[name[AnySchema], parameter[]]
return[name[result]]
|
keyword[def] identifier[data2schema] (
identifier[_data] = keyword[None] , identifier[_force] = keyword[False] , identifier[_besteffort] = keyword[True] , identifier[_registry] = keyword[None] ,
identifier[_factory] = keyword[None] , identifier[_buildkwargs] = keyword[None] ,** identifier[kwargs]
):
literal[string]
keyword[if] identifier[_data] keyword[is] keyword[None] :
keyword[return] keyword[lambda] identifier[_data] : identifier[data2schema] (
identifier[_data] , identifier[_force] = keyword[False] , identifier[_besteffort] = keyword[True] , identifier[_registry] = keyword[None] ,
identifier[_factory] = keyword[None] , identifier[_buildkwargs] = keyword[None] ,** identifier[kwargs]
)
identifier[result] = keyword[None]
identifier[fdata] = identifier[_data] () keyword[if] identifier[isinstance] ( identifier[_data] , identifier[DynamicValue] ) keyword[else] identifier[_data]
identifier[datatype] = identifier[type] ( identifier[fdata] )
identifier[content] = identifier[getattr] ( identifier[fdata] , literal[string] ,{})
keyword[if] identifier[_buildkwargs] :
identifier[content] . identifier[udpate] ( identifier[_buildkwargs] )
identifier[schemacls] = identifier[datatype2schemacls] (
identifier[_datatype] = identifier[datatype] , identifier[_registry] = identifier[_registry] , identifier[_factory] = identifier[_factory] ,
identifier[_force] = identifier[_force] , identifier[_besteffort] = identifier[_besteffort] ,** identifier[content]
)
keyword[if] identifier[schemacls] keyword[is] keyword[not] keyword[None] :
identifier[result] = identifier[schemacls] ( identifier[default] = identifier[_data] ,** identifier[kwargs] )
keyword[for] identifier[attrname] keyword[in] identifier[dir] ( identifier[_data] ):
keyword[if] keyword[not] identifier[hasattr] ( identifier[schemacls] , identifier[attrname] ):
identifier[attr] = identifier[getattr] ( identifier[_data] , identifier[attrname] )
keyword[if] identifier[attr] keyword[is] keyword[not] keyword[None] :
identifier[setattr] ( identifier[result] , identifier[attrname] , identifier[attr] )
keyword[if] identifier[result] keyword[is] keyword[None] keyword[and] identifier[_data] keyword[is] keyword[None] :
identifier[result] = identifier[AnySchema] ()
keyword[return] identifier[result]
|
def data2schema(_data=None, _force=False, _besteffort=True, _registry=None, _factory=None, _buildkwargs=None, **kwargs):
"""Get the schema able to instanciate input data.
The default value of schema will be data.
Can be used such as a decorator:
..code-block:: python
@data2schema
def example(): pass # return a function schema
@data2schema(_registry=myregistry)
def example(): pass # return a function schema with specific registry
..warning::
return this function id _data is None.
:param _data: data possibly generated by a schema. Required but in case of
decorator.
:param bool _force: if True (False by default), create the data schema
on the fly if it does not exist.
:param bool _besteffort: if True (default), find a schema class able to
validate data class by inheritance.
:param SchemaRegistry _registry: default registry to use. Global by
default.
:param SchemaFactory factory: default factory to use. Global by default.
:param dict _buildkwargs: factory builder kwargs.
:param kwargs: schema class kwargs.
:return: Schema.
:rtype: Schema.
"""
if _data is None:
return lambda _data: data2schema(_data, _force=False, _besteffort=True, _registry=None, _factory=None, _buildkwargs=None, **kwargs) # depends on [control=['if'], data=['_data']]
result = None
fdata = _data() if isinstance(_data, DynamicValue) else _data
datatype = type(fdata)
content = getattr(fdata, '__dict__', {})
if _buildkwargs:
content.udpate(_buildkwargs) # depends on [control=['if'], data=[]]
schemacls = datatype2schemacls(_datatype=datatype, _registry=_registry, _factory=_factory, _force=_force, _besteffort=_besteffort, **content)
if schemacls is not None:
result = schemacls(default=_data, **kwargs)
for attrname in dir(_data):
if not hasattr(schemacls, attrname):
attr = getattr(_data, attrname)
if attr is not None:
setattr(result, attrname, attr) # depends on [control=['if'], data=['attr']] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['attrname']] # depends on [control=['if'], data=['schemacls']]
if result is None and _data is None:
result = AnySchema() # depends on [control=['if'], data=[]]
return result
|
def LJMP(cpu, cs_selector, target):
    """
    We are just going to ignore the CS selector for now.
    """
    # Long jump: load CS from the selector operand and set PC to the target.
    # NOTE(review): cs_selector.read() and target.read() are each invoked
    # twice (once for logging, once for the assignment); this assumes the
    # operand reads are side-effect free -- confirm before deduplicating.
    logger.info("LJMP: Jumping to: %r:%r", cs_selector.read(), target.read())
    cpu.CS = cs_selector.read()
    cpu.PC = target.read()
|
def function[LJMP, parameter[cpu, cs_selector, target]]:
constant[
We are just going to ignore the CS selector for now.
]
call[name[logger].info, parameter[constant[LJMP: Jumping to: %r:%r], call[name[cs_selector].read, parameter[]], call[name[target].read, parameter[]]]]
name[cpu].CS assign[=] call[name[cs_selector].read, parameter[]]
name[cpu].PC assign[=] call[name[target].read, parameter[]]
|
keyword[def] identifier[LJMP] ( identifier[cpu] , identifier[cs_selector] , identifier[target] ):
literal[string]
identifier[logger] . identifier[info] ( literal[string] , identifier[cs_selector] . identifier[read] (), identifier[target] . identifier[read] ())
identifier[cpu] . identifier[CS] = identifier[cs_selector] . identifier[read] ()
identifier[cpu] . identifier[PC] = identifier[target] . identifier[read] ()
|
def LJMP(cpu, cs_selector, target):
"""
We are just going to ignore the CS selector for now.
"""
logger.info('LJMP: Jumping to: %r:%r', cs_selector.read(), target.read())
cpu.CS = cs_selector.read()
cpu.PC = target.read()
|
def _generate_examples(self, num_examples, data_path, label_path):
    """Generate MNIST examples as dicts.

    Args:
      num_examples (int): The number of example.
      data_path (str): Path to the data files
      label_path (str): Path to the labels

    Yields:
      Generator yielding the next examples
    """
    image_data = _extract_mnist_images(data_path, num_examples)
    label_data = _extract_mnist_labels(label_path, num_examples)
    # Data is shuffled automatically to distribute classes uniformly.
    for image, label in zip(image_data, label_data):
        yield {
            "image": image,
            "label": label,
        }
|
def function[_generate_examples, parameter[self, num_examples, data_path, label_path]]:
constant[Generate MNIST examples as dicts.
Args:
num_examples (int): The number of example.
data_path (str): Path to the data files
label_path (str): Path to the labels
Yields:
Generator yielding the next examples
]
variable[images] assign[=] call[name[_extract_mnist_images], parameter[name[data_path], name[num_examples]]]
variable[labels] assign[=] call[name[_extract_mnist_labels], parameter[name[label_path], name[num_examples]]]
variable[data] assign[=] call[name[list], parameter[call[name[zip], parameter[name[images], name[labels]]]]]
for taget[tuple[[<ast.Name object at 0x7da1b1e8df60>, <ast.Name object at 0x7da1b1e8c9a0>]]] in starred[name[data]] begin[:]
<ast.Yield object at 0x7da1b1e8c730>
|
keyword[def] identifier[_generate_examples] ( identifier[self] , identifier[num_examples] , identifier[data_path] , identifier[label_path] ):
literal[string]
identifier[images] = identifier[_extract_mnist_images] ( identifier[data_path] , identifier[num_examples] )
identifier[labels] = identifier[_extract_mnist_labels] ( identifier[label_path] , identifier[num_examples] )
identifier[data] = identifier[list] ( identifier[zip] ( identifier[images] , identifier[labels] ))
keyword[for] identifier[image] , identifier[label] keyword[in] identifier[data] :
keyword[yield] {
literal[string] : identifier[image] ,
literal[string] : identifier[label] ,
}
|
def _generate_examples(self, num_examples, data_path, label_path):
"""Generate MNIST examples as dicts.
Args:
num_examples (int): The number of example.
data_path (str): Path to the data files
label_path (str): Path to the labels
Yields:
Generator yielding the next examples
"""
images = _extract_mnist_images(data_path, num_examples)
labels = _extract_mnist_labels(label_path, num_examples)
data = list(zip(images, labels))
# Data is shuffled automatically to distribute classes uniformly.
for (image, label) in data:
yield {'image': image, 'label': label} # depends on [control=['for'], data=[]]
|
def _pretend_to_run(self, migration, method):
    """
    Pretend to run the migration.

    :param migration: The migration
    :type migration: orator.migrations.migration.Migration

    :param method: The method to execute
    :type method: str
    """
    self._note("")

    seen_names = []
    for query in self._get_queries(migration, method):
        migration_name = migration.__class__.__name__
        bindings = None

        if isinstance(query, tuple):
            query, bindings = query

        # Syntax-highlight the SQL for terminal display.
        query = highlight(query, SqlLexer(), CommandFormatter()).strip()
        if bindings:
            query = (query, bindings)

        # Print each migration's header only once.
        if migration_name not in seen_names:
            self._note("[<info>{}</info>]".format(migration_name))
            seen_names.append(migration_name)

        self._note(query)
|
def function[_pretend_to_run, parameter[self, migration, method]]:
constant[
Pretend to run the migration.
:param migration: The migration
:type migration: orator.migrations.migration.Migration
:param method: The method to execute
:type method: str
]
call[name[self]._note, parameter[constant[]]]
variable[names] assign[=] list[[]]
for taget[name[query]] in starred[call[name[self]._get_queries, parameter[name[migration], name[method]]]] begin[:]
variable[name] assign[=] name[migration].__class__.__name__
variable[bindings] assign[=] constant[None]
if call[name[isinstance], parameter[name[query], name[tuple]]] begin[:]
<ast.Tuple object at 0x7da18eb555a0> assign[=] name[query]
variable[query] assign[=] call[call[name[highlight], parameter[name[query], call[name[SqlLexer], parameter[]], call[name[CommandFormatter], parameter[]]]].strip, parameter[]]
if name[bindings] begin[:]
variable[query] assign[=] tuple[[<ast.Name object at 0x7da18eb558a0>, <ast.Name object at 0x7da18eb55300>]]
if compare[name[name] <ast.NotIn object at 0x7da2590d7190> name[names]] begin[:]
call[name[self]._note, parameter[call[constant[[<info>{}</info>]].format, parameter[name[name]]]]]
call[name[names].append, parameter[name[name]]]
call[name[self]._note, parameter[name[query]]]
|
keyword[def] identifier[_pretend_to_run] ( identifier[self] , identifier[migration] , identifier[method] ):
literal[string]
identifier[self] . identifier[_note] ( literal[string] )
identifier[names] =[]
keyword[for] identifier[query] keyword[in] identifier[self] . identifier[_get_queries] ( identifier[migration] , identifier[method] ):
identifier[name] = identifier[migration] . identifier[__class__] . identifier[__name__]
identifier[bindings] = keyword[None]
keyword[if] identifier[isinstance] ( identifier[query] , identifier[tuple] ):
identifier[query] , identifier[bindings] = identifier[query]
identifier[query] = identifier[highlight] ( identifier[query] , identifier[SqlLexer] (), identifier[CommandFormatter] ()). identifier[strip] ()
keyword[if] identifier[bindings] :
identifier[query] =( identifier[query] , identifier[bindings] )
keyword[if] identifier[name] keyword[not] keyword[in] identifier[names] :
identifier[self] . identifier[_note] ( literal[string] . identifier[format] ( identifier[name] ))
identifier[names] . identifier[append] ( identifier[name] )
identifier[self] . identifier[_note] ( identifier[query] )
|
def _pretend_to_run(self, migration, method):
"""
Pretend to run the migration.
:param migration: The migration
:type migration: orator.migrations.migration.Migration
:param method: The method to execute
:type method: str
"""
self._note('')
names = []
for query in self._get_queries(migration, method):
name = migration.__class__.__name__
bindings = None
if isinstance(query, tuple):
(query, bindings) = query # depends on [control=['if'], data=[]]
query = highlight(query, SqlLexer(), CommandFormatter()).strip()
if bindings:
query = (query, bindings) # depends on [control=['if'], data=[]]
if name not in names:
self._note('[<info>{}</info>]'.format(name))
names.append(name) # depends on [control=['if'], data=['name', 'names']]
self._note(query) # depends on [control=['for'], data=['query']]
|
def Run(self, args):
    """Reads a buffer on the client and sends it to the server."""
    # Make sure we limit the size of our output
    if args.length > constants.CLIENT_MAX_BUFFER_SIZE:
        raise RuntimeError("Can not read buffers this large.")

    try:
        fd = vfs.VFSOpen(args.pathspec, progress_callback=self.Progress)
        fd.Seek(args.offset)
        offset = fd.Tell()
        data = fd.Read(args.length)
    except (IOError, OSError) as error:
        self.SetStatus(rdf_flows.GrrStatus.ReturnedStatus.IOERROR, error)
        return

    # Now return the data to the server
    buffer_reference = rdf_client.BufferReference(
        offset=offset, data=data, length=len(data), pathspec=fd.pathspec)
    self.SendReply(buffer_reference)
|
def function[Run, parameter[self, args]]:
constant[Reads a buffer on the client and sends it to the server.]
if compare[name[args].length greater[>] name[constants].CLIENT_MAX_BUFFER_SIZE] begin[:]
<ast.Raise object at 0x7da1b1b876a0>
<ast.Try object at 0x7da1b1b87d00>
call[name[self].SendReply, parameter[call[name[rdf_client].BufferReference, parameter[]]]]
|
keyword[def] identifier[Run] ( identifier[self] , identifier[args] ):
literal[string]
keyword[if] identifier[args] . identifier[length] > identifier[constants] . identifier[CLIENT_MAX_BUFFER_SIZE] :
keyword[raise] identifier[RuntimeError] ( literal[string] )
keyword[try] :
identifier[fd] = identifier[vfs] . identifier[VFSOpen] ( identifier[args] . identifier[pathspec] , identifier[progress_callback] = identifier[self] . identifier[Progress] )
identifier[fd] . identifier[Seek] ( identifier[args] . identifier[offset] )
identifier[offset] = identifier[fd] . identifier[Tell] ()
identifier[data] = identifier[fd] . identifier[Read] ( identifier[args] . identifier[length] )
keyword[except] ( identifier[IOError] , identifier[OSError] ) keyword[as] identifier[e] :
identifier[self] . identifier[SetStatus] ( identifier[rdf_flows] . identifier[GrrStatus] . identifier[ReturnedStatus] . identifier[IOERROR] , identifier[e] )
keyword[return]
identifier[self] . identifier[SendReply] (
identifier[rdf_client] . identifier[BufferReference] (
identifier[offset] = identifier[offset] , identifier[data] = identifier[data] , identifier[length] = identifier[len] ( identifier[data] ), identifier[pathspec] = identifier[fd] . identifier[pathspec] ))
|
def Run(self, args):
"""Reads a buffer on the client and sends it to the server."""
# Make sure we limit the size of our output
if args.length > constants.CLIENT_MAX_BUFFER_SIZE:
raise RuntimeError('Can not read buffers this large.') # depends on [control=['if'], data=[]]
try:
fd = vfs.VFSOpen(args.pathspec, progress_callback=self.Progress)
fd.Seek(args.offset)
offset = fd.Tell()
data = fd.Read(args.length) # depends on [control=['try'], data=[]]
except (IOError, OSError) as e:
self.SetStatus(rdf_flows.GrrStatus.ReturnedStatus.IOERROR, e)
return # depends on [control=['except'], data=['e']]
# Now return the data to the server
self.SendReply(rdf_client.BufferReference(offset=offset, data=data, length=len(data), pathspec=fd.pathspec))
|
def get_midi_data(self):
    """Collect and return the raw, binary MIDI data from the tracks."""
    # Skip tracks that hold no data; concatenate the rest after the header.
    chunks = [trk.get_midi_data() for trk in self.tracks
              if trk.track_data != '']
    return self.header() + ''.join(chunks)
|
def function[get_midi_data, parameter[self]]:
constant[Collect and return the raw, binary MIDI data from the tracks.]
variable[tracks] assign[=] <ast.ListComp object at 0x7da1b26add50>
return[binary_operation[call[name[self].header, parameter[]] + call[constant[].join, parameter[name[tracks]]]]]
|
keyword[def] identifier[get_midi_data] ( identifier[self] ):
literal[string]
identifier[tracks] =[ identifier[t] . identifier[get_midi_data] () keyword[for] identifier[t] keyword[in] identifier[self] . identifier[tracks] keyword[if] identifier[t] . identifier[track_data] != literal[string] ]
keyword[return] identifier[self] . identifier[header] ()+ literal[string] . identifier[join] ( identifier[tracks] )
|
def get_midi_data(self):
"""Collect and return the raw, binary MIDI data from the tracks."""
tracks = [t.get_midi_data() for t in self.tracks if t.track_data != '']
return self.header() + ''.join(tracks)
|
def _get_encoder_data_shapes(self, bucket_key: int, batch_size: int) -> List[mx.io.DataDesc]:
    """
    Returns data shapes of the encoder module.

    :param bucket_key: Maximum input length.
    :return: List of data descriptions.
    """
    # One source input: (batch, time, factors) in batch-major layout.
    source_shape = (batch_size, bucket_key, self.num_source_factors)
    source_desc = mx.io.DataDesc(name=C.SOURCE_NAME,
                                 shape=source_shape,
                                 layout=C.BATCH_MAJOR)
    return [source_desc]
|
def function[_get_encoder_data_shapes, parameter[self, bucket_key, batch_size]]:
constant[
Returns data shapes of the encoder module.
:param bucket_key: Maximum input length.
:return: List of data descriptions.
]
return[list[[<ast.Call object at 0x7da1b1d23ca0>]]]
|
keyword[def] identifier[_get_encoder_data_shapes] ( identifier[self] , identifier[bucket_key] : identifier[int] , identifier[batch_size] : identifier[int] )-> identifier[List] [ identifier[mx] . identifier[io] . identifier[DataDesc] ]:
literal[string]
keyword[return] [ identifier[mx] . identifier[io] . identifier[DataDesc] ( identifier[name] = identifier[C] . identifier[SOURCE_NAME] ,
identifier[shape] =( identifier[batch_size] , identifier[bucket_key] , identifier[self] . identifier[num_source_factors] ),
identifier[layout] = identifier[C] . identifier[BATCH_MAJOR] )]
|
def _get_encoder_data_shapes(self, bucket_key: int, batch_size: int) -> List[mx.io.DataDesc]:
"""
Returns data shapes of the encoder module.
:param bucket_key: Maximum input length.
:return: List of data descriptions.
"""
return [mx.io.DataDesc(name=C.SOURCE_NAME, shape=(batch_size, bucket_key, self.num_source_factors), layout=C.BATCH_MAJOR)]
|
def ask_question(self, field_name, pattern=NAME_PATTERN, is_required=False,
                 password=False):
    """Ask a question and get the input values.

    This method will validate the input values.

    Args:
        field_name(string): Field name used to ask for input value.
        pattern(tuple): Pattern to validate the input value.
        is_required(bool): Boolean value if the input value is required.
        password(bool): Boolean value to get input password with mask.

    Returns:
        input_value(string): Input value validated.
    """
    input_value = ""
    question = ("Insert the field using the pattern below:"
                "\n{}\n{}: ".format(pattern[0], field_name))
    while not input_value:
        input_value = getpass(question) if password else input(question)

        # Optional field left blank: accept the empty value as-is.
        if not (input_value or is_required):
            break

        if password:
            confirm_password = getpass('Confirm your password: ')
            if confirm_password != input_value:
                print("Password does not match")
                input_value = ""
                # Re-prompt immediately; previously the cleared value fell
                # through to pattern validation and printed a second,
                # misleading error message.
                continue

        if not self.valid_attribute(input_value, pattern[1]):
            error_message = "The content must fit the pattern: {}\n"
            print(error_message.format(pattern[0]))
            input_value = ""

    return input_value
|
def function[ask_question, parameter[self, field_name, pattern, is_required, password]]:
constant[Ask a question and get the input values.
This method will validade the input values.
Args:
field_name(string): Field name used to ask for input value.
pattern(tuple): Pattern to validate the input value.
is_required(bool): Boolean value if the input value is required.
password(bool): Boolean value to get input password with mask.
Returns:
input_value(string): Input value validated.
]
variable[input_value] assign[=] constant[]
variable[question] assign[=] call[constant[Insert the field using the pattern below:
{}
{}: ].format, parameter[call[name[pattern]][constant[0]], name[field_name]]]
while <ast.UnaryOp object at 0x7da18dc99780> begin[:]
variable[input_value] assign[=] <ast.IfExp object at 0x7da18dc99750>
if <ast.UnaryOp object at 0x7da18dc9b460> begin[:]
break
if name[password] begin[:]
variable[confirm_password] assign[=] call[name[getpass], parameter[constant[Confirm your password: ]]]
if compare[name[confirm_password] not_equal[!=] name[input_value]] begin[:]
call[name[print], parameter[constant[Password does not match]]]
variable[input_value] assign[=] constant[]
if <ast.UnaryOp object at 0x7da18dc9aaa0> begin[:]
variable[error_message] assign[=] constant[The content must fit the pattern: {}
]
call[name[print], parameter[call[name[error_message].format, parameter[call[name[pattern]][constant[0]]]]]]
variable[input_value] assign[=] constant[]
return[name[input_value]]
|
keyword[def] identifier[ask_question] ( identifier[self] , identifier[field_name] , identifier[pattern] = identifier[NAME_PATTERN] , identifier[is_required] = keyword[False] ,
identifier[password] = keyword[False] ):
literal[string]
identifier[input_value] = literal[string]
identifier[question] =( literal[string]
literal[string] . identifier[format] ( identifier[pattern] [ literal[int] ], identifier[field_name] ))
keyword[while] keyword[not] identifier[input_value] :
identifier[input_value] = identifier[getpass] ( identifier[question] ) keyword[if] identifier[password] keyword[else] identifier[input] ( identifier[question] )
keyword[if] keyword[not] ( identifier[input_value] keyword[or] identifier[is_required] ):
keyword[break]
keyword[if] identifier[password] :
identifier[confirm_password] = identifier[getpass] ( literal[string] )
keyword[if] identifier[confirm_password] != identifier[input_value] :
identifier[print] ( literal[string] )
identifier[input_value] = literal[string]
keyword[if] keyword[not] identifier[self] . identifier[valid_attribute] ( identifier[input_value] , identifier[pattern] [ literal[int] ]):
identifier[error_message] = literal[string]
identifier[print] ( identifier[error_message] . identifier[format] ( identifier[pattern] [ literal[int] ]))
identifier[input_value] = literal[string]
keyword[return] identifier[input_value]
|
def ask_question(self, field_name, pattern=NAME_PATTERN, is_required=False, password=False):
"""Ask a question and get the input values.
This method will validade the input values.
Args:
field_name(string): Field name used to ask for input value.
pattern(tuple): Pattern to validate the input value.
is_required(bool): Boolean value if the input value is required.
password(bool): Boolean value to get input password with mask.
Returns:
input_value(string): Input value validated.
"""
input_value = ''
question = 'Insert the field using the pattern below:\n{}\n{}: '.format(pattern[0], field_name)
while not input_value:
input_value = getpass(question) if password else input(question)
if not (input_value or is_required):
break # depends on [control=['if'], data=[]]
if password:
confirm_password = getpass('Confirm your password: ')
if confirm_password != input_value:
print('Password does not match')
input_value = '' # depends on [control=['if'], data=['input_value']] # depends on [control=['if'], data=[]]
if not self.valid_attribute(input_value, pattern[1]):
error_message = 'The content must fit the pattern: {}\n'
print(error_message.format(pattern[0]))
input_value = '' # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]]
return input_value
|
def find_same_between_dicts(dict1, dict2):
    """Find what two dicts have in common: shared items, keys and values.

    Only hashable values are supported, because the comparison is done
    through set intersections.

    :param dict1: (dict) first dict to compare
    :param dict2: (dict) second dict to compare
    :return: Same_info namedtuple with three set-valued fields --
        ``item``  the (key, value) pairs present in both dicts,
        ``key``   the keys present in both dicts,
        ``value`` the values present in both dicts.

    Example::

        res = find_same_between_dicts({'x': 1, 'y': 2, 'z': 3},
                                      {'w': 10, 'x': 1, 'y': 4})
        res.item   # {('x', 1)}
        res.key    # {'x', 'y'}
        res.value  # {1}
    """
    Same_info = namedtuple('Same_info', ['item', 'key', 'value'])
    shared_items = set(dict1.items()).intersection(dict2.items())
    shared_keys = dict1.keys() & dict2.keys()
    shared_values = set(dict1.values()).intersection(dict2.values())
    return Same_info(shared_items, shared_keys, shared_values)
|
def function[find_same_between_dicts, parameter[dict1, dict2]]:
constant[
查找两个字典中的相同点,包括键、值、项,仅支持 hashable 对象
:param:
* dict1: (dict) 比较的字典 1
* dict2: (dict) 比较的字典 2
:return:
* dup_info: (namedtuple) 返回两个字典中相同的信息组成的具名元组
举例如下::
print('--- find_same_between_dicts demo---')
dict1 = {'x':1, 'y':2, 'z':3}
dict2 = {'w':10, 'x':1, 'y':2}
res = find_same_between_dicts(dict1, dict2)
print(res.item)
print(res.key)
print(res.value)
print('---')
执行结果::
--- find_same_between_dicts demo---
set([('x', 1)])
{'x', 'y'}
{1}
---
]
variable[Same_info] assign[=] call[name[namedtuple], parameter[constant[Same_info], list[[<ast.Constant object at 0x7da18ede42e0>, <ast.Constant object at 0x7da18ede7910>, <ast.Constant object at 0x7da18ede4100>]]]]
variable[same_info] assign[=] call[name[Same_info], parameter[binary_operation[call[name[set], parameter[call[name[dict1].items, parameter[]]]] <ast.BitAnd object at 0x7da2590d6b60> call[name[set], parameter[call[name[dict2].items, parameter[]]]]], binary_operation[call[name[set], parameter[call[name[dict1].keys, parameter[]]]] <ast.BitAnd object at 0x7da2590d6b60> call[name[set], parameter[call[name[dict2].keys, parameter[]]]]], binary_operation[call[name[set], parameter[call[name[dict1].values, parameter[]]]] <ast.BitAnd object at 0x7da2590d6b60> call[name[set], parameter[call[name[dict2].values, parameter[]]]]]]]
return[name[same_info]]
|
keyword[def] identifier[find_same_between_dicts] ( identifier[dict1] , identifier[dict2] ):
literal[string]
identifier[Same_info] = identifier[namedtuple] ( literal[string] ,[ literal[string] , literal[string] , literal[string] ])
identifier[same_info] = identifier[Same_info] ( identifier[set] ( identifier[dict1] . identifier[items] ())& identifier[set] ( identifier[dict2] . identifier[items] ()),
identifier[set] ( identifier[dict1] . identifier[keys] ())& identifier[set] ( identifier[dict2] . identifier[keys] ()),
identifier[set] ( identifier[dict1] . identifier[values] ())& identifier[set] ( identifier[dict2] . identifier[values] ()))
keyword[return] identifier[same_info]
|
def find_same_between_dicts(dict1, dict2):
    """
    Find what two dicts have in common: shared items, keys and values.
    Only hashable objects are supported, because set intersection is used.
    :param:
        * dict1: (dict) first dict to compare
        * dict2: (dict) second dict to compare
    :return:
        * same_info: (namedtuple) sets of the shared items, keys and values,
          exposed as the ``item``, ``key`` and ``value`` fields.
    Example::
        res = find_same_between_dicts({'x': 1, 'y': 2, 'z': 3},
                                      {'w': 10, 'x': 1, 'y': 2})
        res.item   # {('x', 1), ('y', 2)}
        res.key    # {'x', 'y'}
        res.value  # {1, 2}
    """
    # NOTE(review): the trailing "# depends on ..." comments elsewhere in this
    # file look machine-generated; the original docstring's example output
    # (set([('x', 1)]) and {1}) was wrong for its own inputs -- ('y', 2) and 2
    # are shared as well -- so it has been corrected above.
    Same_info = namedtuple('Same_info', ['item', 'key', 'value'])
    same_info = Same_info(set(dict1.items()) & set(dict2.items()), set(dict1.keys()) & set(dict2.keys()), set(dict1.values()) & set(dict2.values()))
    return same_info
|
def timestamp(num_params, p_levels, k_choices, N):
    """Return a uniform file-name suffix encoding the parameter values.

    The suffix embeds the four parameters and the current local time,
    e.g. ``_v4_l8_k10_N100_010119120000.txt``.

    Args:
        num_params: value rendered in the ``v`` field.
        p_levels: value rendered in the ``l`` field.
        k_choices: value rendered in the ``k`` field.
        N: value rendered in the ``N`` field.

    Returns:
        str: the formatted suffix, including the ``.txt`` extension.
    """
    # Bug fix: the original format string ("_v%s_l%s_gs%s_k%s_N%s_%s.txt")
    # had six %s placeholders but only five values were supplied, so every
    # call raised "TypeError: not enough arguments for format string".
    # The unmatched "gs" field had no corresponding parameter and has been
    # dropped.
    now = dt.strftime(dt.now(), "%d%m%y%H%M%S")
    return "_v%s_l%s_k%s_N%s_%s.txt" % (num_params, p_levels, k_choices, N, now)
|
def function[timestamp, parameter[num_params, p_levels, k_choices, N]]:
constant[
Returns a uniform timestamp with parameter values for file identification
]
variable[string] assign[=] binary_operation[constant[_v%s_l%s_gs%s_k%s_N%s_%s.txt] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b1634cd0>, <ast.Name object at 0x7da1b1634ca0>, <ast.Name object at 0x7da1b1634be0>, <ast.Name object at 0x7da1b1634f40>, <ast.Call object at 0x7da1b1634fa0>]]]
return[name[string]]
|
keyword[def] identifier[timestamp] ( identifier[num_params] , identifier[p_levels] , identifier[k_choices] , identifier[N] ):
literal[string]
identifier[string] = literal[string] %( identifier[num_params] ,
identifier[p_levels] ,
identifier[k_choices] ,
identifier[N] ,
identifier[dt] . identifier[strftime] ( identifier[dt] . identifier[now] (),
literal[string] ))
keyword[return] identifier[string]
|
def timestamp(num_params, p_levels, k_choices, N):
    """
    Returns a uniform timestamp with parameter values for file identification
    """
    # BUG(review): the format string has six %s placeholders but only five
    # values are supplied, so this raises "TypeError: not enough arguments
    # for format string" on every call.  The "gs" field has no matching
    # parameter.
    string = '_v%s_l%s_gs%s_k%s_N%s_%s.txt' % (num_params, p_levels, k_choices, N, dt.strftime(dt.now(), '%d%m%y%H%M%S'))
    return string
def remove_entry(self, entry):
    """Remove the given entry from its group and from this database.

    :param entry: The Entry object to remove.
    :type entry: :class:`keepassdb.model.Entry`
    :raises TypeError: if ``entry`` is not an ``Entry``.
    :raises ValueError: if ``entry`` is not bound to this database.
    """
    # Guard clauses: reject wrong types and unbound entries up front.
    if not isinstance(entry, Entry):
        raise TypeError("entry param must be of type Entry.")
    if entry not in self.entries:
        raise ValueError("Entry doesn't exist / not bound to this datbase.")
    # Detach from the owning group first, then from the database itself.
    entry.group.entries.remove(entry)
    self.entries.remove(entry)
|
def function[remove_entry, parameter[self, entry]]:
constant[
Remove specified entry.
:param entry: The Entry object to remove.
:type entry: :class:`keepassdb.model.Entry`
]
if <ast.UnaryOp object at 0x7da18fe939a0> begin[:]
<ast.Raise object at 0x7da18fe92500>
if <ast.UnaryOp object at 0x7da18fe93100> begin[:]
<ast.Raise object at 0x7da18fe91ed0>
call[name[entry].group.entries.remove, parameter[name[entry]]]
call[name[self].entries.remove, parameter[name[entry]]]
|
keyword[def] identifier[remove_entry] ( identifier[self] , identifier[entry] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[entry] , identifier[Entry] ):
keyword[raise] identifier[TypeError] ( literal[string] )
keyword[if] keyword[not] identifier[entry] keyword[in] identifier[self] . identifier[entries] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[entry] . identifier[group] . identifier[entries] . identifier[remove] ( identifier[entry] )
identifier[self] . identifier[entries] . identifier[remove] ( identifier[entry] )
|
def remove_entry(self, entry):
    """
    Remove specified entry.
    :param entry: The Entry object to remove.
    :type entry: :class:`keepassdb.model.Entry`
    """
    # NOTE(review): the "# depends on ..." trailers look machine-generated;
    # verify before relying on them.
    # Reject anything that is not an Entry.
    if not isinstance(entry, Entry):
        raise TypeError('entry param must be of type Entry.') # depends on [control=['if'], data=[]]
    # The entry must already be bound to this database.
    if not entry in self.entries:
        raise ValueError("Entry doesn't exist / not bound to this datbase.") # depends on [control=['if'], data=[]]
    # Detach from its group, then from the database's entry list.
    entry.group.entries.remove(entry)
    self.entries.remove(entry)
def reply(self):
    """Reply to the selected status.

    Opens a compose modal pre-filled with the selected status' content
    warning, posts the reply through the API, inserts the new status at
    the top of the local timeline and redraws the UI.  Aborts early (with
    a footer message where appropriate) when the user is not logged in,
    cancels the modal, or submits empty content.
    """
    status = self.get_selected_status()
    app, user = self.app, self.user
    # Posting requires an authenticated session.
    if not app or not user:
        self.footer.draw_message("You must be logged in to reply", Color.RED)
        return
    # Pre-fill the content warning with the original status' spoiler text.
    # NOTE(review): 'spoiler_text' is joined with newlines here, so it is
    # presumably a list of lines -- confirm against parse_status.
    compose_modal = ComposeModal(self.stdscr, default_cw='\n'.join(status['spoiler_text']) or None, resize_callback=self.on_resize)
    content, cw = compose_modal.loop()
    self.full_redraw()
    # None means the user cancelled the modal.
    if content is None:
        return
    elif len(content) == 0:
        self.footer.draw_message("Status must contain content", Color.RED)
        return
    self.footer.draw_message("Submitting reply...", Color.YELLOW)
    # A reply is marked sensitive exactly when a content warning is set.
    response = api.post_status(app, user, content, spoiler_text=cw, sensitive=cw is not None, in_reply_to_id=status['id'])
    status = parse_status(response)
    # Show the new reply at the top of the timeline; the current selection
    # shifted down by one index, so follow it.
    self.statuses.insert(0, status)
    self.selected += 1
    self.left.draw_statuses(self.statuses, self.selected)
    self.footer.draw_message("✓ Reply posted", Color.GREEN)
|
def function[reply, parameter[self]]:
constant[Reply to the selected status]
variable[status] assign[=] call[name[self].get_selected_status, parameter[]]
<ast.Tuple object at 0x7da1b1669150> assign[=] tuple[[<ast.Attribute object at 0x7da1b17dc340>, <ast.Attribute object at 0x7da1b17dde70>]]
if <ast.BoolOp object at 0x7da1b17dc8b0> begin[:]
call[name[self].footer.draw_message, parameter[constant[You must be logged in to reply], name[Color].RED]]
return[None]
variable[compose_modal] assign[=] call[name[ComposeModal], parameter[name[self].stdscr]]
<ast.Tuple object at 0x7da1b17dd810> assign[=] call[name[compose_modal].loop, parameter[]]
call[name[self].full_redraw, parameter[]]
if compare[name[content] is constant[None]] begin[:]
return[None]
call[name[self].footer.draw_message, parameter[constant[Submitting reply...], name[Color].YELLOW]]
variable[response] assign[=] call[name[api].post_status, parameter[name[app], name[user], name[content]]]
variable[status] assign[=] call[name[parse_status], parameter[name[response]]]
call[name[self].statuses.insert, parameter[constant[0], name[status]]]
<ast.AugAssign object at 0x7da1b17dea70>
call[name[self].left.draw_statuses, parameter[name[self].statuses, name[self].selected]]
call[name[self].footer.draw_message, parameter[constant[✓ Reply posted], name[Color].GREEN]]
|
keyword[def] identifier[reply] ( identifier[self] ):
literal[string]
identifier[status] = identifier[self] . identifier[get_selected_status] ()
identifier[app] , identifier[user] = identifier[self] . identifier[app] , identifier[self] . identifier[user]
keyword[if] keyword[not] identifier[app] keyword[or] keyword[not] identifier[user] :
identifier[self] . identifier[footer] . identifier[draw_message] ( literal[string] , identifier[Color] . identifier[RED] )
keyword[return]
identifier[compose_modal] = identifier[ComposeModal] ( identifier[self] . identifier[stdscr] , identifier[default_cw] = literal[string] . identifier[join] ( identifier[status] [ literal[string] ]) keyword[or] keyword[None] , identifier[resize_callback] = identifier[self] . identifier[on_resize] )
identifier[content] , identifier[cw] = identifier[compose_modal] . identifier[loop] ()
identifier[self] . identifier[full_redraw] ()
keyword[if] identifier[content] keyword[is] keyword[None] :
keyword[return]
keyword[elif] identifier[len] ( identifier[content] )== literal[int] :
identifier[self] . identifier[footer] . identifier[draw_message] ( literal[string] , identifier[Color] . identifier[RED] )
keyword[return]
identifier[self] . identifier[footer] . identifier[draw_message] ( literal[string] , identifier[Color] . identifier[YELLOW] )
identifier[response] = identifier[api] . identifier[post_status] ( identifier[app] , identifier[user] , identifier[content] , identifier[spoiler_text] = identifier[cw] , identifier[sensitive] = identifier[cw] keyword[is] keyword[not] keyword[None] , identifier[in_reply_to_id] = identifier[status] [ literal[string] ])
identifier[status] = identifier[parse_status] ( identifier[response] )
identifier[self] . identifier[statuses] . identifier[insert] ( literal[int] , identifier[status] )
identifier[self] . identifier[selected] += literal[int]
identifier[self] . identifier[left] . identifier[draw_statuses] ( identifier[self] . identifier[statuses] , identifier[self] . identifier[selected] )
identifier[self] . identifier[footer] . identifier[draw_message] ( literal[string] , identifier[Color] . identifier[GREEN] )
|
def reply(self):
    """Reply to the selected status"""
    # NOTE(review): the "# depends on ..." trailers look machine-generated;
    # verify before relying on them.
    status = self.get_selected_status()
    (app, user) = (self.app, self.user)
    # Posting requires an authenticated session.
    if not app or not user:
        self.footer.draw_message('You must be logged in to reply', Color.RED)
        return # depends on [control=['if'], data=[]]
    # Pre-fill the content warning with the original status' spoiler text.
    compose_modal = ComposeModal(self.stdscr, default_cw='\n'.join(status['spoiler_text']) or None, resize_callback=self.on_resize)
    (content, cw) = compose_modal.loop()
    self.full_redraw()
    # None means the user cancelled the modal; empty content is rejected.
    if content is None:
        return # depends on [control=['if'], data=[]]
    elif len(content) == 0:
        self.footer.draw_message('Status must contain content', Color.RED)
        return # depends on [control=['if'], data=[]]
    self.footer.draw_message('Submitting reply...', Color.YELLOW)
    # A reply is marked sensitive exactly when a content warning is set.
    response = api.post_status(app, user, content, spoiler_text=cw, sensitive=cw is not None, in_reply_to_id=status['id'])
    status = parse_status(response)
    # Insert the new reply at the top and follow the shifted selection.
    self.statuses.insert(0, status)
    self.selected += 1
    self.left.draw_statuses(self.statuses, self.selected)
    self.footer.draw_message('✓ Reply posted', Color.GREEN)
def moment(arr, moment=1, axis=0, **kwargs):
    """Compute the Nth central moment about the mean via scipy.stats.moment.

    For a 2D spectrogram (as returned by ibmseti.dsp.raw_to_spectrogram,
    rows = time samples, columns = frequency bins):

    * ``axis=0`` computes the Nth moment of each frequency bin (the
      calculation runs *along* the time axis);
    * ``axis=1`` computes the Nth moment of each time bin (the calculation
      runs *along* the frequency axis).

    For a 1D array (e.g. the spectrogram projected onto one axis), use
    ``axis=0``.

    Example (2nd moment)::

        x = array([[ 1.,  3.,  6., 10.],
                   [ 0.,  5.,  6.,  8.],
                   [ 2.,  6.,  9., 12.]])
        moment(x, moment=2, axis=0)  # -> 4 per-column moments
        moment(x, moment=2, axis=1)  # -> 3 per-row moments

    Any extra keyword arguments are forwarded to ``scipy.stats.moment``.
    """
    # Thin wrapper: merge the named arguments into kwargs and delegate.
    kwargs.update(moment=moment, axis=axis)
    return scipy.stats.moment(arr, **kwargs)
|
def function[moment, parameter[arr, moment, axis]]:
constant[
Uses the scipy.stats.moment to calculate the Nth central
moment about the mean.
If `arr` is a 2D spectrogram returned by
ibmseti.dsp.raw_to_spectrogram(data), where each row
of the `arr` is a power spectrum at a particular time,
this function, then the Nth moment along each axis
will be computed.
If axis = 0, then Nth moment for the data in each
frequency bin will be computed. (The calculation is done
*along* the 0th axis, which is the time axis.)
If axis = 1, then Nth moment for the data in each
time bin will be computed. (The calculation is done
*along* the 1st axis, which is the frequency axis.)
For example, consider the 2nd moment:
//each column is a frequency bin
x = array([[ 1., 3., 6., 10.], //each row is a time sample
[ 0., 5., 6., 8.],
[ 2., 6., 9., 12.]])
ibmseti.features.mement(x, moment=2, axis=0) //the returned array is of size 4, the number of columns / frequency bins.
>>> array([ 0.66666667, 1.55555556, 2., 2.66666667])
ibmseti.features.mement(x, moment=2, axis=1) //the returned array is of size 3, the number of rows / time bins.
>>> array([ 11.5 , 8.6875, 13.6875])
If `arr` is a 1D array, such as what you'd get if you projected
the spectrogram onto the time or frequency axis, then you must
use axis=0.
]
return[call[name[scipy].stats.moment, parameter[name[arr]]]]
|
keyword[def] identifier[moment] ( identifier[arr] , identifier[moment] = literal[int] , identifier[axis] = literal[int] ,** identifier[kwargs] ):
literal[string]
keyword[return] identifier[scipy] . identifier[stats] . identifier[moment] ( identifier[arr] , identifier[moment] = identifier[moment] , identifier[axis] = identifier[axis] ,** identifier[kwargs] )
|
def moment(arr, moment=1, axis=0, **kwargs):
    """
    Uses scipy.stats.moment to calculate the Nth central moment about
    the mean.

    If ``arr`` is a 2D spectrogram (as returned by
    ibmseti.dsp.raw_to_spectrogram(data), rows = time, columns = frequency):

    * ``axis=0`` computes the Nth moment of each frequency bin (the
      calculation runs *along* the time axis);
    * ``axis=1`` computes the Nth moment of each time bin (the calculation
      runs *along* the frequency axis).

    For a 1D array (e.g. the spectrogram projected onto one axis), use
    ``axis=0``.

    Example (2nd moment)::

        x = array([[ 1.,  3.,  6., 10.],
                   [ 0.,  5.,  6.,  8.],
                   [ 2.,  6.,  9., 12.]])
        moment(x, moment=2, axis=0)
        # -> array([0.66666667, 1.55555556, 2., 2.66666667])  (4 columns)
        moment(x, moment=2, axis=1)
        # -> array([11.5, 8.6875, 13.6875])  (3 rows)
    """
    # Thin wrapper: everything, including extra keyword arguments, is
    # forwarded to scipy.stats.moment.
    return scipy.stats.moment(arr, moment=moment, axis=axis, **kwargs)
|
def _make_verb_helper(verb_func, add_groups=False):
    """Build a helper that attaches expressions to a verb, then delegates.

    The returned wrapper builds the expressions for the verb (storing them
    on ``verb.expressions``) and then hands the verb to ``verb_func``,
    which should be one of the core verb functions that evaluate
    expressions.

    Parameters
    ----------
    verb_func : function
        Core verb function called after the expressions are attached.
    add_groups : bool
        When True, the columns created by the expressions are also stored
        on ``verb.groups``.

    Returns
    -------
    function
        The wrapping helper verb, carrying ``verb_func``'s metadata via
        :func:`functools.wraps`.
    """
    @wraps(verb_func)
    def prepared(verb):
        # Attach the built expressions, then optionally record the new
        # columns as the verb's groups before delegating.
        expressions, created_columns = build_expressions(verb)
        verb.expressions = expressions
        if add_groups:
            verb.groups = created_columns
        return verb_func(verb)
    return prepared
|
def function[_make_verb_helper, parameter[verb_func, add_groups]]:
constant[
Create function that prepares verb for the verb function
The functions created add expressions to be evaluated to
the verb, then call the core verb function
Parameters
----------
verb_func : function
Core verb function. This is the function called after
expressions created and added to the verb. The core
function should be one of those that implement verbs that
evaluate expressions.
add_groups : bool
If True, a groups attribute is added to the verb. The
groups are the columns created after evaluating the
expressions.
Returns
-------
out : function
A function that implements a helper verb.
]
def function[_verb_func, parameter[verb]]:
<ast.Tuple object at 0x7da20c6ab5e0> assign[=] call[name[build_expressions], parameter[name[verb]]]
if name[add_groups] begin[:]
name[verb].groups assign[=] name[new_columns]
return[call[name[verb_func], parameter[name[verb]]]]
return[name[_verb_func]]
|
keyword[def] identifier[_make_verb_helper] ( identifier[verb_func] , identifier[add_groups] = keyword[False] ):
literal[string]
@ identifier[wraps] ( identifier[verb_func] )
keyword[def] identifier[_verb_func] ( identifier[verb] ):
identifier[verb] . identifier[expressions] , identifier[new_columns] = identifier[build_expressions] ( identifier[verb] )
keyword[if] identifier[add_groups] :
identifier[verb] . identifier[groups] = identifier[new_columns]
keyword[return] identifier[verb_func] ( identifier[verb] )
keyword[return] identifier[_verb_func]
|
def _make_verb_helper(verb_func, add_groups=False):
    """
    Create function that prepares verb for the verb function
    The functions created add expressions to be evaluated to
    the verb, then call the core verb function
    Parameters
    ----------
    verb_func : function
        Core verb function. This is the function called after
        expressions created and added to the verb. The core
        function should be one of those that implement verbs that
        evaluate expressions.
    add_groups : bool
        If True, a groups attribute is added to the verb. The
        groups are the columns created after evaluating the
        expressions.
    Returns
    -------
    out : function
        A function that implements a helper verb.
    """
    # NOTE(review): the "# depends on ..." trailers look machine-generated;
    # verify before relying on them.
    @wraps(verb_func)
    def _verb_func(verb):
        # Attach the built expressions, optionally record the new columns
        # as groups, then delegate to the core verb function.
        (verb.expressions, new_columns) = build_expressions(verb)
        if add_groups:
            verb.groups = new_columns # depends on [control=['if'], data=[]]
        return verb_func(verb)
    return _verb_func
|
def fstab(jail):
    '''
    Display contents of a fstab(5) file defined in specified
    jail's configuration. If no file is defined, return False.
    CLI Example:
    .. code-block:: bash
        salt '*' jail.fstab <jail name>
    '''
    ret = []
    config = show_config(jail)
    # The fstab path may appear under either of two configuration keys;
    # 'fstab' wins when both are present.
    if 'fstab' in config:
        c_fstab = config['fstab']
    elif 'mount.fstab' in config:
        c_fstab = config['mount.fstab']
    # Guarding on the same keys again keeps c_fstab defined on this path.
    if 'fstab' in config or 'mount.fstab' in config:
        # Only attempt to parse the file when it is readable.
        if os.access(c_fstab, os.R_OK):
            with salt.utils.files.fopen(c_fstab, 'r') as _fp:
                for line in _fp:
                    line = salt.utils.stringutils.to_unicode(line)
                    line = line.strip()
                    # Skip blank lines and comments.
                    if not line:
                        continue
                    if line.startswith('#'):
                        continue
                    # A valid fstab(5) entry has exactly six fields.
                    try:
                        device, mpoint, fstype, opts, dump, pas_ = line.split()
                    except ValueError:
                        # Gracefully continue on invalid lines
                        continue
                    ret.append({
                        'device': device,
                        'mountpoint': mpoint,
                        'fstype': fstype,
                        'options': opts,
                        'dump': dump,
                        'pass': pas_
                    })
    # No fstab configured, unreadable file, or no valid entries: return False.
    if not ret:
        ret = False
    return ret
|
def function[fstab, parameter[jail]]:
constant[
Display contents of a fstab(5) file defined in specified
jail's configuration. If no file is defined, return False.
CLI Example:
.. code-block:: bash
salt '*' jail.fstab <jail name>
]
variable[ret] assign[=] list[[]]
variable[config] assign[=] call[name[show_config], parameter[name[jail]]]
if compare[constant[fstab] in name[config]] begin[:]
variable[c_fstab] assign[=] call[name[config]][constant[fstab]]
if <ast.BoolOp object at 0x7da18dc989d0> begin[:]
if call[name[os].access, parameter[name[c_fstab], name[os].R_OK]] begin[:]
with call[name[salt].utils.files.fopen, parameter[name[c_fstab], constant[r]]] begin[:]
for taget[name[line]] in starred[name[_fp]] begin[:]
variable[line] assign[=] call[name[salt].utils.stringutils.to_unicode, parameter[name[line]]]
variable[line] assign[=] call[name[line].strip, parameter[]]
if <ast.UnaryOp object at 0x7da18dc9a2c0> begin[:]
continue
if call[name[line].startswith, parameter[constant[#]]] begin[:]
continue
<ast.Try object at 0x7da18dc9aa70>
call[name[ret].append, parameter[dictionary[[<ast.Constant object at 0x7da18dc9a020>, <ast.Constant object at 0x7da18dc99720>, <ast.Constant object at 0x7da18dc9bd90>, <ast.Constant object at 0x7da18dc98e80>, <ast.Constant object at 0x7da2043459f0>, <ast.Constant object at 0x7da204345c90>], [<ast.Name object at 0x7da204347f70>, <ast.Name object at 0x7da204345660>, <ast.Name object at 0x7da204345390>, <ast.Name object at 0x7da2043444f0>, <ast.Name object at 0x7da204346ce0>, <ast.Name object at 0x7da204347730>]]]]
if <ast.UnaryOp object at 0x7da207f9ab00> begin[:]
variable[ret] assign[=] constant[False]
return[name[ret]]
|
keyword[def] identifier[fstab] ( identifier[jail] ):
literal[string]
identifier[ret] =[]
identifier[config] = identifier[show_config] ( identifier[jail] )
keyword[if] literal[string] keyword[in] identifier[config] :
identifier[c_fstab] = identifier[config] [ literal[string] ]
keyword[elif] literal[string] keyword[in] identifier[config] :
identifier[c_fstab] = identifier[config] [ literal[string] ]
keyword[if] literal[string] keyword[in] identifier[config] keyword[or] literal[string] keyword[in] identifier[config] :
keyword[if] identifier[os] . identifier[access] ( identifier[c_fstab] , identifier[os] . identifier[R_OK] ):
keyword[with] identifier[salt] . identifier[utils] . identifier[files] . identifier[fopen] ( identifier[c_fstab] , literal[string] ) keyword[as] identifier[_fp] :
keyword[for] identifier[line] keyword[in] identifier[_fp] :
identifier[line] = identifier[salt] . identifier[utils] . identifier[stringutils] . identifier[to_unicode] ( identifier[line] )
identifier[line] = identifier[line] . identifier[strip] ()
keyword[if] keyword[not] identifier[line] :
keyword[continue]
keyword[if] identifier[line] . identifier[startswith] ( literal[string] ):
keyword[continue]
keyword[try] :
identifier[device] , identifier[mpoint] , identifier[fstype] , identifier[opts] , identifier[dump] , identifier[pas_] = identifier[line] . identifier[split] ()
keyword[except] identifier[ValueError] :
keyword[continue]
identifier[ret] . identifier[append] ({
literal[string] : identifier[device] ,
literal[string] : identifier[mpoint] ,
literal[string] : identifier[fstype] ,
literal[string] : identifier[opts] ,
literal[string] : identifier[dump] ,
literal[string] : identifier[pas_]
})
keyword[if] keyword[not] identifier[ret] :
identifier[ret] = keyword[False]
keyword[return] identifier[ret]
|
def fstab(jail):
    """
    Display contents of a fstab(5) file defined in specified
    jail's configuration. If no file is defined, return False.
    CLI Example:
    .. code-block:: bash
        salt '*' jail.fstab <jail name>
    """
    # NOTE(review): the "# depends on ..." trailers look machine-generated;
    # verify before relying on them.
    ret = []
    config = show_config(jail)
    # The fstab path may appear under either of two configuration keys.
    if 'fstab' in config:
        c_fstab = config['fstab'] # depends on [control=['if'], data=['config']]
    elif 'mount.fstab' in config:
        c_fstab = config['mount.fstab'] # depends on [control=['if'], data=['config']]
    if 'fstab' in config or 'mount.fstab' in config:
        # Only attempt to parse the file when it is readable.
        if os.access(c_fstab, os.R_OK):
            with salt.utils.files.fopen(c_fstab, 'r') as _fp:
                for line in _fp:
                    line = salt.utils.stringutils.to_unicode(line)
                    line = line.strip()
                    # Skip blank lines and comments.
                    if not line:
                        continue # depends on [control=['if'], data=[]]
                    if line.startswith('#'):
                        continue # depends on [control=['if'], data=[]]
                    # A valid fstab(5) entry has exactly six fields.
                    try:
                        (device, mpoint, fstype, opts, dump, pas_) = line.split() # depends on [control=['try'], data=[]]
                    except ValueError:
                        # Gracefully continue on invalid lines
                        continue # depends on [control=['except'], data=[]]
                    ret.append({'device': device, 'mountpoint': mpoint, 'fstype': fstype, 'options': opts, 'dump': dump, 'pass': pas_}) # depends on [control=['for'], data=['line']] # depends on [control=['with'], data=['_fp']] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
    # Nothing configured/parsed: signal with False.
    if not ret:
        ret = False # depends on [control=['if'], data=[]]
    return ret
|
def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: CallSummaryContext for this CallSummaryInstance
:rtype: twilio.rest.insights.v1.summary.CallSummaryContext
"""
if self._context is None:
self._context = CallSummaryContext(self._version, call_sid=self._solution['call_sid'], )
return self._context
|
def function[_proxy, parameter[self]]:
constant[
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: CallSummaryContext for this CallSummaryInstance
:rtype: twilio.rest.insights.v1.summary.CallSummaryContext
]
if compare[name[self]._context is constant[None]] begin[:]
name[self]._context assign[=] call[name[CallSummaryContext], parameter[name[self]._version]]
return[name[self]._context]
|
keyword[def] identifier[_proxy] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[_context] keyword[is] keyword[None] :
identifier[self] . identifier[_context] = identifier[CallSummaryContext] ( identifier[self] . identifier[_version] , identifier[call_sid] = identifier[self] . identifier[_solution] [ literal[string] ],)
keyword[return] identifier[self] . identifier[_context]
|
def _proxy(self):
    """
    Generate an instance context for the instance, the context is capable of
    performing various actions. All instance actions are proxied to the context
    :returns: CallSummaryContext for this CallSummaryInstance
    :rtype: twilio.rest.insights.v1.summary.CallSummaryContext
    """
    # NOTE(review): the "# depends on ..." trailer looks machine-generated.
    # Lazily create the context on first access, then reuse it.
    if self._context is None:
        self._context = CallSummaryContext(self._version, call_sid=self._solution['call_sid']) # depends on [control=['if'], data=[]]
    return self._context
|
def _add_submenu(self, parent, data):
    """Adds items in data as a submenu to parent.

    ``data`` is a sequence of item descriptors.  Each descriptor starts
    with its kind: ``wx.Menu`` (a nested submenu, handled recursively),
    ``wx.MenuItem`` (a leaf entry bound to a message type), or the string
    ``"Separator"``.  Anything else raises ``TypeError``.
    """
    for item in data:
        obj = item[0]
        if obj == wx.Menu:
            # Submenu descriptor: (wx.Menu, name, child items[, menu id]).
            try:
                __, menuname, submenu, menu_id = item
            except ValueError:
                # No explicit id supplied: fall back to -1.
                __, menuname, submenu = item
                menu_id = -1
            menu = obj()
            self._add_submenu(menu, submenu)
            # Top-level menus go on the menubar; nested ones on the parent.
            if parent == self:
                self.menubar.Append(menu, menuname)
            else:
                parent.AppendMenu(menu_id, menuname, menu)
        elif obj == wx.MenuItem:
            # Leaf descriptor: item[1] is (msgtype, shortcut, helptext[, id])
            # and item[2], when present, is the wx item style.
            try:
                msgtype, shortcut, helptext, item_id = item[1]
            except ValueError:
                msgtype, shortcut, helptext = item[1]
                item_id = wx.NewId()
            try:
                style = item[2]
            except IndexError:
                style = wx.ITEM_NORMAL
            menuitem = obj(parent, item_id, shortcut, helptext, style)
            # Keep lookup tables for shortcut- and id-based access.
            self.shortcut2menuitem[shortcut] = menuitem
            self.id2menuitem[item_id] = menuitem
            parent.AppendItem(menuitem)
            # Route the menu event for this id to its message type.
            self.ids_msgs[item_id] = msgtype
            self.parent.Bind(wx.EVT_MENU, self.OnMenu, id=item_id)
        elif obj == "Separator":
            parent.AppendSeparator()
        else:
            raise TypeError(_("Menu item unknown"))
|
def function[_add_submenu, parameter[self, parent, data]]:
constant[Adds items in data as a submenu to parent]
for taget[name[item]] in starred[name[data]] begin[:]
variable[obj] assign[=] call[name[item]][constant[0]]
if compare[name[obj] equal[==] name[wx].Menu] begin[:]
<ast.Try object at 0x7da1b1629780>
variable[menu] assign[=] call[name[obj], parameter[]]
call[name[self]._add_submenu, parameter[name[menu], name[submenu]]]
if compare[name[parent] equal[==] name[self]] begin[:]
call[name[self].menubar.Append, parameter[name[menu], name[menuname]]]
|
keyword[def] identifier[_add_submenu] ( identifier[self] , identifier[parent] , identifier[data] ):
literal[string]
keyword[for] identifier[item] keyword[in] identifier[data] :
identifier[obj] = identifier[item] [ literal[int] ]
keyword[if] identifier[obj] == identifier[wx] . identifier[Menu] :
keyword[try] :
identifier[__] , identifier[menuname] , identifier[submenu] , identifier[menu_id] = identifier[item]
keyword[except] identifier[ValueError] :
identifier[__] , identifier[menuname] , identifier[submenu] = identifier[item]
identifier[menu_id] =- literal[int]
identifier[menu] = identifier[obj] ()
identifier[self] . identifier[_add_submenu] ( identifier[menu] , identifier[submenu] )
keyword[if] identifier[parent] == identifier[self] :
identifier[self] . identifier[menubar] . identifier[Append] ( identifier[menu] , identifier[menuname] )
keyword[else] :
identifier[parent] . identifier[AppendMenu] ( identifier[menu_id] , identifier[menuname] , identifier[menu] )
keyword[elif] identifier[obj] == identifier[wx] . identifier[MenuItem] :
keyword[try] :
identifier[msgtype] , identifier[shortcut] , identifier[helptext] , identifier[item_id] = identifier[item] [ literal[int] ]
keyword[except] identifier[ValueError] :
identifier[msgtype] , identifier[shortcut] , identifier[helptext] = identifier[item] [ literal[int] ]
identifier[item_id] = identifier[wx] . identifier[NewId] ()
keyword[try] :
identifier[style] = identifier[item] [ literal[int] ]
keyword[except] identifier[IndexError] :
identifier[style] = identifier[wx] . identifier[ITEM_NORMAL]
identifier[menuitem] = identifier[obj] ( identifier[parent] , identifier[item_id] , identifier[shortcut] , identifier[helptext] , identifier[style] )
identifier[self] . identifier[shortcut2menuitem] [ identifier[shortcut] ]= identifier[menuitem]
identifier[self] . identifier[id2menuitem] [ identifier[item_id] ]= identifier[menuitem]
identifier[parent] . identifier[AppendItem] ( identifier[menuitem] )
identifier[self] . identifier[ids_msgs] [ identifier[item_id] ]= identifier[msgtype]
identifier[self] . identifier[parent] . identifier[Bind] ( identifier[wx] . identifier[EVT_MENU] , identifier[self] . identifier[OnMenu] , identifier[id] = identifier[item_id] )
keyword[elif] identifier[obj] == literal[string] :
identifier[parent] . identifier[AppendSeparator] ()
keyword[else] :
keyword[raise] identifier[TypeError] ( identifier[_] ( literal[string] ))
|
def _add_submenu(self, parent, data):
    """Adds items in data as a submenu to parent"""
    # NOTE(review): the "# depends on ..." trailers look machine-generated;
    # verify before relying on them.
    for item in data:
        obj = item[0]
        # Descriptor kind is the first element: wx.Menu (recursive submenu),
        # wx.MenuItem (leaf entry) or the string "Separator".
        if obj == wx.Menu:
            try:
                (__, menuname, submenu, menu_id) = item # depends on [control=['try'], data=[]]
            except ValueError:
                # No explicit id supplied: fall back to -1.
                (__, menuname, submenu) = item
                menu_id = -1 # depends on [control=['except'], data=[]]
            menu = obj()
            self._add_submenu(menu, submenu)
            # Top-level menus go on the menubar; nested ones on the parent.
            if parent == self:
                self.menubar.Append(menu, menuname) # depends on [control=['if'], data=['self']]
            else:
                parent.AppendMenu(menu_id, menuname, menu) # depends on [control=['if'], data=['obj']]
        elif obj == wx.MenuItem:
            try:
                (msgtype, shortcut, helptext, item_id) = item[1] # depends on [control=['try'], data=[]]
            except ValueError:
                (msgtype, shortcut, helptext) = item[1]
                item_id = wx.NewId() # depends on [control=['except'], data=[]]
            try:
                style = item[2] # depends on [control=['try'], data=[]]
            except IndexError:
                style = wx.ITEM_NORMAL # depends on [control=['except'], data=[]]
            menuitem = obj(parent, item_id, shortcut, helptext, style)
            # Keep lookup tables for shortcut- and id-based access, and
            # route the menu event for this id to its message type.
            self.shortcut2menuitem[shortcut] = menuitem
            self.id2menuitem[item_id] = menuitem
            parent.AppendItem(menuitem)
            self.ids_msgs[item_id] = msgtype
            self.parent.Bind(wx.EVT_MENU, self.OnMenu, id=item_id) # depends on [control=['if'], data=['obj']]
        elif obj == 'Separator':
            parent.AppendSeparator() # depends on [control=['if'], data=[]]
        else:
            raise TypeError(_('Menu item unknown')) # depends on [control=['for'], data=['item']]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.