code stringlengths 75 104k | code_sememe stringlengths 47 309k | token_type stringlengths 215 214k | code_dependency stringlengths 75 155k |
|---|---|---|---|
def apply_K(df, k):
"""Apply the geometric factors to the dataset and compute (apparent)
resistivities/conductivities
"""
if 'k' not in df.columns:
df['k'] = k
if 'rho_a' not in df.columns:
df['rho_a'] = df['r'] * df['k']
if 'sigma_a' not in df.columns:
df['sigma_a'] = 1.0 / df['rho_a']
if 'Zt' in df.columns:
df['rho_a_complex'] = df['Zt'] * df['k']
return df | def function[apply_K, parameter[df, k]]:
constant[Apply the geometric factors to the dataset and compute (apparent)
resistivities/conductivities
]
if compare[constant[k] <ast.NotIn object at 0x7da2590d7190> name[df].columns] begin[:]
call[name[df]][constant[k]] assign[=] name[k]
if compare[constant[rho_a] <ast.NotIn object at 0x7da2590d7190> name[df].columns] begin[:]
call[name[df]][constant[rho_a]] assign[=] binary_operation[call[name[df]][constant[r]] * call[name[df]][constant[k]]]
if compare[constant[sigma_a] <ast.NotIn object at 0x7da2590d7190> name[df].columns] begin[:]
call[name[df]][constant[sigma_a]] assign[=] binary_operation[constant[1.0] / call[name[df]][constant[rho_a]]]
if compare[constant[Zt] in name[df].columns] begin[:]
call[name[df]][constant[rho_a_complex]] assign[=] binary_operation[call[name[df]][constant[Zt]] * call[name[df]][constant[k]]]
return[name[df]] | keyword[def] identifier[apply_K] ( identifier[df] , identifier[k] ):
literal[string]
keyword[if] literal[string] keyword[not] keyword[in] identifier[df] . identifier[columns] :
identifier[df] [ literal[string] ]= identifier[k]
keyword[if] literal[string] keyword[not] keyword[in] identifier[df] . identifier[columns] :
identifier[df] [ literal[string] ]= identifier[df] [ literal[string] ]* identifier[df] [ literal[string] ]
keyword[if] literal[string] keyword[not] keyword[in] identifier[df] . identifier[columns] :
identifier[df] [ literal[string] ]= literal[int] / identifier[df] [ literal[string] ]
keyword[if] literal[string] keyword[in] identifier[df] . identifier[columns] :
identifier[df] [ literal[string] ]= identifier[df] [ literal[string] ]* identifier[df] [ literal[string] ]
keyword[return] identifier[df] | def apply_K(df, k):
"""Apply the geometric factors to the dataset and compute (apparent)
resistivities/conductivities
"""
if 'k' not in df.columns:
df['k'] = k # depends on [control=['if'], data=[]]
if 'rho_a' not in df.columns:
df['rho_a'] = df['r'] * df['k'] # depends on [control=['if'], data=[]]
if 'sigma_a' not in df.columns:
df['sigma_a'] = 1.0 / df['rho_a'] # depends on [control=['if'], data=[]]
if 'Zt' in df.columns:
df['rho_a_complex'] = df['Zt'] * df['k'] # depends on [control=['if'], data=[]]
return df |
def find_exts(top, exts, exclude_dirs=None, include_dirs=None,
match_mode="basename"):
"""
Find all files with the extension listed in `exts` that are located within
the directory tree rooted at `top` (including top itself, but excluding
'.' and '..')
Args:
top (str): Root directory
exts (str or list of strings): List of extensions.
exclude_dirs (str): Wildcards used to exclude particular directories.
Can be concatenated via `|`
include_dirs (str): Wildcards used to select particular directories.
`include_dirs` and `exclude_dirs` are mutually exclusive
match_mode (str): "basename" if match should be done on the basename.
"abspath" for absolute path.
Returns:
(list of str): Absolute paths of the files.
Examples::
# Find all pdf and ps files starting from the current directory.
find_exts(".", ("pdf", "ps"))
# Find all pdf files, exclude hidden directories and dirs whose name
# starts with `_`
find_exts(".", "pdf", exclude_dirs="_*|.*")
# Find all ps files, in the directories whose basename starts with
# output.
find_exts(".", "ps", include_dirs="output*"))
"""
from monty.string import list_strings
exts = list_strings(exts)
# Handle file!
if os.path.isfile(top):
return [os.path.abspath(top)] if any(top.endswith(ext)
for ext in exts) else []
# Build shell-style wildcards.
from monty.fnmatch import WildCard
if exclude_dirs is not None:
exclude_dirs = WildCard(exclude_dirs)
if include_dirs is not None:
include_dirs = WildCard(include_dirs)
mangle = dict(
basename=os.path.basename,
abspath=os.path.abspath)[match_mode]
# Assume directory
paths = []
for dirpath, dirnames, filenames in os.walk(top):
dirpath = os.path.abspath(dirpath)
if exclude_dirs and exclude_dirs.match(mangle(dirpath)):
continue
if include_dirs and not include_dirs.match(mangle(dirpath)):
continue
for filename in filenames:
if any(filename.endswith(ext) for ext in exts):
paths.append(os.path.join(dirpath, filename))
return paths | def function[find_exts, parameter[top, exts, exclude_dirs, include_dirs, match_mode]]:
constant[
Find all files with the extension listed in `exts` that are located within
the directory tree rooted at `top` (including top itself, but excluding
'.' and '..')
Args:
top (str): Root directory
exts (str or list of strings): List of extensions.
exclude_dirs (str): Wildcards used to exclude particular directories.
Can be concatenated via `|`
include_dirs (str): Wildcards used to select particular directories.
`include_dirs` and `exclude_dirs` are mutually exclusive
match_mode (str): "basename" if match should be done on the basename.
"abspath" for absolute path.
Returns:
(list of str): Absolute paths of the files.
Examples::
# Find all pdf and ps files starting from the current directory.
find_exts(".", ("pdf", "ps"))
# Find all pdf files, exclude hidden directories and dirs whose name
# starts with `_`
find_exts(".", "pdf", exclude_dirs="_*|.*")
# Find all ps files, in the directories whose basename starts with
# output.
find_exts(".", "ps", include_dirs="output*"))
]
from relative_module[monty.string] import module[list_strings]
variable[exts] assign[=] call[name[list_strings], parameter[name[exts]]]
if call[name[os].path.isfile, parameter[name[top]]] begin[:]
return[<ast.IfExp object at 0x7da1b13b78b0>]
from relative_module[monty.fnmatch] import module[WildCard]
if compare[name[exclude_dirs] is_not constant[None]] begin[:]
variable[exclude_dirs] assign[=] call[name[WildCard], parameter[name[exclude_dirs]]]
if compare[name[include_dirs] is_not constant[None]] begin[:]
variable[include_dirs] assign[=] call[name[WildCard], parameter[name[include_dirs]]]
variable[mangle] assign[=] call[call[name[dict], parameter[]]][name[match_mode]]
variable[paths] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1b1396e60>, <ast.Name object at 0x7da1b1395ea0>, <ast.Name object at 0x7da1b1394fa0>]]] in starred[call[name[os].walk, parameter[name[top]]]] begin[:]
variable[dirpath] assign[=] call[name[os].path.abspath, parameter[name[dirpath]]]
if <ast.BoolOp object at 0x7da1b1504490> begin[:]
continue
if <ast.BoolOp object at 0x7da1b1504370> begin[:]
continue
for taget[name[filename]] in starred[name[filenames]] begin[:]
if call[name[any], parameter[<ast.GeneratorExp object at 0x7da1b1507ca0>]] begin[:]
call[name[paths].append, parameter[call[name[os].path.join, parameter[name[dirpath], name[filename]]]]]
return[name[paths]] | keyword[def] identifier[find_exts] ( identifier[top] , identifier[exts] , identifier[exclude_dirs] = keyword[None] , identifier[include_dirs] = keyword[None] ,
identifier[match_mode] = literal[string] ):
literal[string]
keyword[from] identifier[monty] . identifier[string] keyword[import] identifier[list_strings]
identifier[exts] = identifier[list_strings] ( identifier[exts] )
keyword[if] identifier[os] . identifier[path] . identifier[isfile] ( identifier[top] ):
keyword[return] [ identifier[os] . identifier[path] . identifier[abspath] ( identifier[top] )] keyword[if] identifier[any] ( identifier[top] . identifier[endswith] ( identifier[ext] )
keyword[for] identifier[ext] keyword[in] identifier[exts] ) keyword[else] []
keyword[from] identifier[monty] . identifier[fnmatch] keyword[import] identifier[WildCard]
keyword[if] identifier[exclude_dirs] keyword[is] keyword[not] keyword[None] :
identifier[exclude_dirs] = identifier[WildCard] ( identifier[exclude_dirs] )
keyword[if] identifier[include_dirs] keyword[is] keyword[not] keyword[None] :
identifier[include_dirs] = identifier[WildCard] ( identifier[include_dirs] )
identifier[mangle] = identifier[dict] (
identifier[basename] = identifier[os] . identifier[path] . identifier[basename] ,
identifier[abspath] = identifier[os] . identifier[path] . identifier[abspath] )[ identifier[match_mode] ]
identifier[paths] =[]
keyword[for] identifier[dirpath] , identifier[dirnames] , identifier[filenames] keyword[in] identifier[os] . identifier[walk] ( identifier[top] ):
identifier[dirpath] = identifier[os] . identifier[path] . identifier[abspath] ( identifier[dirpath] )
keyword[if] identifier[exclude_dirs] keyword[and] identifier[exclude_dirs] . identifier[match] ( identifier[mangle] ( identifier[dirpath] )):
keyword[continue]
keyword[if] identifier[include_dirs] keyword[and] keyword[not] identifier[include_dirs] . identifier[match] ( identifier[mangle] ( identifier[dirpath] )):
keyword[continue]
keyword[for] identifier[filename] keyword[in] identifier[filenames] :
keyword[if] identifier[any] ( identifier[filename] . identifier[endswith] ( identifier[ext] ) keyword[for] identifier[ext] keyword[in] identifier[exts] ):
identifier[paths] . identifier[append] ( identifier[os] . identifier[path] . identifier[join] ( identifier[dirpath] , identifier[filename] ))
keyword[return] identifier[paths] | def find_exts(top, exts, exclude_dirs=None, include_dirs=None, match_mode='basename'):
"""
Find all files with the extension listed in `exts` that are located within
the directory tree rooted at `top` (including top itself, but excluding
'.' and '..')
Args:
top (str): Root directory
exts (str or list of strings): List of extensions.
exclude_dirs (str): Wildcards used to exclude particular directories.
Can be concatenated via `|`
include_dirs (str): Wildcards used to select particular directories.
`include_dirs` and `exclude_dirs` are mutually exclusive
match_mode (str): "basename" if match should be done on the basename.
"abspath" for absolute path.
Returns:
(list of str): Absolute paths of the files.
Examples::
# Find all pdf and ps files starting from the current directory.
find_exts(".", ("pdf", "ps"))
# Find all pdf files, exclude hidden directories and dirs whose name
# starts with `_`
find_exts(".", "pdf", exclude_dirs="_*|.*")
# Find all ps files, in the directories whose basename starts with
# output.
find_exts(".", "ps", include_dirs="output*"))
"""
from monty.string import list_strings
exts = list_strings(exts)
# Handle file!
if os.path.isfile(top):
return [os.path.abspath(top)] if any((top.endswith(ext) for ext in exts)) else [] # depends on [control=['if'], data=[]]
# Build shell-style wildcards.
from monty.fnmatch import WildCard
if exclude_dirs is not None:
exclude_dirs = WildCard(exclude_dirs) # depends on [control=['if'], data=['exclude_dirs']]
if include_dirs is not None:
include_dirs = WildCard(include_dirs) # depends on [control=['if'], data=['include_dirs']]
mangle = dict(basename=os.path.basename, abspath=os.path.abspath)[match_mode]
# Assume directory
paths = []
for (dirpath, dirnames, filenames) in os.walk(top):
dirpath = os.path.abspath(dirpath)
if exclude_dirs and exclude_dirs.match(mangle(dirpath)):
continue # depends on [control=['if'], data=[]]
if include_dirs and (not include_dirs.match(mangle(dirpath))):
continue # depends on [control=['if'], data=[]]
for filename in filenames:
if any((filename.endswith(ext) for ext in exts)):
paths.append(os.path.join(dirpath, filename)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['filename']] # depends on [control=['for'], data=[]]
return paths |
def _get_ID2position_mapper(self, position_mapper):
'''
Defines a position parser that is used
to map between sample IDs and positions.
Parameters
--------------
{_bases_position_mapper}
TODO: Fix the name to work with more than 26 letters
of the alphabet.
'''
def num_parser(x, order):
i, j = unravel_index(int(x - 1), self.shape, order=order)
return (self.row_labels[i], self.col_labels[j])
if hasattr(position_mapper, '__call__'):
mapper = position_mapper
elif isinstance(position_mapper, collections.Mapping):
mapper = lambda x: position_mapper[x]
elif position_mapper == 'name':
mapper = lambda x: (x[0], int(x[1:]))
elif position_mapper in ('row_first_enumerator', 'number'):
mapper = lambda x: num_parser(x, 'F')
elif position_mapper == 'col_first_enumerator':
mapper = lambda x: num_parser(x, 'C')
else:
msg = '"{}" is not a known key_to_position_parser.'.format(position_mapper)
raise ValueError(msg)
return mapper | def function[_get_ID2position_mapper, parameter[self, position_mapper]]:
constant[
Defines a position parser that is used
to map between sample IDs and positions.
Parameters
--------------
{_bases_position_mapper}
TODO: Fix the name to work with more than 26 letters
of the alphabet.
]
def function[num_parser, parameter[x, order]]:
<ast.Tuple object at 0x7da20e9b3eb0> assign[=] call[name[unravel_index], parameter[call[name[int], parameter[binary_operation[name[x] - constant[1]]]], name[self].shape]]
return[tuple[[<ast.Subscript object at 0x7da20e9b0ac0>, <ast.Subscript object at 0x7da20e9b0f40>]]]
if call[name[hasattr], parameter[name[position_mapper], constant[__call__]]] begin[:]
variable[mapper] assign[=] name[position_mapper]
return[name[mapper]] | keyword[def] identifier[_get_ID2position_mapper] ( identifier[self] , identifier[position_mapper] ):
literal[string]
keyword[def] identifier[num_parser] ( identifier[x] , identifier[order] ):
identifier[i] , identifier[j] = identifier[unravel_index] ( identifier[int] ( identifier[x] - literal[int] ), identifier[self] . identifier[shape] , identifier[order] = identifier[order] )
keyword[return] ( identifier[self] . identifier[row_labels] [ identifier[i] ], identifier[self] . identifier[col_labels] [ identifier[j] ])
keyword[if] identifier[hasattr] ( identifier[position_mapper] , literal[string] ):
identifier[mapper] = identifier[position_mapper]
keyword[elif] identifier[isinstance] ( identifier[position_mapper] , identifier[collections] . identifier[Mapping] ):
identifier[mapper] = keyword[lambda] identifier[x] : identifier[position_mapper] [ identifier[x] ]
keyword[elif] identifier[position_mapper] == literal[string] :
identifier[mapper] = keyword[lambda] identifier[x] :( identifier[x] [ literal[int] ], identifier[int] ( identifier[x] [ literal[int] :]))
keyword[elif] identifier[position_mapper] keyword[in] ( literal[string] , literal[string] ):
identifier[mapper] = keyword[lambda] identifier[x] : identifier[num_parser] ( identifier[x] , literal[string] )
keyword[elif] identifier[position_mapper] == literal[string] :
identifier[mapper] = keyword[lambda] identifier[x] : identifier[num_parser] ( identifier[x] , literal[string] )
keyword[else] :
identifier[msg] = literal[string] . identifier[format] ( identifier[position_mapper] )
keyword[raise] identifier[ValueError] ( identifier[msg] )
keyword[return] identifier[mapper] | def _get_ID2position_mapper(self, position_mapper):
"""
Defines a position parser that is used
to map between sample IDs and positions.
Parameters
--------------
{_bases_position_mapper}
TODO: Fix the name to work with more than 26 letters
of the alphabet.
"""
def num_parser(x, order):
(i, j) = unravel_index(int(x - 1), self.shape, order=order)
return (self.row_labels[i], self.col_labels[j])
if hasattr(position_mapper, '__call__'):
mapper = position_mapper # depends on [control=['if'], data=[]]
elif isinstance(position_mapper, collections.Mapping):
mapper = lambda x: position_mapper[x] # depends on [control=['if'], data=[]]
elif position_mapper == 'name':
mapper = lambda x: (x[0], int(x[1:])) # depends on [control=['if'], data=[]]
elif position_mapper in ('row_first_enumerator', 'number'):
mapper = lambda x: num_parser(x, 'F') # depends on [control=['if'], data=[]]
elif position_mapper == 'col_first_enumerator':
mapper = lambda x: num_parser(x, 'C') # depends on [control=['if'], data=[]]
else:
msg = '"{}" is not a known key_to_position_parser.'.format(position_mapper)
raise ValueError(msg)
return mapper |
def search(self, catalogue, ngrams, output_fh):
"""Returns `output_fh` populated with CSV results for each n-gram in
`ngrams` that occurs within labelled witnesses in `catalogue`.
If `ngrams` is empty, include all n-grams.
:param catalogue: catalogue matching filenames to labels
:type catalogue: `Catalogue`
:param ngrams: n-grams to search for
:type ngrams: `list` of `str`
:param output_fh: object to write results to
:type output_fh: file-like object
:rtype: file-like object
"""
labels = list(self._set_labels(catalogue))
label_placeholders = self._get_placeholders(labels)
if ngrams:
self._add_temporary_ngrams(ngrams)
query = constants.SELECT_SEARCH_SQL.format(label_placeholders)
else:
query = constants.SELECT_SEARCH_ALL_SQL.format(label_placeholders)
self._logger.info('Running search query')
self._logger.debug('Query: {}\nN-grams: {}'.format(
query, ', '.join(ngrams)))
self._log_query_plan(query, labels)
cursor = self._conn.execute(query, labels)
return self._csv(cursor, constants.QUERY_FIELDNAMES, output_fh) | def function[search, parameter[self, catalogue, ngrams, output_fh]]:
constant[Returns `output_fh` populated with CSV results for each n-gram in
`ngrams` that occurs within labelled witnesses in `catalogue`.
If `ngrams` is empty, include all n-grams.
:param catalogue: catalogue matching filenames to labels
:type catalogue: `Catalogue`
:param ngrams: n-grams to search for
:type ngrams: `list` of `str`
:param output_fh: object to write results to
:type output_fh: file-like object
:rtype: file-like object
]
variable[labels] assign[=] call[name[list], parameter[call[name[self]._set_labels, parameter[name[catalogue]]]]]
variable[label_placeholders] assign[=] call[name[self]._get_placeholders, parameter[name[labels]]]
if name[ngrams] begin[:]
call[name[self]._add_temporary_ngrams, parameter[name[ngrams]]]
variable[query] assign[=] call[name[constants].SELECT_SEARCH_SQL.format, parameter[name[label_placeholders]]]
call[name[self]._logger.info, parameter[constant[Running search query]]]
call[name[self]._logger.debug, parameter[call[constant[Query: {}
N-grams: {}].format, parameter[name[query], call[constant[, ].join, parameter[name[ngrams]]]]]]]
call[name[self]._log_query_plan, parameter[name[query], name[labels]]]
variable[cursor] assign[=] call[name[self]._conn.execute, parameter[name[query], name[labels]]]
return[call[name[self]._csv, parameter[name[cursor], name[constants].QUERY_FIELDNAMES, name[output_fh]]]] | keyword[def] identifier[search] ( identifier[self] , identifier[catalogue] , identifier[ngrams] , identifier[output_fh] ):
literal[string]
identifier[labels] = identifier[list] ( identifier[self] . identifier[_set_labels] ( identifier[catalogue] ))
identifier[label_placeholders] = identifier[self] . identifier[_get_placeholders] ( identifier[labels] )
keyword[if] identifier[ngrams] :
identifier[self] . identifier[_add_temporary_ngrams] ( identifier[ngrams] )
identifier[query] = identifier[constants] . identifier[SELECT_SEARCH_SQL] . identifier[format] ( identifier[label_placeholders] )
keyword[else] :
identifier[query] = identifier[constants] . identifier[SELECT_SEARCH_ALL_SQL] . identifier[format] ( identifier[label_placeholders] )
identifier[self] . identifier[_logger] . identifier[info] ( literal[string] )
identifier[self] . identifier[_logger] . identifier[debug] ( literal[string] . identifier[format] (
identifier[query] , literal[string] . identifier[join] ( identifier[ngrams] )))
identifier[self] . identifier[_log_query_plan] ( identifier[query] , identifier[labels] )
identifier[cursor] = identifier[self] . identifier[_conn] . identifier[execute] ( identifier[query] , identifier[labels] )
keyword[return] identifier[self] . identifier[_csv] ( identifier[cursor] , identifier[constants] . identifier[QUERY_FIELDNAMES] , identifier[output_fh] ) | def search(self, catalogue, ngrams, output_fh):
"""Returns `output_fh` populated with CSV results for each n-gram in
`ngrams` that occurs within labelled witnesses in `catalogue`.
If `ngrams` is empty, include all n-grams.
:param catalogue: catalogue matching filenames to labels
:type catalogue: `Catalogue`
:param ngrams: n-grams to search for
:type ngrams: `list` of `str`
:param output_fh: object to write results to
:type output_fh: file-like object
:rtype: file-like object
"""
labels = list(self._set_labels(catalogue))
label_placeholders = self._get_placeholders(labels)
if ngrams:
self._add_temporary_ngrams(ngrams)
query = constants.SELECT_SEARCH_SQL.format(label_placeholders) # depends on [control=['if'], data=[]]
else:
query = constants.SELECT_SEARCH_ALL_SQL.format(label_placeholders)
self._logger.info('Running search query')
self._logger.debug('Query: {}\nN-grams: {}'.format(query, ', '.join(ngrams)))
self._log_query_plan(query, labels)
cursor = self._conn.execute(query, labels)
return self._csv(cursor, constants.QUERY_FIELDNAMES, output_fh) |
def get_requires():
"""Read requirements.txt."""
requirements = open("requirements.txt", "r").read()
return list(filter(lambda x: x != "", requirements.split())) | def function[get_requires, parameter[]]:
constant[Read requirements.txt.]
variable[requirements] assign[=] call[call[name[open], parameter[constant[requirements.txt], constant[r]]].read, parameter[]]
return[call[name[list], parameter[call[name[filter], parameter[<ast.Lambda object at 0x7da1b16179a0>, call[name[requirements].split, parameter[]]]]]]] | keyword[def] identifier[get_requires] ():
literal[string]
identifier[requirements] = identifier[open] ( literal[string] , literal[string] ). identifier[read] ()
keyword[return] identifier[list] ( identifier[filter] ( keyword[lambda] identifier[x] : identifier[x] != literal[string] , identifier[requirements] . identifier[split] ())) | def get_requires():
"""Read requirements.txt."""
requirements = open('requirements.txt', 'r').read()
return list(filter(lambda x: x != '', requirements.split())) |
def check_model(self):
"""
Check the model for various errors. This method checks for the following
errors.
* Checks if the sum of the probabilities in each associated CPD for each
state is equal to 1 (tol=0.01).
* Checks if the CPDs associated with nodes are consistent with their parents.
Returns
-------
boolean: True if everything seems to be order. Otherwise raises error
according to the problem.
"""
for node in super(DynamicBayesianNetwork, self).nodes():
cpd = self.get_cpds(node=node)
if isinstance(cpd, TabularCPD):
evidence = cpd.variables[:0:-1]
evidence_card = cpd.cardinality[:0:-1]
parents = self.get_parents(node)
if set(evidence) != set(parents if parents else []):
raise ValueError("CPD associated with {node} doesn't have "
"proper parents associated with it.".format(node=node))
if not np.allclose(cpd.to_factor().marginalize([node], inplace=False).values.flatten('C'),
np.ones(np.product(evidence_card)),
atol=0.01):
raise ValueError('Sum of probabilities of states for node {node}'
' is not equal to 1'.format(node=node))
return True | def function[check_model, parameter[self]]:
constant[
Check the model for various errors. This method checks for the following
errors.
* Checks if the sum of the probabilities in each associated CPD for each
state is equal to 1 (tol=0.01).
* Checks if the CPDs associated with nodes are consistent with their parents.
Returns
-------
boolean: True if everything seems to be order. Otherwise raises error
according to the problem.
]
for taget[name[node]] in starred[call[call[name[super], parameter[name[DynamicBayesianNetwork], name[self]]].nodes, parameter[]]] begin[:]
variable[cpd] assign[=] call[name[self].get_cpds, parameter[]]
if call[name[isinstance], parameter[name[cpd], name[TabularCPD]]] begin[:]
variable[evidence] assign[=] call[name[cpd].variables][<ast.Slice object at 0x7da20c6abb50>]
variable[evidence_card] assign[=] call[name[cpd].cardinality][<ast.Slice object at 0x7da20c6ab250>]
variable[parents] assign[=] call[name[self].get_parents, parameter[name[node]]]
if compare[call[name[set], parameter[name[evidence]]] not_equal[!=] call[name[set], parameter[<ast.IfExp object at 0x7da20c6ab490>]]] begin[:]
<ast.Raise object at 0x7da20c6a9960>
if <ast.UnaryOp object at 0x7da20c6aaf20> begin[:]
<ast.Raise object at 0x7da20c6aa500>
return[constant[True]] | keyword[def] identifier[check_model] ( identifier[self] ):
literal[string]
keyword[for] identifier[node] keyword[in] identifier[super] ( identifier[DynamicBayesianNetwork] , identifier[self] ). identifier[nodes] ():
identifier[cpd] = identifier[self] . identifier[get_cpds] ( identifier[node] = identifier[node] )
keyword[if] identifier[isinstance] ( identifier[cpd] , identifier[TabularCPD] ):
identifier[evidence] = identifier[cpd] . identifier[variables] [: literal[int] :- literal[int] ]
identifier[evidence_card] = identifier[cpd] . identifier[cardinality] [: literal[int] :- literal[int] ]
identifier[parents] = identifier[self] . identifier[get_parents] ( identifier[node] )
keyword[if] identifier[set] ( identifier[evidence] )!= identifier[set] ( identifier[parents] keyword[if] identifier[parents] keyword[else] []):
keyword[raise] identifier[ValueError] ( literal[string]
literal[string] . identifier[format] ( identifier[node] = identifier[node] ))
keyword[if] keyword[not] identifier[np] . identifier[allclose] ( identifier[cpd] . identifier[to_factor] (). identifier[marginalize] ([ identifier[node] ], identifier[inplace] = keyword[False] ). identifier[values] . identifier[flatten] ( literal[string] ),
identifier[np] . identifier[ones] ( identifier[np] . identifier[product] ( identifier[evidence_card] )),
identifier[atol] = literal[int] ):
keyword[raise] identifier[ValueError] ( literal[string]
literal[string] . identifier[format] ( identifier[node] = identifier[node] ))
keyword[return] keyword[True] | def check_model(self):
"""
Check the model for various errors. This method checks for the following
errors.
* Checks if the sum of the probabilities in each associated CPD for each
state is equal to 1 (tol=0.01).
* Checks if the CPDs associated with nodes are consistent with their parents.
Returns
-------
boolean: True if everything seems to be order. Otherwise raises error
according to the problem.
"""
for node in super(DynamicBayesianNetwork, self).nodes():
cpd = self.get_cpds(node=node)
if isinstance(cpd, TabularCPD):
evidence = cpd.variables[:0:-1]
evidence_card = cpd.cardinality[:0:-1]
parents = self.get_parents(node)
if set(evidence) != set(parents if parents else []):
raise ValueError("CPD associated with {node} doesn't have proper parents associated with it.".format(node=node)) # depends on [control=['if'], data=[]]
if not np.allclose(cpd.to_factor().marginalize([node], inplace=False).values.flatten('C'), np.ones(np.product(evidence_card)), atol=0.01):
raise ValueError('Sum of probabilities of states for node {node} is not equal to 1'.format(node=node)) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['node']]
return True |
def get_month_from_date_str(date_str, lang=DEFAULT_DATE_LANG):
"""Find the month name for the given locale, in the given string.
Returns a tuple ``(number_of_month, abbr_name)``.
"""
date_str = date_str.lower()
with calendar.different_locale(LOCALES[lang]):
month_abbrs = list(calendar.month_abbr)
for seq, abbr in enumerate(month_abbrs):
if abbr and abbr.lower() in date_str:
return seq, abbr
return () | def function[get_month_from_date_str, parameter[date_str, lang]]:
constant[Find the month name for the given locale, in the given string.
Returns a tuple ``(number_of_month, abbr_name)``.
]
variable[date_str] assign[=] call[name[date_str].lower, parameter[]]
with call[name[calendar].different_locale, parameter[call[name[LOCALES]][name[lang]]]] begin[:]
variable[month_abbrs] assign[=] call[name[list], parameter[name[calendar].month_abbr]]
for taget[tuple[[<ast.Name object at 0x7da18dc99240>, <ast.Name object at 0x7da18dc982b0>]]] in starred[call[name[enumerate], parameter[name[month_abbrs]]]] begin[:]
if <ast.BoolOp object at 0x7da18dc9bf40> begin[:]
return[tuple[[<ast.Name object at 0x7da18dc9ae00>, <ast.Name object at 0x7da18dc99120>]]]
return[tuple[[]]] | keyword[def] identifier[get_month_from_date_str] ( identifier[date_str] , identifier[lang] = identifier[DEFAULT_DATE_LANG] ):
literal[string]
identifier[date_str] = identifier[date_str] . identifier[lower] ()
keyword[with] identifier[calendar] . identifier[different_locale] ( identifier[LOCALES] [ identifier[lang] ]):
identifier[month_abbrs] = identifier[list] ( identifier[calendar] . identifier[month_abbr] )
keyword[for] identifier[seq] , identifier[abbr] keyword[in] identifier[enumerate] ( identifier[month_abbrs] ):
keyword[if] identifier[abbr] keyword[and] identifier[abbr] . identifier[lower] () keyword[in] identifier[date_str] :
keyword[return] identifier[seq] , identifier[abbr]
keyword[return] () | def get_month_from_date_str(date_str, lang=DEFAULT_DATE_LANG):
"""Find the month name for the given locale, in the given string.
Returns a tuple ``(number_of_month, abbr_name)``.
"""
date_str = date_str.lower()
with calendar.different_locale(LOCALES[lang]):
month_abbrs = list(calendar.month_abbr)
for (seq, abbr) in enumerate(month_abbrs):
if abbr and abbr.lower() in date_str:
return (seq, abbr) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['with'], data=[]]
return () |
def _varargs_labels_as_list(label_list):
"""Return a list of labels for a list of labels or singleton list of list
of labels."""
if len(label_list) == 0:
return []
elif not _is_non_string_iterable(label_list[0]):
# Assume everything is a label. If not, it'll be caught later.
return label_list
elif len(label_list) == 1:
return label_list[0]
else:
raise ValueError("Labels {} contain more than list.".format(label_list),
"Pass just one list of labels.") | def function[_varargs_labels_as_list, parameter[label_list]]:
constant[Return a list of labels for a list of labels or singleton list of list
of labels.]
if compare[call[name[len], parameter[name[label_list]]] equal[==] constant[0]] begin[:]
return[list[[]]] | keyword[def] identifier[_varargs_labels_as_list] ( identifier[label_list] ):
literal[string]
keyword[if] identifier[len] ( identifier[label_list] )== literal[int] :
keyword[return] []
keyword[elif] keyword[not] identifier[_is_non_string_iterable] ( identifier[label_list] [ literal[int] ]):
keyword[return] identifier[label_list]
keyword[elif] identifier[len] ( identifier[label_list] )== literal[int] :
keyword[return] identifier[label_list] [ literal[int] ]
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[label_list] ),
literal[string] ) | def _varargs_labels_as_list(label_list):
"""Return a list of labels for a list of labels or singleton list of list
of labels."""
if len(label_list) == 0:
return [] # depends on [control=['if'], data=[]]
elif not _is_non_string_iterable(label_list[0]):
# Assume everything is a label. If not, it'll be caught later.
return label_list # depends on [control=['if'], data=[]]
elif len(label_list) == 1:
return label_list[0] # depends on [control=['if'], data=[]]
else:
raise ValueError('Labels {} contain more than list.'.format(label_list), 'Pass just one list of labels.') |
def find(C, node, path, namespaces=None, extensions=None, smart_strings=True, **args):
"""use Element.xpath() rather than Element.find() in order to normalize the interface"""
xp = node.xpath(
path,
namespaces=namespaces or C.NS,
extensions=extensions,
smart_strings=smart_strings,
**args
)
if len(xp) > 0:
return xp[0] | def function[find, parameter[C, node, path, namespaces, extensions, smart_strings]]:
constant[use Element.xpath() rather than Element.find() in order to normalize the interface]
variable[xp] assign[=] call[name[node].xpath, parameter[name[path]]]
if compare[call[name[len], parameter[name[xp]]] greater[>] constant[0]] begin[:]
return[call[name[xp]][constant[0]]] | keyword[def] identifier[find] ( identifier[C] , identifier[node] , identifier[path] , identifier[namespaces] = keyword[None] , identifier[extensions] = keyword[None] , identifier[smart_strings] = keyword[True] ,** identifier[args] ):
literal[string]
identifier[xp] = identifier[node] . identifier[xpath] (
identifier[path] ,
identifier[namespaces] = identifier[namespaces] keyword[or] identifier[C] . identifier[NS] ,
identifier[extensions] = identifier[extensions] ,
identifier[smart_strings] = identifier[smart_strings] ,
** identifier[args]
)
keyword[if] identifier[len] ( identifier[xp] )> literal[int] :
keyword[return] identifier[xp] [ literal[int] ] | def find(C, node, path, namespaces=None, extensions=None, smart_strings=True, **args):
"""use Element.xpath() rather than Element.find() in order to normalize the interface"""
xp = node.xpath(path, namespaces=namespaces or C.NS, extensions=extensions, smart_strings=smart_strings, **args)
if len(xp) > 0:
return xp[0] # depends on [control=['if'], data=[]] |
def populate(cls, as_of=None):
"""Ensure the next X years of billing cycles exist
"""
return cls._populate(as_of=as_of or date.today(), delete=True) | def function[populate, parameter[cls, as_of]]:
constant[Ensure the next X years of billing cycles exist
]
return[call[name[cls]._populate, parameter[]]] | keyword[def] identifier[populate] ( identifier[cls] , identifier[as_of] = keyword[None] ):
literal[string]
keyword[return] identifier[cls] . identifier[_populate] ( identifier[as_of] = identifier[as_of] keyword[or] identifier[date] . identifier[today] (), identifier[delete] = keyword[True] ) | def populate(cls, as_of=None):
"""Ensure the next X years of billing cycles exist
"""
return cls._populate(as_of=as_of or date.today(), delete=True) |
def OnPadIntCtrl(self, event):
"""Pad IntCtrl event handler"""
self.attrs["pad"] = event.GetValue()
post_command_event(self, self.DrawChartMsg) | def function[OnPadIntCtrl, parameter[self, event]]:
constant[Pad IntCtrl event handler]
call[name[self].attrs][constant[pad]] assign[=] call[name[event].GetValue, parameter[]]
call[name[post_command_event], parameter[name[self], name[self].DrawChartMsg]] | keyword[def] identifier[OnPadIntCtrl] ( identifier[self] , identifier[event] ):
literal[string]
identifier[self] . identifier[attrs] [ literal[string] ]= identifier[event] . identifier[GetValue] ()
identifier[post_command_event] ( identifier[self] , identifier[self] . identifier[DrawChartMsg] ) | def OnPadIntCtrl(self, event):
"""Pad IntCtrl event handler"""
self.attrs['pad'] = event.GetValue()
post_command_event(self, self.DrawChartMsg) |
def update_from_dict(self, dict_values):
"""
Updates TDigest object with dictionary values.
The digest delta and K values are optional if you would like to update them,
but the n value is not required because it is computed from the centroid weights.
For example, you can initalize a new TDigest:
digest = TDigest()
Then load dictionary values into the digest:
digest.update_from_dict({'K': 25, 'delta': 0.01, 'centroids': [{'c': 1.0, 'm': 1.0}, {'c': 1.0, 'm': 2.0}, {'c': 1.0, 'm': 3.0}]})
Or update an existing digest where the centroids will be appropriately merged:
digest = TDigest()
digest.update(1)
digest.update(2)
digest.update(3)
digest.update_from_dict({'K': 25, 'delta': 0.01, 'centroids': [{'c': 1.0, 'm': 1.0}, {'c': 1.0, 'm': 2.0}, {'c': 1.0, 'm': 3.0}]})
Resulting in the digest having merged similar centroids by increasing their weight:
{'K': 25, 'delta': 0.01, 'centroids': [{'c': 2.0, 'm': 1.0}, {'c': 2.0, 'm': 2.0}, {'c': 2.0, 'm': 3.0}], 'n': 6.0}
Alternative you can provide only a list of centroid values with update_centroids_from_list()
"""
self.delta = dict_values.get('delta', self.delta)
self.K = dict_values.get('K', self.K)
self.update_centroids_from_list(dict_values['centroids'])
return self | def function[update_from_dict, parameter[self, dict_values]]:
constant[
Updates TDigest object with dictionary values.
The digest delta and K values are optional if you would like to update them,
but the n value is not required because it is computed from the centroid weights.
For example, you can initalize a new TDigest:
digest = TDigest()
Then load dictionary values into the digest:
digest.update_from_dict({'K': 25, 'delta': 0.01, 'centroids': [{'c': 1.0, 'm': 1.0}, {'c': 1.0, 'm': 2.0}, {'c': 1.0, 'm': 3.0}]})
Or update an existing digest where the centroids will be appropriately merged:
digest = TDigest()
digest.update(1)
digest.update(2)
digest.update(3)
digest.update_from_dict({'K': 25, 'delta': 0.01, 'centroids': [{'c': 1.0, 'm': 1.0}, {'c': 1.0, 'm': 2.0}, {'c': 1.0, 'm': 3.0}]})
Resulting in the digest having merged similar centroids by increasing their weight:
{'K': 25, 'delta': 0.01, 'centroids': [{'c': 2.0, 'm': 1.0}, {'c': 2.0, 'm': 2.0}, {'c': 2.0, 'm': 3.0}], 'n': 6.0}
Alternative you can provide only a list of centroid values with update_centroids_from_list()
]
name[self].delta assign[=] call[name[dict_values].get, parameter[constant[delta], name[self].delta]]
name[self].K assign[=] call[name[dict_values].get, parameter[constant[K], name[self].K]]
call[name[self].update_centroids_from_list, parameter[call[name[dict_values]][constant[centroids]]]]
return[name[self]] | keyword[def] identifier[update_from_dict] ( identifier[self] , identifier[dict_values] ):
literal[string]
identifier[self] . identifier[delta] = identifier[dict_values] . identifier[get] ( literal[string] , identifier[self] . identifier[delta] )
identifier[self] . identifier[K] = identifier[dict_values] . identifier[get] ( literal[string] , identifier[self] . identifier[K] )
identifier[self] . identifier[update_centroids_from_list] ( identifier[dict_values] [ literal[string] ])
keyword[return] identifier[self] | def update_from_dict(self, dict_values):
"""
Updates TDigest object with dictionary values.
The digest delta and K values are optional if you would like to update them,
but the n value is not required because it is computed from the centroid weights.
For example, you can initalize a new TDigest:
digest = TDigest()
Then load dictionary values into the digest:
digest.update_from_dict({'K': 25, 'delta': 0.01, 'centroids': [{'c': 1.0, 'm': 1.0}, {'c': 1.0, 'm': 2.0}, {'c': 1.0, 'm': 3.0}]})
Or update an existing digest where the centroids will be appropriately merged:
digest = TDigest()
digest.update(1)
digest.update(2)
digest.update(3)
digest.update_from_dict({'K': 25, 'delta': 0.01, 'centroids': [{'c': 1.0, 'm': 1.0}, {'c': 1.0, 'm': 2.0}, {'c': 1.0, 'm': 3.0}]})
Resulting in the digest having merged similar centroids by increasing their weight:
{'K': 25, 'delta': 0.01, 'centroids': [{'c': 2.0, 'm': 1.0}, {'c': 2.0, 'm': 2.0}, {'c': 2.0, 'm': 3.0}], 'n': 6.0}
Alternative you can provide only a list of centroid values with update_centroids_from_list()
"""
self.delta = dict_values.get('delta', self.delta)
self.K = dict_values.get('K', self.K)
self.update_centroids_from_list(dict_values['centroids'])
return self |
def listBlocks(self, **kwargs):
"""
API to list a block in DBS. At least one of the parameters block_name, dataset, data_tier_name or
logical_file_name are required. If data_tier_name is provided, min_cdate and max_cdate have to be specified and
the difference in time have to be less than 31 days.
:param block_name: name of the block
:type block_name: str
:param dataset: dataset
:type dataset: str
:param data_tier_name: data tier
:type data_tier_name: str
:param logical_file_name: Logical File Name
:type logical_file_name: str
:param origin_site_name: Origin Site Name (Optional)
:type origin_site_name: str
:param run_num: run numbers (Optional). Possible format: run_num, "run_min-run_max", or ["run_min-run_max", run1, run2, ...]
:type run_num: int, list of runs or list of run ranges
:param min_cdate: Lower limit for the creation date (unixtime) (Optional)
:type min_cdate: int, str
:param max_cdate: Upper limit for the creation date (unixtime) (Optional)
:type max_cdate: int, str
:param min_ldate: Lower limit for the last modification date (unixtime) (Optional)
:type min_ldate: int, str
:param max_ldate: Upper limit for the last modification date (unixtime) (Optional)
:type max_ldate: int, str
:param cdate: creation date (unixtime) (Optional)
:type cdate: int, str
:param ldate: last modification date (unixtime) (Optional)
:type ldate: int, str
:param detail: Get detailed information of a block (Optional)
:type detail: bool
:returns: List of dictionaries containing following keys (block_name). If option detail is used the dictionaries contain the following keys (block_id, create_by, creation_date, open_for_writing, last_modified_by, dataset, block_name, file_count, origin_site_name, last_modification_date, dataset_id and block_size)
:rtype: list of dicts
"""
validParameters = ['dataset', 'block_name', 'data_tier_name', 'origin_site_name',
'logical_file_name', 'run_num', 'open_for_writing', 'min_cdate',
'max_cdate', 'min_ldate', 'max_ldate',
'cdate', 'ldate', 'detail']
#requiredParameters = {'multiple': validParameters}
requiredParameters = {'multiple': ['dataset', 'block_name', 'data_tier_name', 'logical_file_name']}
#set defaults
if 'detail' not in kwargs.keys():
kwargs['detail'] = False
checkInputParameter(method="listBlocks", parameters=kwargs.keys(), validParameters=validParameters,
requiredParameters=requiredParameters)
return self.__callServer("blocks", params=kwargs) | def function[listBlocks, parameter[self]]:
constant[
API to list a block in DBS. At least one of the parameters block_name, dataset, data_tier_name or
logical_file_name are required. If data_tier_name is provided, min_cdate and max_cdate have to be specified and
the difference in time have to be less than 31 days.
:param block_name: name of the block
:type block_name: str
:param dataset: dataset
:type dataset: str
:param data_tier_name: data tier
:type data_tier_name: str
:param logical_file_name: Logical File Name
:type logical_file_name: str
:param origin_site_name: Origin Site Name (Optional)
:type origin_site_name: str
:param run_num: run numbers (Optional). Possible format: run_num, "run_min-run_max", or ["run_min-run_max", run1, run2, ...]
:type run_num: int, list of runs or list of run ranges
:param min_cdate: Lower limit for the creation date (unixtime) (Optional)
:type min_cdate: int, str
:param max_cdate: Upper limit for the creation date (unixtime) (Optional)
:type max_cdate: int, str
:param min_ldate: Lower limit for the last modification date (unixtime) (Optional)
:type min_ldate: int, str
:param max_ldate: Upper limit for the last modification date (unixtime) (Optional)
:type max_ldate: int, str
:param cdate: creation date (unixtime) (Optional)
:type cdate: int, str
:param ldate: last modification date (unixtime) (Optional)
:type ldate: int, str
:param detail: Get detailed information of a block (Optional)
:type detail: bool
:returns: List of dictionaries containing following keys (block_name). If option detail is used the dictionaries contain the following keys (block_id, create_by, creation_date, open_for_writing, last_modified_by, dataset, block_name, file_count, origin_site_name, last_modification_date, dataset_id and block_size)
:rtype: list of dicts
]
variable[validParameters] assign[=] list[[<ast.Constant object at 0x7da20e9b2c80>, <ast.Constant object at 0x7da20e9b08b0>, <ast.Constant object at 0x7da20e9b1210>, <ast.Constant object at 0x7da20e9b1420>, <ast.Constant object at 0x7da20e9b1150>, <ast.Constant object at 0x7da20e9b26e0>, <ast.Constant object at 0x7da20e9b0520>, <ast.Constant object at 0x7da20e9b22f0>, <ast.Constant object at 0x7da20e9b0970>, <ast.Constant object at 0x7da20e9b0640>, <ast.Constant object at 0x7da20e9b3160>, <ast.Constant object at 0x7da20e9b2bc0>, <ast.Constant object at 0x7da20e9b00a0>, <ast.Constant object at 0x7da20e9b1ba0>]]
variable[requiredParameters] assign[=] dictionary[[<ast.Constant object at 0x7da20e9b2d70>], [<ast.List object at 0x7da20e9b3b50>]]
if compare[constant[detail] <ast.NotIn object at 0x7da2590d7190> call[name[kwargs].keys, parameter[]]] begin[:]
call[name[kwargs]][constant[detail]] assign[=] constant[False]
call[name[checkInputParameter], parameter[]]
return[call[name[self].__callServer, parameter[constant[blocks]]]] | keyword[def] identifier[listBlocks] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[validParameters] =[ literal[string] , literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] , literal[string] ]
identifier[requiredParameters] ={ literal[string] :[ literal[string] , literal[string] , literal[string] , literal[string] ]}
keyword[if] literal[string] keyword[not] keyword[in] identifier[kwargs] . identifier[keys] ():
identifier[kwargs] [ literal[string] ]= keyword[False]
identifier[checkInputParameter] ( identifier[method] = literal[string] , identifier[parameters] = identifier[kwargs] . identifier[keys] (), identifier[validParameters] = identifier[validParameters] ,
identifier[requiredParameters] = identifier[requiredParameters] )
keyword[return] identifier[self] . identifier[__callServer] ( literal[string] , identifier[params] = identifier[kwargs] ) | def listBlocks(self, **kwargs):
"""
API to list a block in DBS. At least one of the parameters block_name, dataset, data_tier_name or
logical_file_name are required. If data_tier_name is provided, min_cdate and max_cdate have to be specified and
the difference in time have to be less than 31 days.
:param block_name: name of the block
:type block_name: str
:param dataset: dataset
:type dataset: str
:param data_tier_name: data tier
:type data_tier_name: str
:param logical_file_name: Logical File Name
:type logical_file_name: str
:param origin_site_name: Origin Site Name (Optional)
:type origin_site_name: str
:param run_num: run numbers (Optional). Possible format: run_num, "run_min-run_max", or ["run_min-run_max", run1, run2, ...]
:type run_num: int, list of runs or list of run ranges
:param min_cdate: Lower limit for the creation date (unixtime) (Optional)
:type min_cdate: int, str
:param max_cdate: Upper limit for the creation date (unixtime) (Optional)
:type max_cdate: int, str
:param min_ldate: Lower limit for the last modification date (unixtime) (Optional)
:type min_ldate: int, str
:param max_ldate: Upper limit for the last modification date (unixtime) (Optional)
:type max_ldate: int, str
:param cdate: creation date (unixtime) (Optional)
:type cdate: int, str
:param ldate: last modification date (unixtime) (Optional)
:type ldate: int, str
:param detail: Get detailed information of a block (Optional)
:type detail: bool
:returns: List of dictionaries containing following keys (block_name). If option detail is used the dictionaries contain the following keys (block_id, create_by, creation_date, open_for_writing, last_modified_by, dataset, block_name, file_count, origin_site_name, last_modification_date, dataset_id and block_size)
:rtype: list of dicts
"""
validParameters = ['dataset', 'block_name', 'data_tier_name', 'origin_site_name', 'logical_file_name', 'run_num', 'open_for_writing', 'min_cdate', 'max_cdate', 'min_ldate', 'max_ldate', 'cdate', 'ldate', 'detail']
#requiredParameters = {'multiple': validParameters}
requiredParameters = {'multiple': ['dataset', 'block_name', 'data_tier_name', 'logical_file_name']}
#set defaults
if 'detail' not in kwargs.keys():
kwargs['detail'] = False # depends on [control=['if'], data=[]]
checkInputParameter(method='listBlocks', parameters=kwargs.keys(), validParameters=validParameters, requiredParameters=requiredParameters)
return self.__callServer('blocks', params=kwargs) |
def checkgrad(self, target_param=None, verbose=False, step=1e-6, tolerance=1e-3, block_indices=None, plot=False, super_plot=False):
"""
Gradient checker that just checks each hessian individually
super_plot will plot the hessian wrt every parameter, plot will just do the first one
"""
try:
import numdifftools as nd
except:
raise ImportError("Don't have numdifftools package installed, it is not a GPy dependency as of yet, it is only used for hessian tests")
if target_param:
raise NotImplementedError('Only basic functionality is provided with this gradchecker')
#Repeat for each parameter, not the nicest but shouldn't be many cases where there are many
#variables
current_index = 0
for name, n_shape in zip(self.names, self.shapes):
current_size = numpy.prod(n_shape)
x = self.optimizer_array.copy()
#x = self._get_params_transformed().copy()
x = x[current_index:current_index + current_size].reshape(n_shape)
# Check gradients
#Actually the third derivative
analytic_hess = self._ddf(x)
#Can only calculate jacobian for one variable at a time
#From the docs:
#x0 : vector location
#at which to differentiate fun
#If x0 is an N x M array, then fun is assumed to be a function
#of N*M variables., thus we must have it flat, not (N,1), but just (N,)
#numeric_hess_partial = nd.Hessian(self._f, vectorized=False)
#Actually _df is already the hessian
numeric_hess_partial = nd.Jacobian(self._df, vectorized=True)
numeric_hess = numeric_hess_partial(x)
print("Done making numerical hessian")
if analytic_hess.dtype is np.dtype('object'):
#Blockify numeric_hess aswell
blocksizes, pagesizes = get_block_shapes_3d(analytic_hess)
#HACK
real_block_size = np.sum(blocksizes)
numeric_hess = numeric_hess.reshape(real_block_size, real_block_size, pagesizes)
#numeric_hess = get_blocks_3d(numeric_hess, blocksizes)#, pagesizes)
else:
numeric_hess = numeric_hess.reshape(*analytic_hess.shape)
#Check every block individually (for ease)
check_passed = [False]*numeric_hess.shape[2]
for block_ind in range(numeric_hess.shape[2]):
#Unless super_plot is set, just plot the first one
p = True if (plot and block_ind == numeric_hess.shape[2]-1) or super_plot else False
if verbose:
print("Checking derivative of hessian wrt parameter number {}".format(block_ind))
check_passed[block_ind] = self.checkgrad_block(analytic_hess[:,:,block_ind], numeric_hess[:,:,block_ind], verbose=verbose, step=step, tolerance=tolerance, block_indices=block_indices, plot=p)
current_index += current_size
return np.all(check_passed) | def function[checkgrad, parameter[self, target_param, verbose, step, tolerance, block_indices, plot, super_plot]]:
constant[
Gradient checker that just checks each hessian individually
super_plot will plot the hessian wrt every parameter, plot will just do the first one
]
<ast.Try object at 0x7da1b1cc0850>
if name[target_param] begin[:]
<ast.Raise object at 0x7da18ede6710>
variable[current_index] assign[=] constant[0]
for taget[tuple[[<ast.Name object at 0x7da18ede4a90>, <ast.Name object at 0x7da18ede5120>]]] in starred[call[name[zip], parameter[name[self].names, name[self].shapes]]] begin[:]
variable[current_size] assign[=] call[name[numpy].prod, parameter[name[n_shape]]]
variable[x] assign[=] call[name[self].optimizer_array.copy, parameter[]]
variable[x] assign[=] call[call[name[x]][<ast.Slice object at 0x7da18ede5090>].reshape, parameter[name[n_shape]]]
variable[analytic_hess] assign[=] call[name[self]._ddf, parameter[name[x]]]
variable[numeric_hess_partial] assign[=] call[name[nd].Jacobian, parameter[name[self]._df]]
variable[numeric_hess] assign[=] call[name[numeric_hess_partial], parameter[name[x]]]
call[name[print], parameter[constant[Done making numerical hessian]]]
if compare[name[analytic_hess].dtype is call[name[np].dtype, parameter[constant[object]]]] begin[:]
<ast.Tuple object at 0x7da18ede69b0> assign[=] call[name[get_block_shapes_3d], parameter[name[analytic_hess]]]
variable[real_block_size] assign[=] call[name[np].sum, parameter[name[blocksizes]]]
variable[numeric_hess] assign[=] call[name[numeric_hess].reshape, parameter[name[real_block_size], name[real_block_size], name[pagesizes]]]
variable[check_passed] assign[=] binary_operation[list[[<ast.Constant object at 0x7da18ede4970>]] * call[name[numeric_hess].shape][constant[2]]]
for taget[name[block_ind]] in starred[call[name[range], parameter[call[name[numeric_hess].shape][constant[2]]]]] begin[:]
variable[p] assign[=] <ast.IfExp object at 0x7da18ede4940>
if name[verbose] begin[:]
call[name[print], parameter[call[constant[Checking derivative of hessian wrt parameter number {}].format, parameter[name[block_ind]]]]]
call[name[check_passed]][name[block_ind]] assign[=] call[name[self].checkgrad_block, parameter[call[name[analytic_hess]][tuple[[<ast.Slice object at 0x7da18dc9a050>, <ast.Slice object at 0x7da18dc98fa0>, <ast.Name object at 0x7da18dc9acb0>]]], call[name[numeric_hess]][tuple[[<ast.Slice object at 0x7da18dc98ca0>, <ast.Slice object at 0x7da18dc9b4c0>, <ast.Name object at 0x7da18dc9beb0>]]]]]
<ast.AugAssign object at 0x7da18dc9b8b0>
return[call[name[np].all, parameter[name[check_passed]]]] | keyword[def] identifier[checkgrad] ( identifier[self] , identifier[target_param] = keyword[None] , identifier[verbose] = keyword[False] , identifier[step] = literal[int] , identifier[tolerance] = literal[int] , identifier[block_indices] = keyword[None] , identifier[plot] = keyword[False] , identifier[super_plot] = keyword[False] ):
literal[string]
keyword[try] :
keyword[import] identifier[numdifftools] keyword[as] identifier[nd]
keyword[except] :
keyword[raise] identifier[ImportError] ( literal[string] )
keyword[if] identifier[target_param] :
keyword[raise] identifier[NotImplementedError] ( literal[string] )
identifier[current_index] = literal[int]
keyword[for] identifier[name] , identifier[n_shape] keyword[in] identifier[zip] ( identifier[self] . identifier[names] , identifier[self] . identifier[shapes] ):
identifier[current_size] = identifier[numpy] . identifier[prod] ( identifier[n_shape] )
identifier[x] = identifier[self] . identifier[optimizer_array] . identifier[copy] ()
identifier[x] = identifier[x] [ identifier[current_index] : identifier[current_index] + identifier[current_size] ]. identifier[reshape] ( identifier[n_shape] )
identifier[analytic_hess] = identifier[self] . identifier[_ddf] ( identifier[x] )
identifier[numeric_hess_partial] = identifier[nd] . identifier[Jacobian] ( identifier[self] . identifier[_df] , identifier[vectorized] = keyword[True] )
identifier[numeric_hess] = identifier[numeric_hess_partial] ( identifier[x] )
identifier[print] ( literal[string] )
keyword[if] identifier[analytic_hess] . identifier[dtype] keyword[is] identifier[np] . identifier[dtype] ( literal[string] ):
identifier[blocksizes] , identifier[pagesizes] = identifier[get_block_shapes_3d] ( identifier[analytic_hess] )
identifier[real_block_size] = identifier[np] . identifier[sum] ( identifier[blocksizes] )
identifier[numeric_hess] = identifier[numeric_hess] . identifier[reshape] ( identifier[real_block_size] , identifier[real_block_size] , identifier[pagesizes] )
keyword[else] :
identifier[numeric_hess] = identifier[numeric_hess] . identifier[reshape] (* identifier[analytic_hess] . identifier[shape] )
identifier[check_passed] =[ keyword[False] ]* identifier[numeric_hess] . identifier[shape] [ literal[int] ]
keyword[for] identifier[block_ind] keyword[in] identifier[range] ( identifier[numeric_hess] . identifier[shape] [ literal[int] ]):
identifier[p] = keyword[True] keyword[if] ( identifier[plot] keyword[and] identifier[block_ind] == identifier[numeric_hess] . identifier[shape] [ literal[int] ]- literal[int] ) keyword[or] identifier[super_plot] keyword[else] keyword[False]
keyword[if] identifier[verbose] :
identifier[print] ( literal[string] . identifier[format] ( identifier[block_ind] ))
identifier[check_passed] [ identifier[block_ind] ]= identifier[self] . identifier[checkgrad_block] ( identifier[analytic_hess] [:,:, identifier[block_ind] ], identifier[numeric_hess] [:,:, identifier[block_ind] ], identifier[verbose] = identifier[verbose] , identifier[step] = identifier[step] , identifier[tolerance] = identifier[tolerance] , identifier[block_indices] = identifier[block_indices] , identifier[plot] = identifier[p] )
identifier[current_index] += identifier[current_size]
keyword[return] identifier[np] . identifier[all] ( identifier[check_passed] ) | def checkgrad(self, target_param=None, verbose=False, step=1e-06, tolerance=0.001, block_indices=None, plot=False, super_plot=False):
"""
Gradient checker that just checks each hessian individually
super_plot will plot the hessian wrt every parameter, plot will just do the first one
"""
try:
import numdifftools as nd # depends on [control=['try'], data=[]]
except:
raise ImportError("Don't have numdifftools package installed, it is not a GPy dependency as of yet, it is only used for hessian tests") # depends on [control=['except'], data=[]]
if target_param:
raise NotImplementedError('Only basic functionality is provided with this gradchecker') # depends on [control=['if'], data=[]]
#Repeat for each parameter, not the nicest but shouldn't be many cases where there are many
#variables
current_index = 0
for (name, n_shape) in zip(self.names, self.shapes):
current_size = numpy.prod(n_shape)
x = self.optimizer_array.copy()
#x = self._get_params_transformed().copy()
x = x[current_index:current_index + current_size].reshape(n_shape)
# Check gradients
#Actually the third derivative
analytic_hess = self._ddf(x)
#Can only calculate jacobian for one variable at a time
#From the docs:
#x0 : vector location
#at which to differentiate fun
#If x0 is an N x M array, then fun is assumed to be a function
#of N*M variables., thus we must have it flat, not (N,1), but just (N,)
#numeric_hess_partial = nd.Hessian(self._f, vectorized=False)
#Actually _df is already the hessian
numeric_hess_partial = nd.Jacobian(self._df, vectorized=True)
numeric_hess = numeric_hess_partial(x)
print('Done making numerical hessian')
if analytic_hess.dtype is np.dtype('object'):
#Blockify numeric_hess aswell
(blocksizes, pagesizes) = get_block_shapes_3d(analytic_hess)
#HACK
real_block_size = np.sum(blocksizes)
numeric_hess = numeric_hess.reshape(real_block_size, real_block_size, pagesizes) # depends on [control=['if'], data=[]]
else:
#numeric_hess = get_blocks_3d(numeric_hess, blocksizes)#, pagesizes)
numeric_hess = numeric_hess.reshape(*analytic_hess.shape)
#Check every block individually (for ease)
check_passed = [False] * numeric_hess.shape[2]
for block_ind in range(numeric_hess.shape[2]):
#Unless super_plot is set, just plot the first one
p = True if plot and block_ind == numeric_hess.shape[2] - 1 or super_plot else False
if verbose:
print('Checking derivative of hessian wrt parameter number {}'.format(block_ind)) # depends on [control=['if'], data=[]]
check_passed[block_ind] = self.checkgrad_block(analytic_hess[:, :, block_ind], numeric_hess[:, :, block_ind], verbose=verbose, step=step, tolerance=tolerance, block_indices=block_indices, plot=p) # depends on [control=['for'], data=['block_ind']]
current_index += current_size # depends on [control=['for'], data=[]]
return np.all(check_passed) |
def exception(self):
"""
:return: the StreamingQueryException if the query was terminated by an exception, or None.
"""
if self._jsq.exception().isDefined():
je = self._jsq.exception().get()
msg = je.toString().split(': ', 1)[1] # Drop the Java StreamingQueryException type info
stackTrace = '\n\t at '.join(map(lambda x: x.toString(), je.getStackTrace()))
return StreamingQueryException(msg, stackTrace, je.getCause())
else:
return None | def function[exception, parameter[self]]:
constant[
:return: the StreamingQueryException if the query was terminated by an exception, or None.
]
if call[call[name[self]._jsq.exception, parameter[]].isDefined, parameter[]] begin[:]
variable[je] assign[=] call[call[name[self]._jsq.exception, parameter[]].get, parameter[]]
variable[msg] assign[=] call[call[call[name[je].toString, parameter[]].split, parameter[constant[: ], constant[1]]]][constant[1]]
variable[stackTrace] assign[=] call[constant[
at ].join, parameter[call[name[map], parameter[<ast.Lambda object at 0x7da20e954ee0>, call[name[je].getStackTrace, parameter[]]]]]]
return[call[name[StreamingQueryException], parameter[name[msg], name[stackTrace], call[name[je].getCause, parameter[]]]]] | keyword[def] identifier[exception] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[_jsq] . identifier[exception] (). identifier[isDefined] ():
identifier[je] = identifier[self] . identifier[_jsq] . identifier[exception] (). identifier[get] ()
identifier[msg] = identifier[je] . identifier[toString] (). identifier[split] ( literal[string] , literal[int] )[ literal[int] ]
identifier[stackTrace] = literal[string] . identifier[join] ( identifier[map] ( keyword[lambda] identifier[x] : identifier[x] . identifier[toString] (), identifier[je] . identifier[getStackTrace] ()))
keyword[return] identifier[StreamingQueryException] ( identifier[msg] , identifier[stackTrace] , identifier[je] . identifier[getCause] ())
keyword[else] :
keyword[return] keyword[None] | def exception(self):
"""
:return: the StreamingQueryException if the query was terminated by an exception, or None.
"""
if self._jsq.exception().isDefined():
je = self._jsq.exception().get()
msg = je.toString().split(': ', 1)[1] # Drop the Java StreamingQueryException type info
stackTrace = '\n\t at '.join(map(lambda x: x.toString(), je.getStackTrace()))
return StreamingQueryException(msg, stackTrace, je.getCause()) # depends on [control=['if'], data=[]]
else:
return None |
def retry(retry_count):
"""
Retry decorator used during file upload and download.
"""
def func(f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
for backoff in range(retry_count):
try:
return f(*args, **kwargs)
except Exception:
time.sleep(2 ** backoff)
else:
raise SbgError('{}: failed to complete: {}'.format(
threading.current_thread().getName(), f.__name__)
)
return wrapper
return func | def function[retry, parameter[retry_count]]:
constant[
Retry decorator used during file upload and download.
]
def function[func, parameter[f]]:
def function[wrapper, parameter[]]:
for taget[name[backoff]] in starred[call[name[range], parameter[name[retry_count]]]] begin[:]
<ast.Try object at 0x7da20c9930d0>
return[name[wrapper]]
return[name[func]] | keyword[def] identifier[retry] ( identifier[retry_count] ):
literal[string]
keyword[def] identifier[func] ( identifier[f] ):
@ identifier[functools] . identifier[wraps] ( identifier[f] )
keyword[def] identifier[wrapper] (* identifier[args] ,** identifier[kwargs] ):
keyword[for] identifier[backoff] keyword[in] identifier[range] ( identifier[retry_count] ):
keyword[try] :
keyword[return] identifier[f] (* identifier[args] ,** identifier[kwargs] )
keyword[except] identifier[Exception] :
identifier[time] . identifier[sleep] ( literal[int] ** identifier[backoff] )
keyword[else] :
keyword[raise] identifier[SbgError] ( literal[string] . identifier[format] (
identifier[threading] . identifier[current_thread] (). identifier[getName] (), identifier[f] . identifier[__name__] )
)
keyword[return] identifier[wrapper]
keyword[return] identifier[func] | def retry(retry_count):
"""
Retry decorator used during file upload and download.
"""
def func(f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
for backoff in range(retry_count):
try:
return f(*args, **kwargs) # depends on [control=['try'], data=[]]
except Exception:
time.sleep(2 ** backoff) # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['backoff']]
else:
raise SbgError('{}: failed to complete: {}'.format(threading.current_thread().getName(), f.__name__))
return wrapper
return func |
def in_query(expression):
"""Match any of the values that exist in an array specified in query."""
def _in(index, expression=expression):
"""Return store key for documents that satisfy expression."""
ev = expression() if callable(expression) else expression
try:
iter(ev)
except TypeError:
raise AttributeError('$in argument must be an iterable!')
hashed_ev = [index.get_hash_for(v) for v in ev]
store_keys = set()
for value in hashed_ev:
store_keys |= set(index.get_keys_for(value))
return list(store_keys)
return _in | def function[in_query, parameter[expression]]:
constant[Match any of the values that exist in an array specified in query.]
def function[_in, parameter[index, expression]]:
constant[Return store key for documents that satisfy expression.]
variable[ev] assign[=] <ast.IfExp object at 0x7da1b189f490>
<ast.Try object at 0x7da1b189c670>
variable[hashed_ev] assign[=] <ast.ListComp object at 0x7da1b189f3d0>
variable[store_keys] assign[=] call[name[set], parameter[]]
for taget[name[value]] in starred[name[hashed_ev]] begin[:]
<ast.AugAssign object at 0x7da1b189fc40>
return[call[name[list], parameter[name[store_keys]]]]
return[name[_in]] | keyword[def] identifier[in_query] ( identifier[expression] ):
literal[string]
keyword[def] identifier[_in] ( identifier[index] , identifier[expression] = identifier[expression] ):
literal[string]
identifier[ev] = identifier[expression] () keyword[if] identifier[callable] ( identifier[expression] ) keyword[else] identifier[expression]
keyword[try] :
identifier[iter] ( identifier[ev] )
keyword[except] identifier[TypeError] :
keyword[raise] identifier[AttributeError] ( literal[string] )
identifier[hashed_ev] =[ identifier[index] . identifier[get_hash_for] ( identifier[v] ) keyword[for] identifier[v] keyword[in] identifier[ev] ]
identifier[store_keys] = identifier[set] ()
keyword[for] identifier[value] keyword[in] identifier[hashed_ev] :
identifier[store_keys] |= identifier[set] ( identifier[index] . identifier[get_keys_for] ( identifier[value] ))
keyword[return] identifier[list] ( identifier[store_keys] )
keyword[return] identifier[_in] | def in_query(expression):
"""Match any of the values that exist in an array specified in query."""
def _in(index, expression=expression):
"""Return store key for documents that satisfy expression."""
ev = expression() if callable(expression) else expression
try:
iter(ev) # depends on [control=['try'], data=[]]
except TypeError:
raise AttributeError('$in argument must be an iterable!') # depends on [control=['except'], data=[]]
hashed_ev = [index.get_hash_for(v) for v in ev]
store_keys = set()
for value in hashed_ev:
store_keys |= set(index.get_keys_for(value)) # depends on [control=['for'], data=['value']]
return list(store_keys)
return _in |
def gen_params(raw_params):
u"""
Generator that yields tuples of (name, default_value) for each parameter in the list
If no default is given, then it is default_value is None (not Leaf(token.NAME, 'None'))
"""
assert raw_params[0].type == token.STAR and len(raw_params) > 2
curr_idx = 2 # the first place a keyword-only parameter name can be is index 2
max_idx = len(raw_params)
while curr_idx < max_idx:
curr_item = raw_params[curr_idx]
prev_item = curr_item.prev_sibling
if curr_item.type != token.NAME:
curr_idx += 1
continue
if prev_item is not None and prev_item.type == token.DOUBLESTAR:
break
name = curr_item.value
nxt = curr_item.next_sibling
if nxt is not None and nxt.type == token.EQUAL:
default_value = nxt.next_sibling
curr_idx += 2
else:
default_value = None
yield (name, default_value)
curr_idx += 1 | def function[gen_params, parameter[raw_params]]:
constant[
Generator that yields tuples of (name, default_value) for each parameter in the list
If no default is given, then it is default_value is None (not Leaf(token.NAME, 'None'))
]
assert[<ast.BoolOp object at 0x7da18f8132b0>]
variable[curr_idx] assign[=] constant[2]
variable[max_idx] assign[=] call[name[len], parameter[name[raw_params]]]
while compare[name[curr_idx] less[<] name[max_idx]] begin[:]
variable[curr_item] assign[=] call[name[raw_params]][name[curr_idx]]
variable[prev_item] assign[=] name[curr_item].prev_sibling
if compare[name[curr_item].type not_equal[!=] name[token].NAME] begin[:]
<ast.AugAssign object at 0x7da20c6a9810>
continue
if <ast.BoolOp object at 0x7da20c6a9000> begin[:]
break
variable[name] assign[=] name[curr_item].value
variable[nxt] assign[=] name[curr_item].next_sibling
if <ast.BoolOp object at 0x7da20c6c7ca0> begin[:]
variable[default_value] assign[=] name[nxt].next_sibling
<ast.AugAssign object at 0x7da20c6c55a0>
<ast.Yield object at 0x7da20c6c5de0>
<ast.AugAssign object at 0x7da20c6c4c10> | keyword[def] identifier[gen_params] ( identifier[raw_params] ):
literal[string]
keyword[assert] identifier[raw_params] [ literal[int] ]. identifier[type] == identifier[token] . identifier[STAR] keyword[and] identifier[len] ( identifier[raw_params] )> literal[int]
identifier[curr_idx] = literal[int]
identifier[max_idx] = identifier[len] ( identifier[raw_params] )
keyword[while] identifier[curr_idx] < identifier[max_idx] :
identifier[curr_item] = identifier[raw_params] [ identifier[curr_idx] ]
identifier[prev_item] = identifier[curr_item] . identifier[prev_sibling]
keyword[if] identifier[curr_item] . identifier[type] != identifier[token] . identifier[NAME] :
identifier[curr_idx] += literal[int]
keyword[continue]
keyword[if] identifier[prev_item] keyword[is] keyword[not] keyword[None] keyword[and] identifier[prev_item] . identifier[type] == identifier[token] . identifier[DOUBLESTAR] :
keyword[break]
identifier[name] = identifier[curr_item] . identifier[value]
identifier[nxt] = identifier[curr_item] . identifier[next_sibling]
keyword[if] identifier[nxt] keyword[is] keyword[not] keyword[None] keyword[and] identifier[nxt] . identifier[type] == identifier[token] . identifier[EQUAL] :
identifier[default_value] = identifier[nxt] . identifier[next_sibling]
identifier[curr_idx] += literal[int]
keyword[else] :
identifier[default_value] = keyword[None]
keyword[yield] ( identifier[name] , identifier[default_value] )
identifier[curr_idx] += literal[int] | def gen_params(raw_params):
u"""
Generator that yields tuples of (name, default_value) for each parameter in the list
If no default is given, then it is default_value is None (not Leaf(token.NAME, 'None'))
"""
assert raw_params[0].type == token.STAR and len(raw_params) > 2
curr_idx = 2 # the first place a keyword-only parameter name can be is index 2
max_idx = len(raw_params)
while curr_idx < max_idx:
curr_item = raw_params[curr_idx]
prev_item = curr_item.prev_sibling
if curr_item.type != token.NAME:
curr_idx += 1
continue # depends on [control=['if'], data=[]]
if prev_item is not None and prev_item.type == token.DOUBLESTAR:
break # depends on [control=['if'], data=[]]
name = curr_item.value
nxt = curr_item.next_sibling
if nxt is not None and nxt.type == token.EQUAL:
default_value = nxt.next_sibling
curr_idx += 2 # depends on [control=['if'], data=[]]
else:
default_value = None
yield (name, default_value)
curr_idx += 1 # depends on [control=['while'], data=['curr_idx']] |
def tauchen(rho, sigma_u, m=3, n=7):
r"""
Computes a Markov chain associated with a discretized version of
the linear Gaussian AR(1) process
.. math::
y_{t+1} = \rho y_t + u_{t+1}
using Tauchen's method. Here :math:`{u_t}` is an i.i.d. Gaussian process
with zero mean.
Parameters
----------
rho : scalar(float)
The autocorrelation coefficient
sigma_u : scalar(float)
The standard deviation of the random process
m : scalar(int), optional(default=3)
The number of standard deviations to approximate out to
n : scalar(int), optional(default=7)
The number of states to use in the approximation
Returns
-------
mc : MarkovChain
An instance of the MarkovChain class that stores the transition
matrix and state values returned by the discretization method
"""
# standard deviation of y_t
std_y = np.sqrt(sigma_u**2 / (1 - rho**2))
# top of discrete state space
x_max = m * std_y
# bottom of discrete state space
x_min = -x_max
# discretized state space
x = np.linspace(x_min, x_max, n)
step = (x_max - x_min) / (n - 1)
half_step = 0.5 * step
P = np.empty((n, n))
_fill_tauchen(x, P, n, rho, sigma_u, half_step)
mc = MarkovChain(P, state_values=x)
return mc | def function[tauchen, parameter[rho, sigma_u, m, n]]:
constant[
Computes a Markov chain associated with a discretized version of
the linear Gaussian AR(1) process
.. math::
y_{t+1} = \rho y_t + u_{t+1}
using Tauchen's method. Here :math:`{u_t}` is an i.i.d. Gaussian process
with zero mean.
Parameters
----------
rho : scalar(float)
The autocorrelation coefficient
sigma_u : scalar(float)
The standard deviation of the random process
m : scalar(int), optional(default=3)
The number of standard deviations to approximate out to
n : scalar(int), optional(default=7)
The number of states to use in the approximation
Returns
-------
mc : MarkovChain
An instance of the MarkovChain class that stores the transition
matrix and state values returned by the discretization method
]
variable[std_y] assign[=] call[name[np].sqrt, parameter[binary_operation[binary_operation[name[sigma_u] ** constant[2]] / binary_operation[constant[1] - binary_operation[name[rho] ** constant[2]]]]]]
variable[x_max] assign[=] binary_operation[name[m] * name[std_y]]
variable[x_min] assign[=] <ast.UnaryOp object at 0x7da1b21d6290>
variable[x] assign[=] call[name[np].linspace, parameter[name[x_min], name[x_max], name[n]]]
variable[step] assign[=] binary_operation[binary_operation[name[x_max] - name[x_min]] / binary_operation[name[n] - constant[1]]]
variable[half_step] assign[=] binary_operation[constant[0.5] * name[step]]
variable[P] assign[=] call[name[np].empty, parameter[tuple[[<ast.Name object at 0x7da1b21d5b40>, <ast.Name object at 0x7da1b21d6200>]]]]
call[name[_fill_tauchen], parameter[name[x], name[P], name[n], name[rho], name[sigma_u], name[half_step]]]
variable[mc] assign[=] call[name[MarkovChain], parameter[name[P]]]
return[name[mc]] | keyword[def] identifier[tauchen] ( identifier[rho] , identifier[sigma_u] , identifier[m] = literal[int] , identifier[n] = literal[int] ):
literal[string]
identifier[std_y] = identifier[np] . identifier[sqrt] ( identifier[sigma_u] ** literal[int] /( literal[int] - identifier[rho] ** literal[int] ))
identifier[x_max] = identifier[m] * identifier[std_y]
identifier[x_min] =- identifier[x_max]
identifier[x] = identifier[np] . identifier[linspace] ( identifier[x_min] , identifier[x_max] , identifier[n] )
identifier[step] =( identifier[x_max] - identifier[x_min] )/( identifier[n] - literal[int] )
identifier[half_step] = literal[int] * identifier[step]
identifier[P] = identifier[np] . identifier[empty] (( identifier[n] , identifier[n] ))
identifier[_fill_tauchen] ( identifier[x] , identifier[P] , identifier[n] , identifier[rho] , identifier[sigma_u] , identifier[half_step] )
identifier[mc] = identifier[MarkovChain] ( identifier[P] , identifier[state_values] = identifier[x] )
keyword[return] identifier[mc] | def tauchen(rho, sigma_u, m=3, n=7):
"""
Computes a Markov chain associated with a discretized version of
the linear Gaussian AR(1) process
.. math::
y_{t+1} = \\rho y_t + u_{t+1}
using Tauchen's method. Here :math:`{u_t}` is an i.i.d. Gaussian process
with zero mean.
Parameters
----------
rho : scalar(float)
The autocorrelation coefficient
sigma_u : scalar(float)
The standard deviation of the random process
m : scalar(int), optional(default=3)
The number of standard deviations to approximate out to
n : scalar(int), optional(default=7)
The number of states to use in the approximation
Returns
-------
mc : MarkovChain
An instance of the MarkovChain class that stores the transition
matrix and state values returned by the discretization method
"""
# standard deviation of y_t
std_y = np.sqrt(sigma_u ** 2 / (1 - rho ** 2))
# top of discrete state space
x_max = m * std_y
# bottom of discrete state space
x_min = -x_max
# discretized state space
x = np.linspace(x_min, x_max, n)
step = (x_max - x_min) / (n - 1)
half_step = 0.5 * step
P = np.empty((n, n))
_fill_tauchen(x, P, n, rho, sigma_u, half_step)
mc = MarkovChain(P, state_values=x)
return mc |
def extractall(self, directory, auto_create_dir=False, patool_path=None):
'''
:param directory: directory to extract to
:param auto_create_dir: auto create directory
:param patool_path: the path to the patool backend
'''
log.debug('extracting %s into %s (backend=%s)', self.filename, directory, self.backend)
is_zipfile = zipfile.is_zipfile(self.filename)
directory = _fullpath(directory)
if not os.path.exists(self.filename):
raise ValueError(
'archive file does not exist:' + str(self.filename))
if not os.path.exists(directory):
if auto_create_dir:
os.makedirs(directory)
else:
raise ValueError('directory does not exist:' + str(directory))
if self.backend == 'auto':
if is_zipfile:
self.extractall_zipfile(directory)
else:
self.extractall_patool(directory, patool_path)
if self.backend == 'zipfile':
if not is_zipfile:
raise ValueError('file is not zip file:' + str(self.filename))
self.extractall_zipfile(directory)
if self.backend == 'patool':
self.extractall_patool(directory, patool_path) | def function[extractall, parameter[self, directory, auto_create_dir, patool_path]]:
constant[
:param directory: directory to extract to
:param auto_create_dir: auto create directory
:param patool_path: the path to the patool backend
]
call[name[log].debug, parameter[constant[extracting %s into %s (backend=%s)], name[self].filename, name[directory], name[self].backend]]
variable[is_zipfile] assign[=] call[name[zipfile].is_zipfile, parameter[name[self].filename]]
variable[directory] assign[=] call[name[_fullpath], parameter[name[directory]]]
if <ast.UnaryOp object at 0x7da20c76c640> begin[:]
<ast.Raise object at 0x7da20c76f160>
if <ast.UnaryOp object at 0x7da20c76eaa0> begin[:]
if name[auto_create_dir] begin[:]
call[name[os].makedirs, parameter[name[directory]]]
if compare[name[self].backend equal[==] constant[auto]] begin[:]
if name[is_zipfile] begin[:]
call[name[self].extractall_zipfile, parameter[name[directory]]]
if compare[name[self].backend equal[==] constant[zipfile]] begin[:]
if <ast.UnaryOp object at 0x7da18eb57b80> begin[:]
<ast.Raise object at 0x7da18eb55a80>
call[name[self].extractall_zipfile, parameter[name[directory]]]
if compare[name[self].backend equal[==] constant[patool]] begin[:]
call[name[self].extractall_patool, parameter[name[directory], name[patool_path]]] | keyword[def] identifier[extractall] ( identifier[self] , identifier[directory] , identifier[auto_create_dir] = keyword[False] , identifier[patool_path] = keyword[None] ):
literal[string]
identifier[log] . identifier[debug] ( literal[string] , identifier[self] . identifier[filename] , identifier[directory] , identifier[self] . identifier[backend] )
identifier[is_zipfile] = identifier[zipfile] . identifier[is_zipfile] ( identifier[self] . identifier[filename] )
identifier[directory] = identifier[_fullpath] ( identifier[directory] )
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[self] . identifier[filename] ):
keyword[raise] identifier[ValueError] (
literal[string] + identifier[str] ( identifier[self] . identifier[filename] ))
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[directory] ):
keyword[if] identifier[auto_create_dir] :
identifier[os] . identifier[makedirs] ( identifier[directory] )
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] + identifier[str] ( identifier[directory] ))
keyword[if] identifier[self] . identifier[backend] == literal[string] :
keyword[if] identifier[is_zipfile] :
identifier[self] . identifier[extractall_zipfile] ( identifier[directory] )
keyword[else] :
identifier[self] . identifier[extractall_patool] ( identifier[directory] , identifier[patool_path] )
keyword[if] identifier[self] . identifier[backend] == literal[string] :
keyword[if] keyword[not] identifier[is_zipfile] :
keyword[raise] identifier[ValueError] ( literal[string] + identifier[str] ( identifier[self] . identifier[filename] ))
identifier[self] . identifier[extractall_zipfile] ( identifier[directory] )
keyword[if] identifier[self] . identifier[backend] == literal[string] :
identifier[self] . identifier[extractall_patool] ( identifier[directory] , identifier[patool_path] ) | def extractall(self, directory, auto_create_dir=False, patool_path=None):
"""
:param directory: directory to extract to
:param auto_create_dir: auto create directory
:param patool_path: the path to the patool backend
"""
log.debug('extracting %s into %s (backend=%s)', self.filename, directory, self.backend)
is_zipfile = zipfile.is_zipfile(self.filename)
directory = _fullpath(directory)
if not os.path.exists(self.filename):
raise ValueError('archive file does not exist:' + str(self.filename)) # depends on [control=['if'], data=[]]
if not os.path.exists(directory):
if auto_create_dir:
os.makedirs(directory) # depends on [control=['if'], data=[]]
else:
raise ValueError('directory does not exist:' + str(directory)) # depends on [control=['if'], data=[]]
if self.backend == 'auto':
if is_zipfile:
self.extractall_zipfile(directory) # depends on [control=['if'], data=[]]
else:
self.extractall_patool(directory, patool_path) # depends on [control=['if'], data=[]]
if self.backend == 'zipfile':
if not is_zipfile:
raise ValueError('file is not zip file:' + str(self.filename)) # depends on [control=['if'], data=[]]
self.extractall_zipfile(directory) # depends on [control=['if'], data=[]]
if self.backend == 'patool':
self.extractall_patool(directory, patool_path) # depends on [control=['if'], data=[]] |
def download(dataset_label=None, destination_dir=None, dry_run=False):
"""Download sample data by data label. Warning: function with side effect!
Labels can be listed by sample_data.data_urls.keys(). Returns downloaded files.
:param dataset_label: label of data. If it is set to None, all data are downloaded
:param destination_dir: output dir for data
:param dry_run: runs function without downloading anything
"""
if destination_dir is None:
destination_dir = op.join(dataset_path(get_root=True), "medical", "orig")
destination_dir = op.expanduser(destination_dir)
logger.info("destination dir: {}".format(destination_dir))
if dataset_label is None:
dataset_label = data_urls.keys()
if type(dataset_label) == str:
dataset_label = [dataset_label]
dataset_label = _expand_dataset_packages(dataset_label)
for label in dataset_label:
# make all data:url have length 3
data_url, url, expected_hash, hash_path, relative_download_dir = get_dataset_meta(
label
)
if relative_download_dir is None:
label_destination_dir = destination_dir
else:
label_destination_dir = op.join(destination_dir, relative_download_dir)
if not op.exists(label_destination_dir):
logger.debug("creating directory {}".format(label_destination_dir))
os.makedirs(label_destination_dir)
if hash_path is None:
hash_path = label
path_to_hash = os.path.join(label_destination_dir, hash_path)
try:
computed_hash = checksum(path_to_hash)
except Exception as e:
# there is probably no checksumdir module
logger.warning(e)
logger.warning("problem with sample_data.checksum()")
computed_hash = None
logger.info("dataset: '" + label + "'")
logger.info("path to hash: {}".format(path_to_hash))
logger.info("expected hash: '" + str(expected_hash) + "'")
logger.info("computed hash: '" + str(computed_hash) + "'")
if (computed_hash is not None) and (expected_hash == computed_hash):
logger.info("match ok - no download needed")
else:
logger.info("downloading")
if not dry_run:
downzip(url, destination=label_destination_dir)
logger.info("finished")
downloaded_hash = checksum(
os.path.join(label_destination_dir, hash_path)
)
logger.info("downloaded hash: '" + str(downloaded_hash) + "'")
if downloaded_hash != expected_hash:
logger.warning(
"downloaded hash is different from expected hash\n"
+ "expected hash: '"
+ str(expected_hash)
+ "'\n"
+ "downloaded hash: '"
+ str(downloaded_hash)
+ "'\n"
)
else:
logger.debug("dry run") | def function[download, parameter[dataset_label, destination_dir, dry_run]]:
constant[Download sample data by data label. Warning: function with side effect!
Labels can be listed by sample_data.data_urls.keys(). Returns downloaded files.
:param dataset_label: label of data. If it is set to None, all data are downloaded
:param destination_dir: output dir for data
:param dry_run: runs function without downloading anything
]
if compare[name[destination_dir] is constant[None]] begin[:]
variable[destination_dir] assign[=] call[name[op].join, parameter[call[name[dataset_path], parameter[]], constant[medical], constant[orig]]]
variable[destination_dir] assign[=] call[name[op].expanduser, parameter[name[destination_dir]]]
call[name[logger].info, parameter[call[constant[destination dir: {}].format, parameter[name[destination_dir]]]]]
if compare[name[dataset_label] is constant[None]] begin[:]
variable[dataset_label] assign[=] call[name[data_urls].keys, parameter[]]
if compare[call[name[type], parameter[name[dataset_label]]] equal[==] name[str]] begin[:]
variable[dataset_label] assign[=] list[[<ast.Name object at 0x7da1b1877130>]]
variable[dataset_label] assign[=] call[name[_expand_dataset_packages], parameter[name[dataset_label]]]
for taget[name[label]] in starred[name[dataset_label]] begin[:]
<ast.Tuple object at 0x7da1b1876f80> assign[=] call[name[get_dataset_meta], parameter[name[label]]]
if compare[name[relative_download_dir] is constant[None]] begin[:]
variable[label_destination_dir] assign[=] name[destination_dir]
if <ast.UnaryOp object at 0x7da1b18bbac0> begin[:]
call[name[logger].debug, parameter[call[constant[creating directory {}].format, parameter[name[label_destination_dir]]]]]
call[name[os].makedirs, parameter[name[label_destination_dir]]]
if compare[name[hash_path] is constant[None]] begin[:]
variable[hash_path] assign[=] name[label]
variable[path_to_hash] assign[=] call[name[os].path.join, parameter[name[label_destination_dir], name[hash_path]]]
<ast.Try object at 0x7da1b18b9f90>
call[name[logger].info, parameter[binary_operation[binary_operation[constant[dataset: '] + name[label]] + constant[']]]]
call[name[logger].info, parameter[call[constant[path to hash: {}].format, parameter[name[path_to_hash]]]]]
call[name[logger].info, parameter[binary_operation[binary_operation[constant[expected hash: '] + call[name[str], parameter[name[expected_hash]]]] + constant[']]]]
call[name[logger].info, parameter[binary_operation[binary_operation[constant[computed hash: '] + call[name[str], parameter[name[computed_hash]]]] + constant[']]]]
if <ast.BoolOp object at 0x7da1b18babf0> begin[:]
call[name[logger].info, parameter[constant[match ok - no download needed]]] | keyword[def] identifier[download] ( identifier[dataset_label] = keyword[None] , identifier[destination_dir] = keyword[None] , identifier[dry_run] = keyword[False] ):
literal[string]
keyword[if] identifier[destination_dir] keyword[is] keyword[None] :
identifier[destination_dir] = identifier[op] . identifier[join] ( identifier[dataset_path] ( identifier[get_root] = keyword[True] ), literal[string] , literal[string] )
identifier[destination_dir] = identifier[op] . identifier[expanduser] ( identifier[destination_dir] )
identifier[logger] . identifier[info] ( literal[string] . identifier[format] ( identifier[destination_dir] ))
keyword[if] identifier[dataset_label] keyword[is] keyword[None] :
identifier[dataset_label] = identifier[data_urls] . identifier[keys] ()
keyword[if] identifier[type] ( identifier[dataset_label] )== identifier[str] :
identifier[dataset_label] =[ identifier[dataset_label] ]
identifier[dataset_label] = identifier[_expand_dataset_packages] ( identifier[dataset_label] )
keyword[for] identifier[label] keyword[in] identifier[dataset_label] :
identifier[data_url] , identifier[url] , identifier[expected_hash] , identifier[hash_path] , identifier[relative_download_dir] = identifier[get_dataset_meta] (
identifier[label]
)
keyword[if] identifier[relative_download_dir] keyword[is] keyword[None] :
identifier[label_destination_dir] = identifier[destination_dir]
keyword[else] :
identifier[label_destination_dir] = identifier[op] . identifier[join] ( identifier[destination_dir] , identifier[relative_download_dir] )
keyword[if] keyword[not] identifier[op] . identifier[exists] ( identifier[label_destination_dir] ):
identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[label_destination_dir] ))
identifier[os] . identifier[makedirs] ( identifier[label_destination_dir] )
keyword[if] identifier[hash_path] keyword[is] keyword[None] :
identifier[hash_path] = identifier[label]
identifier[path_to_hash] = identifier[os] . identifier[path] . identifier[join] ( identifier[label_destination_dir] , identifier[hash_path] )
keyword[try] :
identifier[computed_hash] = identifier[checksum] ( identifier[path_to_hash] )
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[logger] . identifier[warning] ( identifier[e] )
identifier[logger] . identifier[warning] ( literal[string] )
identifier[computed_hash] = keyword[None]
identifier[logger] . identifier[info] ( literal[string] + identifier[label] + literal[string] )
identifier[logger] . identifier[info] ( literal[string] . identifier[format] ( identifier[path_to_hash] ))
identifier[logger] . identifier[info] ( literal[string] + identifier[str] ( identifier[expected_hash] )+ literal[string] )
identifier[logger] . identifier[info] ( literal[string] + identifier[str] ( identifier[computed_hash] )+ literal[string] )
keyword[if] ( identifier[computed_hash] keyword[is] keyword[not] keyword[None] ) keyword[and] ( identifier[expected_hash] == identifier[computed_hash] ):
identifier[logger] . identifier[info] ( literal[string] )
keyword[else] :
identifier[logger] . identifier[info] ( literal[string] )
keyword[if] keyword[not] identifier[dry_run] :
identifier[downzip] ( identifier[url] , identifier[destination] = identifier[label_destination_dir] )
identifier[logger] . identifier[info] ( literal[string] )
identifier[downloaded_hash] = identifier[checksum] (
identifier[os] . identifier[path] . identifier[join] ( identifier[label_destination_dir] , identifier[hash_path] )
)
identifier[logger] . identifier[info] ( literal[string] + identifier[str] ( identifier[downloaded_hash] )+ literal[string] )
keyword[if] identifier[downloaded_hash] != identifier[expected_hash] :
identifier[logger] . identifier[warning] (
literal[string]
+ literal[string]
+ identifier[str] ( identifier[expected_hash] )
+ literal[string]
+ literal[string]
+ identifier[str] ( identifier[downloaded_hash] )
+ literal[string]
)
keyword[else] :
identifier[logger] . identifier[debug] ( literal[string] ) | def download(dataset_label=None, destination_dir=None, dry_run=False):
"""Download sample data by data label. Warning: function with side effect!
Labels can be listed by sample_data.data_urls.keys(). Returns downloaded files.
:param dataset_label: label of data. If it is set to None, all data are downloaded
:param destination_dir: output dir for data
:param dry_run: runs function without downloading anything
"""
if destination_dir is None:
destination_dir = op.join(dataset_path(get_root=True), 'medical', 'orig') # depends on [control=['if'], data=['destination_dir']]
destination_dir = op.expanduser(destination_dir)
logger.info('destination dir: {}'.format(destination_dir))
if dataset_label is None:
dataset_label = data_urls.keys() # depends on [control=['if'], data=['dataset_label']]
if type(dataset_label) == str:
dataset_label = [dataset_label] # depends on [control=['if'], data=[]]
dataset_label = _expand_dataset_packages(dataset_label)
for label in dataset_label:
# make all data:url have length 3
(data_url, url, expected_hash, hash_path, relative_download_dir) = get_dataset_meta(label)
if relative_download_dir is None:
label_destination_dir = destination_dir # depends on [control=['if'], data=[]]
else:
label_destination_dir = op.join(destination_dir, relative_download_dir)
if not op.exists(label_destination_dir):
logger.debug('creating directory {}'.format(label_destination_dir))
os.makedirs(label_destination_dir) # depends on [control=['if'], data=[]]
if hash_path is None:
hash_path = label # depends on [control=['if'], data=['hash_path']]
path_to_hash = os.path.join(label_destination_dir, hash_path)
try:
computed_hash = checksum(path_to_hash) # depends on [control=['try'], data=[]]
except Exception as e:
# there is probably no checksumdir module
logger.warning(e)
logger.warning('problem with sample_data.checksum()')
computed_hash = None # depends on [control=['except'], data=['e']]
logger.info("dataset: '" + label + "'")
logger.info('path to hash: {}'.format(path_to_hash))
logger.info("expected hash: '" + str(expected_hash) + "'")
logger.info("computed hash: '" + str(computed_hash) + "'")
if computed_hash is not None and expected_hash == computed_hash:
logger.info('match ok - no download needed') # depends on [control=['if'], data=[]]
else:
logger.info('downloading')
if not dry_run:
downzip(url, destination=label_destination_dir)
logger.info('finished')
downloaded_hash = checksum(os.path.join(label_destination_dir, hash_path))
logger.info("downloaded hash: '" + str(downloaded_hash) + "'")
if downloaded_hash != expected_hash:
logger.warning('downloaded hash is different from expected hash\n' + "expected hash: '" + str(expected_hash) + "'\n" + "downloaded hash: '" + str(downloaded_hash) + "'\n") # depends on [control=['if'], data=['downloaded_hash', 'expected_hash']] # depends on [control=['if'], data=[]]
else:
logger.debug('dry run') # depends on [control=['for'], data=['label']] |
def togpx(self):
"""Generate a GPX waypoint element subtree.
Returns:
etree.Element: GPX element
"""
element = create_elem(self.__class__._elem_name,
{'lat': str(self.latitude),
'lon': str(self.longitude)})
if self.name:
element.append(create_elem('name', text=self.name))
if self.description:
element.append(create_elem('desc', text=self.description))
if self.elevation:
element.append(create_elem('ele', text=str(self.elevation)))
if self.time:
element.append(create_elem('time', text=self.time.isoformat()))
return element | def function[togpx, parameter[self]]:
constant[Generate a GPX waypoint element subtree.
Returns:
etree.Element: GPX element
]
variable[element] assign[=] call[name[create_elem], parameter[name[self].__class__._elem_name, dictionary[[<ast.Constant object at 0x7da18fe93970>, <ast.Constant object at 0x7da18fe92f20>], [<ast.Call object at 0x7da18fe90040>, <ast.Call object at 0x7da18fe93b80>]]]]
if name[self].name begin[:]
call[name[element].append, parameter[call[name[create_elem], parameter[constant[name]]]]]
if name[self].description begin[:]
call[name[element].append, parameter[call[name[create_elem], parameter[constant[desc]]]]]
if name[self].elevation begin[:]
call[name[element].append, parameter[call[name[create_elem], parameter[constant[ele]]]]]
if name[self].time begin[:]
call[name[element].append, parameter[call[name[create_elem], parameter[constant[time]]]]]
return[name[element]] | keyword[def] identifier[togpx] ( identifier[self] ):
literal[string]
identifier[element] = identifier[create_elem] ( identifier[self] . identifier[__class__] . identifier[_elem_name] ,
{ literal[string] : identifier[str] ( identifier[self] . identifier[latitude] ),
literal[string] : identifier[str] ( identifier[self] . identifier[longitude] )})
keyword[if] identifier[self] . identifier[name] :
identifier[element] . identifier[append] ( identifier[create_elem] ( literal[string] , identifier[text] = identifier[self] . identifier[name] ))
keyword[if] identifier[self] . identifier[description] :
identifier[element] . identifier[append] ( identifier[create_elem] ( literal[string] , identifier[text] = identifier[self] . identifier[description] ))
keyword[if] identifier[self] . identifier[elevation] :
identifier[element] . identifier[append] ( identifier[create_elem] ( literal[string] , identifier[text] = identifier[str] ( identifier[self] . identifier[elevation] )))
keyword[if] identifier[self] . identifier[time] :
identifier[element] . identifier[append] ( identifier[create_elem] ( literal[string] , identifier[text] = identifier[self] . identifier[time] . identifier[isoformat] ()))
keyword[return] identifier[element] | def togpx(self):
"""Generate a GPX waypoint element subtree.
Returns:
etree.Element: GPX element
"""
element = create_elem(self.__class__._elem_name, {'lat': str(self.latitude), 'lon': str(self.longitude)})
if self.name:
element.append(create_elem('name', text=self.name)) # depends on [control=['if'], data=[]]
if self.description:
element.append(create_elem('desc', text=self.description)) # depends on [control=['if'], data=[]]
if self.elevation:
element.append(create_elem('ele', text=str(self.elevation))) # depends on [control=['if'], data=[]]
if self.time:
element.append(create_elem('time', text=self.time.isoformat())) # depends on [control=['if'], data=[]]
return element |
def get_vnetwork_dvpgs_output_instance_id(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vnetwork_dvpgs = ET.Element("get_vnetwork_dvpgs")
config = get_vnetwork_dvpgs
output = ET.SubElement(get_vnetwork_dvpgs, "output")
instance_id = ET.SubElement(output, "instance-id")
instance_id.text = kwargs.pop('instance_id')
callback = kwargs.pop('callback', self._callback)
return callback(config) | def function[get_vnetwork_dvpgs_output_instance_id, parameter[self]]:
constant[Auto Generated Code
]
variable[config] assign[=] call[name[ET].Element, parameter[constant[config]]]
variable[get_vnetwork_dvpgs] assign[=] call[name[ET].Element, parameter[constant[get_vnetwork_dvpgs]]]
variable[config] assign[=] name[get_vnetwork_dvpgs]
variable[output] assign[=] call[name[ET].SubElement, parameter[name[get_vnetwork_dvpgs], constant[output]]]
variable[instance_id] assign[=] call[name[ET].SubElement, parameter[name[output], constant[instance-id]]]
name[instance_id].text assign[=] call[name[kwargs].pop, parameter[constant[instance_id]]]
variable[callback] assign[=] call[name[kwargs].pop, parameter[constant[callback], name[self]._callback]]
return[call[name[callback], parameter[name[config]]]] | keyword[def] identifier[get_vnetwork_dvpgs_output_instance_id] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[config] = identifier[ET] . identifier[Element] ( literal[string] )
identifier[get_vnetwork_dvpgs] = identifier[ET] . identifier[Element] ( literal[string] )
identifier[config] = identifier[get_vnetwork_dvpgs]
identifier[output] = identifier[ET] . identifier[SubElement] ( identifier[get_vnetwork_dvpgs] , literal[string] )
identifier[instance_id] = identifier[ET] . identifier[SubElement] ( identifier[output] , literal[string] )
identifier[instance_id] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] )
identifier[callback] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[self] . identifier[_callback] )
keyword[return] identifier[callback] ( identifier[config] ) | def get_vnetwork_dvpgs_output_instance_id(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element('config')
get_vnetwork_dvpgs = ET.Element('get_vnetwork_dvpgs')
config = get_vnetwork_dvpgs
output = ET.SubElement(get_vnetwork_dvpgs, 'output')
instance_id = ET.SubElement(output, 'instance-id')
instance_id.text = kwargs.pop('instance_id')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def get_pore_surface_parameters(surface_area):
""" Get input parameters for pore surface binary.
Get input parameters for pore_surface binary from zeo++ output,
while keeping data provenance.
"""
PoreSurfaceParameters = DataFactory('phtools.surface')
d = {
'accessible_surface_area': surface_area.get_dict()['ASA_A^2'],
'target_volume': 40e3,
'sampling_method': 'random',
}
return PoreSurfaceParameters(dict=d) | def function[get_pore_surface_parameters, parameter[surface_area]]:
constant[ Get input parameters for pore surface binary.
Get input parameters for pore_surface binary from zeo++ output,
while keeping data provenance.
]
variable[PoreSurfaceParameters] assign[=] call[name[DataFactory], parameter[constant[phtools.surface]]]
variable[d] assign[=] dictionary[[<ast.Constant object at 0x7da18f721ed0>, <ast.Constant object at 0x7da18f723340>, <ast.Constant object at 0x7da18f7230d0>], [<ast.Subscript object at 0x7da18f723ca0>, <ast.Constant object at 0x7da18f7236d0>, <ast.Constant object at 0x7da18f721e70>]]
return[call[name[PoreSurfaceParameters], parameter[]]] | keyword[def] identifier[get_pore_surface_parameters] ( identifier[surface_area] ):
literal[string]
identifier[PoreSurfaceParameters] = identifier[DataFactory] ( literal[string] )
identifier[d] ={
literal[string] : identifier[surface_area] . identifier[get_dict] ()[ literal[string] ],
literal[string] : literal[int] ,
literal[string] : literal[string] ,
}
keyword[return] identifier[PoreSurfaceParameters] ( identifier[dict] = identifier[d] ) | def get_pore_surface_parameters(surface_area):
""" Get input parameters for pore surface binary.
Get input parameters for pore_surface binary from zeo++ output,
while keeping data provenance.
"""
PoreSurfaceParameters = DataFactory('phtools.surface')
d = {'accessible_surface_area': surface_area.get_dict()['ASA_A^2'], 'target_volume': 40000.0, 'sampling_method': 'random'}
return PoreSurfaceParameters(dict=d) |
def Bidirectional(l2r, r2l):
"""Stitch two RNN models into a bidirectional layer."""
nO = l2r.nO
def birnn_fwd(Xs, drop=0.0):
l2r_Zs, bp_l2r_Zs = l2r.begin_update(Xs, drop=drop)
r2l_Zs, bp_r2l_Zs = r2l.begin_update(
[l2r.ops.xp.ascontiguousarray(X[::-1]) for X in Xs]
)
def birnn_bwd(dZs, sgd=None):
d_l2r_Zs = []
d_r2l_Zs = []
for dZ in dZs:
l2r_fwd = dZ[:, :nO]
r2l_fwd = dZ[:, nO:]
d_l2r_Zs.append(l2r.ops.xp.ascontiguousarray(l2r_fwd))
d_r2l_Zs.append(l2r.ops.xp.ascontiguousarray(r2l_fwd[::-1]))
dXs_l2r = bp_l2r_Zs(d_l2r_Zs, sgd=sgd)
dXs_r2l = bp_r2l_Zs(d_r2l_Zs, sgd=sgd)
dXs = [dXf + dXb[::-1] for dXf, dXb in zip(dXs_l2r, dXs_r2l)]
return dXs
Zs = [l2r.ops.xp.hstack((Zf, Zb[::-1])) for Zf, Zb in zip(l2r_Zs, r2l_Zs)]
return Zs, birnn_bwd
return wrap(birnn_fwd, l2r, r2l) | def function[Bidirectional, parameter[l2r, r2l]]:
constant[Stitch two RNN models into a bidirectional layer.]
variable[nO] assign[=] name[l2r].nO
def function[birnn_fwd, parameter[Xs, drop]]:
<ast.Tuple object at 0x7da20c7cb400> assign[=] call[name[l2r].begin_update, parameter[name[Xs]]]
<ast.Tuple object at 0x7da20c7cbbe0> assign[=] call[name[r2l].begin_update, parameter[<ast.ListComp object at 0x7da20c7cae90>]]
def function[birnn_bwd, parameter[dZs, sgd]]:
variable[d_l2r_Zs] assign[=] list[[]]
variable[d_r2l_Zs] assign[=] list[[]]
for taget[name[dZ]] in starred[name[dZs]] begin[:]
variable[l2r_fwd] assign[=] call[name[dZ]][tuple[[<ast.Slice object at 0x7da20c7c9870>, <ast.Slice object at 0x7da20c7cbc40>]]]
variable[r2l_fwd] assign[=] call[name[dZ]][tuple[[<ast.Slice object at 0x7da20c7cb160>, <ast.Slice object at 0x7da20c7cbc10>]]]
call[name[d_l2r_Zs].append, parameter[call[name[l2r].ops.xp.ascontiguousarray, parameter[name[l2r_fwd]]]]]
call[name[d_r2l_Zs].append, parameter[call[name[l2r].ops.xp.ascontiguousarray, parameter[call[name[r2l_fwd]][<ast.Slice object at 0x7da20c7cb310>]]]]]
variable[dXs_l2r] assign[=] call[name[bp_l2r_Zs], parameter[name[d_l2r_Zs]]]
variable[dXs_r2l] assign[=] call[name[bp_r2l_Zs], parameter[name[d_r2l_Zs]]]
variable[dXs] assign[=] <ast.ListComp object at 0x7da18f7224a0>
return[name[dXs]]
variable[Zs] assign[=] <ast.ListComp object at 0x7da18f720370>
return[tuple[[<ast.Name object at 0x7da18f720430>, <ast.Name object at 0x7da18f7221a0>]]]
return[call[name[wrap], parameter[name[birnn_fwd], name[l2r], name[r2l]]]] | keyword[def] identifier[Bidirectional] ( identifier[l2r] , identifier[r2l] ):
literal[string]
identifier[nO] = identifier[l2r] . identifier[nO]
keyword[def] identifier[birnn_fwd] ( identifier[Xs] , identifier[drop] = literal[int] ):
identifier[l2r_Zs] , identifier[bp_l2r_Zs] = identifier[l2r] . identifier[begin_update] ( identifier[Xs] , identifier[drop] = identifier[drop] )
identifier[r2l_Zs] , identifier[bp_r2l_Zs] = identifier[r2l] . identifier[begin_update] (
[ identifier[l2r] . identifier[ops] . identifier[xp] . identifier[ascontiguousarray] ( identifier[X] [::- literal[int] ]) keyword[for] identifier[X] keyword[in] identifier[Xs] ]
)
keyword[def] identifier[birnn_bwd] ( identifier[dZs] , identifier[sgd] = keyword[None] ):
identifier[d_l2r_Zs] =[]
identifier[d_r2l_Zs] =[]
keyword[for] identifier[dZ] keyword[in] identifier[dZs] :
identifier[l2r_fwd] = identifier[dZ] [:,: identifier[nO] ]
identifier[r2l_fwd] = identifier[dZ] [:, identifier[nO] :]
identifier[d_l2r_Zs] . identifier[append] ( identifier[l2r] . identifier[ops] . identifier[xp] . identifier[ascontiguousarray] ( identifier[l2r_fwd] ))
identifier[d_r2l_Zs] . identifier[append] ( identifier[l2r] . identifier[ops] . identifier[xp] . identifier[ascontiguousarray] ( identifier[r2l_fwd] [::- literal[int] ]))
identifier[dXs_l2r] = identifier[bp_l2r_Zs] ( identifier[d_l2r_Zs] , identifier[sgd] = identifier[sgd] )
identifier[dXs_r2l] = identifier[bp_r2l_Zs] ( identifier[d_r2l_Zs] , identifier[sgd] = identifier[sgd] )
identifier[dXs] =[ identifier[dXf] + identifier[dXb] [::- literal[int] ] keyword[for] identifier[dXf] , identifier[dXb] keyword[in] identifier[zip] ( identifier[dXs_l2r] , identifier[dXs_r2l] )]
keyword[return] identifier[dXs]
identifier[Zs] =[ identifier[l2r] . identifier[ops] . identifier[xp] . identifier[hstack] (( identifier[Zf] , identifier[Zb] [::- literal[int] ])) keyword[for] identifier[Zf] , identifier[Zb] keyword[in] identifier[zip] ( identifier[l2r_Zs] , identifier[r2l_Zs] )]
keyword[return] identifier[Zs] , identifier[birnn_bwd]
keyword[return] identifier[wrap] ( identifier[birnn_fwd] , identifier[l2r] , identifier[r2l] ) | def Bidirectional(l2r, r2l):
"""Stitch two RNN models into a bidirectional layer."""
nO = l2r.nO
def birnn_fwd(Xs, drop=0.0):
(l2r_Zs, bp_l2r_Zs) = l2r.begin_update(Xs, drop=drop)
(r2l_Zs, bp_r2l_Zs) = r2l.begin_update([l2r.ops.xp.ascontiguousarray(X[::-1]) for X in Xs])
def birnn_bwd(dZs, sgd=None):
d_l2r_Zs = []
d_r2l_Zs = []
for dZ in dZs:
l2r_fwd = dZ[:, :nO]
r2l_fwd = dZ[:, nO:]
d_l2r_Zs.append(l2r.ops.xp.ascontiguousarray(l2r_fwd))
d_r2l_Zs.append(l2r.ops.xp.ascontiguousarray(r2l_fwd[::-1])) # depends on [control=['for'], data=['dZ']]
dXs_l2r = bp_l2r_Zs(d_l2r_Zs, sgd=sgd)
dXs_r2l = bp_r2l_Zs(d_r2l_Zs, sgd=sgd)
dXs = [dXf + dXb[::-1] for (dXf, dXb) in zip(dXs_l2r, dXs_r2l)]
return dXs
Zs = [l2r.ops.xp.hstack((Zf, Zb[::-1])) for (Zf, Zb) in zip(l2r_Zs, r2l_Zs)]
return (Zs, birnn_bwd)
return wrap(birnn_fwd, l2r, r2l) |
def plot(self, atDataset,
errorbars=False,
grid=False):
""" use matplotlib methods for plotting
Parameters
----------
atDataset : allantools.Dataset()
a dataset with computed data
errorbars : boolean
Plot errorbars. Defaults to False
grid : boolean
Plot grid. Defaults to False
"""
if errorbars:
self.ax.errorbar(atDataset.out["taus"],
atDataset.out["stat"],
yerr=atDataset.out["stat_err"],
)
else:
self.ax.plot(atDataset.out["taus"],
atDataset.out["stat"],
)
self.ax.set_xlabel("Tau")
self.ax.set_ylabel(atDataset.out["stat_id"])
self.ax.grid(grid, which="minor", ls="-", color='0.65')
self.ax.grid(grid, which="major", ls="-", color='0.25') | def function[plot, parameter[self, atDataset, errorbars, grid]]:
constant[ use matplotlib methods for plotting
Parameters
----------
atDataset : allantools.Dataset()
a dataset with computed data
errorbars : boolean
Plot errorbars. Defaults to False
grid : boolean
Plot grid. Defaults to False
]
if name[errorbars] begin[:]
call[name[self].ax.errorbar, parameter[call[name[atDataset].out][constant[taus]], call[name[atDataset].out][constant[stat]]]]
call[name[self].ax.set_xlabel, parameter[constant[Tau]]]
call[name[self].ax.set_ylabel, parameter[call[name[atDataset].out][constant[stat_id]]]]
call[name[self].ax.grid, parameter[name[grid]]]
call[name[self].ax.grid, parameter[name[grid]]] | keyword[def] identifier[plot] ( identifier[self] , identifier[atDataset] ,
identifier[errorbars] = keyword[False] ,
identifier[grid] = keyword[False] ):
literal[string]
keyword[if] identifier[errorbars] :
identifier[self] . identifier[ax] . identifier[errorbar] ( identifier[atDataset] . identifier[out] [ literal[string] ],
identifier[atDataset] . identifier[out] [ literal[string] ],
identifier[yerr] = identifier[atDataset] . identifier[out] [ literal[string] ],
)
keyword[else] :
identifier[self] . identifier[ax] . identifier[plot] ( identifier[atDataset] . identifier[out] [ literal[string] ],
identifier[atDataset] . identifier[out] [ literal[string] ],
)
identifier[self] . identifier[ax] . identifier[set_xlabel] ( literal[string] )
identifier[self] . identifier[ax] . identifier[set_ylabel] ( identifier[atDataset] . identifier[out] [ literal[string] ])
identifier[self] . identifier[ax] . identifier[grid] ( identifier[grid] , identifier[which] = literal[string] , identifier[ls] = literal[string] , identifier[color] = literal[string] )
identifier[self] . identifier[ax] . identifier[grid] ( identifier[grid] , identifier[which] = literal[string] , identifier[ls] = literal[string] , identifier[color] = literal[string] ) | def plot(self, atDataset, errorbars=False, grid=False):
""" use matplotlib methods for plotting
Parameters
----------
atDataset : allantools.Dataset()
a dataset with computed data
errorbars : boolean
Plot errorbars. Defaults to False
grid : boolean
Plot grid. Defaults to False
"""
if errorbars:
self.ax.errorbar(atDataset.out['taus'], atDataset.out['stat'], yerr=atDataset.out['stat_err']) # depends on [control=['if'], data=[]]
else:
self.ax.plot(atDataset.out['taus'], atDataset.out['stat'])
self.ax.set_xlabel('Tau')
self.ax.set_ylabel(atDataset.out['stat_id'])
self.ax.grid(grid, which='minor', ls='-', color='0.65')
self.ax.grid(grid, which='major', ls='-', color='0.25') |
def _handle_weekly_repeat_out(self):
"""
Handles repeating an event weekly (or biweekly) if the current
year and month are outside of its start year and month.
It takes care of cases 3 and 4 in _handle_weekly_repeat_in() comments.
"""
start_d = _first_weekday(
self.event.l_start_date.weekday(), date(self.year, self.month, 1)
)
self.day = start_d.day
self.count_first = True
if self.event.repeats('BIWEEKLY'):
self._biweekly_helper()
elif self.event.repeats('WEEKLY'):
# Note count_first=True b/c although the start date isn't this
# month, the event does begin repeating this month and start_date
# has not yet been counted.
# Also note we start from start_d.day and not
# event.l_start_date.day
self.repeat()
if self.event.is_chunk():
diff = self.event.start_end_diff
self.count = _chunk_fill_out_first_week(
self.year, self.month, self.count, self.event, diff
)
for i in xrange(diff):
# count the chunk days, then repeat them
self.day = start_d.day + i + 1
self.repeat() | def function[_handle_weekly_repeat_out, parameter[self]]:
constant[
Handles repeating an event weekly (or biweekly) if the current
year and month are outside of its start year and month.
It takes care of cases 3 and 4 in _handle_weekly_repeat_in() comments.
]
variable[start_d] assign[=] call[name[_first_weekday], parameter[call[name[self].event.l_start_date.weekday, parameter[]], call[name[date], parameter[name[self].year, name[self].month, constant[1]]]]]
name[self].day assign[=] name[start_d].day
name[self].count_first assign[=] constant[True]
if call[name[self].event.repeats, parameter[constant[BIWEEKLY]]] begin[:]
call[name[self]._biweekly_helper, parameter[]] | keyword[def] identifier[_handle_weekly_repeat_out] ( identifier[self] ):
literal[string]
identifier[start_d] = identifier[_first_weekday] (
identifier[self] . identifier[event] . identifier[l_start_date] . identifier[weekday] (), identifier[date] ( identifier[self] . identifier[year] , identifier[self] . identifier[month] , literal[int] )
)
identifier[self] . identifier[day] = identifier[start_d] . identifier[day]
identifier[self] . identifier[count_first] = keyword[True]
keyword[if] identifier[self] . identifier[event] . identifier[repeats] ( literal[string] ):
identifier[self] . identifier[_biweekly_helper] ()
keyword[elif] identifier[self] . identifier[event] . identifier[repeats] ( literal[string] ):
identifier[self] . identifier[repeat] ()
keyword[if] identifier[self] . identifier[event] . identifier[is_chunk] ():
identifier[diff] = identifier[self] . identifier[event] . identifier[start_end_diff]
identifier[self] . identifier[count] = identifier[_chunk_fill_out_first_week] (
identifier[self] . identifier[year] , identifier[self] . identifier[month] , identifier[self] . identifier[count] , identifier[self] . identifier[event] , identifier[diff]
)
keyword[for] identifier[i] keyword[in] identifier[xrange] ( identifier[diff] ):
identifier[self] . identifier[day] = identifier[start_d] . identifier[day] + identifier[i] + literal[int]
identifier[self] . identifier[repeat] () | def _handle_weekly_repeat_out(self):
"""
Handles repeating an event weekly (or biweekly) if the current
year and month are outside of its start year and month.
It takes care of cases 3 and 4 in _handle_weekly_repeat_in() comments.
"""
start_d = _first_weekday(self.event.l_start_date.weekday(), date(self.year, self.month, 1))
self.day = start_d.day
self.count_first = True
if self.event.repeats('BIWEEKLY'):
self._biweekly_helper() # depends on [control=['if'], data=[]]
elif self.event.repeats('WEEKLY'):
# Note count_first=True b/c although the start date isn't this
# month, the event does begin repeating this month and start_date
# has not yet been counted.
# Also note we start from start_d.day and not
# event.l_start_date.day
self.repeat()
if self.event.is_chunk():
diff = self.event.start_end_diff
self.count = _chunk_fill_out_first_week(self.year, self.month, self.count, self.event, diff)
for i in xrange(diff):
# count the chunk days, then repeat them
self.day = start_d.day + i + 1
self.repeat() # depends on [control=['for'], data=['i']] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] |
def show_graph(cn_topo, showintfs=False, showaddrs=False):
'''
Display the topology
'''
__do_draw(cn_topo, showintfs=showintfs, showaddrs=showaddrs)
pyp.show() | def function[show_graph, parameter[cn_topo, showintfs, showaddrs]]:
constant[
Display the topology
]
call[name[__do_draw], parameter[name[cn_topo]]]
call[name[pyp].show, parameter[]] | keyword[def] identifier[show_graph] ( identifier[cn_topo] , identifier[showintfs] = keyword[False] , identifier[showaddrs] = keyword[False] ):
literal[string]
identifier[__do_draw] ( identifier[cn_topo] , identifier[showintfs] = identifier[showintfs] , identifier[showaddrs] = identifier[showaddrs] )
identifier[pyp] . identifier[show] () | def show_graph(cn_topo, showintfs=False, showaddrs=False):
"""
Display the topology
"""
__do_draw(cn_topo, showintfs=showintfs, showaddrs=showaddrs)
pyp.show() |
def get_frames(tback, is_breakpoint):
"""Builds a list of ErrorFrame objects from a traceback"""
frames = []
while tback is not None:
if tback.tb_next is None and is_breakpoint:
break
filename = tback.tb_frame.f_code.co_filename
function = tback.tb_frame.f_code.co_name
context = tback.tb_frame.f_locals
lineno = tback.tb_lineno - 1
tback_id = id(tback)
pre_context_lineno, pre_context, context_line, post_context = get_lines_from_file(filename, lineno + 1, 7)
frames.append(ErrorFrame(tback, filename, function, lineno, context, tback_id, pre_context, context_line, post_context, pre_context_lineno))
tback = tback.tb_next
return frames | def function[get_frames, parameter[tback, is_breakpoint]]:
constant[Builds a list of ErrorFrame objects from a traceback]
variable[frames] assign[=] list[[]]
while compare[name[tback] is_not constant[None]] begin[:]
if <ast.BoolOp object at 0x7da1b0b81810> begin[:]
break
variable[filename] assign[=] name[tback].tb_frame.f_code.co_filename
variable[function] assign[=] name[tback].tb_frame.f_code.co_name
variable[context] assign[=] name[tback].tb_frame.f_locals
variable[lineno] assign[=] binary_operation[name[tback].tb_lineno - constant[1]]
variable[tback_id] assign[=] call[name[id], parameter[name[tback]]]
<ast.Tuple object at 0x7da1b0ba95d0> assign[=] call[name[get_lines_from_file], parameter[name[filename], binary_operation[name[lineno] + constant[1]], constant[7]]]
call[name[frames].append, parameter[call[name[ErrorFrame], parameter[name[tback], name[filename], name[function], name[lineno], name[context], name[tback_id], name[pre_context], name[context_line], name[post_context], name[pre_context_lineno]]]]]
variable[tback] assign[=] name[tback].tb_next
return[name[frames]] | keyword[def] identifier[get_frames] ( identifier[tback] , identifier[is_breakpoint] ):
literal[string]
identifier[frames] =[]
keyword[while] identifier[tback] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[tback] . identifier[tb_next] keyword[is] keyword[None] keyword[and] identifier[is_breakpoint] :
keyword[break]
identifier[filename] = identifier[tback] . identifier[tb_frame] . identifier[f_code] . identifier[co_filename]
identifier[function] = identifier[tback] . identifier[tb_frame] . identifier[f_code] . identifier[co_name]
identifier[context] = identifier[tback] . identifier[tb_frame] . identifier[f_locals]
identifier[lineno] = identifier[tback] . identifier[tb_lineno] - literal[int]
identifier[tback_id] = identifier[id] ( identifier[tback] )
identifier[pre_context_lineno] , identifier[pre_context] , identifier[context_line] , identifier[post_context] = identifier[get_lines_from_file] ( identifier[filename] , identifier[lineno] + literal[int] , literal[int] )
identifier[frames] . identifier[append] ( identifier[ErrorFrame] ( identifier[tback] , identifier[filename] , identifier[function] , identifier[lineno] , identifier[context] , identifier[tback_id] , identifier[pre_context] , identifier[context_line] , identifier[post_context] , identifier[pre_context_lineno] ))
identifier[tback] = identifier[tback] . identifier[tb_next]
keyword[return] identifier[frames] | def get_frames(tback, is_breakpoint):
"""Builds a list of ErrorFrame objects from a traceback"""
frames = []
while tback is not None:
if tback.tb_next is None and is_breakpoint:
break # depends on [control=['if'], data=[]]
filename = tback.tb_frame.f_code.co_filename
function = tback.tb_frame.f_code.co_name
context = tback.tb_frame.f_locals
lineno = tback.tb_lineno - 1
tback_id = id(tback)
(pre_context_lineno, pre_context, context_line, post_context) = get_lines_from_file(filename, lineno + 1, 7)
frames.append(ErrorFrame(tback, filename, function, lineno, context, tback_id, pre_context, context_line, post_context, pre_context_lineno))
tback = tback.tb_next # depends on [control=['while'], data=['tback']]
return frames |
def results(self, use_cache=True, dialect=None, billing_tier=None):
"""Retrieves table of results for the query. May block if the query must be executed first.
Args:
use_cache: whether to use cached results or not. Ignored if append is specified.
dialect : {'legacy', 'standard'}, default 'legacy'
'legacy' : Use BigQuery's legacy SQL dialect.
'standard' : Use BigQuery's standard SQL (beta), which is
compliant with the SQL 2011 standard.
billing_tier: Limits the billing tier for this job. Queries that have resource
usage beyond this tier will fail (without incurring a charge). If unspecified, this
will be set to your project default. This can also be used to override your
project-wide default billing tier on a per-query basis.
Returns:
A QueryResultsTable containing the result set.
Raises:
Exception if the query could not be executed or query response was malformed.
"""
if not use_cache or (self._results is None):
self.execute(use_cache=use_cache, dialect=dialect, billing_tier=billing_tier)
return self._results.results | def function[results, parameter[self, use_cache, dialect, billing_tier]]:
constant[Retrieves table of results for the query. May block if the query must be executed first.
Args:
use_cache: whether to use cached results or not. Ignored if append is specified.
dialect : {'legacy', 'standard'}, default 'legacy'
'legacy' : Use BigQuery's legacy SQL dialect.
'standard' : Use BigQuery's standard SQL (beta), which is
compliant with the SQL 2011 standard.
billing_tier: Limits the billing tier for this job. Queries that have resource
usage beyond this tier will fail (without incurring a charge). If unspecified, this
will be set to your project default. This can also be used to override your
project-wide default billing tier on a per-query basis.
Returns:
A QueryResultsTable containing the result set.
Raises:
Exception if the query could not be executed or query response was malformed.
]
if <ast.BoolOp object at 0x7da18bc70b50> begin[:]
call[name[self].execute, parameter[]]
return[name[self]._results.results] | keyword[def] identifier[results] ( identifier[self] , identifier[use_cache] = keyword[True] , identifier[dialect] = keyword[None] , identifier[billing_tier] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[use_cache] keyword[or] ( identifier[self] . identifier[_results] keyword[is] keyword[None] ):
identifier[self] . identifier[execute] ( identifier[use_cache] = identifier[use_cache] , identifier[dialect] = identifier[dialect] , identifier[billing_tier] = identifier[billing_tier] )
keyword[return] identifier[self] . identifier[_results] . identifier[results] | def results(self, use_cache=True, dialect=None, billing_tier=None):
"""Retrieves table of results for the query. May block if the query must be executed first.
Args:
use_cache: whether to use cached results or not. Ignored if append is specified.
dialect : {'legacy', 'standard'}, default 'legacy'
'legacy' : Use BigQuery's legacy SQL dialect.
'standard' : Use BigQuery's standard SQL (beta), which is
compliant with the SQL 2011 standard.
billing_tier: Limits the billing tier for this job. Queries that have resource
usage beyond this tier will fail (without incurring a charge). If unspecified, this
will be set to your project default. This can also be used to override your
project-wide default billing tier on a per-query basis.
Returns:
A QueryResultsTable containing the result set.
Raises:
Exception if the query could not be executed or query response was malformed.
"""
if not use_cache or self._results is None:
self.execute(use_cache=use_cache, dialect=dialect, billing_tier=billing_tier) # depends on [control=['if'], data=[]]
return self._results.results |
def set_weights(params, new_params):
"""
Copies parameters from new_params to params
:param params: dst parameters
:param new_params: src parameters
"""
for param, new_param in zip(params, new_params):
param.data.copy_(new_param.data) | def function[set_weights, parameter[params, new_params]]:
constant[
Copies parameters from new_params to params
:param params: dst parameters
:param new_params: src parameters
]
for taget[tuple[[<ast.Name object at 0x7da1b1b01630>, <ast.Name object at 0x7da1b1b02aa0>]]] in starred[call[name[zip], parameter[name[params], name[new_params]]]] begin[:]
call[name[param].data.copy_, parameter[name[new_param].data]] | keyword[def] identifier[set_weights] ( identifier[params] , identifier[new_params] ):
literal[string]
keyword[for] identifier[param] , identifier[new_param] keyword[in] identifier[zip] ( identifier[params] , identifier[new_params] ):
identifier[param] . identifier[data] . identifier[copy_] ( identifier[new_param] . identifier[data] ) | def set_weights(params, new_params):
"""
Copies parameters from new_params to params
:param params: dst parameters
:param new_params: src parameters
"""
for (param, new_param) in zip(params, new_params):
param.data.copy_(new_param.data) # depends on [control=['for'], data=[]] |
def trigger_event(event=None, **kwargs):
'''
Trigger a configured event in IFTTT.
:param event: The name of the event to trigger.
:return: A dictionary with status, text, and error if result was failure.
'''
res = {'result': False, 'message': 'Something went wrong'}
data = {}
for value in ('value1', 'value2', 'value3',
'Value1', 'Value2', 'Value3'):
if value in kwargs:
data[value.lower()] = kwargs[value]
data['occurredat'] = time.strftime("%B %d, %Y %I:%M%p", time.localtime())
result = _query(event=event,
method='POST',
data=salt.utils.json.dumps(data)
)
if 'status' in result:
if result['status'] == 200:
res['result'] = True
res['message'] = result['text']
else:
if 'error' in result:
res['message'] = result['error']
return res | def function[trigger_event, parameter[event]]:
constant[
Trigger a configured event in IFTTT.
:param event: The name of the event to trigger.
:return: A dictionary with status, text, and error if result was failure.
]
variable[res] assign[=] dictionary[[<ast.Constant object at 0x7da1b21be650>, <ast.Constant object at 0x7da1b21bc730>], [<ast.Constant object at 0x7da1b21bd8a0>, <ast.Constant object at 0x7da1b21bdea0>]]
variable[data] assign[=] dictionary[[], []]
for taget[name[value]] in starred[tuple[[<ast.Constant object at 0x7da1b21be7d0>, <ast.Constant object at 0x7da1b21bdf60>, <ast.Constant object at 0x7da1b21bcf10>, <ast.Constant object at 0x7da1b21bdb10>, <ast.Constant object at 0x7da1b21bf340>, <ast.Constant object at 0x7da1b21bcf70>]]] begin[:]
if compare[name[value] in name[kwargs]] begin[:]
call[name[data]][call[name[value].lower, parameter[]]] assign[=] call[name[kwargs]][name[value]]
call[name[data]][constant[occurredat]] assign[=] call[name[time].strftime, parameter[constant[%B %d, %Y %I:%M%p], call[name[time].localtime, parameter[]]]]
variable[result] assign[=] call[name[_query], parameter[]]
if compare[constant[status] in name[result]] begin[:]
if compare[call[name[result]][constant[status]] equal[==] constant[200]] begin[:]
call[name[res]][constant[result]] assign[=] constant[True]
call[name[res]][constant[message]] assign[=] call[name[result]][constant[text]]
return[name[res]] | keyword[def] identifier[trigger_event] ( identifier[event] = keyword[None] ,** identifier[kwargs] ):
literal[string]
identifier[res] ={ literal[string] : keyword[False] , literal[string] : literal[string] }
identifier[data] ={}
keyword[for] identifier[value] keyword[in] ( literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] , literal[string] ):
keyword[if] identifier[value] keyword[in] identifier[kwargs] :
identifier[data] [ identifier[value] . identifier[lower] ()]= identifier[kwargs] [ identifier[value] ]
identifier[data] [ literal[string] ]= identifier[time] . identifier[strftime] ( literal[string] , identifier[time] . identifier[localtime] ())
identifier[result] = identifier[_query] ( identifier[event] = identifier[event] ,
identifier[method] = literal[string] ,
identifier[data] = identifier[salt] . identifier[utils] . identifier[json] . identifier[dumps] ( identifier[data] )
)
keyword[if] literal[string] keyword[in] identifier[result] :
keyword[if] identifier[result] [ literal[string] ]== literal[int] :
identifier[res] [ literal[string] ]= keyword[True]
identifier[res] [ literal[string] ]= identifier[result] [ literal[string] ]
keyword[else] :
keyword[if] literal[string] keyword[in] identifier[result] :
identifier[res] [ literal[string] ]= identifier[result] [ literal[string] ]
keyword[return] identifier[res] | def trigger_event(event=None, **kwargs):
"""
Trigger a configured event in IFTTT.
:param event: The name of the event to trigger.
:return: A dictionary with status, text, and error if result was failure.
"""
res = {'result': False, 'message': 'Something went wrong'}
data = {}
for value in ('value1', 'value2', 'value3', 'Value1', 'Value2', 'Value3'):
if value in kwargs:
data[value.lower()] = kwargs[value] # depends on [control=['if'], data=['value', 'kwargs']] # depends on [control=['for'], data=['value']]
data['occurredat'] = time.strftime('%B %d, %Y %I:%M%p', time.localtime())
result = _query(event=event, method='POST', data=salt.utils.json.dumps(data))
if 'status' in result:
if result['status'] == 200:
res['result'] = True
res['message'] = result['text'] # depends on [control=['if'], data=[]]
elif 'error' in result:
res['message'] = result['error'] # depends on [control=['if'], data=['result']] # depends on [control=['if'], data=['result']]
return res |
def get_file_ids(self, file_archive, creator=None, status=FileStatus.no_file):
"""Fill the file id arrays from the file lists
Parameters
----------
file_archive : `FileArchive`
Used to look up file ids
creator : int
A unique key for the job that created these file
status : `FileStatus`
Enumeration giving current status thse files
"""
file_dict = copy.deepcopy(self.file_dict)
if self.sub_file_dict is not None:
file_dict.update(self.sub_file_dict.file_dict)
infiles = file_dict.input_files
outfiles = file_dict.output_files
rmfiles = file_dict.temp_files
int_files = file_dict.internal_files
if self.infile_ids is None:
if infiles is not None:
self.infile_ids = np.zeros((len(infiles)), int)
filelist = file_archive.get_file_ids(
infiles, creator, FileStatus.expected, file_dict)
JobDetails._fill_array_from_list(filelist, self.infile_ids)
else:
self.infile_ids = np.zeros((0), int)
if self.outfile_ids is None:
if outfiles is not None:
self.outfile_ids = np.zeros((len(outfiles)), int)
filelist = file_archive.get_file_ids(
outfiles, creator, status, file_dict)
JobDetails._fill_array_from_list(filelist, self.outfile_ids)
else:
self.outfile_ids = np.zeros((0), int)
if self.rmfile_ids is None:
if rmfiles is not None:
self.rmfile_ids = np.zeros((len(rmfiles)), int)
filelist = file_archive.get_file_ids(rmfiles)
JobDetails._fill_array_from_list(filelist, self.rmfile_ids)
else:
self.rmfile_ids = np.zeros((0), int)
if self.intfile_ids is None:
if int_files is not None:
self.intfile_ids = np.zeros((len(int_files)), int)
filelist = file_archive.get_file_ids(
int_files, creator, status)
JobDetails._fill_array_from_list(filelist, self.intfile_ids)
else:
self.intfile_ids = np.zeros((0), int) | def function[get_file_ids, parameter[self, file_archive, creator, status]]:
constant[Fill the file id arrays from the file lists
Parameters
----------
file_archive : `FileArchive`
Used to look up file ids
creator : int
A unique key for the job that created these file
status : `FileStatus`
Enumeration giving current status thse files
]
variable[file_dict] assign[=] call[name[copy].deepcopy, parameter[name[self].file_dict]]
if compare[name[self].sub_file_dict is_not constant[None]] begin[:]
call[name[file_dict].update, parameter[name[self].sub_file_dict.file_dict]]
variable[infiles] assign[=] name[file_dict].input_files
variable[outfiles] assign[=] name[file_dict].output_files
variable[rmfiles] assign[=] name[file_dict].temp_files
variable[int_files] assign[=] name[file_dict].internal_files
if compare[name[self].infile_ids is constant[None]] begin[:]
if compare[name[infiles] is_not constant[None]] begin[:]
name[self].infile_ids assign[=] call[name[np].zeros, parameter[call[name[len], parameter[name[infiles]]], name[int]]]
variable[filelist] assign[=] call[name[file_archive].get_file_ids, parameter[name[infiles], name[creator], name[FileStatus].expected, name[file_dict]]]
call[name[JobDetails]._fill_array_from_list, parameter[name[filelist], name[self].infile_ids]]
if compare[name[self].outfile_ids is constant[None]] begin[:]
if compare[name[outfiles] is_not constant[None]] begin[:]
name[self].outfile_ids assign[=] call[name[np].zeros, parameter[call[name[len], parameter[name[outfiles]]], name[int]]]
variable[filelist] assign[=] call[name[file_archive].get_file_ids, parameter[name[outfiles], name[creator], name[status], name[file_dict]]]
call[name[JobDetails]._fill_array_from_list, parameter[name[filelist], name[self].outfile_ids]]
if compare[name[self].rmfile_ids is constant[None]] begin[:]
if compare[name[rmfiles] is_not constant[None]] begin[:]
name[self].rmfile_ids assign[=] call[name[np].zeros, parameter[call[name[len], parameter[name[rmfiles]]], name[int]]]
variable[filelist] assign[=] call[name[file_archive].get_file_ids, parameter[name[rmfiles]]]
call[name[JobDetails]._fill_array_from_list, parameter[name[filelist], name[self].rmfile_ids]]
if compare[name[self].intfile_ids is constant[None]] begin[:]
if compare[name[int_files] is_not constant[None]] begin[:]
name[self].intfile_ids assign[=] call[name[np].zeros, parameter[call[name[len], parameter[name[int_files]]], name[int]]]
variable[filelist] assign[=] call[name[file_archive].get_file_ids, parameter[name[int_files], name[creator], name[status]]]
call[name[JobDetails]._fill_array_from_list, parameter[name[filelist], name[self].intfile_ids]] | keyword[def] identifier[get_file_ids] ( identifier[self] , identifier[file_archive] , identifier[creator] = keyword[None] , identifier[status] = identifier[FileStatus] . identifier[no_file] ):
literal[string]
identifier[file_dict] = identifier[copy] . identifier[deepcopy] ( identifier[self] . identifier[file_dict] )
keyword[if] identifier[self] . identifier[sub_file_dict] keyword[is] keyword[not] keyword[None] :
identifier[file_dict] . identifier[update] ( identifier[self] . identifier[sub_file_dict] . identifier[file_dict] )
identifier[infiles] = identifier[file_dict] . identifier[input_files]
identifier[outfiles] = identifier[file_dict] . identifier[output_files]
identifier[rmfiles] = identifier[file_dict] . identifier[temp_files]
identifier[int_files] = identifier[file_dict] . identifier[internal_files]
keyword[if] identifier[self] . identifier[infile_ids] keyword[is] keyword[None] :
keyword[if] identifier[infiles] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[infile_ids] = identifier[np] . identifier[zeros] (( identifier[len] ( identifier[infiles] )), identifier[int] )
identifier[filelist] = identifier[file_archive] . identifier[get_file_ids] (
identifier[infiles] , identifier[creator] , identifier[FileStatus] . identifier[expected] , identifier[file_dict] )
identifier[JobDetails] . identifier[_fill_array_from_list] ( identifier[filelist] , identifier[self] . identifier[infile_ids] )
keyword[else] :
identifier[self] . identifier[infile_ids] = identifier[np] . identifier[zeros] (( literal[int] ), identifier[int] )
keyword[if] identifier[self] . identifier[outfile_ids] keyword[is] keyword[None] :
keyword[if] identifier[outfiles] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[outfile_ids] = identifier[np] . identifier[zeros] (( identifier[len] ( identifier[outfiles] )), identifier[int] )
identifier[filelist] = identifier[file_archive] . identifier[get_file_ids] (
identifier[outfiles] , identifier[creator] , identifier[status] , identifier[file_dict] )
identifier[JobDetails] . identifier[_fill_array_from_list] ( identifier[filelist] , identifier[self] . identifier[outfile_ids] )
keyword[else] :
identifier[self] . identifier[outfile_ids] = identifier[np] . identifier[zeros] (( literal[int] ), identifier[int] )
keyword[if] identifier[self] . identifier[rmfile_ids] keyword[is] keyword[None] :
keyword[if] identifier[rmfiles] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[rmfile_ids] = identifier[np] . identifier[zeros] (( identifier[len] ( identifier[rmfiles] )), identifier[int] )
identifier[filelist] = identifier[file_archive] . identifier[get_file_ids] ( identifier[rmfiles] )
identifier[JobDetails] . identifier[_fill_array_from_list] ( identifier[filelist] , identifier[self] . identifier[rmfile_ids] )
keyword[else] :
identifier[self] . identifier[rmfile_ids] = identifier[np] . identifier[zeros] (( literal[int] ), identifier[int] )
keyword[if] identifier[self] . identifier[intfile_ids] keyword[is] keyword[None] :
keyword[if] identifier[int_files] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[intfile_ids] = identifier[np] . identifier[zeros] (( identifier[len] ( identifier[int_files] )), identifier[int] )
identifier[filelist] = identifier[file_archive] . identifier[get_file_ids] (
identifier[int_files] , identifier[creator] , identifier[status] )
identifier[JobDetails] . identifier[_fill_array_from_list] ( identifier[filelist] , identifier[self] . identifier[intfile_ids] )
keyword[else] :
identifier[self] . identifier[intfile_ids] = identifier[np] . identifier[zeros] (( literal[int] ), identifier[int] ) | def get_file_ids(self, file_archive, creator=None, status=FileStatus.no_file):
"""Fill the file id arrays from the file lists
Parameters
----------
file_archive : `FileArchive`
Used to look up file ids
creator : int
A unique key for the job that created these file
status : `FileStatus`
Enumeration giving current status thse files
"""
file_dict = copy.deepcopy(self.file_dict)
if self.sub_file_dict is not None:
file_dict.update(self.sub_file_dict.file_dict) # depends on [control=['if'], data=[]]
infiles = file_dict.input_files
outfiles = file_dict.output_files
rmfiles = file_dict.temp_files
int_files = file_dict.internal_files
if self.infile_ids is None:
if infiles is not None:
self.infile_ids = np.zeros(len(infiles), int)
filelist = file_archive.get_file_ids(infiles, creator, FileStatus.expected, file_dict)
JobDetails._fill_array_from_list(filelist, self.infile_ids) # depends on [control=['if'], data=['infiles']]
else:
self.infile_ids = np.zeros(0, int) # depends on [control=['if'], data=[]]
if self.outfile_ids is None:
if outfiles is not None:
self.outfile_ids = np.zeros(len(outfiles), int)
filelist = file_archive.get_file_ids(outfiles, creator, status, file_dict)
JobDetails._fill_array_from_list(filelist, self.outfile_ids) # depends on [control=['if'], data=['outfiles']]
else:
self.outfile_ids = np.zeros(0, int) # depends on [control=['if'], data=[]]
if self.rmfile_ids is None:
if rmfiles is not None:
self.rmfile_ids = np.zeros(len(rmfiles), int)
filelist = file_archive.get_file_ids(rmfiles)
JobDetails._fill_array_from_list(filelist, self.rmfile_ids) # depends on [control=['if'], data=['rmfiles']]
else:
self.rmfile_ids = np.zeros(0, int) # depends on [control=['if'], data=[]]
if self.intfile_ids is None:
if int_files is not None:
self.intfile_ids = np.zeros(len(int_files), int)
filelist = file_archive.get_file_ids(int_files, creator, status)
JobDetails._fill_array_from_list(filelist, self.intfile_ids) # depends on [control=['if'], data=['int_files']]
else:
self.intfile_ids = np.zeros(0, int) # depends on [control=['if'], data=[]] |
def check_payment_v2(state_engine, state_op_type, nameop, fee_block_id, token_address, burn_address, name_fee, block_id):
"""
Verify that for a version-2 namespace (burn-to-creator), the nameop paid the right amount of BTC or Stacks.
It can pay either through a preorder (for registers), or directly (for renewals)
Return {'status': True, 'tokens_paid': ..., 'token_units': ...} if so
Return {'status': False} if not.
"""
# priced in BTC only if the namespace creator can receive name fees.
# once the namespace switches over to burning, then the name creator can pay in Stacks as well.
assert name_fee is not None
assert isinstance(name_fee, (int,long))
epoch_features = get_epoch_features(block_id)
name = nameop['name']
namespace_id = get_namespace_from_name(name)
name_without_namespace = get_name_from_fq_name(name)
namespace = state_engine.get_namespace( namespace_id )
assert namespace['version'] == NAMESPACE_VERSION_PAY_TO_CREATOR
# need to be in the right epoch--i.e. pay-to-creator needs to be a feature
if EPOCH_FEATURE_NAMESPACE_BURN_TO_CREATOR not in epoch_features:
log.warning("Name '{}' was created in namespace '{}', with cversion bits 0x{:x}, which is not supported in this epoch".format(name, namespace['namespace_id'], namespace['version']))
return {'status': False}
# check burn address
receive_fees_period = get_epoch_namespace_receive_fees_period(block_id, namespace['namespace_id'])
expected_burn_address = None
tokens_allowed = None
# can only burn to namespace if the namespace is young enough (starts counting from NAMESPACE_REVEAL)
# can only pay in tokens if the register takes place after the pay-to-creator period (receive_fees_period) expires
if namespace['reveal_block'] + receive_fees_period >= block_id:
log.debug("Register must pay to v2 namespace address {}".format(namespace['address']))
expected_burn_address = namespace['address']
tokens_allowed = False
else:
log.debug("Register must pay to burn address {}".format(BLOCKSTACK_BURN_ADDRESS))
expected_burn_address = BLOCKSTACK_BURN_ADDRESS
tokens_allowed = True
if burn_address != expected_burn_address:
log.warning("Buyer of {} used the wrong burn address ({}): expected {}".format(name, burn_address, expected_burn_address))
return {'status': False}
# allowed to pay in Stacks?
if EPOCH_FEATURE_NAMEOPS_COST_TOKENS in epoch_features:
# did we pay any stacks?
res = get_stacks_payment(state_engine, nameop, state_op_type)
if res['status']:
# paid something in Stacks. Will ignore BTC.
if not tokens_allowed:
log.warning('Buyer of {} paid in Stacks, but should have paid in BTC to the namespace creator'.format(name))
return {'status': False}
res = check_payment_in_stacks(state_engine, nameop, state_op_type, fee_block_id)
if not res['status']:
log.warning("Buyer of {} paid in Stacks, but did not pay enough".format(name))
return {'status': False}
tokens_paid = res['tokens_paid']
token_units = res['token_units']
return {'status': True, 'tokens_paid': tokens_paid, 'token_units': token_units}
# did not pay in stacks tokens, or this isn't allowed yet
btc_price = price_name(name_without_namespace, namespace, fee_block_id) # price reflects namespace version
# fee must be high enough (either the preorder paid the right fee at the preorder block height,
# or the renewal paid the right fee at the renewal height)
if name_fee < btc_price:
log.warning("Name '%s' costs %s satoshis, but paid %s satoshis" % (name, btc_price, name_fee))
return {'status': False}
log.debug('Paid {} satoshis for {} to {}'.format(name_fee, name, burn_address))
return {'status': True, 'tokens_paid': name_fee, 'token_units': 'BTC'} | def function[check_payment_v2, parameter[state_engine, state_op_type, nameop, fee_block_id, token_address, burn_address, name_fee, block_id]]:
constant[
Verify that for a version-2 namespace (burn-to-creator), the nameop paid the right amount of BTC or Stacks.
It can pay either through a preorder (for registers), or directly (for renewals)
Return {'status': True, 'tokens_paid': ..., 'token_units': ...} if so
Return {'status': False} if not.
]
assert[compare[name[name_fee] is_not constant[None]]]
assert[call[name[isinstance], parameter[name[name_fee], tuple[[<ast.Name object at 0x7da2043446d0>, <ast.Name object at 0x7da204346860>]]]]]
variable[epoch_features] assign[=] call[name[get_epoch_features], parameter[name[block_id]]]
variable[name] assign[=] call[name[nameop]][constant[name]]
variable[namespace_id] assign[=] call[name[get_namespace_from_name], parameter[name[name]]]
variable[name_without_namespace] assign[=] call[name[get_name_from_fq_name], parameter[name[name]]]
variable[namespace] assign[=] call[name[state_engine].get_namespace, parameter[name[namespace_id]]]
assert[compare[call[name[namespace]][constant[version]] equal[==] name[NAMESPACE_VERSION_PAY_TO_CREATOR]]]
if compare[name[EPOCH_FEATURE_NAMESPACE_BURN_TO_CREATOR] <ast.NotIn object at 0x7da2590d7190> name[epoch_features]] begin[:]
call[name[log].warning, parameter[call[constant[Name '{}' was created in namespace '{}', with cversion bits 0x{:x}, which is not supported in this epoch].format, parameter[name[name], call[name[namespace]][constant[namespace_id]], call[name[namespace]][constant[version]]]]]]
return[dictionary[[<ast.Constant object at 0x7da204346110>], [<ast.Constant object at 0x7da204346bc0>]]]
variable[receive_fees_period] assign[=] call[name[get_epoch_namespace_receive_fees_period], parameter[name[block_id], call[name[namespace]][constant[namespace_id]]]]
variable[expected_burn_address] assign[=] constant[None]
variable[tokens_allowed] assign[=] constant[None]
if compare[binary_operation[call[name[namespace]][constant[reveal_block]] + name[receive_fees_period]] greater_or_equal[>=] name[block_id]] begin[:]
call[name[log].debug, parameter[call[constant[Register must pay to v2 namespace address {}].format, parameter[call[name[namespace]][constant[address]]]]]]
variable[expected_burn_address] assign[=] call[name[namespace]][constant[address]]
variable[tokens_allowed] assign[=] constant[False]
if compare[name[burn_address] not_equal[!=] name[expected_burn_address]] begin[:]
call[name[log].warning, parameter[call[constant[Buyer of {} used the wrong burn address ({}): expected {}].format, parameter[name[name], name[burn_address], name[expected_burn_address]]]]]
return[dictionary[[<ast.Constant object at 0x7da204344d60>], [<ast.Constant object at 0x7da2043445b0>]]]
if compare[name[EPOCH_FEATURE_NAMEOPS_COST_TOKENS] in name[epoch_features]] begin[:]
variable[res] assign[=] call[name[get_stacks_payment], parameter[name[state_engine], name[nameop], name[state_op_type]]]
if call[name[res]][constant[status]] begin[:]
if <ast.UnaryOp object at 0x7da1b16a9bd0> begin[:]
call[name[log].warning, parameter[call[constant[Buyer of {} paid in Stacks, but should have paid in BTC to the namespace creator].format, parameter[name[name]]]]]
return[dictionary[[<ast.Constant object at 0x7da1b16a89d0>], [<ast.Constant object at 0x7da1b16a8bb0>]]]
variable[res] assign[=] call[name[check_payment_in_stacks], parameter[name[state_engine], name[nameop], name[state_op_type], name[fee_block_id]]]
if <ast.UnaryOp object at 0x7da1b16a9c60> begin[:]
call[name[log].warning, parameter[call[constant[Buyer of {} paid in Stacks, but did not pay enough].format, parameter[name[name]]]]]
return[dictionary[[<ast.Constant object at 0x7da1b1721240>], [<ast.Constant object at 0x7da18f00eef0>]]]
variable[tokens_paid] assign[=] call[name[res]][constant[tokens_paid]]
variable[token_units] assign[=] call[name[res]][constant[token_units]]
return[dictionary[[<ast.Constant object at 0x7da18f00d240>, <ast.Constant object at 0x7da18f00feb0>, <ast.Constant object at 0x7da18f00db10>], [<ast.Constant object at 0x7da18f00c970>, <ast.Name object at 0x7da18f00e980>, <ast.Name object at 0x7da18f00db70>]]]
variable[btc_price] assign[=] call[name[price_name], parameter[name[name_without_namespace], name[namespace], name[fee_block_id]]]
if compare[name[name_fee] less[<] name[btc_price]] begin[:]
call[name[log].warning, parameter[binary_operation[constant[Name '%s' costs %s satoshis, but paid %s satoshis] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da18f00fc40>, <ast.Name object at 0x7da18f00e020>, <ast.Name object at 0x7da18f00cdc0>]]]]]
return[dictionary[[<ast.Constant object at 0x7da18f00e680>], [<ast.Constant object at 0x7da18f00ec50>]]]
call[name[log].debug, parameter[call[constant[Paid {} satoshis for {} to {}].format, parameter[name[name_fee], name[name], name[burn_address]]]]]
return[dictionary[[<ast.Constant object at 0x7da18f00d5a0>, <ast.Constant object at 0x7da18f00cdf0>, <ast.Constant object at 0x7da18f00c280>], [<ast.Constant object at 0x7da18f00f400>, <ast.Name object at 0x7da18f00ef80>, <ast.Constant object at 0x7da18f00da50>]]] | keyword[def] identifier[check_payment_v2] ( identifier[state_engine] , identifier[state_op_type] , identifier[nameop] , identifier[fee_block_id] , identifier[token_address] , identifier[burn_address] , identifier[name_fee] , identifier[block_id] ):
literal[string]
keyword[assert] identifier[name_fee] keyword[is] keyword[not] keyword[None]
keyword[assert] identifier[isinstance] ( identifier[name_fee] ,( identifier[int] , identifier[long] ))
identifier[epoch_features] = identifier[get_epoch_features] ( identifier[block_id] )
identifier[name] = identifier[nameop] [ literal[string] ]
identifier[namespace_id] = identifier[get_namespace_from_name] ( identifier[name] )
identifier[name_without_namespace] = identifier[get_name_from_fq_name] ( identifier[name] )
identifier[namespace] = identifier[state_engine] . identifier[get_namespace] ( identifier[namespace_id] )
keyword[assert] identifier[namespace] [ literal[string] ]== identifier[NAMESPACE_VERSION_PAY_TO_CREATOR]
keyword[if] identifier[EPOCH_FEATURE_NAMESPACE_BURN_TO_CREATOR] keyword[not] keyword[in] identifier[epoch_features] :
identifier[log] . identifier[warning] ( literal[string] . identifier[format] ( identifier[name] , identifier[namespace] [ literal[string] ], identifier[namespace] [ literal[string] ]))
keyword[return] { literal[string] : keyword[False] }
identifier[receive_fees_period] = identifier[get_epoch_namespace_receive_fees_period] ( identifier[block_id] , identifier[namespace] [ literal[string] ])
identifier[expected_burn_address] = keyword[None]
identifier[tokens_allowed] = keyword[None]
keyword[if] identifier[namespace] [ literal[string] ]+ identifier[receive_fees_period] >= identifier[block_id] :
identifier[log] . identifier[debug] ( literal[string] . identifier[format] ( identifier[namespace] [ literal[string] ]))
identifier[expected_burn_address] = identifier[namespace] [ literal[string] ]
identifier[tokens_allowed] = keyword[False]
keyword[else] :
identifier[log] . identifier[debug] ( literal[string] . identifier[format] ( identifier[BLOCKSTACK_BURN_ADDRESS] ))
identifier[expected_burn_address] = identifier[BLOCKSTACK_BURN_ADDRESS]
identifier[tokens_allowed] = keyword[True]
keyword[if] identifier[burn_address] != identifier[expected_burn_address] :
identifier[log] . identifier[warning] ( literal[string] . identifier[format] ( identifier[name] , identifier[burn_address] , identifier[expected_burn_address] ))
keyword[return] { literal[string] : keyword[False] }
keyword[if] identifier[EPOCH_FEATURE_NAMEOPS_COST_TOKENS] keyword[in] identifier[epoch_features] :
identifier[res] = identifier[get_stacks_payment] ( identifier[state_engine] , identifier[nameop] , identifier[state_op_type] )
keyword[if] identifier[res] [ literal[string] ]:
keyword[if] keyword[not] identifier[tokens_allowed] :
identifier[log] . identifier[warning] ( literal[string] . identifier[format] ( identifier[name] ))
keyword[return] { literal[string] : keyword[False] }
identifier[res] = identifier[check_payment_in_stacks] ( identifier[state_engine] , identifier[nameop] , identifier[state_op_type] , identifier[fee_block_id] )
keyword[if] keyword[not] identifier[res] [ literal[string] ]:
identifier[log] . identifier[warning] ( literal[string] . identifier[format] ( identifier[name] ))
keyword[return] { literal[string] : keyword[False] }
identifier[tokens_paid] = identifier[res] [ literal[string] ]
identifier[token_units] = identifier[res] [ literal[string] ]
keyword[return] { literal[string] : keyword[True] , literal[string] : identifier[tokens_paid] , literal[string] : identifier[token_units] }
identifier[btc_price] = identifier[price_name] ( identifier[name_without_namespace] , identifier[namespace] , identifier[fee_block_id] )
keyword[if] identifier[name_fee] < identifier[btc_price] :
identifier[log] . identifier[warning] ( literal[string] %( identifier[name] , identifier[btc_price] , identifier[name_fee] ))
keyword[return] { literal[string] : keyword[False] }
identifier[log] . identifier[debug] ( literal[string] . identifier[format] ( identifier[name_fee] , identifier[name] , identifier[burn_address] ))
keyword[return] { literal[string] : keyword[True] , literal[string] : identifier[name_fee] , literal[string] : literal[string] } | def check_payment_v2(state_engine, state_op_type, nameop, fee_block_id, token_address, burn_address, name_fee, block_id):
"""
Verify that for a version-2 namespace (burn-to-creator), the nameop paid the right amount of BTC or Stacks.
It can pay either through a preorder (for registers), or directly (for renewals)
Return {'status': True, 'tokens_paid': ..., 'token_units': ...} if so
Return {'status': False} if not.
"""
# priced in BTC only if the namespace creator can receive name fees.
# once the namespace switches over to burning, then the name creator can pay in Stacks as well.
assert name_fee is not None
assert isinstance(name_fee, (int, long))
epoch_features = get_epoch_features(block_id)
name = nameop['name']
namespace_id = get_namespace_from_name(name)
name_without_namespace = get_name_from_fq_name(name)
namespace = state_engine.get_namespace(namespace_id)
assert namespace['version'] == NAMESPACE_VERSION_PAY_TO_CREATOR
# need to be in the right epoch--i.e. pay-to-creator needs to be a feature
if EPOCH_FEATURE_NAMESPACE_BURN_TO_CREATOR not in epoch_features:
log.warning("Name '{}' was created in namespace '{}', with cversion bits 0x{:x}, which is not supported in this epoch".format(name, namespace['namespace_id'], namespace['version']))
return {'status': False} # depends on [control=['if'], data=[]]
# check burn address
receive_fees_period = get_epoch_namespace_receive_fees_period(block_id, namespace['namespace_id'])
expected_burn_address = None
tokens_allowed = None
# can only burn to namespace if the namespace is young enough (starts counting from NAMESPACE_REVEAL)
# can only pay in tokens if the register takes place after the pay-to-creator period (receive_fees_period) expires
if namespace['reveal_block'] + receive_fees_period >= block_id:
log.debug('Register must pay to v2 namespace address {}'.format(namespace['address']))
expected_burn_address = namespace['address']
tokens_allowed = False # depends on [control=['if'], data=[]]
else:
log.debug('Register must pay to burn address {}'.format(BLOCKSTACK_BURN_ADDRESS))
expected_burn_address = BLOCKSTACK_BURN_ADDRESS
tokens_allowed = True
if burn_address != expected_burn_address:
log.warning('Buyer of {} used the wrong burn address ({}): expected {}'.format(name, burn_address, expected_burn_address))
return {'status': False} # depends on [control=['if'], data=['burn_address', 'expected_burn_address']]
# allowed to pay in Stacks?
if EPOCH_FEATURE_NAMEOPS_COST_TOKENS in epoch_features:
# did we pay any stacks?
res = get_stacks_payment(state_engine, nameop, state_op_type)
if res['status']:
# paid something in Stacks. Will ignore BTC.
if not tokens_allowed:
log.warning('Buyer of {} paid in Stacks, but should have paid in BTC to the namespace creator'.format(name))
return {'status': False} # depends on [control=['if'], data=[]]
res = check_payment_in_stacks(state_engine, nameop, state_op_type, fee_block_id)
if not res['status']:
log.warning('Buyer of {} paid in Stacks, but did not pay enough'.format(name))
return {'status': False} # depends on [control=['if'], data=[]]
tokens_paid = res['tokens_paid']
token_units = res['token_units']
return {'status': True, 'tokens_paid': tokens_paid, 'token_units': token_units} # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
# did not pay in stacks tokens, or this isn't allowed yet
btc_price = price_name(name_without_namespace, namespace, fee_block_id) # price reflects namespace version
# fee must be high enough (either the preorder paid the right fee at the preorder block height,
# or the renewal paid the right fee at the renewal height)
if name_fee < btc_price:
log.warning("Name '%s' costs %s satoshis, but paid %s satoshis" % (name, btc_price, name_fee))
return {'status': False} # depends on [control=['if'], data=['name_fee', 'btc_price']]
log.debug('Paid {} satoshis for {} to {}'.format(name_fee, name, burn_address))
return {'status': True, 'tokens_paid': name_fee, 'token_units': 'BTC'} |
def write(self, data_in, data_out, *args, **kwargs):
"""
:param data_in:
:type data_in: hepconverter.parsers.ParsedData
:param data_out: filelike object
:type data_out: file
:param args:
:param kwargs:
"""
self._get_tables(data_in)
self.file_emulation = False
outputs = []
self._prepare_outputs(data_out, outputs)
output = outputs[0]
for i in xrange(len(self.tables)):
table = self.tables[i]
self._write_table(output, table)
if data_out != output and hasattr(data_out, 'write'):
output.Flush()
output.ReOpen('read')
file_size = output.GetSize()
buff = bytearray(file_size)
output.ReadBuffer(buff, file_size)
data_out.write(buff)
if self.file_emulation:
filename = output.GetName()
output.Close() | def function[write, parameter[self, data_in, data_out]]:
constant[
:param data_in:
:type data_in: hepconverter.parsers.ParsedData
:param data_out: filelike object
:type data_out: file
:param args:
:param kwargs:
]
call[name[self]._get_tables, parameter[name[data_in]]]
name[self].file_emulation assign[=] constant[False]
variable[outputs] assign[=] list[[]]
call[name[self]._prepare_outputs, parameter[name[data_out], name[outputs]]]
variable[output] assign[=] call[name[outputs]][constant[0]]
for taget[name[i]] in starred[call[name[xrange], parameter[call[name[len], parameter[name[self].tables]]]]] begin[:]
variable[table] assign[=] call[name[self].tables][name[i]]
call[name[self]._write_table, parameter[name[output], name[table]]]
if <ast.BoolOp object at 0x7da18f09f280> begin[:]
call[name[output].Flush, parameter[]]
call[name[output].ReOpen, parameter[constant[read]]]
variable[file_size] assign[=] call[name[output].GetSize, parameter[]]
variable[buff] assign[=] call[name[bytearray], parameter[name[file_size]]]
call[name[output].ReadBuffer, parameter[name[buff], name[file_size]]]
call[name[data_out].write, parameter[name[buff]]]
if name[self].file_emulation begin[:]
variable[filename] assign[=] call[name[output].GetName, parameter[]]
call[name[output].Close, parameter[]] | keyword[def] identifier[write] ( identifier[self] , identifier[data_in] , identifier[data_out] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
identifier[self] . identifier[_get_tables] ( identifier[data_in] )
identifier[self] . identifier[file_emulation] = keyword[False]
identifier[outputs] =[]
identifier[self] . identifier[_prepare_outputs] ( identifier[data_out] , identifier[outputs] )
identifier[output] = identifier[outputs] [ literal[int] ]
keyword[for] identifier[i] keyword[in] identifier[xrange] ( identifier[len] ( identifier[self] . identifier[tables] )):
identifier[table] = identifier[self] . identifier[tables] [ identifier[i] ]
identifier[self] . identifier[_write_table] ( identifier[output] , identifier[table] )
keyword[if] identifier[data_out] != identifier[output] keyword[and] identifier[hasattr] ( identifier[data_out] , literal[string] ):
identifier[output] . identifier[Flush] ()
identifier[output] . identifier[ReOpen] ( literal[string] )
identifier[file_size] = identifier[output] . identifier[GetSize] ()
identifier[buff] = identifier[bytearray] ( identifier[file_size] )
identifier[output] . identifier[ReadBuffer] ( identifier[buff] , identifier[file_size] )
identifier[data_out] . identifier[write] ( identifier[buff] )
keyword[if] identifier[self] . identifier[file_emulation] :
identifier[filename] = identifier[output] . identifier[GetName] ()
identifier[output] . identifier[Close] () | def write(self, data_in, data_out, *args, **kwargs):
"""
:param data_in:
:type data_in: hepconverter.parsers.ParsedData
:param data_out: filelike object
:type data_out: file
:param args:
:param kwargs:
"""
self._get_tables(data_in)
self.file_emulation = False
outputs = []
self._prepare_outputs(data_out, outputs)
output = outputs[0]
for i in xrange(len(self.tables)):
table = self.tables[i]
self._write_table(output, table) # depends on [control=['for'], data=['i']]
if data_out != output and hasattr(data_out, 'write'):
output.Flush()
output.ReOpen('read')
file_size = output.GetSize()
buff = bytearray(file_size)
output.ReadBuffer(buff, file_size)
data_out.write(buff) # depends on [control=['if'], data=[]]
if self.file_emulation:
filename = output.GetName()
output.Close() # depends on [control=['if'], data=[]] |
def grid_plot(self, func, applyto='measurement', ids=None,
row_labels=None, col_labels=None,
xlim='auto', ylim='auto',
xlabel=None, ylabel=None,
colorbar=True,
row_label_xoffset=None, col_label_yoffset=None,
hide_tick_labels=True, hide_tick_lines=True,
hspace=0, wspace=0,
row_labels_kwargs={}, col_labels_kwargs={}):
"""
Creates subplots for each well in the plate. Uses func to plot on each axis.
Follow with a call to matplotlibs show() in order to see the plot.
Parameters
----------
func : callable
func is a callable that accepts a measurement
object (with an optional axis reference) and plots on the current axis.
Return values from func are ignored.
.. note: if using applyto='measurement', the function
when querying for data should make sure that the data
actually exists
applyto : 'measurement' | 'data'
{_graph_grid_layout}
{bases_OrderedCollection_grid_plot_pars}
Returns
-------
{_graph_grid_layout_returns}
Examples
---------
>>> def y(well, ax):
>>> data = well.get_data()
>>> if data is None:
>>> return None
>>> graph.plotFCM(data, 'Y2-A')
>>> def z(data, ax):
>>> plot(data[0:100, 1], data[0:100, 2])
>>> plate.plot(y, applyto='measurement');
>>> plate.plot(z, applyto='data');
"""
# Acquire call arguments to be passed to create plate layout
callArgs = locals().copy() # This statement must remain first. The copy is just defensive.
[callArgs.pop(varname) for varname in
['self', 'func', 'applyto', 'ids', 'colorbar', 'xlim', 'ylim']] # pop args
callArgs['rowNum'] = self.shape[0]
callArgs['colNum'] = self.shape[1]
subplots_adjust_args = {}
subplots_adjust_args.setdefault('right', 0.85)
subplots_adjust_args.setdefault('top', 0.85)
pl.subplots_adjust(**subplots_adjust_args)
# Uses plate default row/col labels if user does not override them by specifying row/col
# labels
if row_labels == None: callArgs['row_labels'] = self.row_labels
if col_labels == None: callArgs['col_labels'] = self.col_labels
ax_main, ax_subplots = graph.create_grid_layout(**callArgs)
subplots_ax = DF(ax_subplots, index=self.row_labels, columns=self.col_labels)
if ids is None:
ids = self.keys()
ids = to_list(ids)
for ID in ids:
measurement = self[ID]
if not hasattr(measurement, 'data'):
continue
row, col = self._positions[ID]
ax = subplots_ax[col][row]
pl.sca(ax) # sets the current axis
if applyto == 'measurement':
func(measurement, ax) # reminder: pandas row/col order is reversed
elif applyto == 'data':
data = measurement.get_data()
if data is not None:
if func.func_code.co_argcount == 1:
func(data)
else:
func(data, ax)
else:
raise ValueError('Encountered unsupported value {} for applyto parameter.'.format(
applyto))
# Autoscaling axes
graph.scale_subplots(ax_subplots, xlim=xlim, ylim=ylim)
#####
# Placing ticks on the top left subplot
ax_label = ax_subplots[0, -1]
pl.sca(ax_label)
if xlabel:
xlim = ax_label.get_xlim()
pl.xticks([xlim[0], xlim[1]], rotation=90)
if ylabel:
ylim = ax_label.get_ylim()
pl.yticks([ylim[0], ylim[1]], rotation=0)
pl.sca(ax_main) # sets to the main axis -- more intuitive
return ax_main, ax_subplots | def function[grid_plot, parameter[self, func, applyto, ids, row_labels, col_labels, xlim, ylim, xlabel, ylabel, colorbar, row_label_xoffset, col_label_yoffset, hide_tick_labels, hide_tick_lines, hspace, wspace, row_labels_kwargs, col_labels_kwargs]]:
constant[
Creates subplots for each well in the plate. Uses func to plot on each axis.
Follow with a call to matplotlibs show() in order to see the plot.
Parameters
----------
func : callable
func is a callable that accepts a measurement
object (with an optional axis reference) and plots on the current axis.
Return values from func are ignored.
.. note: if using applyto='measurement', the function
when querying for data should make sure that the data
actually exists
applyto : 'measurement' | 'data'
{_graph_grid_layout}
{bases_OrderedCollection_grid_plot_pars}
Returns
-------
{_graph_grid_layout_returns}
Examples
---------
>>> def y(well, ax):
>>> data = well.get_data()
>>> if data is None:
>>> return None
>>> graph.plotFCM(data, 'Y2-A')
>>> def z(data, ax):
>>> plot(data[0:100, 1], data[0:100, 2])
>>> plate.plot(y, applyto='measurement');
>>> plate.plot(z, applyto='data');
]
variable[callArgs] assign[=] call[call[name[locals], parameter[]].copy, parameter[]]
<ast.ListComp object at 0x7da18f58ce80>
call[name[callArgs]][constant[rowNum]] assign[=] call[name[self].shape][constant[0]]
call[name[callArgs]][constant[colNum]] assign[=] call[name[self].shape][constant[1]]
variable[subplots_adjust_args] assign[=] dictionary[[], []]
call[name[subplots_adjust_args].setdefault, parameter[constant[right], constant[0.85]]]
call[name[subplots_adjust_args].setdefault, parameter[constant[top], constant[0.85]]]
call[name[pl].subplots_adjust, parameter[]]
if compare[name[row_labels] equal[==] constant[None]] begin[:]
call[name[callArgs]][constant[row_labels]] assign[=] name[self].row_labels
if compare[name[col_labels] equal[==] constant[None]] begin[:]
call[name[callArgs]][constant[col_labels]] assign[=] name[self].col_labels
<ast.Tuple object at 0x7da18f58d570> assign[=] call[name[graph].create_grid_layout, parameter[]]
variable[subplots_ax] assign[=] call[name[DF], parameter[name[ax_subplots]]]
if compare[name[ids] is constant[None]] begin[:]
variable[ids] assign[=] call[name[self].keys, parameter[]]
variable[ids] assign[=] call[name[to_list], parameter[name[ids]]]
for taget[name[ID]] in starred[name[ids]] begin[:]
variable[measurement] assign[=] call[name[self]][name[ID]]
if <ast.UnaryOp object at 0x7da18f58f640> begin[:]
continue
<ast.Tuple object at 0x7da18f58ea10> assign[=] call[name[self]._positions][name[ID]]
variable[ax] assign[=] call[call[name[subplots_ax]][name[col]]][name[row]]
call[name[pl].sca, parameter[name[ax]]]
if compare[name[applyto] equal[==] constant[measurement]] begin[:]
call[name[func], parameter[name[measurement], name[ax]]]
call[name[graph].scale_subplots, parameter[name[ax_subplots]]]
variable[ax_label] assign[=] call[name[ax_subplots]][tuple[[<ast.Constant object at 0x7da2041d96f0>, <ast.UnaryOp object at 0x7da2041d8f70>]]]
call[name[pl].sca, parameter[name[ax_label]]]
if name[xlabel] begin[:]
variable[xlim] assign[=] call[name[ax_label].get_xlim, parameter[]]
call[name[pl].xticks, parameter[list[[<ast.Subscript object at 0x7da2041d8be0>, <ast.Subscript object at 0x7da2041da770>]]]]
if name[ylabel] begin[:]
variable[ylim] assign[=] call[name[ax_label].get_ylim, parameter[]]
call[name[pl].yticks, parameter[list[[<ast.Subscript object at 0x7da2041da410>, <ast.Subscript object at 0x7da2041d97b0>]]]]
call[name[pl].sca, parameter[name[ax_main]]]
return[tuple[[<ast.Name object at 0x7da2041da2c0>, <ast.Name object at 0x7da2041dbd30>]]] | keyword[def] identifier[grid_plot] ( identifier[self] , identifier[func] , identifier[applyto] = literal[string] , identifier[ids] = keyword[None] ,
identifier[row_labels] = keyword[None] , identifier[col_labels] = keyword[None] ,
identifier[xlim] = literal[string] , identifier[ylim] = literal[string] ,
identifier[xlabel] = keyword[None] , identifier[ylabel] = keyword[None] ,
identifier[colorbar] = keyword[True] ,
identifier[row_label_xoffset] = keyword[None] , identifier[col_label_yoffset] = keyword[None] ,
identifier[hide_tick_labels] = keyword[True] , identifier[hide_tick_lines] = keyword[True] ,
identifier[hspace] = literal[int] , identifier[wspace] = literal[int] ,
identifier[row_labels_kwargs] ={}, identifier[col_labels_kwargs] ={}):
literal[string]
identifier[callArgs] = identifier[locals] (). identifier[copy] ()
[ identifier[callArgs] . identifier[pop] ( identifier[varname] ) keyword[for] identifier[varname] keyword[in]
[ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ]]
identifier[callArgs] [ literal[string] ]= identifier[self] . identifier[shape] [ literal[int] ]
identifier[callArgs] [ literal[string] ]= identifier[self] . identifier[shape] [ literal[int] ]
identifier[subplots_adjust_args] ={}
identifier[subplots_adjust_args] . identifier[setdefault] ( literal[string] , literal[int] )
identifier[subplots_adjust_args] . identifier[setdefault] ( literal[string] , literal[int] )
identifier[pl] . identifier[subplots_adjust] (** identifier[subplots_adjust_args] )
keyword[if] identifier[row_labels] == keyword[None] : identifier[callArgs] [ literal[string] ]= identifier[self] . identifier[row_labels]
keyword[if] identifier[col_labels] == keyword[None] : identifier[callArgs] [ literal[string] ]= identifier[self] . identifier[col_labels]
identifier[ax_main] , identifier[ax_subplots] = identifier[graph] . identifier[create_grid_layout] (** identifier[callArgs] )
identifier[subplots_ax] = identifier[DF] ( identifier[ax_subplots] , identifier[index] = identifier[self] . identifier[row_labels] , identifier[columns] = identifier[self] . identifier[col_labels] )
keyword[if] identifier[ids] keyword[is] keyword[None] :
identifier[ids] = identifier[self] . identifier[keys] ()
identifier[ids] = identifier[to_list] ( identifier[ids] )
keyword[for] identifier[ID] keyword[in] identifier[ids] :
identifier[measurement] = identifier[self] [ identifier[ID] ]
keyword[if] keyword[not] identifier[hasattr] ( identifier[measurement] , literal[string] ):
keyword[continue]
identifier[row] , identifier[col] = identifier[self] . identifier[_positions] [ identifier[ID] ]
identifier[ax] = identifier[subplots_ax] [ identifier[col] ][ identifier[row] ]
identifier[pl] . identifier[sca] ( identifier[ax] )
keyword[if] identifier[applyto] == literal[string] :
identifier[func] ( identifier[measurement] , identifier[ax] )
keyword[elif] identifier[applyto] == literal[string] :
identifier[data] = identifier[measurement] . identifier[get_data] ()
keyword[if] identifier[data] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[func] . identifier[func_code] . identifier[co_argcount] == literal[int] :
identifier[func] ( identifier[data] )
keyword[else] :
identifier[func] ( identifier[data] , identifier[ax] )
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] (
identifier[applyto] ))
identifier[graph] . identifier[scale_subplots] ( identifier[ax_subplots] , identifier[xlim] = identifier[xlim] , identifier[ylim] = identifier[ylim] )
identifier[ax_label] = identifier[ax_subplots] [ literal[int] ,- literal[int] ]
identifier[pl] . identifier[sca] ( identifier[ax_label] )
keyword[if] identifier[xlabel] :
identifier[xlim] = identifier[ax_label] . identifier[get_xlim] ()
identifier[pl] . identifier[xticks] ([ identifier[xlim] [ literal[int] ], identifier[xlim] [ literal[int] ]], identifier[rotation] = literal[int] )
keyword[if] identifier[ylabel] :
identifier[ylim] = identifier[ax_label] . identifier[get_ylim] ()
identifier[pl] . identifier[yticks] ([ identifier[ylim] [ literal[int] ], identifier[ylim] [ literal[int] ]], identifier[rotation] = literal[int] )
identifier[pl] . identifier[sca] ( identifier[ax_main] )
keyword[return] identifier[ax_main] , identifier[ax_subplots] | def grid_plot(self, func, applyto='measurement', ids=None, row_labels=None, col_labels=None, xlim='auto', ylim='auto', xlabel=None, ylabel=None, colorbar=True, row_label_xoffset=None, col_label_yoffset=None, hide_tick_labels=True, hide_tick_lines=True, hspace=0, wspace=0, row_labels_kwargs={}, col_labels_kwargs={}):
"""
Creates subplots for each well in the plate. Uses func to plot on each axis.
Follow with a call to matplotlibs show() in order to see the plot.
Parameters
----------
func : callable
func is a callable that accepts a measurement
object (with an optional axis reference) and plots on the current axis.
Return values from func are ignored.
.. note: if using applyto='measurement', the function
when querying for data should make sure that the data
actually exists
applyto : 'measurement' | 'data'
{_graph_grid_layout}
{bases_OrderedCollection_grid_plot_pars}
Returns
-------
{_graph_grid_layout_returns}
Examples
---------
>>> def y(well, ax):
>>> data = well.get_data()
>>> if data is None:
>>> return None
>>> graph.plotFCM(data, 'Y2-A')
>>> def z(data, ax):
>>> plot(data[0:100, 1], data[0:100, 2])
>>> plate.plot(y, applyto='measurement');
>>> plate.plot(z, applyto='data');
"""
# Acquire call arguments to be passed to create plate layout
callArgs = locals().copy() # This statement must remain first. The copy is just defensive.
[callArgs.pop(varname) for varname in ['self', 'func', 'applyto', 'ids', 'colorbar', 'xlim', 'ylim']] # pop args
callArgs['rowNum'] = self.shape[0]
callArgs['colNum'] = self.shape[1]
subplots_adjust_args = {}
subplots_adjust_args.setdefault('right', 0.85)
subplots_adjust_args.setdefault('top', 0.85)
pl.subplots_adjust(**subplots_adjust_args)
# Uses plate default row/col labels if user does not override them by specifying row/col
# labels
if row_labels == None:
callArgs['row_labels'] = self.row_labels # depends on [control=['if'], data=[]]
if col_labels == None:
callArgs['col_labels'] = self.col_labels # depends on [control=['if'], data=[]]
(ax_main, ax_subplots) = graph.create_grid_layout(**callArgs)
subplots_ax = DF(ax_subplots, index=self.row_labels, columns=self.col_labels)
if ids is None:
ids = self.keys() # depends on [control=['if'], data=['ids']]
ids = to_list(ids)
for ID in ids:
measurement = self[ID]
if not hasattr(measurement, 'data'):
continue # depends on [control=['if'], data=[]]
(row, col) = self._positions[ID]
ax = subplots_ax[col][row]
pl.sca(ax) # sets the current axis
if applyto == 'measurement':
func(measurement, ax) # reminder: pandas row/col order is reversed # depends on [control=['if'], data=[]]
elif applyto == 'data':
data = measurement.get_data()
if data is not None:
if func.func_code.co_argcount == 1:
func(data) # depends on [control=['if'], data=[]]
else:
func(data, ax) # depends on [control=['if'], data=['data']] # depends on [control=['if'], data=[]]
else:
raise ValueError('Encountered unsupported value {} for applyto parameter.'.format(applyto)) # depends on [control=['for'], data=['ID']]
# Autoscaling axes
graph.scale_subplots(ax_subplots, xlim=xlim, ylim=ylim)
#####
# Placing ticks on the top left subplot
ax_label = ax_subplots[0, -1]
pl.sca(ax_label)
if xlabel:
xlim = ax_label.get_xlim()
pl.xticks([xlim[0], xlim[1]], rotation=90) # depends on [control=['if'], data=[]]
if ylabel:
ylim = ax_label.get_ylim()
pl.yticks([ylim[0], ylim[1]], rotation=0) # depends on [control=['if'], data=[]]
pl.sca(ax_main) # sets to the main axis -- more intuitive
return (ax_main, ax_subplots) |
def Serialize(self, writer):
"""
Serialize full object.
Args:
writer (neo.IO.BinaryWriter):
"""
self.SerializeUnsigned(writer)
writer.WriteByte(1)
self.Script.Serialize(writer) | def function[Serialize, parameter[self, writer]]:
constant[
Serialize full object.
Args:
writer (neo.IO.BinaryWriter):
]
call[name[self].SerializeUnsigned, parameter[name[writer]]]
call[name[writer].WriteByte, parameter[constant[1]]]
call[name[self].Script.Serialize, parameter[name[writer]]] | keyword[def] identifier[Serialize] ( identifier[self] , identifier[writer] ):
literal[string]
identifier[self] . identifier[SerializeUnsigned] ( identifier[writer] )
identifier[writer] . identifier[WriteByte] ( literal[int] )
identifier[self] . identifier[Script] . identifier[Serialize] ( identifier[writer] ) | def Serialize(self, writer):
"""
Serialize full object.
Args:
writer (neo.IO.BinaryWriter):
"""
self.SerializeUnsigned(writer)
writer.WriteByte(1)
self.Script.Serialize(writer) |
def _add_index(self, index):
"""
Adds an index to the table.
:param index: The index to add
:type index: Index
:rtype: Table
"""
index_name = index.get_name()
index_name = self._normalize_identifier(index_name)
replaced_implicit_indexes = []
for name, implicit_index in self._implicit_indexes.items():
if implicit_index.is_fullfilled_by(index) and name in self._indexes:
replaced_implicit_indexes.append(name)
already_exists = (
index_name in self._indexes
and index_name not in replaced_implicit_indexes
or self._primary_key_name is not False
and index.is_primary()
)
if already_exists:
raise IndexAlreadyExists(index_name, self._name)
for name in replaced_implicit_indexes:
del self._indexes[name]
del self._implicit_indexes[name]
if index.is_primary():
self._primary_key_name = index_name
self._indexes[index_name] = index
return self | def function[_add_index, parameter[self, index]]:
constant[
Adds an index to the table.
:param index: The index to add
:type index: Index
:rtype: Table
]
variable[index_name] assign[=] call[name[index].get_name, parameter[]]
variable[index_name] assign[=] call[name[self]._normalize_identifier, parameter[name[index_name]]]
variable[replaced_implicit_indexes] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da18eb573d0>, <ast.Name object at 0x7da18eb545e0>]]] in starred[call[name[self]._implicit_indexes.items, parameter[]]] begin[:]
if <ast.BoolOp object at 0x7da18eb57e80> begin[:]
call[name[replaced_implicit_indexes].append, parameter[name[name]]]
variable[already_exists] assign[=] <ast.BoolOp object at 0x7da18eb554e0>
if name[already_exists] begin[:]
<ast.Raise object at 0x7da18eb561a0>
for taget[name[name]] in starred[name[replaced_implicit_indexes]] begin[:]
<ast.Delete object at 0x7da18f09e470>
<ast.Delete object at 0x7da18f09dcc0>
if call[name[index].is_primary, parameter[]] begin[:]
name[self]._primary_key_name assign[=] name[index_name]
call[name[self]._indexes][name[index_name]] assign[=] name[index]
return[name[self]] | keyword[def] identifier[_add_index] ( identifier[self] , identifier[index] ):
literal[string]
identifier[index_name] = identifier[index] . identifier[get_name] ()
identifier[index_name] = identifier[self] . identifier[_normalize_identifier] ( identifier[index_name] )
identifier[replaced_implicit_indexes] =[]
keyword[for] identifier[name] , identifier[implicit_index] keyword[in] identifier[self] . identifier[_implicit_indexes] . identifier[items] ():
keyword[if] identifier[implicit_index] . identifier[is_fullfilled_by] ( identifier[index] ) keyword[and] identifier[name] keyword[in] identifier[self] . identifier[_indexes] :
identifier[replaced_implicit_indexes] . identifier[append] ( identifier[name] )
identifier[already_exists] =(
identifier[index_name] keyword[in] identifier[self] . identifier[_indexes]
keyword[and] identifier[index_name] keyword[not] keyword[in] identifier[replaced_implicit_indexes]
keyword[or] identifier[self] . identifier[_primary_key_name] keyword[is] keyword[not] keyword[False]
keyword[and] identifier[index] . identifier[is_primary] ()
)
keyword[if] identifier[already_exists] :
keyword[raise] identifier[IndexAlreadyExists] ( identifier[index_name] , identifier[self] . identifier[_name] )
keyword[for] identifier[name] keyword[in] identifier[replaced_implicit_indexes] :
keyword[del] identifier[self] . identifier[_indexes] [ identifier[name] ]
keyword[del] identifier[self] . identifier[_implicit_indexes] [ identifier[name] ]
keyword[if] identifier[index] . identifier[is_primary] ():
identifier[self] . identifier[_primary_key_name] = identifier[index_name]
identifier[self] . identifier[_indexes] [ identifier[index_name] ]= identifier[index]
keyword[return] identifier[self] | def _add_index(self, index):
"""
Adds an index to the table.
:param index: The index to add
:type index: Index
:rtype: Table
"""
index_name = index.get_name()
index_name = self._normalize_identifier(index_name)
replaced_implicit_indexes = []
for (name, implicit_index) in self._implicit_indexes.items():
if implicit_index.is_fullfilled_by(index) and name in self._indexes:
replaced_implicit_indexes.append(name) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
already_exists = index_name in self._indexes and index_name not in replaced_implicit_indexes or (self._primary_key_name is not False and index.is_primary())
if already_exists:
raise IndexAlreadyExists(index_name, self._name) # depends on [control=['if'], data=[]]
for name in replaced_implicit_indexes:
del self._indexes[name]
del self._implicit_indexes[name] # depends on [control=['for'], data=['name']]
if index.is_primary():
self._primary_key_name = index_name # depends on [control=['if'], data=[]]
self._indexes[index_name] = index
return self |
def icqt(C, sr=22050, hop_length=512, fmin=None, bins_per_octave=12,
tuning=0.0, filter_scale=1, norm=1, sparsity=0.01, window='hann',
scale=True, length=None, amin=util.Deprecated(), res_type='fft'):
'''Compute the inverse constant-Q transform.
Given a constant-Q transform representation `C` of an audio signal `y`,
this function produces an approximation `y_hat`.
Parameters
----------
C : np.ndarray, [shape=(n_bins, n_frames)]
Constant-Q representation as produced by `core.cqt`
hop_length : int > 0 [scalar]
number of samples between successive frames
fmin : float > 0 [scalar]
Minimum frequency. Defaults to C1 ~= 32.70 Hz
tuning : float in `[-0.5, 0.5)` [scalar]
Tuning offset in fractions of a bin (cents).
filter_scale : float > 0 [scalar]
Filter scale factor. Small values (<1) use shorter windows
for improved time resolution.
norm : {inf, -inf, 0, float > 0}
Type of norm to use for basis function normalization.
See `librosa.util.normalize`.
sparsity : float in [0, 1)
Sparsify the CQT basis by discarding up to `sparsity`
fraction of the energy in each basis.
Set `sparsity=0` to disable sparsification.
window : str, tuple, number, or function
Window specification for the basis filters.
See `filters.get_window` for details.
scale : bool
If `True`, scale the CQT response by square-root the length
of each channel's filter. This is analogous to `norm='ortho'` in FFT.
If `False`, do not scale the CQT. This is analogous to `norm=None`
in FFT.
length : int > 0, optional
If provided, the output `y` is zero-padded or clipped to exactly
`length` samples.
amin : float or None [DEPRECATED]
.. note:: This parameter is deprecated in 0.7.0 and will be removed in 0.8.0.
res_type : string
Resampling mode. By default, this uses `fft` mode for high-quality
reconstruction, but this may be slow depending on your signal duration.
See `librosa.resample` for supported modes.
Returns
-------
y : np.ndarray, [shape=(n_samples), dtype=np.float]
Audio time-series reconstructed from the CQT representation.
See Also
--------
cqt
core.resample
Notes
-----
This function caches at level 40.
Examples
--------
Using default parameters
>>> y, sr = librosa.load(librosa.util.example_audio_file(), duration=15)
>>> C = librosa.cqt(y=y, sr=sr)
>>> y_hat = librosa.icqt(C=C, sr=sr)
Or with a different hop length and frequency resolution:
>>> hop_length = 256
>>> bins_per_octave = 12 * 3
>>> C = librosa.cqt(y=y, sr=sr, hop_length=256, n_bins=7*bins_per_octave,
... bins_per_octave=bins_per_octave)
>>> y_hat = librosa.icqt(C=C, sr=sr, hop_length=hop_length,
... bins_per_octave=bins_per_octave)
'''
if fmin is None:
fmin = note_to_hz('C1')
# Get the top octave of frequencies
n_bins = len(C)
freqs = cqt_frequencies(n_bins, fmin,
bins_per_octave=bins_per_octave,
tuning=tuning)[-bins_per_octave:]
n_filters = min(n_bins, bins_per_octave)
fft_basis, n_fft, lengths = __cqt_filter_fft(sr, np.min(freqs),
n_filters,
bins_per_octave,
tuning,
filter_scale,
norm,
sparsity=sparsity,
window=window)
if hop_length > min(lengths):
warnings.warn('hop_length={} exceeds minimum CQT filter length={:.3f}.\n'
'This will probably cause unpleasant acoustic artifacts. '
'Consider decreasing your hop length or increasing the frequency resolution of your CQT.'.format(hop_length, min(lengths)))
# The basis gets renormalized by the effective window length above;
# This step undoes that
fft_basis = fft_basis.todense() * n_fft / lengths[:, np.newaxis]
# This step conjugate-transposes the filter
inv_basis = fft_basis.H
# How many octaves do we have?
n_octaves = int(np.ceil(float(n_bins) / bins_per_octave))
y = None
for octave in range(n_octaves - 1, -1, -1):
slice_ = slice(-(octave+1) * bins_per_octave - 1,
-(octave) * bins_per_octave - 1)
# Slice this octave
C_oct = C[slice_]
inv_oct = inv_basis[:, -C_oct.shape[0]:]
oct_hop = hop_length // 2**octave
# Apply energy corrections
if scale:
C_scale = np.sqrt(lengths[-C_oct.shape[0]:, np.newaxis]) / n_fft
else:
C_scale = lengths[-C_oct.shape[0]:, np.newaxis] * np.sqrt(2**octave) / n_fft
# Inverse-project the basis for each octave
D_oct = inv_oct.dot(C_oct / C_scale)
# Inverse-STFT that response
y_oct = istft(D_oct, window='ones', hop_length=oct_hop)
# Up-sample that octave
if y is None:
y = y_oct
else:
# Up-sample the previous buffer and add in the new one
# Scipy-resampling is fast here, since it's a power-of-two relation
y = audio.resample(y, 1, 2, scale=True, res_type=res_type, fix=False)
y[:len(y_oct)] += y_oct
if length:
y = util.fix_length(y, length)
return y | def function[icqt, parameter[C, sr, hop_length, fmin, bins_per_octave, tuning, filter_scale, norm, sparsity, window, scale, length, amin, res_type]]:
constant[Compute the inverse constant-Q transform.
Given a constant-Q transform representation `C` of an audio signal `y`,
this function produces an approximation `y_hat`.
Parameters
----------
C : np.ndarray, [shape=(n_bins, n_frames)]
Constant-Q representation as produced by `core.cqt`
hop_length : int > 0 [scalar]
number of samples between successive frames
fmin : float > 0 [scalar]
Minimum frequency. Defaults to C1 ~= 32.70 Hz
tuning : float in `[-0.5, 0.5)` [scalar]
Tuning offset in fractions of a bin (cents).
filter_scale : float > 0 [scalar]
Filter scale factor. Small values (<1) use shorter windows
for improved time resolution.
norm : {inf, -inf, 0, float > 0}
Type of norm to use for basis function normalization.
See `librosa.util.normalize`.
sparsity : float in [0, 1)
Sparsify the CQT basis by discarding up to `sparsity`
fraction of the energy in each basis.
Set `sparsity=0` to disable sparsification.
window : str, tuple, number, or function
Window specification for the basis filters.
See `filters.get_window` for details.
scale : bool
If `True`, scale the CQT response by square-root the length
of each channel's filter. This is analogous to `norm='ortho'` in FFT.
If `False`, do not scale the CQT. This is analogous to `norm=None`
in FFT.
length : int > 0, optional
If provided, the output `y` is zero-padded or clipped to exactly
`length` samples.
amin : float or None [DEPRECATED]
.. note:: This parameter is deprecated in 0.7.0 and will be removed in 0.8.0.
res_type : string
Resampling mode. By default, this uses `fft` mode for high-quality
reconstruction, but this may be slow depending on your signal duration.
See `librosa.resample` for supported modes.
Returns
-------
y : np.ndarray, [shape=(n_samples), dtype=np.float]
Audio time-series reconstructed from the CQT representation.
See Also
--------
cqt
core.resample
Notes
-----
This function caches at level 40.
Examples
--------
Using default parameters
>>> y, sr = librosa.load(librosa.util.example_audio_file(), duration=15)
>>> C = librosa.cqt(y=y, sr=sr)
>>> y_hat = librosa.icqt(C=C, sr=sr)
Or with a different hop length and frequency resolution:
>>> hop_length = 256
>>> bins_per_octave = 12 * 3
>>> C = librosa.cqt(y=y, sr=sr, hop_length=256, n_bins=7*bins_per_octave,
... bins_per_octave=bins_per_octave)
>>> y_hat = librosa.icqt(C=C, sr=sr, hop_length=hop_length,
... bins_per_octave=bins_per_octave)
]
if compare[name[fmin] is constant[None]] begin[:]
variable[fmin] assign[=] call[name[note_to_hz], parameter[constant[C1]]]
variable[n_bins] assign[=] call[name[len], parameter[name[C]]]
variable[freqs] assign[=] call[call[name[cqt_frequencies], parameter[name[n_bins], name[fmin]]]][<ast.Slice object at 0x7da207f9bca0>]
variable[n_filters] assign[=] call[name[min], parameter[name[n_bins], name[bins_per_octave]]]
<ast.Tuple object at 0x7da207f98c70> assign[=] call[name[__cqt_filter_fft], parameter[name[sr], call[name[np].min, parameter[name[freqs]]], name[n_filters], name[bins_per_octave], name[tuning], name[filter_scale], name[norm]]]
if compare[name[hop_length] greater[>] call[name[min], parameter[name[lengths]]]] begin[:]
call[name[warnings].warn, parameter[call[constant[hop_length={} exceeds minimum CQT filter length={:.3f}.
This will probably cause unpleasant acoustic artifacts. Consider decreasing your hop length or increasing the frequency resolution of your CQT.].format, parameter[name[hop_length], call[name[min], parameter[name[lengths]]]]]]]
variable[fft_basis] assign[=] binary_operation[binary_operation[call[name[fft_basis].todense, parameter[]] * name[n_fft]] / call[name[lengths]][tuple[[<ast.Slice object at 0x7da1b05133a0>, <ast.Attribute object at 0x7da1b05132b0>]]]]
variable[inv_basis] assign[=] name[fft_basis].H
variable[n_octaves] assign[=] call[name[int], parameter[call[name[np].ceil, parameter[binary_operation[call[name[float], parameter[name[n_bins]]] / name[bins_per_octave]]]]]]
variable[y] assign[=] constant[None]
for taget[name[octave]] in starred[call[name[range], parameter[binary_operation[name[n_octaves] - constant[1]], <ast.UnaryOp object at 0x7da1b0513ac0>, <ast.UnaryOp object at 0x7da1b0513af0>]]] begin[:]
variable[slice_] assign[=] call[name[slice], parameter[binary_operation[binary_operation[<ast.UnaryOp object at 0x7da1b05135e0> * name[bins_per_octave]] - constant[1]], binary_operation[binary_operation[<ast.UnaryOp object at 0x7da1b0513850> * name[bins_per_octave]] - constant[1]]]]
variable[C_oct] assign[=] call[name[C]][name[slice_]]
variable[inv_oct] assign[=] call[name[inv_basis]][tuple[[<ast.Slice object at 0x7da1b0512560>, <ast.Slice object at 0x7da1b0512710>]]]
variable[oct_hop] assign[=] binary_operation[name[hop_length] <ast.FloorDiv object at 0x7da2590d6bc0> binary_operation[constant[2] ** name[octave]]]
if name[scale] begin[:]
variable[C_scale] assign[=] binary_operation[call[name[np].sqrt, parameter[call[name[lengths]][tuple[[<ast.Slice object at 0x7da1b0512440>, <ast.Attribute object at 0x7da207f9bdf0>]]]]] / name[n_fft]]
variable[D_oct] assign[=] call[name[inv_oct].dot, parameter[binary_operation[name[C_oct] / name[C_scale]]]]
variable[y_oct] assign[=] call[name[istft], parameter[name[D_oct]]]
if compare[name[y] is constant[None]] begin[:]
variable[y] assign[=] name[y_oct]
if name[length] begin[:]
variable[y] assign[=] call[name[util].fix_length, parameter[name[y], name[length]]]
return[name[y]] | keyword[def] identifier[icqt] ( identifier[C] , identifier[sr] = literal[int] , identifier[hop_length] = literal[int] , identifier[fmin] = keyword[None] , identifier[bins_per_octave] = literal[int] ,
identifier[tuning] = literal[int] , identifier[filter_scale] = literal[int] , identifier[norm] = literal[int] , identifier[sparsity] = literal[int] , identifier[window] = literal[string] ,
identifier[scale] = keyword[True] , identifier[length] = keyword[None] , identifier[amin] = identifier[util] . identifier[Deprecated] (), identifier[res_type] = literal[string] ):
literal[string]
keyword[if] identifier[fmin] keyword[is] keyword[None] :
identifier[fmin] = identifier[note_to_hz] ( literal[string] )
identifier[n_bins] = identifier[len] ( identifier[C] )
identifier[freqs] = identifier[cqt_frequencies] ( identifier[n_bins] , identifier[fmin] ,
identifier[bins_per_octave] = identifier[bins_per_octave] ,
identifier[tuning] = identifier[tuning] )[- identifier[bins_per_octave] :]
identifier[n_filters] = identifier[min] ( identifier[n_bins] , identifier[bins_per_octave] )
identifier[fft_basis] , identifier[n_fft] , identifier[lengths] = identifier[__cqt_filter_fft] ( identifier[sr] , identifier[np] . identifier[min] ( identifier[freqs] ),
identifier[n_filters] ,
identifier[bins_per_octave] ,
identifier[tuning] ,
identifier[filter_scale] ,
identifier[norm] ,
identifier[sparsity] = identifier[sparsity] ,
identifier[window] = identifier[window] )
keyword[if] identifier[hop_length] > identifier[min] ( identifier[lengths] ):
identifier[warnings] . identifier[warn] ( literal[string]
literal[string]
literal[string] . identifier[format] ( identifier[hop_length] , identifier[min] ( identifier[lengths] )))
identifier[fft_basis] = identifier[fft_basis] . identifier[todense] ()* identifier[n_fft] / identifier[lengths] [:, identifier[np] . identifier[newaxis] ]
identifier[inv_basis] = identifier[fft_basis] . identifier[H]
identifier[n_octaves] = identifier[int] ( identifier[np] . identifier[ceil] ( identifier[float] ( identifier[n_bins] )/ identifier[bins_per_octave] ))
identifier[y] = keyword[None]
keyword[for] identifier[octave] keyword[in] identifier[range] ( identifier[n_octaves] - literal[int] ,- literal[int] ,- literal[int] ):
identifier[slice_] = identifier[slice] (-( identifier[octave] + literal[int] )* identifier[bins_per_octave] - literal[int] ,
-( identifier[octave] )* identifier[bins_per_octave] - literal[int] )
identifier[C_oct] = identifier[C] [ identifier[slice_] ]
identifier[inv_oct] = identifier[inv_basis] [:,- identifier[C_oct] . identifier[shape] [ literal[int] ]:]
identifier[oct_hop] = identifier[hop_length] // literal[int] ** identifier[octave]
keyword[if] identifier[scale] :
identifier[C_scale] = identifier[np] . identifier[sqrt] ( identifier[lengths] [- identifier[C_oct] . identifier[shape] [ literal[int] ]:, identifier[np] . identifier[newaxis] ])/ identifier[n_fft]
keyword[else] :
identifier[C_scale] = identifier[lengths] [- identifier[C_oct] . identifier[shape] [ literal[int] ]:, identifier[np] . identifier[newaxis] ]* identifier[np] . identifier[sqrt] ( literal[int] ** identifier[octave] )/ identifier[n_fft]
identifier[D_oct] = identifier[inv_oct] . identifier[dot] ( identifier[C_oct] / identifier[C_scale] )
identifier[y_oct] = identifier[istft] ( identifier[D_oct] , identifier[window] = literal[string] , identifier[hop_length] = identifier[oct_hop] )
keyword[if] identifier[y] keyword[is] keyword[None] :
identifier[y] = identifier[y_oct]
keyword[else] :
identifier[y] = identifier[audio] . identifier[resample] ( identifier[y] , literal[int] , literal[int] , identifier[scale] = keyword[True] , identifier[res_type] = identifier[res_type] , identifier[fix] = keyword[False] )
identifier[y] [: identifier[len] ( identifier[y_oct] )]+= identifier[y_oct]
keyword[if] identifier[length] :
identifier[y] = identifier[util] . identifier[fix_length] ( identifier[y] , identifier[length] )
keyword[return] identifier[y] | def icqt(C, sr=22050, hop_length=512, fmin=None, bins_per_octave=12, tuning=0.0, filter_scale=1, norm=1, sparsity=0.01, window='hann', scale=True, length=None, amin=util.Deprecated(), res_type='fft'):
"""Compute the inverse constant-Q transform.
Given a constant-Q transform representation `C` of an audio signal `y`,
this function produces an approximation `y_hat`.
Parameters
----------
C : np.ndarray, [shape=(n_bins, n_frames)]
Constant-Q representation as produced by `core.cqt`
hop_length : int > 0 [scalar]
number of samples between successive frames
fmin : float > 0 [scalar]
Minimum frequency. Defaults to C1 ~= 32.70 Hz
tuning : float in `[-0.5, 0.5)` [scalar]
Tuning offset in fractions of a bin (cents).
filter_scale : float > 0 [scalar]
Filter scale factor. Small values (<1) use shorter windows
for improved time resolution.
norm : {inf, -inf, 0, float > 0}
Type of norm to use for basis function normalization.
See `librosa.util.normalize`.
sparsity : float in [0, 1)
Sparsify the CQT basis by discarding up to `sparsity`
fraction of the energy in each basis.
Set `sparsity=0` to disable sparsification.
window : str, tuple, number, or function
Window specification for the basis filters.
See `filters.get_window` for details.
scale : bool
If `True`, scale the CQT response by square-root the length
of each channel's filter. This is analogous to `norm='ortho'` in FFT.
If `False`, do not scale the CQT. This is analogous to `norm=None`
in FFT.
length : int > 0, optional
If provided, the output `y` is zero-padded or clipped to exactly
`length` samples.
amin : float or None [DEPRECATED]
.. note:: This parameter is deprecated in 0.7.0 and will be removed in 0.8.0.
res_type : string
Resampling mode. By default, this uses `fft` mode for high-quality
reconstruction, but this may be slow depending on your signal duration.
See `librosa.resample` for supported modes.
Returns
-------
y : np.ndarray, [shape=(n_samples), dtype=np.float]
Audio time-series reconstructed from the CQT representation.
See Also
--------
cqt
core.resample
Notes
-----
This function caches at level 40.
Examples
--------
Using default parameters
>>> y, sr = librosa.load(librosa.util.example_audio_file(), duration=15)
>>> C = librosa.cqt(y=y, sr=sr)
>>> y_hat = librosa.icqt(C=C, sr=sr)
Or with a different hop length and frequency resolution:
>>> hop_length = 256
>>> bins_per_octave = 12 * 3
>>> C = librosa.cqt(y=y, sr=sr, hop_length=256, n_bins=7*bins_per_octave,
... bins_per_octave=bins_per_octave)
>>> y_hat = librosa.icqt(C=C, sr=sr, hop_length=hop_length,
... bins_per_octave=bins_per_octave)
"""
if fmin is None:
fmin = note_to_hz('C1') # depends on [control=['if'], data=['fmin']]
# Get the top octave of frequencies
n_bins = len(C)
freqs = cqt_frequencies(n_bins, fmin, bins_per_octave=bins_per_octave, tuning=tuning)[-bins_per_octave:]
n_filters = min(n_bins, bins_per_octave)
(fft_basis, n_fft, lengths) = __cqt_filter_fft(sr, np.min(freqs), n_filters, bins_per_octave, tuning, filter_scale, norm, sparsity=sparsity, window=window)
if hop_length > min(lengths):
warnings.warn('hop_length={} exceeds minimum CQT filter length={:.3f}.\nThis will probably cause unpleasant acoustic artifacts. Consider decreasing your hop length or increasing the frequency resolution of your CQT.'.format(hop_length, min(lengths))) # depends on [control=['if'], data=['hop_length']]
# The basis gets renormalized by the effective window length above;
# This step undoes that
fft_basis = fft_basis.todense() * n_fft / lengths[:, np.newaxis]
# This step conjugate-transposes the filter
inv_basis = fft_basis.H
# How many octaves do we have?
n_octaves = int(np.ceil(float(n_bins) / bins_per_octave))
y = None
for octave in range(n_octaves - 1, -1, -1):
slice_ = slice(-(octave + 1) * bins_per_octave - 1, -octave * bins_per_octave - 1)
# Slice this octave
C_oct = C[slice_]
inv_oct = inv_basis[:, -C_oct.shape[0]:]
oct_hop = hop_length // 2 ** octave
# Apply energy corrections
if scale:
C_scale = np.sqrt(lengths[-C_oct.shape[0]:, np.newaxis]) / n_fft # depends on [control=['if'], data=[]]
else:
C_scale = lengths[-C_oct.shape[0]:, np.newaxis] * np.sqrt(2 ** octave) / n_fft
# Inverse-project the basis for each octave
D_oct = inv_oct.dot(C_oct / C_scale)
# Inverse-STFT that response
y_oct = istft(D_oct, window='ones', hop_length=oct_hop)
# Up-sample that octave
if y is None:
y = y_oct # depends on [control=['if'], data=['y']]
else:
# Up-sample the previous buffer and add in the new one
# Scipy-resampling is fast here, since it's a power-of-two relation
y = audio.resample(y, 1, 2, scale=True, res_type=res_type, fix=False)
y[:len(y_oct)] += y_oct # depends on [control=['for'], data=['octave']]
if length:
y = util.fix_length(y, length) # depends on [control=['if'], data=[]]
return y |
def intersection_update(self, *others):
"""Update the set, keeping only elements found in it and all others."""
self.db.sinterstore(self.key, [o.key for o in [self.key] + others]) | def function[intersection_update, parameter[self]]:
constant[Update the set, keeping only elements found in it and all others.]
call[name[self].db.sinterstore, parameter[name[self].key, <ast.ListComp object at 0x7da2044c1510>]] | keyword[def] identifier[intersection_update] ( identifier[self] ,* identifier[others] ):
literal[string]
identifier[self] . identifier[db] . identifier[sinterstore] ( identifier[self] . identifier[key] ,[ identifier[o] . identifier[key] keyword[for] identifier[o] keyword[in] [ identifier[self] . identifier[key] ]+ identifier[others] ]) | def intersection_update(self, *others):
"""Update the set, keeping only elements found in it and all others."""
self.db.sinterstore(self.key, [o.key for o in [self.key] + others]) |
def unpack_grad_tuple(gv, gpt):
"""Unpack a previously packed collection of gradient tensors.
Args:
gv: A (grad, var) pair to be unpacked.
gpt: A GradPackTuple describing the packing operation that produced gv.
Returns:
A list of (grad, var) pairs corresponding to the values that were
originally packed into gv, maybe following subsequent operations like
reduction.
"""
elt_widths = [x.num_elements() for x in gpt.shapes]
with tf.device(gv[0][0].device):
with tf.name_scope("unpack"):
splits = tf.split(gv[0], elt_widths)
unpacked_gv = []
for idx, s in enumerate(splits):
unpacked_gv.append((tf.reshape(s, gpt.shapes[idx]),
gpt.vars[idx]))
return unpacked_gv | def function[unpack_grad_tuple, parameter[gv, gpt]]:
constant[Unpack a previously packed collection of gradient tensors.
Args:
gv: A (grad, var) pair to be unpacked.
gpt: A GradPackTuple describing the packing operation that produced gv.
Returns:
A list of (grad, var) pairs corresponding to the values that were
originally packed into gv, maybe following subsequent operations like
reduction.
]
variable[elt_widths] assign[=] <ast.ListComp object at 0x7da20e961840>
with call[name[tf].device, parameter[call[call[name[gv]][constant[0]]][constant[0]].device]] begin[:]
with call[name[tf].name_scope, parameter[constant[unpack]]] begin[:]
variable[splits] assign[=] call[name[tf].split, parameter[call[name[gv]][constant[0]], name[elt_widths]]]
variable[unpacked_gv] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da2041dbdc0>, <ast.Name object at 0x7da2041d8580>]]] in starred[call[name[enumerate], parameter[name[splits]]]] begin[:]
call[name[unpacked_gv].append, parameter[tuple[[<ast.Call object at 0x7da2041db9a0>, <ast.Subscript object at 0x7da2041da7d0>]]]]
return[name[unpacked_gv]] | keyword[def] identifier[unpack_grad_tuple] ( identifier[gv] , identifier[gpt] ):
literal[string]
identifier[elt_widths] =[ identifier[x] . identifier[num_elements] () keyword[for] identifier[x] keyword[in] identifier[gpt] . identifier[shapes] ]
keyword[with] identifier[tf] . identifier[device] ( identifier[gv] [ literal[int] ][ literal[int] ]. identifier[device] ):
keyword[with] identifier[tf] . identifier[name_scope] ( literal[string] ):
identifier[splits] = identifier[tf] . identifier[split] ( identifier[gv] [ literal[int] ], identifier[elt_widths] )
identifier[unpacked_gv] =[]
keyword[for] identifier[idx] , identifier[s] keyword[in] identifier[enumerate] ( identifier[splits] ):
identifier[unpacked_gv] . identifier[append] (( identifier[tf] . identifier[reshape] ( identifier[s] , identifier[gpt] . identifier[shapes] [ identifier[idx] ]),
identifier[gpt] . identifier[vars] [ identifier[idx] ]))
keyword[return] identifier[unpacked_gv] | def unpack_grad_tuple(gv, gpt):
"""Unpack a previously packed collection of gradient tensors.
Args:
gv: A (grad, var) pair to be unpacked.
gpt: A GradPackTuple describing the packing operation that produced gv.
Returns:
A list of (grad, var) pairs corresponding to the values that were
originally packed into gv, maybe following subsequent operations like
reduction.
"""
elt_widths = [x.num_elements() for x in gpt.shapes]
with tf.device(gv[0][0].device):
with tf.name_scope('unpack'):
splits = tf.split(gv[0], elt_widths)
unpacked_gv = []
for (idx, s) in enumerate(splits):
unpacked_gv.append((tf.reshape(s, gpt.shapes[idx]), gpt.vars[idx])) # depends on [control=['for'], data=[]] # depends on [control=['with'], data=[]] # depends on [control=['with'], data=[]]
return unpacked_gv |
def get_translation_lookup(identifier, field, value):
"""
Mapper that takes a language field, its value and returns the
related lookup for Translation model.
"""
# Split by transformers
parts = field.split("__")
# Store transformers
transformers = parts[1:] if len(parts) > 1 else None
# defaults to "title" and default language
field_name = parts[0]
language = get_fallback_language()
name_parts = parts[0].split("_")
if len(name_parts) > 1:
supported_languages = get_supported_languages()
last_part = name_parts[-1]
if last_part in supported_languages:
# title_with_underscore_fr?
field_name = "_".join(name_parts[:-1])
language = last_part
else:
# title_with_underscore?
# Let's use default language
field_name = "_".join(name_parts)
value_lookup = (
"field_value"
if transformers is None
else "field_value__%s" % "__".join(transformers)
)
lookup = {"field_name": field_name, "identifier": identifier, "language": language}
lookup[value_lookup] = value
return lookup | def function[get_translation_lookup, parameter[identifier, field, value]]:
constant[
Mapper that takes a language field, its value and returns the
related lookup for Translation model.
]
variable[parts] assign[=] call[name[field].split, parameter[constant[__]]]
variable[transformers] assign[=] <ast.IfExp object at 0x7da1b28452a0>
variable[field_name] assign[=] call[name[parts]][constant[0]]
variable[language] assign[=] call[name[get_fallback_language], parameter[]]
variable[name_parts] assign[=] call[call[name[parts]][constant[0]].split, parameter[constant[_]]]
if compare[call[name[len], parameter[name[name_parts]]] greater[>] constant[1]] begin[:]
variable[supported_languages] assign[=] call[name[get_supported_languages], parameter[]]
variable[last_part] assign[=] call[name[name_parts]][<ast.UnaryOp object at 0x7da2041d9780>]
if compare[name[last_part] in name[supported_languages]] begin[:]
variable[field_name] assign[=] call[constant[_].join, parameter[call[name[name_parts]][<ast.Slice object at 0x7da2041d8a60>]]]
variable[language] assign[=] name[last_part]
variable[value_lookup] assign[=] <ast.IfExp object at 0x7da2041d90f0>
variable[lookup] assign[=] dictionary[[<ast.Constant object at 0x7da2041d8ee0>, <ast.Constant object at 0x7da2041d8f10>, <ast.Constant object at 0x7da2041da620>], [<ast.Name object at 0x7da2041d9e40>, <ast.Name object at 0x7da2041d9f60>, <ast.Name object at 0x7da2041d8610>]]
call[name[lookup]][name[value_lookup]] assign[=] name[value]
return[name[lookup]] | keyword[def] identifier[get_translation_lookup] ( identifier[identifier] , identifier[field] , identifier[value] ):
literal[string]
identifier[parts] = identifier[field] . identifier[split] ( literal[string] )
identifier[transformers] = identifier[parts] [ literal[int] :] keyword[if] identifier[len] ( identifier[parts] )> literal[int] keyword[else] keyword[None]
identifier[field_name] = identifier[parts] [ literal[int] ]
identifier[language] = identifier[get_fallback_language] ()
identifier[name_parts] = identifier[parts] [ literal[int] ]. identifier[split] ( literal[string] )
keyword[if] identifier[len] ( identifier[name_parts] )> literal[int] :
identifier[supported_languages] = identifier[get_supported_languages] ()
identifier[last_part] = identifier[name_parts] [- literal[int] ]
keyword[if] identifier[last_part] keyword[in] identifier[supported_languages] :
identifier[field_name] = literal[string] . identifier[join] ( identifier[name_parts] [:- literal[int] ])
identifier[language] = identifier[last_part]
keyword[else] :
identifier[field_name] = literal[string] . identifier[join] ( identifier[name_parts] )
identifier[value_lookup] =(
literal[string]
keyword[if] identifier[transformers] keyword[is] keyword[None]
keyword[else] literal[string] % literal[string] . identifier[join] ( identifier[transformers] )
)
identifier[lookup] ={ literal[string] : identifier[field_name] , literal[string] : identifier[identifier] , literal[string] : identifier[language] }
identifier[lookup] [ identifier[value_lookup] ]= identifier[value]
keyword[return] identifier[lookup] | def get_translation_lookup(identifier, field, value):
"""
Mapper that takes a language field, its value and returns the
related lookup for Translation model.
"""
# Split by transformers
parts = field.split('__')
# Store transformers
transformers = parts[1:] if len(parts) > 1 else None
# defaults to "title" and default language
field_name = parts[0]
language = get_fallback_language()
name_parts = parts[0].split('_')
if len(name_parts) > 1:
supported_languages = get_supported_languages()
last_part = name_parts[-1]
if last_part in supported_languages:
# title_with_underscore_fr?
field_name = '_'.join(name_parts[:-1])
language = last_part # depends on [control=['if'], data=['last_part']]
else:
# title_with_underscore?
# Let's use default language
field_name = '_'.join(name_parts) # depends on [control=['if'], data=[]]
value_lookup = 'field_value' if transformers is None else 'field_value__%s' % '__'.join(transformers)
lookup = {'field_name': field_name, 'identifier': identifier, 'language': language}
lookup[value_lookup] = value
return lookup |
def _write_cvvr(self, f, data):
'''
Write compressed "data" variable to the end of the file in a CVVR
'''
f.seek(0, 2)
byte_loc = f.tell()
cSize = len(data)
block_size = CDF.CVVR_BASE_SIZE64 + cSize
section_type = CDF.CVVR_
rfuA = 0
cvvr1 = bytearray(24)
cvvr1[0:8] = struct.pack('>q', block_size)
cvvr1[8:12] = struct.pack('>i', section_type)
cvvr1[12:16] = struct.pack('>i', rfuA)
cvvr1[16:24] = struct.pack('>q', cSize)
f.write(cvvr1)
f.write(data)
return byte_loc | def function[_write_cvvr, parameter[self, f, data]]:
constant[
Write compressed "data" variable to the end of the file in a CVVR
]
call[name[f].seek, parameter[constant[0], constant[2]]]
variable[byte_loc] assign[=] call[name[f].tell, parameter[]]
variable[cSize] assign[=] call[name[len], parameter[name[data]]]
variable[block_size] assign[=] binary_operation[name[CDF].CVVR_BASE_SIZE64 + name[cSize]]
variable[section_type] assign[=] name[CDF].CVVR_
variable[rfuA] assign[=] constant[0]
variable[cvvr1] assign[=] call[name[bytearray], parameter[constant[24]]]
call[name[cvvr1]][<ast.Slice object at 0x7da1b06a0ca0>] assign[=] call[name[struct].pack, parameter[constant[>q], name[block_size]]]
call[name[cvvr1]][<ast.Slice object at 0x7da1b06a0040>] assign[=] call[name[struct].pack, parameter[constant[>i], name[section_type]]]
call[name[cvvr1]][<ast.Slice object at 0x7da1b06a08b0>] assign[=] call[name[struct].pack, parameter[constant[>i], name[rfuA]]]
call[name[cvvr1]][<ast.Slice object at 0x7da1b06a3850>] assign[=] call[name[struct].pack, parameter[constant[>q], name[cSize]]]
call[name[f].write, parameter[name[cvvr1]]]
call[name[f].write, parameter[name[data]]]
return[name[byte_loc]] | keyword[def] identifier[_write_cvvr] ( identifier[self] , identifier[f] , identifier[data] ):
literal[string]
identifier[f] . identifier[seek] ( literal[int] , literal[int] )
identifier[byte_loc] = identifier[f] . identifier[tell] ()
identifier[cSize] = identifier[len] ( identifier[data] )
identifier[block_size] = identifier[CDF] . identifier[CVVR_BASE_SIZE64] + identifier[cSize]
identifier[section_type] = identifier[CDF] . identifier[CVVR_]
identifier[rfuA] = literal[int]
identifier[cvvr1] = identifier[bytearray] ( literal[int] )
identifier[cvvr1] [ literal[int] : literal[int] ]= identifier[struct] . identifier[pack] ( literal[string] , identifier[block_size] )
identifier[cvvr1] [ literal[int] : literal[int] ]= identifier[struct] . identifier[pack] ( literal[string] , identifier[section_type] )
identifier[cvvr1] [ literal[int] : literal[int] ]= identifier[struct] . identifier[pack] ( literal[string] , identifier[rfuA] )
identifier[cvvr1] [ literal[int] : literal[int] ]= identifier[struct] . identifier[pack] ( literal[string] , identifier[cSize] )
identifier[f] . identifier[write] ( identifier[cvvr1] )
identifier[f] . identifier[write] ( identifier[data] )
keyword[return] identifier[byte_loc] | def _write_cvvr(self, f, data):
"""
Write compressed "data" variable to the end of the file in a CVVR
"""
f.seek(0, 2)
byte_loc = f.tell()
cSize = len(data)
block_size = CDF.CVVR_BASE_SIZE64 + cSize
section_type = CDF.CVVR_
rfuA = 0
cvvr1 = bytearray(24)
cvvr1[0:8] = struct.pack('>q', block_size)
cvvr1[8:12] = struct.pack('>i', section_type)
cvvr1[12:16] = struct.pack('>i', rfuA)
cvvr1[16:24] = struct.pack('>q', cSize)
f.write(cvvr1)
f.write(data)
return byte_loc |
def log_writer():
'''log writing thread'''
while True:
mpstate.logfile_raw.write(bytearray(mpstate.logqueue_raw.get()))
timeout = time.time() + 10
while not mpstate.logqueue_raw.empty() and time.time() < timeout:
mpstate.logfile_raw.write(mpstate.logqueue_raw.get())
while not mpstate.logqueue.empty() and time.time() < timeout:
mpstate.logfile.write(mpstate.logqueue.get())
if mpstate.settings.flushlogs or time.time() >= timeout:
mpstate.logfile.flush()
mpstate.logfile_raw.flush() | def function[log_writer, parameter[]]:
constant[log writing thread]
while constant[True] begin[:]
call[name[mpstate].logfile_raw.write, parameter[call[name[bytearray], parameter[call[name[mpstate].logqueue_raw.get, parameter[]]]]]]
variable[timeout] assign[=] binary_operation[call[name[time].time, parameter[]] + constant[10]]
while <ast.BoolOp object at 0x7da1b162b6d0> begin[:]
call[name[mpstate].logfile_raw.write, parameter[call[name[mpstate].logqueue_raw.get, parameter[]]]]
while <ast.BoolOp object at 0x7da1b16287c0> begin[:]
call[name[mpstate].logfile.write, parameter[call[name[mpstate].logqueue.get, parameter[]]]]
if <ast.BoolOp object at 0x7da1b16288b0> begin[:]
call[name[mpstate].logfile.flush, parameter[]]
call[name[mpstate].logfile_raw.flush, parameter[]] | keyword[def] identifier[log_writer] ():
literal[string]
keyword[while] keyword[True] :
identifier[mpstate] . identifier[logfile_raw] . identifier[write] ( identifier[bytearray] ( identifier[mpstate] . identifier[logqueue_raw] . identifier[get] ()))
identifier[timeout] = identifier[time] . identifier[time] ()+ literal[int]
keyword[while] keyword[not] identifier[mpstate] . identifier[logqueue_raw] . identifier[empty] () keyword[and] identifier[time] . identifier[time] ()< identifier[timeout] :
identifier[mpstate] . identifier[logfile_raw] . identifier[write] ( identifier[mpstate] . identifier[logqueue_raw] . identifier[get] ())
keyword[while] keyword[not] identifier[mpstate] . identifier[logqueue] . identifier[empty] () keyword[and] identifier[time] . identifier[time] ()< identifier[timeout] :
identifier[mpstate] . identifier[logfile] . identifier[write] ( identifier[mpstate] . identifier[logqueue] . identifier[get] ())
keyword[if] identifier[mpstate] . identifier[settings] . identifier[flushlogs] keyword[or] identifier[time] . identifier[time] ()>= identifier[timeout] :
identifier[mpstate] . identifier[logfile] . identifier[flush] ()
identifier[mpstate] . identifier[logfile_raw] . identifier[flush] () | def log_writer():
"""log writing thread"""
while True:
mpstate.logfile_raw.write(bytearray(mpstate.logqueue_raw.get()))
timeout = time.time() + 10
while not mpstate.logqueue_raw.empty() and time.time() < timeout:
mpstate.logfile_raw.write(mpstate.logqueue_raw.get()) # depends on [control=['while'], data=[]]
while not mpstate.logqueue.empty() and time.time() < timeout:
mpstate.logfile.write(mpstate.logqueue.get()) # depends on [control=['while'], data=[]]
if mpstate.settings.flushlogs or time.time() >= timeout:
mpstate.logfile.flush()
mpstate.logfile_raw.flush() # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]] |
def wait_for_ready_state_complete(driver, timeout=settings.EXTREME_TIMEOUT):
"""
The DOM (Document Object Model) has a property called "readyState".
When the value of this becomes "complete", page resources are considered
fully loaded (although AJAX and other loads might still be happening).
This method will wait until document.readyState == "complete".
"""
start_ms = time.time() * 1000.0
stop_ms = start_ms + (timeout * 1000.0)
for x in range(int(timeout * 10)):
try:
ready_state = driver.execute_script("return document.readyState")
except WebDriverException:
# Bug fix for: [Permission denied to access property "document"]
time.sleep(0.03)
return True
if ready_state == u'complete':
time.sleep(0.01) # Better be sure everything is done loading
return True
else:
now_ms = time.time() * 1000.0
if now_ms >= stop_ms:
break
time.sleep(0.1)
raise Exception(
"Page elements never fully loaded after %s seconds!" % timeout) | def function[wait_for_ready_state_complete, parameter[driver, timeout]]:
constant[
The DOM (Document Object Model) has a property called "readyState".
When the value of this becomes "complete", page resources are considered
fully loaded (although AJAX and other loads might still be happening).
This method will wait until document.readyState == "complete".
]
variable[start_ms] assign[=] binary_operation[call[name[time].time, parameter[]] * constant[1000.0]]
variable[stop_ms] assign[=] binary_operation[name[start_ms] + binary_operation[name[timeout] * constant[1000.0]]]
for taget[name[x]] in starred[call[name[range], parameter[call[name[int], parameter[binary_operation[name[timeout] * constant[10]]]]]]] begin[:]
<ast.Try object at 0x7da1b1bb9690>
if compare[name[ready_state] equal[==] constant[complete]] begin[:]
call[name[time].sleep, parameter[constant[0.01]]]
return[constant[True]]
<ast.Raise object at 0x7da1b1bba830> | keyword[def] identifier[wait_for_ready_state_complete] ( identifier[driver] , identifier[timeout] = identifier[settings] . identifier[EXTREME_TIMEOUT] ):
literal[string]
identifier[start_ms] = identifier[time] . identifier[time] ()* literal[int]
identifier[stop_ms] = identifier[start_ms] +( identifier[timeout] * literal[int] )
keyword[for] identifier[x] keyword[in] identifier[range] ( identifier[int] ( identifier[timeout] * literal[int] )):
keyword[try] :
identifier[ready_state] = identifier[driver] . identifier[execute_script] ( literal[string] )
keyword[except] identifier[WebDriverException] :
identifier[time] . identifier[sleep] ( literal[int] )
keyword[return] keyword[True]
keyword[if] identifier[ready_state] == literal[string] :
identifier[time] . identifier[sleep] ( literal[int] )
keyword[return] keyword[True]
keyword[else] :
identifier[now_ms] = identifier[time] . identifier[time] ()* literal[int]
keyword[if] identifier[now_ms] >= identifier[stop_ms] :
keyword[break]
identifier[time] . identifier[sleep] ( literal[int] )
keyword[raise] identifier[Exception] (
literal[string] % identifier[timeout] ) | def wait_for_ready_state_complete(driver, timeout=settings.EXTREME_TIMEOUT):
"""
The DOM (Document Object Model) has a property called "readyState".
When the value of this becomes "complete", page resources are considered
fully loaded (although AJAX and other loads might still be happening).
This method will wait until document.readyState == "complete".
"""
start_ms = time.time() * 1000.0
stop_ms = start_ms + timeout * 1000.0
for x in range(int(timeout * 10)):
try:
ready_state = driver.execute_script('return document.readyState') # depends on [control=['try'], data=[]]
except WebDriverException:
# Bug fix for: [Permission denied to access property "document"]
time.sleep(0.03)
return True # depends on [control=['except'], data=[]]
if ready_state == u'complete':
time.sleep(0.01) # Better be sure everything is done loading
return True # depends on [control=['if'], data=[]]
else:
now_ms = time.time() * 1000.0
if now_ms >= stop_ms:
break # depends on [control=['if'], data=[]]
time.sleep(0.1) # depends on [control=['for'], data=[]]
raise Exception('Page elements never fully loaded after %s seconds!' % timeout) |
def load(self, content):
"""Parse yaml content."""
# Try parsing the YAML with global tags
try:
config = yaml.load(content, Loader=self._loader(self._global_tags))
except yaml.YAMLError:
raise InvalidConfigError(_("Config is not valid yaml."))
# Try extracting just the tool portion
try:
config = config[self.tool]
except (TypeError, KeyError):
return None
# If no scopes, just apply global default
if not isinstance(config, dict):
config = self._apply_default(config, self._global_default)
else:
# Figure out what scopes exist
scoped_keys = set(key for key in self._scopes)
# For every scope
for key in config:
# If scope has custom tags, apply
if key in scoped_keys:
# local tags, and local default
tags, default = self._scopes[key]
# Inherit global default if no local default
if not default:
default = self._global_default
config[key] = self._apply_default(config[key], default)
self._apply_scope(config[key], tags)
# Otherwise just apply global default
else:
config[key] = self._apply_default(config[key], self._global_default)
self._validate(config)
return config | def function[load, parameter[self, content]]:
constant[Parse yaml content.]
<ast.Try object at 0x7da18bc71660>
<ast.Try object at 0x7da18bc71540>
if <ast.UnaryOp object at 0x7da18bc72bf0> begin[:]
variable[config] assign[=] call[name[self]._apply_default, parameter[name[config], name[self]._global_default]]
call[name[self]._validate, parameter[name[config]]]
return[name[config]] | keyword[def] identifier[load] ( identifier[self] , identifier[content] ):
literal[string]
keyword[try] :
identifier[config] = identifier[yaml] . identifier[load] ( identifier[content] , identifier[Loader] = identifier[self] . identifier[_loader] ( identifier[self] . identifier[_global_tags] ))
keyword[except] identifier[yaml] . identifier[YAMLError] :
keyword[raise] identifier[InvalidConfigError] ( identifier[_] ( literal[string] ))
keyword[try] :
identifier[config] = identifier[config] [ identifier[self] . identifier[tool] ]
keyword[except] ( identifier[TypeError] , identifier[KeyError] ):
keyword[return] keyword[None]
keyword[if] keyword[not] identifier[isinstance] ( identifier[config] , identifier[dict] ):
identifier[config] = identifier[self] . identifier[_apply_default] ( identifier[config] , identifier[self] . identifier[_global_default] )
keyword[else] :
identifier[scoped_keys] = identifier[set] ( identifier[key] keyword[for] identifier[key] keyword[in] identifier[self] . identifier[_scopes] )
keyword[for] identifier[key] keyword[in] identifier[config] :
keyword[if] identifier[key] keyword[in] identifier[scoped_keys] :
identifier[tags] , identifier[default] = identifier[self] . identifier[_scopes] [ identifier[key] ]
keyword[if] keyword[not] identifier[default] :
identifier[default] = identifier[self] . identifier[_global_default]
identifier[config] [ identifier[key] ]= identifier[self] . identifier[_apply_default] ( identifier[config] [ identifier[key] ], identifier[default] )
identifier[self] . identifier[_apply_scope] ( identifier[config] [ identifier[key] ], identifier[tags] )
keyword[else] :
identifier[config] [ identifier[key] ]= identifier[self] . identifier[_apply_default] ( identifier[config] [ identifier[key] ], identifier[self] . identifier[_global_default] )
identifier[self] . identifier[_validate] ( identifier[config] )
keyword[return] identifier[config] | def load(self, content):
"""Parse yaml content."""
# Try parsing the YAML with global tags
try:
config = yaml.load(content, Loader=self._loader(self._global_tags)) # depends on [control=['try'], data=[]]
except yaml.YAMLError:
raise InvalidConfigError(_('Config is not valid yaml.')) # depends on [control=['except'], data=[]]
# Try extracting just the tool portion
try:
config = config[self.tool] # depends on [control=['try'], data=[]]
except (TypeError, KeyError):
return None # depends on [control=['except'], data=[]]
# If no scopes, just apply global default
if not isinstance(config, dict):
config = self._apply_default(config, self._global_default) # depends on [control=['if'], data=[]]
else:
# Figure out what scopes exist
scoped_keys = set((key for key in self._scopes))
# For every scope
for key in config:
# If scope has custom tags, apply
if key in scoped_keys:
# local tags, and local default
(tags, default) = self._scopes[key]
# Inherit global default if no local default
if not default:
default = self._global_default # depends on [control=['if'], data=[]]
config[key] = self._apply_default(config[key], default)
self._apply_scope(config[key], tags) # depends on [control=['if'], data=['key']]
else:
# Otherwise just apply global default
config[key] = self._apply_default(config[key], self._global_default) # depends on [control=['for'], data=['key']]
self._validate(config)
return config |
def _strip_metachars(val):
"""
When a filter uses a / or - in the search, only the elements
name and comment field is searched. This can cause issues if
searching a network element, i.e. 1.1.1.0/24 where the /24 portion
is not present in the name and only the elements ipv4_network
attribute. If exact_match is not specified, strip off the /24
portion. Queries of this nature should instead use a kw filter
of: ipv4_network='1.1.1.0/24'.
"""
ignore_metachar = r'(.+)([/-].+)'
match = re.search(ignore_metachar, str(val))
if match:
left_half = match.group(1)
return left_half
return val | def function[_strip_metachars, parameter[val]]:
constant[
When a filter uses a / or - in the search, only the elements
name and comment field is searched. This can cause issues if
searching a network element, i.e. 1.1.1.0/24 where the /24 portion
is not present in the name and only the elements ipv4_network
attribute. If exact_match is not specified, strip off the /24
portion. Queries of this nature should instead use a kw filter
of: ipv4_network='1.1.1.0/24'.
]
variable[ignore_metachar] assign[=] constant[(.+)([/-].+)]
variable[match] assign[=] call[name[re].search, parameter[name[ignore_metachar], call[name[str], parameter[name[val]]]]]
if name[match] begin[:]
variable[left_half] assign[=] call[name[match].group, parameter[constant[1]]]
return[name[left_half]]
return[name[val]] | keyword[def] identifier[_strip_metachars] ( identifier[val] ):
literal[string]
identifier[ignore_metachar] = literal[string]
identifier[match] = identifier[re] . identifier[search] ( identifier[ignore_metachar] , identifier[str] ( identifier[val] ))
keyword[if] identifier[match] :
identifier[left_half] = identifier[match] . identifier[group] ( literal[int] )
keyword[return] identifier[left_half]
keyword[return] identifier[val] | def _strip_metachars(val):
"""
When a filter uses a / or - in the search, only the elements
name and comment field is searched. This can cause issues if
searching a network element, i.e. 1.1.1.0/24 where the /24 portion
is not present in the name and only the elements ipv4_network
attribute. If exact_match is not specified, strip off the /24
portion. Queries of this nature should instead use a kw filter
of: ipv4_network='1.1.1.0/24'.
"""
ignore_metachar = '(.+)([/-].+)'
match = re.search(ignore_metachar, str(val))
if match:
left_half = match.group(1)
return left_half # depends on [control=['if'], data=[]]
return val |
def _master(self):
"""Master node's operation.
Assigning tasks to workers and collecting results from them
Parameters
----------
None
Returns
-------
results: list of tuple (voxel_id, accuracy)
the accuracy numbers of all voxels, in accuracy descending order
the length of array equals the number of voxels
"""
logger.info(
'Master at rank %d starts to allocate tasks',
MPI.COMM_WORLD.Get_rank()
)
results = []
comm = MPI.COMM_WORLD
size = comm.Get_size()
sending_voxels = self.voxel_unit if self.voxel_unit < self.num_voxels \
else self.num_voxels
current_task = (0, sending_voxels)
status = MPI.Status()
# using_size is used when the number of tasks
# is smaller than the number of workers
using_size = size
for i in range(0, size):
if i == self.master_rank:
continue
if current_task[1] == 0:
using_size = i
break
logger.debug(
'master starts to send a task to worker %d' %
i
)
comm.send(current_task,
dest=i,
tag=self._WORKTAG)
next_start = current_task[0] + current_task[1]
sending_voxels = self.voxel_unit \
if self.voxel_unit < self.num_voxels - next_start \
else self.num_voxels - next_start
current_task = (next_start, sending_voxels)
while using_size == size:
if current_task[1] == 0:
break
result = comm.recv(source=MPI.ANY_SOURCE,
tag=MPI.ANY_TAG,
status=status)
results += result
comm.send(current_task,
dest=status.Get_source(),
tag=self._WORKTAG)
next_start = current_task[0] + current_task[1]
sending_voxels = self.voxel_unit \
if self.voxel_unit < self.num_voxels - next_start \
else self.num_voxels - next_start
current_task = (next_start, sending_voxels)
for i in range(0, using_size):
if i == self.master_rank:
continue
result = comm.recv(source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG)
results += result
for i in range(0, size):
if i == self.master_rank:
continue
comm.send(None,
dest=i,
tag=self._TERMINATETAG)
return results | def function[_master, parameter[self]]:
constant[Master node's operation.
Assigning tasks to workers and collecting results from them
Parameters
----------
None
Returns
-------
results: list of tuple (voxel_id, accuracy)
the accuracy numbers of all voxels, in accuracy descending order
the length of array equals the number of voxels
]
call[name[logger].info, parameter[constant[Master at rank %d starts to allocate tasks], call[name[MPI].COMM_WORLD.Get_rank, parameter[]]]]
variable[results] assign[=] list[[]]
variable[comm] assign[=] name[MPI].COMM_WORLD
variable[size] assign[=] call[name[comm].Get_size, parameter[]]
variable[sending_voxels] assign[=] <ast.IfExp object at 0x7da1b074e350>
variable[current_task] assign[=] tuple[[<ast.Constant object at 0x7da1b074ecb0>, <ast.Name object at 0x7da1b074ec20>]]
variable[status] assign[=] call[name[MPI].Status, parameter[]]
variable[using_size] assign[=] name[size]
for taget[name[i]] in starred[call[name[range], parameter[constant[0], name[size]]]] begin[:]
if compare[name[i] equal[==] name[self].master_rank] begin[:]
continue
if compare[call[name[current_task]][constant[1]] equal[==] constant[0]] begin[:]
variable[using_size] assign[=] name[i]
break
call[name[logger].debug, parameter[binary_operation[constant[master starts to send a task to worker %d] <ast.Mod object at 0x7da2590d6920> name[i]]]]
call[name[comm].send, parameter[name[current_task]]]
variable[next_start] assign[=] binary_operation[call[name[current_task]][constant[0]] + call[name[current_task]][constant[1]]]
variable[sending_voxels] assign[=] <ast.IfExp object at 0x7da1b07e2c50>
variable[current_task] assign[=] tuple[[<ast.Name object at 0x7da1b07e0970>, <ast.Name object at 0x7da1b07e2b60>]]
while compare[name[using_size] equal[==] name[size]] begin[:]
if compare[call[name[current_task]][constant[1]] equal[==] constant[0]] begin[:]
break
variable[result] assign[=] call[name[comm].recv, parameter[]]
<ast.AugAssign object at 0x7da1b074d7b0>
call[name[comm].send, parameter[name[current_task]]]
variable[next_start] assign[=] binary_operation[call[name[current_task]][constant[0]] + call[name[current_task]][constant[1]]]
variable[sending_voxels] assign[=] <ast.IfExp object at 0x7da1b074dd20>
variable[current_task] assign[=] tuple[[<ast.Name object at 0x7da1b074f250>, <ast.Name object at 0x7da1b074f2e0>]]
for taget[name[i]] in starred[call[name[range], parameter[constant[0], name[using_size]]]] begin[:]
if compare[name[i] equal[==] name[self].master_rank] begin[:]
continue
variable[result] assign[=] call[name[comm].recv, parameter[]]
<ast.AugAssign object at 0x7da1b074e2c0>
for taget[name[i]] in starred[call[name[range], parameter[constant[0], name[size]]]] begin[:]
if compare[name[i] equal[==] name[self].master_rank] begin[:]
continue
call[name[comm].send, parameter[constant[None]]]
return[name[results]] | keyword[def] identifier[_master] ( identifier[self] ):
literal[string]
identifier[logger] . identifier[info] (
literal[string] ,
identifier[MPI] . identifier[COMM_WORLD] . identifier[Get_rank] ()
)
identifier[results] =[]
identifier[comm] = identifier[MPI] . identifier[COMM_WORLD]
identifier[size] = identifier[comm] . identifier[Get_size] ()
identifier[sending_voxels] = identifier[self] . identifier[voxel_unit] keyword[if] identifier[self] . identifier[voxel_unit] < identifier[self] . identifier[num_voxels] keyword[else] identifier[self] . identifier[num_voxels]
identifier[current_task] =( literal[int] , identifier[sending_voxels] )
identifier[status] = identifier[MPI] . identifier[Status] ()
identifier[using_size] = identifier[size]
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[size] ):
keyword[if] identifier[i] == identifier[self] . identifier[master_rank] :
keyword[continue]
keyword[if] identifier[current_task] [ literal[int] ]== literal[int] :
identifier[using_size] = identifier[i]
keyword[break]
identifier[logger] . identifier[debug] (
literal[string] %
identifier[i]
)
identifier[comm] . identifier[send] ( identifier[current_task] ,
identifier[dest] = identifier[i] ,
identifier[tag] = identifier[self] . identifier[_WORKTAG] )
identifier[next_start] = identifier[current_task] [ literal[int] ]+ identifier[current_task] [ literal[int] ]
identifier[sending_voxels] = identifier[self] . identifier[voxel_unit] keyword[if] identifier[self] . identifier[voxel_unit] < identifier[self] . identifier[num_voxels] - identifier[next_start] keyword[else] identifier[self] . identifier[num_voxels] - identifier[next_start]
identifier[current_task] =( identifier[next_start] , identifier[sending_voxels] )
keyword[while] identifier[using_size] == identifier[size] :
keyword[if] identifier[current_task] [ literal[int] ]== literal[int] :
keyword[break]
identifier[result] = identifier[comm] . identifier[recv] ( identifier[source] = identifier[MPI] . identifier[ANY_SOURCE] ,
identifier[tag] = identifier[MPI] . identifier[ANY_TAG] ,
identifier[status] = identifier[status] )
identifier[results] += identifier[result]
identifier[comm] . identifier[send] ( identifier[current_task] ,
identifier[dest] = identifier[status] . identifier[Get_source] (),
identifier[tag] = identifier[self] . identifier[_WORKTAG] )
identifier[next_start] = identifier[current_task] [ literal[int] ]+ identifier[current_task] [ literal[int] ]
identifier[sending_voxels] = identifier[self] . identifier[voxel_unit] keyword[if] identifier[self] . identifier[voxel_unit] < identifier[self] . identifier[num_voxels] - identifier[next_start] keyword[else] identifier[self] . identifier[num_voxels] - identifier[next_start]
identifier[current_task] =( identifier[next_start] , identifier[sending_voxels] )
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[using_size] ):
keyword[if] identifier[i] == identifier[self] . identifier[master_rank] :
keyword[continue]
identifier[result] = identifier[comm] . identifier[recv] ( identifier[source] = identifier[MPI] . identifier[ANY_SOURCE] , identifier[tag] = identifier[MPI] . identifier[ANY_TAG] )
identifier[results] += identifier[result]
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[size] ):
keyword[if] identifier[i] == identifier[self] . identifier[master_rank] :
keyword[continue]
identifier[comm] . identifier[send] ( keyword[None] ,
identifier[dest] = identifier[i] ,
identifier[tag] = identifier[self] . identifier[_TERMINATETAG] )
keyword[return] identifier[results] | def _master(self):
"""Master node's operation.
Assigning tasks to workers and collecting results from them
Parameters
----------
None
Returns
-------
results: list of tuple (voxel_id, accuracy)
the accuracy numbers of all voxels, in accuracy descending order
the length of array equals the number of voxels
"""
logger.info('Master at rank %d starts to allocate tasks', MPI.COMM_WORLD.Get_rank())
results = []
comm = MPI.COMM_WORLD
size = comm.Get_size()
sending_voxels = self.voxel_unit if self.voxel_unit < self.num_voxels else self.num_voxels
current_task = (0, sending_voxels)
status = MPI.Status()
# using_size is used when the number of tasks
# is smaller than the number of workers
using_size = size
for i in range(0, size):
if i == self.master_rank:
continue # depends on [control=['if'], data=[]]
if current_task[1] == 0:
using_size = i
break # depends on [control=['if'], data=[]]
logger.debug('master starts to send a task to worker %d' % i)
comm.send(current_task, dest=i, tag=self._WORKTAG)
next_start = current_task[0] + current_task[1]
sending_voxels = self.voxel_unit if self.voxel_unit < self.num_voxels - next_start else self.num_voxels - next_start
current_task = (next_start, sending_voxels) # depends on [control=['for'], data=['i']]
while using_size == size:
if current_task[1] == 0:
break # depends on [control=['if'], data=[]]
result = comm.recv(source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG, status=status)
results += result
comm.send(current_task, dest=status.Get_source(), tag=self._WORKTAG)
next_start = current_task[0] + current_task[1]
sending_voxels = self.voxel_unit if self.voxel_unit < self.num_voxels - next_start else self.num_voxels - next_start
current_task = (next_start, sending_voxels) # depends on [control=['while'], data=[]]
for i in range(0, using_size):
if i == self.master_rank:
continue # depends on [control=['if'], data=[]]
result = comm.recv(source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG)
results += result # depends on [control=['for'], data=['i']]
for i in range(0, size):
if i == self.master_rank:
continue # depends on [control=['if'], data=[]]
comm.send(None, dest=i, tag=self._TERMINATETAG) # depends on [control=['for'], data=['i']]
return results |
def create_fork(self):
"""
:calls: `POST /gists/:id/forks <http://developer.github.com/v3/gists>`_
:rtype: :class:`github.Gist.Gist`
"""
headers, data = self._requester.requestJsonAndCheck(
"POST",
self.url + "/forks"
)
return Gist(self._requester, headers, data, completed=True) | def function[create_fork, parameter[self]]:
constant[
:calls: `POST /gists/:id/forks <http://developer.github.com/v3/gists>`_
:rtype: :class:`github.Gist.Gist`
]
<ast.Tuple object at 0x7da1b217fe20> assign[=] call[name[self]._requester.requestJsonAndCheck, parameter[constant[POST], binary_operation[name[self].url + constant[/forks]]]]
return[call[name[Gist], parameter[name[self]._requester, name[headers], name[data]]]] | keyword[def] identifier[create_fork] ( identifier[self] ):
literal[string]
identifier[headers] , identifier[data] = identifier[self] . identifier[_requester] . identifier[requestJsonAndCheck] (
literal[string] ,
identifier[self] . identifier[url] + literal[string]
)
keyword[return] identifier[Gist] ( identifier[self] . identifier[_requester] , identifier[headers] , identifier[data] , identifier[completed] = keyword[True] ) | def create_fork(self):
"""
:calls: `POST /gists/:id/forks <http://developer.github.com/v3/gists>`_
:rtype: :class:`github.Gist.Gist`
"""
(headers, data) = self._requester.requestJsonAndCheck('POST', self.url + '/forks')
return Gist(self._requester, headers, data, completed=True) |
def calibration(self):
"""
Set the path to the calibration cache file for the given IFO.
During S2 the Hanford 2km IFO had two calibration epochs, so
if the start time is during S2, we use the correct cache file.
"""
# figure out the name of the calibration cache files
# as specified in the ini-file
self.calibration_cache_path()
if self.job().is_dax():
# new code for DAX
self.add_var_opt('glob-calibration-data','')
cache_filename=self.get_calibration()
pat = re.compile(r'(file://.*)')
f = open(cache_filename, 'r')
lines = f.readlines()
# loop over entries in the cache-file...
for line in lines:
m = pat.search(line)
if not m:
raise IOError
url = m.group(1)
# ... and add files to input-file list
path = urlparse.urlparse(url)[2]
calibration_lfn = os.path.basename(path)
self.add_input_file(calibration_lfn)
else:
# old .calibration for DAG's
self.add_var_opt('calibration-cache', self.__calibration_cache)
self.__calibration = self.__calibration_cache
self.add_input_file(self.__calibration) | def function[calibration, parameter[self]]:
constant[
Set the path to the calibration cache file for the given IFO.
During S2 the Hanford 2km IFO had two calibration epochs, so
if the start time is during S2, we use the correct cache file.
]
call[name[self].calibration_cache_path, parameter[]]
if call[call[name[self].job, parameter[]].is_dax, parameter[]] begin[:]
call[name[self].add_var_opt, parameter[constant[glob-calibration-data], constant[]]]
variable[cache_filename] assign[=] call[name[self].get_calibration, parameter[]]
variable[pat] assign[=] call[name[re].compile, parameter[constant[(file://.*)]]]
variable[f] assign[=] call[name[open], parameter[name[cache_filename], constant[r]]]
variable[lines] assign[=] call[name[f].readlines, parameter[]]
for taget[name[line]] in starred[name[lines]] begin[:]
variable[m] assign[=] call[name[pat].search, parameter[name[line]]]
if <ast.UnaryOp object at 0x7da18dc06920> begin[:]
<ast.Raise object at 0x7da18dc04820>
variable[url] assign[=] call[name[m].group, parameter[constant[1]]]
variable[path] assign[=] call[call[name[urlparse].urlparse, parameter[name[url]]]][constant[2]]
variable[calibration_lfn] assign[=] call[name[os].path.basename, parameter[name[path]]]
call[name[self].add_input_file, parameter[name[calibration_lfn]]] | keyword[def] identifier[calibration] ( identifier[self] ):
literal[string]
identifier[self] . identifier[calibration_cache_path] ()
keyword[if] identifier[self] . identifier[job] (). identifier[is_dax] ():
identifier[self] . identifier[add_var_opt] ( literal[string] , literal[string] )
identifier[cache_filename] = identifier[self] . identifier[get_calibration] ()
identifier[pat] = identifier[re] . identifier[compile] ( literal[string] )
identifier[f] = identifier[open] ( identifier[cache_filename] , literal[string] )
identifier[lines] = identifier[f] . identifier[readlines] ()
keyword[for] identifier[line] keyword[in] identifier[lines] :
identifier[m] = identifier[pat] . identifier[search] ( identifier[line] )
keyword[if] keyword[not] identifier[m] :
keyword[raise] identifier[IOError]
identifier[url] = identifier[m] . identifier[group] ( literal[int] )
identifier[path] = identifier[urlparse] . identifier[urlparse] ( identifier[url] )[ literal[int] ]
identifier[calibration_lfn] = identifier[os] . identifier[path] . identifier[basename] ( identifier[path] )
identifier[self] . identifier[add_input_file] ( identifier[calibration_lfn] )
keyword[else] :
identifier[self] . identifier[add_var_opt] ( literal[string] , identifier[self] . identifier[__calibration_cache] )
identifier[self] . identifier[__calibration] = identifier[self] . identifier[__calibration_cache]
identifier[self] . identifier[add_input_file] ( identifier[self] . identifier[__calibration] ) | def calibration(self):
"""
Set the path to the calibration cache file for the given IFO.
During S2 the Hanford 2km IFO had two calibration epochs, so
if the start time is during S2, we use the correct cache file.
"""
# figure out the name of the calibration cache files
# as specified in the ini-file
self.calibration_cache_path()
if self.job().is_dax():
# new code for DAX
self.add_var_opt('glob-calibration-data', '')
cache_filename = self.get_calibration()
pat = re.compile('(file://.*)')
f = open(cache_filename, 'r')
lines = f.readlines()
# loop over entries in the cache-file...
for line in lines:
m = pat.search(line)
if not m:
raise IOError # depends on [control=['if'], data=[]]
url = m.group(1)
# ... and add files to input-file list
path = urlparse.urlparse(url)[2]
calibration_lfn = os.path.basename(path)
self.add_input_file(calibration_lfn) # depends on [control=['for'], data=['line']] # depends on [control=['if'], data=[]]
else:
# old .calibration for DAG's
self.add_var_opt('calibration-cache', self.__calibration_cache)
self.__calibration = self.__calibration_cache
self.add_input_file(self.__calibration) |
def init(uri, echo=False):
"""Initialise the database.
Initialise the sqlalchemy engine, metadata and table objects that we use to
connect to the database.
Create the database and the database tables themselves if they don't
already exist.
:param uri: the sqlalchemy database URI
:type uri: string
:param echo: whether or not to have the sqlalchemy engine log all
statements to stdout
:type echo: bool
"""
global ENGINE, _METADATA, JOBS_TABLE, METADATA_TABLE, LOGS_TABLE
ENGINE = sqlalchemy.create_engine(uri, echo=echo, convert_unicode=True)
_METADATA = sqlalchemy.MetaData(ENGINE)
JOBS_TABLE = _init_jobs_table()
METADATA_TABLE = _init_metadata_table()
LOGS_TABLE = _init_logs_table()
_METADATA.create_all(ENGINE) | def function[init, parameter[uri, echo]]:
constant[Initialise the database.
Initialise the sqlalchemy engine, metadata and table objects that we use to
connect to the database.
Create the database and the database tables themselves if they don't
already exist.
:param uri: the sqlalchemy database URI
:type uri: string
:param echo: whether or not to have the sqlalchemy engine log all
statements to stdout
:type echo: bool
]
<ast.Global object at 0x7da18bccb070>
variable[ENGINE] assign[=] call[name[sqlalchemy].create_engine, parameter[name[uri]]]
variable[_METADATA] assign[=] call[name[sqlalchemy].MetaData, parameter[name[ENGINE]]]
variable[JOBS_TABLE] assign[=] call[name[_init_jobs_table], parameter[]]
variable[METADATA_TABLE] assign[=] call[name[_init_metadata_table], parameter[]]
variable[LOGS_TABLE] assign[=] call[name[_init_logs_table], parameter[]]
call[name[_METADATA].create_all, parameter[name[ENGINE]]] | keyword[def] identifier[init] ( identifier[uri] , identifier[echo] = keyword[False] ):
literal[string]
keyword[global] identifier[ENGINE] , identifier[_METADATA] , identifier[JOBS_TABLE] , identifier[METADATA_TABLE] , identifier[LOGS_TABLE]
identifier[ENGINE] = identifier[sqlalchemy] . identifier[create_engine] ( identifier[uri] , identifier[echo] = identifier[echo] , identifier[convert_unicode] = keyword[True] )
identifier[_METADATA] = identifier[sqlalchemy] . identifier[MetaData] ( identifier[ENGINE] )
identifier[JOBS_TABLE] = identifier[_init_jobs_table] ()
identifier[METADATA_TABLE] = identifier[_init_metadata_table] ()
identifier[LOGS_TABLE] = identifier[_init_logs_table] ()
identifier[_METADATA] . identifier[create_all] ( identifier[ENGINE] ) | def init(uri, echo=False):
"""Initialise the database.
Initialise the sqlalchemy engine, metadata and table objects that we use to
connect to the database.
Create the database and the database tables themselves if they don't
already exist.
:param uri: the sqlalchemy database URI
:type uri: string
:param echo: whether or not to have the sqlalchemy engine log all
statements to stdout
:type echo: bool
"""
global ENGINE, _METADATA, JOBS_TABLE, METADATA_TABLE, LOGS_TABLE
ENGINE = sqlalchemy.create_engine(uri, echo=echo, convert_unicode=True)
_METADATA = sqlalchemy.MetaData(ENGINE)
JOBS_TABLE = _init_jobs_table()
METADATA_TABLE = _init_metadata_table()
LOGS_TABLE = _init_logs_table()
_METADATA.create_all(ENGINE) |
def describe_stream(self, stream_arn, first_shard=None):
"""Wraps :func:`boto3.DynamoDBStreams.Client.describe_stream`, handling continuation tokens.
:param str stream_arn: Stream arn, usually from the model's ``Meta.stream["arn"]``.
:param str first_shard: *(Optional)* If provided, only shards after this shard id will be returned.
:return: All shards in the stream, or a subset if ``first_shard`` is provided.
:rtype: dict
"""
description = {"Shards": []}
request = {"StreamArn": stream_arn, "ExclusiveStartShardId": first_shard}
# boto3 isn't down with literal Nones.
if first_shard is None:
request.pop("ExclusiveStartShardId")
while request.get("ExclusiveStartShardId") is not missing:
try:
response = self.stream_client.describe_stream(**request)["StreamDescription"]
except botocore.exceptions.ClientError as error:
if error.response["Error"]["Code"] == "ResourceNotFoundException":
raise InvalidStream(f"The stream arn {stream_arn!r} does not exist.") from error
raise BloopException("Unexpected error while describing stream.") from error
# Docs aren't clear if the terminal value is null, or won't exist.
# Since we don't terminate the loop on None, the "or missing" here
# will ensure we stop on a falsey value.
request["ExclusiveStartShardId"] = response.pop("LastEvaluatedShardId", None) or missing
description["Shards"].extend(response.pop("Shards", []))
description.update(response)
return description | def function[describe_stream, parameter[self, stream_arn, first_shard]]:
constant[Wraps :func:`boto3.DynamoDBStreams.Client.describe_stream`, handling continuation tokens.
:param str stream_arn: Stream arn, usually from the model's ``Meta.stream["arn"]``.
:param str first_shard: *(Optional)* If provided, only shards after this shard id will be returned.
:return: All shards in the stream, or a subset if ``first_shard`` is provided.
:rtype: dict
]
variable[description] assign[=] dictionary[[<ast.Constant object at 0x7da1b0f2cc10>], [<ast.List object at 0x7da1b0f2ea40>]]
variable[request] assign[=] dictionary[[<ast.Constant object at 0x7da1b0f2e380>, <ast.Constant object at 0x7da1b0f2c0a0>], [<ast.Name object at 0x7da1b0f2f2b0>, <ast.Name object at 0x7da1b0f2e260>]]
if compare[name[first_shard] is constant[None]] begin[:]
call[name[request].pop, parameter[constant[ExclusiveStartShardId]]]
while compare[call[name[request].get, parameter[constant[ExclusiveStartShardId]]] is_not name[missing]] begin[:]
<ast.Try object at 0x7da1b0f2ee60>
call[name[request]][constant[ExclusiveStartShardId]] assign[=] <ast.BoolOp object at 0x7da1b0f2ec80>
call[call[name[description]][constant[Shards]].extend, parameter[call[name[response].pop, parameter[constant[Shards], list[[]]]]]]
call[name[description].update, parameter[name[response]]]
return[name[description]] | keyword[def] identifier[describe_stream] ( identifier[self] , identifier[stream_arn] , identifier[first_shard] = keyword[None] ):
literal[string]
identifier[description] ={ literal[string] :[]}
identifier[request] ={ literal[string] : identifier[stream_arn] , literal[string] : identifier[first_shard] }
keyword[if] identifier[first_shard] keyword[is] keyword[None] :
identifier[request] . identifier[pop] ( literal[string] )
keyword[while] identifier[request] . identifier[get] ( literal[string] ) keyword[is] keyword[not] identifier[missing] :
keyword[try] :
identifier[response] = identifier[self] . identifier[stream_client] . identifier[describe_stream] (** identifier[request] )[ literal[string] ]
keyword[except] identifier[botocore] . identifier[exceptions] . identifier[ClientError] keyword[as] identifier[error] :
keyword[if] identifier[error] . identifier[response] [ literal[string] ][ literal[string] ]== literal[string] :
keyword[raise] identifier[InvalidStream] ( literal[string] ) keyword[from] identifier[error]
keyword[raise] identifier[BloopException] ( literal[string] ) keyword[from] identifier[error]
identifier[request] [ literal[string] ]= identifier[response] . identifier[pop] ( literal[string] , keyword[None] ) keyword[or] identifier[missing]
identifier[description] [ literal[string] ]. identifier[extend] ( identifier[response] . identifier[pop] ( literal[string] ,[]))
identifier[description] . identifier[update] ( identifier[response] )
keyword[return] identifier[description] | def describe_stream(self, stream_arn, first_shard=None):
"""Wraps :func:`boto3.DynamoDBStreams.Client.describe_stream`, handling continuation tokens.
:param str stream_arn: Stream arn, usually from the model's ``Meta.stream["arn"]``.
:param str first_shard: *(Optional)* If provided, only shards after this shard id will be returned.
:return: All shards in the stream, or a subset if ``first_shard`` is provided.
:rtype: dict
"""
description = {'Shards': []}
request = {'StreamArn': stream_arn, 'ExclusiveStartShardId': first_shard}
# boto3 isn't down with literal Nones.
if first_shard is None:
request.pop('ExclusiveStartShardId') # depends on [control=['if'], data=[]]
while request.get('ExclusiveStartShardId') is not missing:
try:
response = self.stream_client.describe_stream(**request)['StreamDescription'] # depends on [control=['try'], data=[]]
except botocore.exceptions.ClientError as error:
if error.response['Error']['Code'] == 'ResourceNotFoundException':
raise InvalidStream(f'The stream arn {stream_arn!r} does not exist.') from error # depends on [control=['if'], data=[]]
raise BloopException('Unexpected error while describing stream.') from error # depends on [control=['except'], data=['error']]
# Docs aren't clear if the terminal value is null, or won't exist.
# Since we don't terminate the loop on None, the "or missing" here
# will ensure we stop on a falsey value.
request['ExclusiveStartShardId'] = response.pop('LastEvaluatedShardId', None) or missing
description['Shards'].extend(response.pop('Shards', []))
description.update(response) # depends on [control=['while'], data=['missing']]
return description |
def _index_loopbacks(self):
"""Finds all loopbacks and stores them in :attr:`loopbacks`"""
self.loopbacks = {}
try:
result = _util.check_output_(['losetup', '-a'])
for line in result.splitlines():
m = re.match(r'(.+): (.+) \((.+)\).*', line)
if m:
self.loopbacks[m.group(1)] = m.group(3)
except Exception:
pass | def function[_index_loopbacks, parameter[self]]:
constant[Finds all loopbacks and stores them in :attr:`loopbacks`]
name[self].loopbacks assign[=] dictionary[[], []]
<ast.Try object at 0x7da1b0690550> | keyword[def] identifier[_index_loopbacks] ( identifier[self] ):
literal[string]
identifier[self] . identifier[loopbacks] ={}
keyword[try] :
identifier[result] = identifier[_util] . identifier[check_output_] ([ literal[string] , literal[string] ])
keyword[for] identifier[line] keyword[in] identifier[result] . identifier[splitlines] ():
identifier[m] = identifier[re] . identifier[match] ( literal[string] , identifier[line] )
keyword[if] identifier[m] :
identifier[self] . identifier[loopbacks] [ identifier[m] . identifier[group] ( literal[int] )]= identifier[m] . identifier[group] ( literal[int] )
keyword[except] identifier[Exception] :
keyword[pass] | def _index_loopbacks(self):
"""Finds all loopbacks and stores them in :attr:`loopbacks`"""
self.loopbacks = {}
try:
result = _util.check_output_(['losetup', '-a'])
for line in result.splitlines():
m = re.match('(.+): (.+) \\((.+)\\).*', line)
if m:
self.loopbacks[m.group(1)] = m.group(3) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['line']] # depends on [control=['try'], data=[]]
except Exception:
pass # depends on [control=['except'], data=[]] |
def rename_collection(db, collection, new_name):
'''
Renames a MongoDB collection.
Arguments:
db (Database): A pymongo Database object. Can be obtained
with ``get_db``.
collection (str): Name of the collection to be renamed.
new_name (str, func): ``new_name`` can be one of two things::
1. The new collection name, as a string.
2. A function which, when passed the current collection name,
returns the new collection name. If the function
returns an empty string, the collection will not be
renamed.
'''
if hasattr(new_name, '__call__'):
_new = new_name(collection)
if _new == '':
return
else:
_new = new_name
c = db[collection]
c.rename(_new) | def function[rename_collection, parameter[db, collection, new_name]]:
constant[
Renames a MongoDB collection.
Arguments:
db (Database): A pymongo Database object. Can be obtained
with ``get_db``.
collection (str): Name of the collection to be renamed.
new_name (str, func): ``new_name`` can be one of two things::
1. The new collection name, as a string.
2. A function which, when passed the current collection name,
returns the new collection name. If the function
returns an empty string, the collection will not be
renamed.
]
if call[name[hasattr], parameter[name[new_name], constant[__call__]]] begin[:]
variable[_new] assign[=] call[name[new_name], parameter[name[collection]]]
if compare[name[_new] equal[==] constant[]] begin[:]
return[None]
variable[c] assign[=] call[name[db]][name[collection]]
call[name[c].rename, parameter[name[_new]]] | keyword[def] identifier[rename_collection] ( identifier[db] , identifier[collection] , identifier[new_name] ):
literal[string]
keyword[if] identifier[hasattr] ( identifier[new_name] , literal[string] ):
identifier[_new] = identifier[new_name] ( identifier[collection] )
keyword[if] identifier[_new] == literal[string] :
keyword[return]
keyword[else] :
identifier[_new] = identifier[new_name]
identifier[c] = identifier[db] [ identifier[collection] ]
identifier[c] . identifier[rename] ( identifier[_new] ) | def rename_collection(db, collection, new_name):
"""
Renames a MongoDB collection.
Arguments:
db (Database): A pymongo Database object. Can be obtained
with ``get_db``.
collection (str): Name of the collection to be renamed.
new_name (str, func): ``new_name`` can be one of two things::
1. The new collection name, as a string.
2. A function which, when passed the current collection name,
returns the new collection name. If the function
returns an empty string, the collection will not be
renamed.
"""
if hasattr(new_name, '__call__'):
_new = new_name(collection)
if _new == '':
return # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
_new = new_name
c = db[collection]
c.rename(_new) |
def round(x, context=None):
"""
Return the nearest integer to x, rounding halfway cases *away from zero*.
If the result is not exactly representable, it will be rounded according to
the current context.
.. note::
This function corresponds to the MPFR function ``mpfr_rint_round``, not
to ``mpfr_round``.
"""
return _apply_function_in_current_context(
BigFloat,
mpfr.mpfr_rint_round,
(BigFloat._implicit_convert(x),),
context,
) | def function[round, parameter[x, context]]:
constant[
Return the nearest integer to x, rounding halfway cases *away from zero*.
If the result is not exactly representable, it will be rounded according to
the current context.
.. note::
This function corresponds to the MPFR function ``mpfr_rint_round``, not
to ``mpfr_round``.
]
return[call[name[_apply_function_in_current_context], parameter[name[BigFloat], name[mpfr].mpfr_rint_round, tuple[[<ast.Call object at 0x7da18ede69e0>]], name[context]]]] | keyword[def] identifier[round] ( identifier[x] , identifier[context] = keyword[None] ):
literal[string]
keyword[return] identifier[_apply_function_in_current_context] (
identifier[BigFloat] ,
identifier[mpfr] . identifier[mpfr_rint_round] ,
( identifier[BigFloat] . identifier[_implicit_convert] ( identifier[x] ),),
identifier[context] ,
) | def round(x, context=None):
"""
Return the nearest integer to x, rounding halfway cases *away from zero*.
If the result is not exactly representable, it will be rounded according to
the current context.
.. note::
This function corresponds to the MPFR function ``mpfr_rint_round``, not
to ``mpfr_round``.
"""
return _apply_function_in_current_context(BigFloat, mpfr.mpfr_rint_round, (BigFloat._implicit_convert(x),), context) |
def _get_token():
'''
Get an auth token
'''
username = __opts__.get('rallydev', {}).get('username', None)
password = __opts__.get('rallydev', {}).get('password', None)
path = 'https://rally1.rallydev.com/slm/webservice/v2.0/security/authorize'
result = salt.utils.http.query(
path,
decode=True,
decode_type='json',
text=True,
status=True,
username=username,
password=password,
cookies=True,
persist_session=True,
opts=__opts__,
)
if 'dict' not in result:
return None
return result['dict']['OperationResult']['SecurityToken'] | def function[_get_token, parameter[]]:
constant[
Get an auth token
]
variable[username] assign[=] call[call[name[__opts__].get, parameter[constant[rallydev], dictionary[[], []]]].get, parameter[constant[username], constant[None]]]
variable[password] assign[=] call[call[name[__opts__].get, parameter[constant[rallydev], dictionary[[], []]]].get, parameter[constant[password], constant[None]]]
variable[path] assign[=] constant[https://rally1.rallydev.com/slm/webservice/v2.0/security/authorize]
variable[result] assign[=] call[name[salt].utils.http.query, parameter[name[path]]]
if compare[constant[dict] <ast.NotIn object at 0x7da2590d7190> name[result]] begin[:]
return[constant[None]]
return[call[call[call[name[result]][constant[dict]]][constant[OperationResult]]][constant[SecurityToken]]] | keyword[def] identifier[_get_token] ():
literal[string]
identifier[username] = identifier[__opts__] . identifier[get] ( literal[string] ,{}). identifier[get] ( literal[string] , keyword[None] )
identifier[password] = identifier[__opts__] . identifier[get] ( literal[string] ,{}). identifier[get] ( literal[string] , keyword[None] )
identifier[path] = literal[string]
identifier[result] = identifier[salt] . identifier[utils] . identifier[http] . identifier[query] (
identifier[path] ,
identifier[decode] = keyword[True] ,
identifier[decode_type] = literal[string] ,
identifier[text] = keyword[True] ,
identifier[status] = keyword[True] ,
identifier[username] = identifier[username] ,
identifier[password] = identifier[password] ,
identifier[cookies] = keyword[True] ,
identifier[persist_session] = keyword[True] ,
identifier[opts] = identifier[__opts__] ,
)
keyword[if] literal[string] keyword[not] keyword[in] identifier[result] :
keyword[return] keyword[None]
keyword[return] identifier[result] [ literal[string] ][ literal[string] ][ literal[string] ] | def _get_token():
"""
Get an auth token
"""
username = __opts__.get('rallydev', {}).get('username', None)
password = __opts__.get('rallydev', {}).get('password', None)
path = 'https://rally1.rallydev.com/slm/webservice/v2.0/security/authorize'
result = salt.utils.http.query(path, decode=True, decode_type='json', text=True, status=True, username=username, password=password, cookies=True, persist_session=True, opts=__opts__)
if 'dict' not in result:
return None # depends on [control=['if'], data=[]]
return result['dict']['OperationResult']['SecurityToken'] |
def filter_assert_nodes(nodes: List[ast.stmt], min_line_number: int) -> List[ast.stmt]:
"""
Finds all nodes that are after the ``min_line_number``
"""
return [node for node in nodes if node.lineno > min_line_number] | def function[filter_assert_nodes, parameter[nodes, min_line_number]]:
constant[
Finds all nodes that are after the ``min_line_number``
]
return[<ast.ListComp object at 0x7da1b1121ff0>] | keyword[def] identifier[filter_assert_nodes] ( identifier[nodes] : identifier[List] [ identifier[ast] . identifier[stmt] ], identifier[min_line_number] : identifier[int] )-> identifier[List] [ identifier[ast] . identifier[stmt] ]:
literal[string]
keyword[return] [ identifier[node] keyword[for] identifier[node] keyword[in] identifier[nodes] keyword[if] identifier[node] . identifier[lineno] > identifier[min_line_number] ] | def filter_assert_nodes(nodes: List[ast.stmt], min_line_number: int) -> List[ast.stmt]:
"""
Finds all nodes that are after the ``min_line_number``
"""
return [node for node in nodes if node.lineno > min_line_number] |
def get_is_authorized(request, pid):
"""MNAuthorization.isAuthorized(did, action) -> Boolean."""
if 'action' not in request.GET:
raise d1_common.types.exceptions.InvalidRequest(
0, 'Missing required parameter. required="action"'
)
# Convert action string to action level. Raises InvalidRequest if the
# action string is not valid.
level = d1_gmn.app.auth.action_to_level(request.GET['action'])
d1_gmn.app.auth.assert_allowed(request, level, pid)
return d1_gmn.app.views.util.http_response_with_boolean_true_type() | def function[get_is_authorized, parameter[request, pid]]:
constant[MNAuthorization.isAuthorized(did, action) -> Boolean.]
if compare[constant[action] <ast.NotIn object at 0x7da2590d7190> name[request].GET] begin[:]
<ast.Raise object at 0x7da1b1905210>
variable[level] assign[=] call[name[d1_gmn].app.auth.action_to_level, parameter[call[name[request].GET][constant[action]]]]
call[name[d1_gmn].app.auth.assert_allowed, parameter[name[request], name[level], name[pid]]]
return[call[name[d1_gmn].app.views.util.http_response_with_boolean_true_type, parameter[]]] | keyword[def] identifier[get_is_authorized] ( identifier[request] , identifier[pid] ):
literal[string]
keyword[if] literal[string] keyword[not] keyword[in] identifier[request] . identifier[GET] :
keyword[raise] identifier[d1_common] . identifier[types] . identifier[exceptions] . identifier[InvalidRequest] (
literal[int] , literal[string]
)
identifier[level] = identifier[d1_gmn] . identifier[app] . identifier[auth] . identifier[action_to_level] ( identifier[request] . identifier[GET] [ literal[string] ])
identifier[d1_gmn] . identifier[app] . identifier[auth] . identifier[assert_allowed] ( identifier[request] , identifier[level] , identifier[pid] )
keyword[return] identifier[d1_gmn] . identifier[app] . identifier[views] . identifier[util] . identifier[http_response_with_boolean_true_type] () | def get_is_authorized(request, pid):
"""MNAuthorization.isAuthorized(did, action) -> Boolean."""
if 'action' not in request.GET:
raise d1_common.types.exceptions.InvalidRequest(0, 'Missing required parameter. required="action"') # depends on [control=['if'], data=[]]
# Convert action string to action level. Raises InvalidRequest if the
# action string is not valid.
level = d1_gmn.app.auth.action_to_level(request.GET['action'])
d1_gmn.app.auth.assert_allowed(request, level, pid)
return d1_gmn.app.views.util.http_response_with_boolean_true_type() |
def get_portal_by_name(self, portal_name):
"""
Set active portal according to the name passed in 'portal_name'.
Returns dictionary of device 'serial_number: rid'
"""
portals = self.get_portals_list()
for p in portals:
# print("Checking {!r}".format(p))
if portal_name == p[1]:
# print("Found Portal!")
self.set_portal_name( p[1] )
self.set_portal_id( p[0] )
self.set_portal_cik( p[2][1]['info']['key'] )
# print("Active Portal Details:\nName: {0}\nId: {1}\nCIK: {2}".format(
# self.portal_name(),
# self.portal_id(),
# self.portal_cik()))
return p
return None | def function[get_portal_by_name, parameter[self, portal_name]]:
constant[
Set active portal according to the name passed in 'portal_name'.
Returns dictionary of device 'serial_number: rid'
]
variable[portals] assign[=] call[name[self].get_portals_list, parameter[]]
for taget[name[p]] in starred[name[portals]] begin[:]
if compare[name[portal_name] equal[==] call[name[p]][constant[1]]] begin[:]
call[name[self].set_portal_name, parameter[call[name[p]][constant[1]]]]
call[name[self].set_portal_id, parameter[call[name[p]][constant[0]]]]
call[name[self].set_portal_cik, parameter[call[call[call[call[name[p]][constant[2]]][constant[1]]][constant[info]]][constant[key]]]]
return[name[p]]
return[constant[None]] | keyword[def] identifier[get_portal_by_name] ( identifier[self] , identifier[portal_name] ):
literal[string]
identifier[portals] = identifier[self] . identifier[get_portals_list] ()
keyword[for] identifier[p] keyword[in] identifier[portals] :
keyword[if] identifier[portal_name] == identifier[p] [ literal[int] ]:
identifier[self] . identifier[set_portal_name] ( identifier[p] [ literal[int] ])
identifier[self] . identifier[set_portal_id] ( identifier[p] [ literal[int] ])
identifier[self] . identifier[set_portal_cik] ( identifier[p] [ literal[int] ][ literal[int] ][ literal[string] ][ literal[string] ])
keyword[return] identifier[p]
keyword[return] keyword[None] | def get_portal_by_name(self, portal_name):
"""
Set active portal according to the name passed in 'portal_name'.
Returns dictionary of device 'serial_number: rid'
"""
portals = self.get_portals_list()
for p in portals:
# print("Checking {!r}".format(p))
if portal_name == p[1]:
# print("Found Portal!")
self.set_portal_name(p[1])
self.set_portal_id(p[0])
self.set_portal_cik(p[2][1]['info']['key'])
# print("Active Portal Details:\nName: {0}\nId: {1}\nCIK: {2}".format(
# self.portal_name(),
# self.portal_id(),
# self.portal_cik()))
return p # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['p']]
return None |
def _encrypt(self, value):
"""Turn a json serializable value into an jsonified, encrypted,
hexa string.
"""
value = json.dumps(value)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
encrypted_value = self.cipher.encrypt(value.encode('utf8'))
hexified_value = binascii.hexlify(encrypted_value).decode('ascii')
return hexified_value | def function[_encrypt, parameter[self, value]]:
constant[Turn a json serializable value into an jsonified, encrypted,
hexa string.
]
variable[value] assign[=] call[name[json].dumps, parameter[name[value]]]
with call[name[warnings].catch_warnings, parameter[]] begin[:]
call[name[warnings].simplefilter, parameter[constant[ignore]]]
variable[encrypted_value] assign[=] call[name[self].cipher.encrypt, parameter[call[name[value].encode, parameter[constant[utf8]]]]]
variable[hexified_value] assign[=] call[call[name[binascii].hexlify, parameter[name[encrypted_value]]].decode, parameter[constant[ascii]]]
return[name[hexified_value]] | keyword[def] identifier[_encrypt] ( identifier[self] , identifier[value] ):
literal[string]
identifier[value] = identifier[json] . identifier[dumps] ( identifier[value] )
keyword[with] identifier[warnings] . identifier[catch_warnings] ():
identifier[warnings] . identifier[simplefilter] ( literal[string] )
identifier[encrypted_value] = identifier[self] . identifier[cipher] . identifier[encrypt] ( identifier[value] . identifier[encode] ( literal[string] ))
identifier[hexified_value] = identifier[binascii] . identifier[hexlify] ( identifier[encrypted_value] ). identifier[decode] ( literal[string] )
keyword[return] identifier[hexified_value] | def _encrypt(self, value):
"""Turn a json serializable value into an jsonified, encrypted,
hexa string.
"""
value = json.dumps(value)
with warnings.catch_warnings():
warnings.simplefilter('ignore')
encrypted_value = self.cipher.encrypt(value.encode('utf8')) # depends on [control=['with'], data=[]]
hexified_value = binascii.hexlify(encrypted_value).decode('ascii')
return hexified_value |
def add_nest(self, name=None, **kw):
"""A simple decorator which wraps :meth:`nestly.core.Nest.add`."""
def deco(func):
self.add(name or func.__name__, func, **kw)
return func
return deco | def function[add_nest, parameter[self, name]]:
constant[A simple decorator which wraps :meth:`nestly.core.Nest.add`.]
def function[deco, parameter[func]]:
call[name[self].add, parameter[<ast.BoolOp object at 0x7da2046216c0>, name[func]]]
return[name[func]]
return[name[deco]] | keyword[def] identifier[add_nest] ( identifier[self] , identifier[name] = keyword[None] ,** identifier[kw] ):
literal[string]
keyword[def] identifier[deco] ( identifier[func] ):
identifier[self] . identifier[add] ( identifier[name] keyword[or] identifier[func] . identifier[__name__] , identifier[func] ,** identifier[kw] )
keyword[return] identifier[func]
keyword[return] identifier[deco] | def add_nest(self, name=None, **kw):
"""A simple decorator which wraps :meth:`nestly.core.Nest.add`."""
def deco(func):
self.add(name or func.__name__, func, **kw)
return func
return deco |
def transaction_operations(self, tx_hash, cursor=None, order='asc', include_failed=False, limit=10):
"""This endpoint represents all operations that are part of a given
transaction.
`GET /transactions/{hash}/operations{?cursor,limit,order}
<https://www.stellar.org/developers/horizon/reference/endpoints/operations-for-transaction.html>`_
:param str tx_hash: The hex-encoded transaction hash.
:param int cursor: A paging token, specifying where to start returning records from.
:param str order: The order in which to return rows, "asc" or "desc".
:param int limit: Maximum number of records to return.
:param bool include_failed: Set to `True` to include operations of failed transactions in results.
:return: A single transaction's operations.
:rtype: dict
"""
endpoint = '/transactions/{tx_hash}/operations'.format(tx_hash=tx_hash)
params = self.__query_params(cursor=cursor, order=order, limit=limit, include_failed=include_failed)
return self.query(endpoint, params) | def function[transaction_operations, parameter[self, tx_hash, cursor, order, include_failed, limit]]:
constant[This endpoint represents all operations that are part of a given
transaction.
`GET /transactions/{hash}/operations{?cursor,limit,order}
<https://www.stellar.org/developers/horizon/reference/endpoints/operations-for-transaction.html>`_
:param str tx_hash: The hex-encoded transaction hash.
:param int cursor: A paging token, specifying where to start returning records from.
:param str order: The order in which to return rows, "asc" or "desc".
:param int limit: Maximum number of records to return.
:param bool include_failed: Set to `True` to include operations of failed transactions in results.
:return: A single transaction's operations.
:rtype: dict
]
variable[endpoint] assign[=] call[constant[/transactions/{tx_hash}/operations].format, parameter[]]
variable[params] assign[=] call[name[self].__query_params, parameter[]]
return[call[name[self].query, parameter[name[endpoint], name[params]]]] | keyword[def] identifier[transaction_operations] ( identifier[self] , identifier[tx_hash] , identifier[cursor] = keyword[None] , identifier[order] = literal[string] , identifier[include_failed] = keyword[False] , identifier[limit] = literal[int] ):
literal[string]
identifier[endpoint] = literal[string] . identifier[format] ( identifier[tx_hash] = identifier[tx_hash] )
identifier[params] = identifier[self] . identifier[__query_params] ( identifier[cursor] = identifier[cursor] , identifier[order] = identifier[order] , identifier[limit] = identifier[limit] , identifier[include_failed] = identifier[include_failed] )
keyword[return] identifier[self] . identifier[query] ( identifier[endpoint] , identifier[params] ) | def transaction_operations(self, tx_hash, cursor=None, order='asc', include_failed=False, limit=10):
"""This endpoint represents all operations that are part of a given
transaction.
`GET /transactions/{hash}/operations{?cursor,limit,order}
<https://www.stellar.org/developers/horizon/reference/endpoints/operations-for-transaction.html>`_
:param str tx_hash: The hex-encoded transaction hash.
:param int cursor: A paging token, specifying where to start returning records from.
:param str order: The order in which to return rows, "asc" or "desc".
:param int limit: Maximum number of records to return.
:param bool include_failed: Set to `True` to include operations of failed transactions in results.
:return: A single transaction's operations.
:rtype: dict
"""
endpoint = '/transactions/{tx_hash}/operations'.format(tx_hash=tx_hash)
params = self.__query_params(cursor=cursor, order=order, limit=limit, include_failed=include_failed)
return self.query(endpoint, params) |
def main():
# config file
conf = Config()
actions = []
debuglevel = logging.ERROR
for it in sys.argv[1:]:
if it == ("-v"):
debuglevel = logging.WARNING
elif it == ("-vv"):
debuglevel = logging.INFO
elif it == ("-vvv"):
debuglevel = logging.DEBUG
if it == ("--valid"):
actions.append("valid")
conf.debuglevel = debuglevel
srv = ServerApp(conf)
status = srv.login(conf.token)
if not status:
sys.exit(100)
# pick up and run events
""" Run all script for this systems. """
srv.do_all_actions(conf)
""" send data for monitoring """
if conf.monitoring:
srv.monitoring()
""" create repo for web project - apache2/uwsgi"""
if conf.webproject:
#data = srv.get_all_projects()
#content = aray2xml(data)
""" Check all repository on this system. """
srv.check_repo_all()
""" Recount size of full disk in all project on this system. """
srv.check_size_all()
""" reset owners for projects """
srv.check_rights()
""" check running services """
srv.check_services()
mng = manager(conf)
for it in actions:
fc = getattr(mng, it)
fc() | def function[main, parameter[]]:
variable[conf] assign[=] call[name[Config], parameter[]]
variable[actions] assign[=] list[[]]
variable[debuglevel] assign[=] name[logging].ERROR
for taget[name[it]] in starred[call[name[sys].argv][<ast.Slice object at 0x7da1b088c310>]] begin[:]
if compare[name[it] equal[==] constant[-v]] begin[:]
variable[debuglevel] assign[=] name[logging].WARNING
if compare[name[it] equal[==] constant[--valid]] begin[:]
call[name[actions].append, parameter[constant[valid]]]
name[conf].debuglevel assign[=] name[debuglevel]
variable[srv] assign[=] call[name[ServerApp], parameter[name[conf]]]
variable[status] assign[=] call[name[srv].login, parameter[name[conf].token]]
if <ast.UnaryOp object at 0x7da1b0812e60> begin[:]
call[name[sys].exit, parameter[constant[100]]]
constant[ Run all script for this systems. ]
call[name[srv].do_all_actions, parameter[name[conf]]]
constant[ send data for monitoring ]
if name[conf].monitoring begin[:]
call[name[srv].monitoring, parameter[]]
constant[ create repo for web project - apache2/uwsgi]
if name[conf].webproject begin[:]
constant[ Check all repository on this system. ]
call[name[srv].check_repo_all, parameter[]]
constant[ Recount size of full disk in all project on this system. ]
call[name[srv].check_size_all, parameter[]]
constant[ reset owners for projects ]
call[name[srv].check_rights, parameter[]]
constant[ check running services ]
call[name[srv].check_services, parameter[]]
variable[mng] assign[=] call[name[manager], parameter[name[conf]]]
for taget[name[it]] in starred[name[actions]] begin[:]
variable[fc] assign[=] call[name[getattr], parameter[name[mng], name[it]]]
call[name[fc], parameter[]] | keyword[def] identifier[main] ():
identifier[conf] = identifier[Config] ()
identifier[actions] =[]
identifier[debuglevel] = identifier[logging] . identifier[ERROR]
keyword[for] identifier[it] keyword[in] identifier[sys] . identifier[argv] [ literal[int] :]:
keyword[if] identifier[it] ==( literal[string] ):
identifier[debuglevel] = identifier[logging] . identifier[WARNING]
keyword[elif] identifier[it] ==( literal[string] ):
identifier[debuglevel] = identifier[logging] . identifier[INFO]
keyword[elif] identifier[it] ==( literal[string] ):
identifier[debuglevel] = identifier[logging] . identifier[DEBUG]
keyword[if] identifier[it] ==( literal[string] ):
identifier[actions] . identifier[append] ( literal[string] )
identifier[conf] . identifier[debuglevel] = identifier[debuglevel]
identifier[srv] = identifier[ServerApp] ( identifier[conf] )
identifier[status] = identifier[srv] . identifier[login] ( identifier[conf] . identifier[token] )
keyword[if] keyword[not] identifier[status] :
identifier[sys] . identifier[exit] ( literal[int] )
literal[string]
identifier[srv] . identifier[do_all_actions] ( identifier[conf] )
literal[string]
keyword[if] identifier[conf] . identifier[monitoring] :
identifier[srv] . identifier[monitoring] ()
literal[string]
keyword[if] identifier[conf] . identifier[webproject] :
literal[string]
identifier[srv] . identifier[check_repo_all] ()
literal[string]
identifier[srv] . identifier[check_size_all] ()
literal[string]
identifier[srv] . identifier[check_rights] ()
literal[string]
identifier[srv] . identifier[check_services] ()
identifier[mng] = identifier[manager] ( identifier[conf] )
keyword[for] identifier[it] keyword[in] identifier[actions] :
identifier[fc] = identifier[getattr] ( identifier[mng] , identifier[it] )
identifier[fc] () | def main():
# config file
conf = Config()
actions = []
debuglevel = logging.ERROR
for it in sys.argv[1:]:
if it == '-v':
debuglevel = logging.WARNING # depends on [control=['if'], data=[]]
elif it == '-vv':
debuglevel = logging.INFO # depends on [control=['if'], data=[]]
elif it == '-vvv':
debuglevel = logging.DEBUG # depends on [control=['if'], data=[]]
if it == '--valid':
actions.append('valid') # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['it']]
conf.debuglevel = debuglevel
srv = ServerApp(conf)
status = srv.login(conf.token)
if not status:
sys.exit(100) # depends on [control=['if'], data=[]]
# pick up and run events
' Run all script for this systems. '
srv.do_all_actions(conf)
' send data for monitoring '
if conf.monitoring:
srv.monitoring() # depends on [control=['if'], data=[]]
' create repo for web project - apache2/uwsgi'
if conf.webproject:
#data = srv.get_all_projects()
#content = aray2xml(data)
' Check all repository on this system. '
srv.check_repo_all()
' Recount size of full disk in all project on this system. '
srv.check_size_all()
' reset owners for projects '
srv.check_rights()
' check running services '
srv.check_services()
mng = manager(conf)
for it in actions:
fc = getattr(mng, it)
fc() # depends on [control=['for'], data=['it']] # depends on [control=['if'], data=[]] |
def raise_expired_not_yet_valid(certificate):
"""
Raises a TLSVerificationError due to certificate being expired, or not yet
being valid
:param certificate:
An asn1crypto.x509.Certificate object
:raises:
TLSVerificationError
"""
validity = certificate['tbs_certificate']['validity']
not_after = validity['not_after'].native
not_before = validity['not_before'].native
now = datetime.now(timezone.utc)
if not_before > now:
formatted_before = not_before.strftime('%Y-%m-%d %H:%M:%SZ')
message = 'Server certificate verification failed - certificate not valid until %s' % formatted_before
elif not_after < now:
formatted_after = not_after.strftime('%Y-%m-%d %H:%M:%SZ')
message = 'Server certificate verification failed - certificate expired %s' % formatted_after
raise TLSVerificationError(message, certificate) | def function[raise_expired_not_yet_valid, parameter[certificate]]:
constant[
Raises a TLSVerificationError due to certificate being expired, or not yet
being valid
:param certificate:
An asn1crypto.x509.Certificate object
:raises:
TLSVerificationError
]
variable[validity] assign[=] call[call[name[certificate]][constant[tbs_certificate]]][constant[validity]]
variable[not_after] assign[=] call[name[validity]][constant[not_after]].native
variable[not_before] assign[=] call[name[validity]][constant[not_before]].native
variable[now] assign[=] call[name[datetime].now, parameter[name[timezone].utc]]
if compare[name[not_before] greater[>] name[now]] begin[:]
variable[formatted_before] assign[=] call[name[not_before].strftime, parameter[constant[%Y-%m-%d %H:%M:%SZ]]]
variable[message] assign[=] binary_operation[constant[Server certificate verification failed - certificate not valid until %s] <ast.Mod object at 0x7da2590d6920> name[formatted_before]]
<ast.Raise object at 0x7da1aff0fee0> | keyword[def] identifier[raise_expired_not_yet_valid] ( identifier[certificate] ):
literal[string]
identifier[validity] = identifier[certificate] [ literal[string] ][ literal[string] ]
identifier[not_after] = identifier[validity] [ literal[string] ]. identifier[native]
identifier[not_before] = identifier[validity] [ literal[string] ]. identifier[native]
identifier[now] = identifier[datetime] . identifier[now] ( identifier[timezone] . identifier[utc] )
keyword[if] identifier[not_before] > identifier[now] :
identifier[formatted_before] = identifier[not_before] . identifier[strftime] ( literal[string] )
identifier[message] = literal[string] % identifier[formatted_before]
keyword[elif] identifier[not_after] < identifier[now] :
identifier[formatted_after] = identifier[not_after] . identifier[strftime] ( literal[string] )
identifier[message] = literal[string] % identifier[formatted_after]
keyword[raise] identifier[TLSVerificationError] ( identifier[message] , identifier[certificate] ) | def raise_expired_not_yet_valid(certificate):
"""
Raises a TLSVerificationError due to certificate being expired, or not yet
being valid
:param certificate:
An asn1crypto.x509.Certificate object
:raises:
TLSVerificationError
"""
validity = certificate['tbs_certificate']['validity']
not_after = validity['not_after'].native
not_before = validity['not_before'].native
now = datetime.now(timezone.utc)
if not_before > now:
formatted_before = not_before.strftime('%Y-%m-%d %H:%M:%SZ')
message = 'Server certificate verification failed - certificate not valid until %s' % formatted_before # depends on [control=['if'], data=['not_before']]
elif not_after < now:
formatted_after = not_after.strftime('%Y-%m-%d %H:%M:%SZ')
message = 'Server certificate verification failed - certificate expired %s' % formatted_after # depends on [control=['if'], data=['not_after']]
raise TLSVerificationError(message, certificate) |
def show(self):
"""
Display the information (with a pretty print) about the method
"""
self.show_info()
self.show_notes()
if self.code:
self.each_params_by_register(self.code.get_registers_size(), self.get_descriptor())
self.code.show() | def function[show, parameter[self]]:
constant[
Display the information (with a pretty print) about the method
]
call[name[self].show_info, parameter[]]
call[name[self].show_notes, parameter[]]
if name[self].code begin[:]
call[name[self].each_params_by_register, parameter[call[name[self].code.get_registers_size, parameter[]], call[name[self].get_descriptor, parameter[]]]]
call[name[self].code.show, parameter[]] | keyword[def] identifier[show] ( identifier[self] ):
literal[string]
identifier[self] . identifier[show_info] ()
identifier[self] . identifier[show_notes] ()
keyword[if] identifier[self] . identifier[code] :
identifier[self] . identifier[each_params_by_register] ( identifier[self] . identifier[code] . identifier[get_registers_size] (), identifier[self] . identifier[get_descriptor] ())
identifier[self] . identifier[code] . identifier[show] () | def show(self):
"""
Display the information (with a pretty print) about the method
"""
self.show_info()
self.show_notes()
if self.code:
self.each_params_by_register(self.code.get_registers_size(), self.get_descriptor())
self.code.show() # depends on [control=['if'], data=[]] |
def get_all_users(self, path_prefix='/', marker=None, max_items=None):
"""
List the users that have the specified path prefix.
:type path_prefix: string
:param path_prefix: If provided, only users whose paths match
the provided prefix will be returned.
:type marker: string
:param marker: Use this only when paginating results and only in
follow-up request after you've received a response
where the results are truncated. Set this to the
value of the Marker element in the response you
just received.
:type max_items: int
:param max_items: Use this only when paginating results to indicate
the maximum number of groups you want in the
response.
"""
params = {'PathPrefix' : path_prefix}
if marker:
params['Marker'] = marker
if max_items:
params['MaxItems'] = max_items
return self.get_response('ListUsers', params, list_marker='Users') | def function[get_all_users, parameter[self, path_prefix, marker, max_items]]:
constant[
List the users that have the specified path prefix.
:type path_prefix: string
:param path_prefix: If provided, only users whose paths match
the provided prefix will be returned.
:type marker: string
:param marker: Use this only when paginating results and only in
follow-up request after you've received a response
where the results are truncated. Set this to the
value of the Marker element in the response you
just received.
:type max_items: int
:param max_items: Use this only when paginating results to indicate
the maximum number of groups you want in the
response.
]
variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da1b2617010>], [<ast.Name object at 0x7da1b26150f0>]]
if name[marker] begin[:]
call[name[params]][constant[Marker]] assign[=] name[marker]
if name[max_items] begin[:]
call[name[params]][constant[MaxItems]] assign[=] name[max_items]
return[call[name[self].get_response, parameter[constant[ListUsers], name[params]]]] | keyword[def] identifier[get_all_users] ( identifier[self] , identifier[path_prefix] = literal[string] , identifier[marker] = keyword[None] , identifier[max_items] = keyword[None] ):
literal[string]
identifier[params] ={ literal[string] : identifier[path_prefix] }
keyword[if] identifier[marker] :
identifier[params] [ literal[string] ]= identifier[marker]
keyword[if] identifier[max_items] :
identifier[params] [ literal[string] ]= identifier[max_items]
keyword[return] identifier[self] . identifier[get_response] ( literal[string] , identifier[params] , identifier[list_marker] = literal[string] ) | def get_all_users(self, path_prefix='/', marker=None, max_items=None):
"""
List the users that have the specified path prefix.
:type path_prefix: string
:param path_prefix: If provided, only users whose paths match
the provided prefix will be returned.
:type marker: string
:param marker: Use this only when paginating results and only in
follow-up request after you've received a response
where the results are truncated. Set this to the
value of the Marker element in the response you
just received.
:type max_items: int
:param max_items: Use this only when paginating results to indicate
the maximum number of groups you want in the
response.
"""
params = {'PathPrefix': path_prefix}
if marker:
params['Marker'] = marker # depends on [control=['if'], data=[]]
if max_items:
params['MaxItems'] = max_items # depends on [control=['if'], data=[]]
return self.get_response('ListUsers', params, list_marker='Users') |
def _equivalent_node_iterator_helper(self, node: BaseEntity, visited: Set[BaseEntity]) -> BaseEntity:
"""Iterate over nodes and their data that are equal to the given node, starting with the original."""
for v in self[node]:
if v in visited:
continue
if self._has_no_equivalent_edge(node, v):
continue
visited.add(v)
yield v
yield from self._equivalent_node_iterator_helper(v, visited) | def function[_equivalent_node_iterator_helper, parameter[self, node, visited]]:
constant[Iterate over nodes and their data that are equal to the given node, starting with the original.]
for taget[name[v]] in starred[call[name[self]][name[node]]] begin[:]
if compare[name[v] in name[visited]] begin[:]
continue
if call[name[self]._has_no_equivalent_edge, parameter[name[node], name[v]]] begin[:]
continue
call[name[visited].add, parameter[name[v]]]
<ast.Yield object at 0x7da1b0c66920>
<ast.YieldFrom object at 0x7da1b0c66c50> | keyword[def] identifier[_equivalent_node_iterator_helper] ( identifier[self] , identifier[node] : identifier[BaseEntity] , identifier[visited] : identifier[Set] [ identifier[BaseEntity] ])-> identifier[BaseEntity] :
literal[string]
keyword[for] identifier[v] keyword[in] identifier[self] [ identifier[node] ]:
keyword[if] identifier[v] keyword[in] identifier[visited] :
keyword[continue]
keyword[if] identifier[self] . identifier[_has_no_equivalent_edge] ( identifier[node] , identifier[v] ):
keyword[continue]
identifier[visited] . identifier[add] ( identifier[v] )
keyword[yield] identifier[v]
keyword[yield] keyword[from] identifier[self] . identifier[_equivalent_node_iterator_helper] ( identifier[v] , identifier[visited] ) | def _equivalent_node_iterator_helper(self, node: BaseEntity, visited: Set[BaseEntity]) -> BaseEntity:
"""Iterate over nodes and their data that are equal to the given node, starting with the original."""
for v in self[node]:
if v in visited:
continue # depends on [control=['if'], data=[]]
if self._has_no_equivalent_edge(node, v):
continue # depends on [control=['if'], data=[]]
visited.add(v)
yield v
yield from self._equivalent_node_iterator_helper(v, visited) # depends on [control=['for'], data=['v']] |
def prune_database():
"""
Delete tokens that have expired from the database.
How (and if) you call this is entirely up you. You could expose it to an
endpoint that only administrators could call, you could run it as a cron,
set it up with flask cli, etc.
"""
now = datetime.now()
expired = TokenBlacklist.query.filter(TokenBlacklist.expires < now).all()
for token in expired:
db.session.delete(token)
db.session.commit() | def function[prune_database, parameter[]]:
constant[
Delete tokens that have expired from the database.
How (and if) you call this is entirely up you. You could expose it to an
endpoint that only administrators could call, you could run it as a cron,
set it up with flask cli, etc.
]
variable[now] assign[=] call[name[datetime].now, parameter[]]
variable[expired] assign[=] call[call[name[TokenBlacklist].query.filter, parameter[compare[name[TokenBlacklist].expires less[<] name[now]]]].all, parameter[]]
for taget[name[token]] in starred[name[expired]] begin[:]
call[name[db].session.delete, parameter[name[token]]]
call[name[db].session.commit, parameter[]] | keyword[def] identifier[prune_database] ():
literal[string]
identifier[now] = identifier[datetime] . identifier[now] ()
identifier[expired] = identifier[TokenBlacklist] . identifier[query] . identifier[filter] ( identifier[TokenBlacklist] . identifier[expires] < identifier[now] ). identifier[all] ()
keyword[for] identifier[token] keyword[in] identifier[expired] :
identifier[db] . identifier[session] . identifier[delete] ( identifier[token] )
identifier[db] . identifier[session] . identifier[commit] () | def prune_database():
"""
Delete tokens that have expired from the database.
How (and if) you call this is entirely up you. You could expose it to an
endpoint that only administrators could call, you could run it as a cron,
set it up with flask cli, etc.
"""
now = datetime.now()
expired = TokenBlacklist.query.filter(TokenBlacklist.expires < now).all()
for token in expired:
db.session.delete(token) # depends on [control=['for'], data=['token']]
db.session.commit() |
def create_object(self, name, experiment_id, model_id, argument_defs, arguments=None, properties=None):
"""Create a model run object with the given list of arguments. The
initial state of the object is RUNNING.
Raises ValueError if given arguments are invalid.
Parameters
----------
name : string
User-provided name for the model run
experiment_id : string
Unique identifier of associated experiment object
model_id : string
Unique model identifier
argument_defs : list(attribute.AttributeDefinition)
Definition of valid arguments for the given model
arguments : list(dict('name':...,'value:...')), optional
List of attribute instances
properties : Dictionary, optional
Set of model run properties.
Returns
-------
PredictionHandle
Object handle for created model run
"""
# Create a new object identifier.
identifier = str(uuid.uuid4()).replace('-','')
# Directory for successful model run resource files. Directories are
# simply named by object identifier
directory = os.path.join(self.directory, identifier)
# Create the directory if it doesn't exists
if not os.access(directory, os.F_OK):
os.makedirs(directory)
# By default all model runs are in IDLE state at creation
state = ModelRunIdle()
# Create the initial set of properties.
run_properties = {
datastore.PROPERTY_NAME: name,
datastore.PROPERTY_STATE: str(state),
datastore.PROPERTY_MODEL: model_id
}
if not properties is None:
for prop in properties:
if not prop in run_properties:
run_properties[prop] = properties[prop]
# If argument list is not given then the initial set of arguments is
# empty. Here we do not validate the given arguments. Definitions of
# valid argument sets are maintained in the model registry and are not
# accessible by the model run manager at this point.
run_arguments = {}
if not arguments is None:
# Convert arguments to dictionary of Atrribute instances. Will
# raise an exception if values are of invalid type.
run_arguments = attribute.to_dict(arguments, argument_defs)
# Create the image group object and store it in the database before
# returning it.
obj = ModelRunHandle(
identifier,
run_properties,
directory,
state,
experiment_id,
model_id,
run_arguments
)
self.insert_object(obj)
return obj | def function[create_object, parameter[self, name, experiment_id, model_id, argument_defs, arguments, properties]]:
constant[Create a model run object with the given list of arguments. The
initial state of the object is RUNNING.
Raises ValueError if given arguments are invalid.
Parameters
----------
name : string
User-provided name for the model run
experiment_id : string
Unique identifier of associated experiment object
model_id : string
Unique model identifier
argument_defs : list(attribute.AttributeDefinition)
Definition of valid arguments for the given model
arguments : list(dict('name':...,'value:...')), optional
List of attribute instances
properties : Dictionary, optional
Set of model run properties.
Returns
-------
PredictionHandle
Object handle for created model run
]
variable[identifier] assign[=] call[call[name[str], parameter[call[name[uuid].uuid4, parameter[]]]].replace, parameter[constant[-], constant[]]]
variable[directory] assign[=] call[name[os].path.join, parameter[name[self].directory, name[identifier]]]
if <ast.UnaryOp object at 0x7da1b15ac7c0> begin[:]
call[name[os].makedirs, parameter[name[directory]]]
variable[state] assign[=] call[name[ModelRunIdle], parameter[]]
variable[run_properties] assign[=] dictionary[[<ast.Attribute object at 0x7da1b1451c90>, <ast.Attribute object at 0x7da1b1452680>, <ast.Attribute object at 0x7da1b1451bd0>], [<ast.Name object at 0x7da1b1451d50>, <ast.Call object at 0x7da1b14518d0>, <ast.Name object at 0x7da1b1452080>]]
if <ast.UnaryOp object at 0x7da1b14511e0> begin[:]
for taget[name[prop]] in starred[name[properties]] begin[:]
if <ast.UnaryOp object at 0x7da1b1451cf0> begin[:]
call[name[run_properties]][name[prop]] assign[=] call[name[properties]][name[prop]]
variable[run_arguments] assign[=] dictionary[[], []]
if <ast.UnaryOp object at 0x7da1b1450d60> begin[:]
variable[run_arguments] assign[=] call[name[attribute].to_dict, parameter[name[arguments], name[argument_defs]]]
variable[obj] assign[=] call[name[ModelRunHandle], parameter[name[identifier], name[run_properties], name[directory], name[state], name[experiment_id], name[model_id], name[run_arguments]]]
call[name[self].insert_object, parameter[name[obj]]]
return[name[obj]] | keyword[def] identifier[create_object] ( identifier[self] , identifier[name] , identifier[experiment_id] , identifier[model_id] , identifier[argument_defs] , identifier[arguments] = keyword[None] , identifier[properties] = keyword[None] ):
literal[string]
identifier[identifier] = identifier[str] ( identifier[uuid] . identifier[uuid4] ()). identifier[replace] ( literal[string] , literal[string] )
identifier[directory] = identifier[os] . identifier[path] . identifier[join] ( identifier[self] . identifier[directory] , identifier[identifier] )
keyword[if] keyword[not] identifier[os] . identifier[access] ( identifier[directory] , identifier[os] . identifier[F_OK] ):
identifier[os] . identifier[makedirs] ( identifier[directory] )
identifier[state] = identifier[ModelRunIdle] ()
identifier[run_properties] ={
identifier[datastore] . identifier[PROPERTY_NAME] : identifier[name] ,
identifier[datastore] . identifier[PROPERTY_STATE] : identifier[str] ( identifier[state] ),
identifier[datastore] . identifier[PROPERTY_MODEL] : identifier[model_id]
}
keyword[if] keyword[not] identifier[properties] keyword[is] keyword[None] :
keyword[for] identifier[prop] keyword[in] identifier[properties] :
keyword[if] keyword[not] identifier[prop] keyword[in] identifier[run_properties] :
identifier[run_properties] [ identifier[prop] ]= identifier[properties] [ identifier[prop] ]
identifier[run_arguments] ={}
keyword[if] keyword[not] identifier[arguments] keyword[is] keyword[None] :
identifier[run_arguments] = identifier[attribute] . identifier[to_dict] ( identifier[arguments] , identifier[argument_defs] )
identifier[obj] = identifier[ModelRunHandle] (
identifier[identifier] ,
identifier[run_properties] ,
identifier[directory] ,
identifier[state] ,
identifier[experiment_id] ,
identifier[model_id] ,
identifier[run_arguments]
)
identifier[self] . identifier[insert_object] ( identifier[obj] )
keyword[return] identifier[obj] | def create_object(self, name, experiment_id, model_id, argument_defs, arguments=None, properties=None):
"""Create a model run object with the given list of arguments. The
initial state of the object is RUNNING.
Raises ValueError if given arguments are invalid.
Parameters
----------
name : string
User-provided name for the model run
experiment_id : string
Unique identifier of associated experiment object
model_id : string
Unique model identifier
argument_defs : list(attribute.AttributeDefinition)
Definition of valid arguments for the given model
arguments : list(dict('name':...,'value:...')), optional
List of attribute instances
properties : Dictionary, optional
Set of model run properties.
Returns
-------
PredictionHandle
Object handle for created model run
"""
# Create a new object identifier.
identifier = str(uuid.uuid4()).replace('-', '')
# Directory for successful model run resource files. Directories are
# simply named by object identifier
directory = os.path.join(self.directory, identifier)
# Create the directory if it doesn't exists
if not os.access(directory, os.F_OK):
os.makedirs(directory) # depends on [control=['if'], data=[]]
# By default all model runs are in IDLE state at creation
state = ModelRunIdle()
# Create the initial set of properties.
run_properties = {datastore.PROPERTY_NAME: name, datastore.PROPERTY_STATE: str(state), datastore.PROPERTY_MODEL: model_id}
if not properties is None:
for prop in properties:
if not prop in run_properties:
run_properties[prop] = properties[prop] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['prop']] # depends on [control=['if'], data=[]]
# If argument list is not given then the initial set of arguments is
# empty. Here we do not validate the given arguments. Definitions of
# valid argument sets are maintained in the model registry and are not
# accessible by the model run manager at this point.
run_arguments = {}
if not arguments is None:
# Convert arguments to dictionary of Atrribute instances. Will
# raise an exception if values are of invalid type.
run_arguments = attribute.to_dict(arguments, argument_defs) # depends on [control=['if'], data=[]]
# Create the image group object and store it in the database before
# returning it.
obj = ModelRunHandle(identifier, run_properties, directory, state, experiment_id, model_id, run_arguments)
self.insert_object(obj)
return obj |
def delete_note(self, note_id):
""" Method to permanently delete a note
Arguments:
- note_id (string): key of the note to trash
Returns:
A tuple `(note, status)`
- note (dict): an empty dict or an error message
- status (int): 0 on success and -1 otherwise
"""
# notes have to be trashed before deletion
note, status = self.trash_note(note_id)
if (status == -1):
return note, status
params = '/i/%s' % (str(note_id))
request = Request(url=DATA_URL+params, method='DELETE')
request.add_header(self.header, self.get_token())
try:
response = urllib2.urlopen(request)
except IOError as e:
return e, -1
except HTTPError as e:
if e.code == 401:
raise SimplenoteLoginFailed('Login to Simplenote API failed! Check Token.')
else:
return e, -1
return {}, 0 | def function[delete_note, parameter[self, note_id]]:
constant[ Method to permanently delete a note
Arguments:
- note_id (string): key of the note to trash
Returns:
A tuple `(note, status)`
- note (dict): an empty dict or an error message
- status (int): 0 on success and -1 otherwise
]
<ast.Tuple object at 0x7da1b04d5c60> assign[=] call[name[self].trash_note, parameter[name[note_id]]]
if compare[name[status] equal[==] <ast.UnaryOp object at 0x7da1b04d5d80>] begin[:]
return[tuple[[<ast.Name object at 0x7da1b04d6530>, <ast.Name object at 0x7da1b04d49a0>]]]
variable[params] assign[=] binary_operation[constant[/i/%s] <ast.Mod object at 0x7da2590d6920> call[name[str], parameter[name[note_id]]]]
variable[request] assign[=] call[name[Request], parameter[]]
call[name[request].add_header, parameter[name[self].header, call[name[self].get_token, parameter[]]]]
<ast.Try object at 0x7da1b04d4040>
return[tuple[[<ast.Dict object at 0x7da1b04d4100>, <ast.Constant object at 0x7da18dc99900>]]] | keyword[def] identifier[delete_note] ( identifier[self] , identifier[note_id] ):
literal[string]
identifier[note] , identifier[status] = identifier[self] . identifier[trash_note] ( identifier[note_id] )
keyword[if] ( identifier[status] ==- literal[int] ):
keyword[return] identifier[note] , identifier[status]
identifier[params] = literal[string] %( identifier[str] ( identifier[note_id] ))
identifier[request] = identifier[Request] ( identifier[url] = identifier[DATA_URL] + identifier[params] , identifier[method] = literal[string] )
identifier[request] . identifier[add_header] ( identifier[self] . identifier[header] , identifier[self] . identifier[get_token] ())
keyword[try] :
identifier[response] = identifier[urllib2] . identifier[urlopen] ( identifier[request] )
keyword[except] identifier[IOError] keyword[as] identifier[e] :
keyword[return] identifier[e] ,- literal[int]
keyword[except] identifier[HTTPError] keyword[as] identifier[e] :
keyword[if] identifier[e] . identifier[code] == literal[int] :
keyword[raise] identifier[SimplenoteLoginFailed] ( literal[string] )
keyword[else] :
keyword[return] identifier[e] ,- literal[int]
keyword[return] {}, literal[int] | def delete_note(self, note_id):
""" Method to permanently delete a note
Arguments:
- note_id (string): key of the note to trash
Returns:
A tuple `(note, status)`
- note (dict): an empty dict or an error message
- status (int): 0 on success and -1 otherwise
"""
# notes have to be trashed before deletion
(note, status) = self.trash_note(note_id)
if status == -1:
return (note, status) # depends on [control=['if'], data=['status']]
params = '/i/%s' % str(note_id)
request = Request(url=DATA_URL + params, method='DELETE')
request.add_header(self.header, self.get_token())
try:
response = urllib2.urlopen(request) # depends on [control=['try'], data=[]]
except IOError as e:
return (e, -1) # depends on [control=['except'], data=['e']]
except HTTPError as e:
if e.code == 401:
raise SimplenoteLoginFailed('Login to Simplenote API failed! Check Token.') # depends on [control=['if'], data=[]]
else:
return (e, -1) # depends on [control=['except'], data=['e']]
return ({}, 0) |
def labeled(**kwargs):
"""decorator to give practices labels"""
def for_practice(practice):
"""assigns label to practice"""
practice.code = kwargs.pop('code')
practice.msg = kwargs.pop('msg')
practice.solution = kwargs.pop('solution')
return practice
return for_practice | def function[labeled, parameter[]]:
constant[decorator to give practices labels]
def function[for_practice, parameter[practice]]:
constant[assigns label to practice]
name[practice].code assign[=] call[name[kwargs].pop, parameter[constant[code]]]
name[practice].msg assign[=] call[name[kwargs].pop, parameter[constant[msg]]]
name[practice].solution assign[=] call[name[kwargs].pop, parameter[constant[solution]]]
return[name[practice]]
return[name[for_practice]] | keyword[def] identifier[labeled] (** identifier[kwargs] ):
literal[string]
keyword[def] identifier[for_practice] ( identifier[practice] ):
literal[string]
identifier[practice] . identifier[code] = identifier[kwargs] . identifier[pop] ( literal[string] )
identifier[practice] . identifier[msg] = identifier[kwargs] . identifier[pop] ( literal[string] )
identifier[practice] . identifier[solution] = identifier[kwargs] . identifier[pop] ( literal[string] )
keyword[return] identifier[practice]
keyword[return] identifier[for_practice] | def labeled(**kwargs):
"""decorator to give practices labels"""
def for_practice(practice):
"""assigns label to practice"""
practice.code = kwargs.pop('code')
practice.msg = kwargs.pop('msg')
practice.solution = kwargs.pop('solution')
return practice
return for_practice |
def _get_cached_response_from_django_cache(key):
"""
Retrieves a CachedResponse for the given key from the django cache.
If the request was set to force cache misses, then this will always
return a cache miss response.
Args:
key (string)
Returns:
A CachedResponse with is_found status and value.
"""
if TieredCache._should_force_django_cache_miss():
return CachedResponse(is_found=False, key=key, value=None)
cached_value = django_cache.get(key, _CACHE_MISS)
is_found = cached_value is not _CACHE_MISS
return CachedResponse(is_found, key, cached_value) | def function[_get_cached_response_from_django_cache, parameter[key]]:
constant[
Retrieves a CachedResponse for the given key from the django cache.
If the request was set to force cache misses, then this will always
return a cache miss response.
Args:
key (string)
Returns:
A CachedResponse with is_found status and value.
]
if call[name[TieredCache]._should_force_django_cache_miss, parameter[]] begin[:]
return[call[name[CachedResponse], parameter[]]]
variable[cached_value] assign[=] call[name[django_cache].get, parameter[name[key], name[_CACHE_MISS]]]
variable[is_found] assign[=] compare[name[cached_value] is_not name[_CACHE_MISS]]
return[call[name[CachedResponse], parameter[name[is_found], name[key], name[cached_value]]]] | keyword[def] identifier[_get_cached_response_from_django_cache] ( identifier[key] ):
literal[string]
keyword[if] identifier[TieredCache] . identifier[_should_force_django_cache_miss] ():
keyword[return] identifier[CachedResponse] ( identifier[is_found] = keyword[False] , identifier[key] = identifier[key] , identifier[value] = keyword[None] )
identifier[cached_value] = identifier[django_cache] . identifier[get] ( identifier[key] , identifier[_CACHE_MISS] )
identifier[is_found] = identifier[cached_value] keyword[is] keyword[not] identifier[_CACHE_MISS]
keyword[return] identifier[CachedResponse] ( identifier[is_found] , identifier[key] , identifier[cached_value] ) | def _get_cached_response_from_django_cache(key):
"""
Retrieves a CachedResponse for the given key from the django cache.
If the request was set to force cache misses, then this will always
return a cache miss response.
Args:
key (string)
Returns:
A CachedResponse with is_found status and value.
"""
if TieredCache._should_force_django_cache_miss():
return CachedResponse(is_found=False, key=key, value=None) # depends on [control=['if'], data=[]]
cached_value = django_cache.get(key, _CACHE_MISS)
is_found = cached_value is not _CACHE_MISS
return CachedResponse(is_found, key, cached_value) |
def _system(self, *args, **kwargs):
'''
This basically calls grains items and picks out only
necessary information in a certain structure.
:param args:
:param kwargs:
:return:
'''
sysinfo = SysInfo(__grains__.get("kernel"))
data = dict()
data['cpu'] = sysinfo._get_cpu()
data['disks'] = sysinfo._get_fs()
data['mounts'] = sysinfo._get_mounts()
data['memory'] = sysinfo._get_mem()
data['network'] = sysinfo._get_network()
data['os'] = sysinfo._get_os()
return data | def function[_system, parameter[self]]:
constant[
This basically calls grains items and picks out only
necessary information in a certain structure.
:param args:
:param kwargs:
:return:
]
variable[sysinfo] assign[=] call[name[SysInfo], parameter[call[name[__grains__].get, parameter[constant[kernel]]]]]
variable[data] assign[=] call[name[dict], parameter[]]
call[name[data]][constant[cpu]] assign[=] call[name[sysinfo]._get_cpu, parameter[]]
call[name[data]][constant[disks]] assign[=] call[name[sysinfo]._get_fs, parameter[]]
call[name[data]][constant[mounts]] assign[=] call[name[sysinfo]._get_mounts, parameter[]]
call[name[data]][constant[memory]] assign[=] call[name[sysinfo]._get_mem, parameter[]]
call[name[data]][constant[network]] assign[=] call[name[sysinfo]._get_network, parameter[]]
call[name[data]][constant[os]] assign[=] call[name[sysinfo]._get_os, parameter[]]
return[name[data]] | keyword[def] identifier[_system] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
identifier[sysinfo] = identifier[SysInfo] ( identifier[__grains__] . identifier[get] ( literal[string] ))
identifier[data] = identifier[dict] ()
identifier[data] [ literal[string] ]= identifier[sysinfo] . identifier[_get_cpu] ()
identifier[data] [ literal[string] ]= identifier[sysinfo] . identifier[_get_fs] ()
identifier[data] [ literal[string] ]= identifier[sysinfo] . identifier[_get_mounts] ()
identifier[data] [ literal[string] ]= identifier[sysinfo] . identifier[_get_mem] ()
identifier[data] [ literal[string] ]= identifier[sysinfo] . identifier[_get_network] ()
identifier[data] [ literal[string] ]= identifier[sysinfo] . identifier[_get_os] ()
keyword[return] identifier[data] | def _system(self, *args, **kwargs):
"""
This basically calls grains items and picks out only
necessary information in a certain structure.
:param args:
:param kwargs:
:return:
"""
sysinfo = SysInfo(__grains__.get('kernel'))
data = dict()
data['cpu'] = sysinfo._get_cpu()
data['disks'] = sysinfo._get_fs()
data['mounts'] = sysinfo._get_mounts()
data['memory'] = sysinfo._get_mem()
data['network'] = sysinfo._get_network()
data['os'] = sysinfo._get_os()
return data |
def minOpar(self,dangle,tdisrupt=None,_return_raw=False):
"""
NAME:
minOpar
PURPOSE:
return the approximate minimum parallel frequency at a given angle
INPUT:
dangle - parallel angle
OUTPUT:
minimum frequency that gets to this parallel angle
HISTORY:
2015-12-28 - Written - Bovy (UofT)
"""
if tdisrupt is None: tdisrupt= self._tdisrupt
# First construct the breakpoints for this dangle
Oparb= (dangle-self._kick_interpdOpar_poly.x[:-1])/self._timpact
# Find the lower limit of the integration in the pw-linear-kick approx.
lowx= ((Oparb-self._kick_interpdOpar_poly.c[-1])\
*(tdisrupt-self._timpact)+Oparb*self._timpact-dangle)\
/((tdisrupt-self._timpact)\
*(1.+self._kick_interpdOpar_poly.c[-2]*self._timpact)\
+self._timpact)
lowx[lowx < 0.]= numpy.inf
lowbindx= numpy.argmin(lowx)
if _return_raw:
return (lowbindx,lowx[lowbindx])
else:
return Oparb[lowbindx]-lowx[lowbindx] | def function[minOpar, parameter[self, dangle, tdisrupt, _return_raw]]:
constant[
NAME:
minOpar
PURPOSE:
return the approximate minimum parallel frequency at a given angle
INPUT:
dangle - parallel angle
OUTPUT:
minimum frequency that gets to this parallel angle
HISTORY:
2015-12-28 - Written - Bovy (UofT)
]
if compare[name[tdisrupt] is constant[None]] begin[:]
variable[tdisrupt] assign[=] name[self]._tdisrupt
variable[Oparb] assign[=] binary_operation[binary_operation[name[dangle] - call[name[self]._kick_interpdOpar_poly.x][<ast.Slice object at 0x7da1b0ea7bb0>]] / name[self]._timpact]
variable[lowx] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[name[Oparb] - call[name[self]._kick_interpdOpar_poly.c][<ast.UnaryOp object at 0x7da1b0ea7790>]] * binary_operation[name[tdisrupt] - name[self]._timpact]] + binary_operation[name[Oparb] * name[self]._timpact]] - name[dangle]] / binary_operation[binary_operation[binary_operation[name[tdisrupt] - name[self]._timpact] * binary_operation[constant[1.0] + binary_operation[call[name[self]._kick_interpdOpar_poly.c][<ast.UnaryOp object at 0x7da1b0ea7970>] * name[self]._timpact]]] + name[self]._timpact]]
call[name[lowx]][compare[name[lowx] less[<] constant[0.0]]] assign[=] name[numpy].inf
variable[lowbindx] assign[=] call[name[numpy].argmin, parameter[name[lowx]]]
if name[_return_raw] begin[:]
return[tuple[[<ast.Name object at 0x7da1b0ea5210>, <ast.Subscript object at 0x7da1b0ea51b0>]]] | keyword[def] identifier[minOpar] ( identifier[self] , identifier[dangle] , identifier[tdisrupt] = keyword[None] , identifier[_return_raw] = keyword[False] ):
literal[string]
keyword[if] identifier[tdisrupt] keyword[is] keyword[None] : identifier[tdisrupt] = identifier[self] . identifier[_tdisrupt]
identifier[Oparb] =( identifier[dangle] - identifier[self] . identifier[_kick_interpdOpar_poly] . identifier[x] [:- literal[int] ])/ identifier[self] . identifier[_timpact]
identifier[lowx] =(( identifier[Oparb] - identifier[self] . identifier[_kick_interpdOpar_poly] . identifier[c] [- literal[int] ])*( identifier[tdisrupt] - identifier[self] . identifier[_timpact] )+ identifier[Oparb] * identifier[self] . identifier[_timpact] - identifier[dangle] )/(( identifier[tdisrupt] - identifier[self] . identifier[_timpact] )*( literal[int] + identifier[self] . identifier[_kick_interpdOpar_poly] . identifier[c] [- literal[int] ]* identifier[self] . identifier[_timpact] )+ identifier[self] . identifier[_timpact] )
identifier[lowx] [ identifier[lowx] < literal[int] ]= identifier[numpy] . identifier[inf]
identifier[lowbindx] = identifier[numpy] . identifier[argmin] ( identifier[lowx] )
keyword[if] identifier[_return_raw] :
keyword[return] ( identifier[lowbindx] , identifier[lowx] [ identifier[lowbindx] ])
keyword[else] :
keyword[return] identifier[Oparb] [ identifier[lowbindx] ]- identifier[lowx] [ identifier[lowbindx] ] | def minOpar(self, dangle, tdisrupt=None, _return_raw=False):
"""
NAME:
minOpar
PURPOSE:
return the approximate minimum parallel frequency at a given angle
INPUT:
dangle - parallel angle
OUTPUT:
minimum frequency that gets to this parallel angle
HISTORY:
2015-12-28 - Written - Bovy (UofT)
"""
if tdisrupt is None:
tdisrupt = self._tdisrupt # depends on [control=['if'], data=['tdisrupt']]
# First construct the breakpoints for this dangle
Oparb = (dangle - self._kick_interpdOpar_poly.x[:-1]) / self._timpact
# Find the lower limit of the integration in the pw-linear-kick approx.
lowx = ((Oparb - self._kick_interpdOpar_poly.c[-1]) * (tdisrupt - self._timpact) + Oparb * self._timpact - dangle) / ((tdisrupt - self._timpact) * (1.0 + self._kick_interpdOpar_poly.c[-2] * self._timpact) + self._timpact)
lowx[lowx < 0.0] = numpy.inf
lowbindx = numpy.argmin(lowx)
if _return_raw:
return (lowbindx, lowx[lowbindx]) # depends on [control=['if'], data=[]]
else:
return Oparb[lowbindx] - lowx[lowbindx] |
def create(cls, mp, part_number, stream=None, **kwargs):
"""Create a new part object in a multipart object."""
if part_number < 0 or part_number > mp.last_part_number:
raise MultipartInvalidPartNumber()
with db.session.begin_nested():
obj = cls(
multipart=mp,
part_number=part_number,
)
db.session.add(obj)
if stream:
obj.set_contents(stream, **kwargs)
return obj | def function[create, parameter[cls, mp, part_number, stream]]:
constant[Create a new part object in a multipart object.]
if <ast.BoolOp object at 0x7da1b19a3bb0> begin[:]
<ast.Raise object at 0x7da1b19a3be0>
with call[name[db].session.begin_nested, parameter[]] begin[:]
variable[obj] assign[=] call[name[cls], parameter[]]
call[name[db].session.add, parameter[name[obj]]]
if name[stream] begin[:]
call[name[obj].set_contents, parameter[name[stream]]]
return[name[obj]] | keyword[def] identifier[create] ( identifier[cls] , identifier[mp] , identifier[part_number] , identifier[stream] = keyword[None] ,** identifier[kwargs] ):
literal[string]
keyword[if] identifier[part_number] < literal[int] keyword[or] identifier[part_number] > identifier[mp] . identifier[last_part_number] :
keyword[raise] identifier[MultipartInvalidPartNumber] ()
keyword[with] identifier[db] . identifier[session] . identifier[begin_nested] ():
identifier[obj] = identifier[cls] (
identifier[multipart] = identifier[mp] ,
identifier[part_number] = identifier[part_number] ,
)
identifier[db] . identifier[session] . identifier[add] ( identifier[obj] )
keyword[if] identifier[stream] :
identifier[obj] . identifier[set_contents] ( identifier[stream] ,** identifier[kwargs] )
keyword[return] identifier[obj] | def create(cls, mp, part_number, stream=None, **kwargs):
"""Create a new part object in a multipart object."""
if part_number < 0 or part_number > mp.last_part_number:
raise MultipartInvalidPartNumber() # depends on [control=['if'], data=[]]
with db.session.begin_nested():
obj = cls(multipart=mp, part_number=part_number)
db.session.add(obj) # depends on [control=['with'], data=[]]
if stream:
obj.set_contents(stream, **kwargs) # depends on [control=['if'], data=[]]
return obj |
def benchmark_annualize_return(self):
"""基准组合的年化收益
Returns:
[type] -- [description]
"""
return round(
float(
self.calc_annualize_return(
self.benchmark_assets,
self.time_gap
)
),
2
) | def function[benchmark_annualize_return, parameter[self]]:
constant[基准组合的年化收益
Returns:
[type] -- [description]
]
return[call[name[round], parameter[call[name[float], parameter[call[name[self].calc_annualize_return, parameter[name[self].benchmark_assets, name[self].time_gap]]]], constant[2]]]] | keyword[def] identifier[benchmark_annualize_return] ( identifier[self] ):
literal[string]
keyword[return] identifier[round] (
identifier[float] (
identifier[self] . identifier[calc_annualize_return] (
identifier[self] . identifier[benchmark_assets] ,
identifier[self] . identifier[time_gap]
)
),
literal[int]
) | def benchmark_annualize_return(self):
"""基准组合的年化收益
Returns:
[type] -- [description]
"""
return round(float(self.calc_annualize_return(self.benchmark_assets, self.time_gap)), 2) |
def dbus_readBytesFD(self, fd, byte_count):
"""
Reads byte_count bytes from fd and returns them.
"""
f = os.fdopen(fd, 'rb')
result = f.read(byte_count)
f.close()
return bytearray(result) | def function[dbus_readBytesFD, parameter[self, fd, byte_count]]:
constant[
Reads byte_count bytes from fd and returns them.
]
variable[f] assign[=] call[name[os].fdopen, parameter[name[fd], constant[rb]]]
variable[result] assign[=] call[name[f].read, parameter[name[byte_count]]]
call[name[f].close, parameter[]]
return[call[name[bytearray], parameter[name[result]]]] | keyword[def] identifier[dbus_readBytesFD] ( identifier[self] , identifier[fd] , identifier[byte_count] ):
literal[string]
identifier[f] = identifier[os] . identifier[fdopen] ( identifier[fd] , literal[string] )
identifier[result] = identifier[f] . identifier[read] ( identifier[byte_count] )
identifier[f] . identifier[close] ()
keyword[return] identifier[bytearray] ( identifier[result] ) | def dbus_readBytesFD(self, fd, byte_count):
"""
Reads byte_count bytes from fd and returns them.
"""
f = os.fdopen(fd, 'rb')
result = f.read(byte_count)
f.close()
return bytearray(result) |
def decrypt(secret, modN, d, blockSize):
"""reverse function of encrypt"""
numBlocks = [modExp(blocks, d, modN) for blocks in secret]
numList = blocks2numList(numBlocks, blockSize)
return numList2string(numList) | def function[decrypt, parameter[secret, modN, d, blockSize]]:
constant[reverse function of encrypt]
variable[numBlocks] assign[=] <ast.ListComp object at 0x7da1b26af280>
variable[numList] assign[=] call[name[blocks2numList], parameter[name[numBlocks], name[blockSize]]]
return[call[name[numList2string], parameter[name[numList]]]] | keyword[def] identifier[decrypt] ( identifier[secret] , identifier[modN] , identifier[d] , identifier[blockSize] ):
literal[string]
identifier[numBlocks] =[ identifier[modExp] ( identifier[blocks] , identifier[d] , identifier[modN] ) keyword[for] identifier[blocks] keyword[in] identifier[secret] ]
identifier[numList] = identifier[blocks2numList] ( identifier[numBlocks] , identifier[blockSize] )
keyword[return] identifier[numList2string] ( identifier[numList] ) | def decrypt(secret, modN, d, blockSize):
"""reverse function of encrypt"""
numBlocks = [modExp(blocks, d, modN) for blocks in secret]
numList = blocks2numList(numBlocks, blockSize)
return numList2string(numList) |
def get_repo_revision():
'''
Returns git revision string somelike `git rev-parse --short HEAD`
does.
Returns an empty string if anything goes wrong, such as missing
.hg files or an unexpected format of internal HG files or no
mercurial repository found.
'''
repopath = _findrepo()
if not repopath:
return ''
try:
head = open(os.path.join(repopath, 'HEAD'), 'rU').read()
for l in head.splitlines():
l = l.split()
if l[0] == 'ref:':
ref = l[1]
break
else:
ref = None
if ref:
rev = open(os.path.join(repopath, ref), 'rU').read()
rev = rev[:7]
if rev:
return rev
except IOError:
pass
try:
rev = compat.exec_command('git', 'rev-parse', '--short', 'HEAD').strip()
if rev:
return rev
except:
pass
return '' | def function[get_repo_revision, parameter[]]:
constant[
Returns git revision string somelike `git rev-parse --short HEAD`
does.
Returns an empty string if anything goes wrong, such as missing
.hg files or an unexpected format of internal HG files or no
mercurial repository found.
]
variable[repopath] assign[=] call[name[_findrepo], parameter[]]
if <ast.UnaryOp object at 0x7da1b0e6c310> begin[:]
return[constant[]]
<ast.Try object at 0x7da1b0e6cf40>
<ast.Try object at 0x7da204622620>
return[constant[]] | keyword[def] identifier[get_repo_revision] ():
literal[string]
identifier[repopath] = identifier[_findrepo] ()
keyword[if] keyword[not] identifier[repopath] :
keyword[return] literal[string]
keyword[try] :
identifier[head] = identifier[open] ( identifier[os] . identifier[path] . identifier[join] ( identifier[repopath] , literal[string] ), literal[string] ). identifier[read] ()
keyword[for] identifier[l] keyword[in] identifier[head] . identifier[splitlines] ():
identifier[l] = identifier[l] . identifier[split] ()
keyword[if] identifier[l] [ literal[int] ]== literal[string] :
identifier[ref] = identifier[l] [ literal[int] ]
keyword[break]
keyword[else] :
identifier[ref] = keyword[None]
keyword[if] identifier[ref] :
identifier[rev] = identifier[open] ( identifier[os] . identifier[path] . identifier[join] ( identifier[repopath] , identifier[ref] ), literal[string] ). identifier[read] ()
identifier[rev] = identifier[rev] [: literal[int] ]
keyword[if] identifier[rev] :
keyword[return] identifier[rev]
keyword[except] identifier[IOError] :
keyword[pass]
keyword[try] :
identifier[rev] = identifier[compat] . identifier[exec_command] ( literal[string] , literal[string] , literal[string] , literal[string] ). identifier[strip] ()
keyword[if] identifier[rev] :
keyword[return] identifier[rev]
keyword[except] :
keyword[pass]
keyword[return] literal[string] | def get_repo_revision():
"""
Returns git revision string somelike `git rev-parse --short HEAD`
does.
Returns an empty string if anything goes wrong, such as missing
.hg files or an unexpected format of internal HG files or no
mercurial repository found.
"""
repopath = _findrepo()
if not repopath:
return '' # depends on [control=['if'], data=[]]
try:
head = open(os.path.join(repopath, 'HEAD'), 'rU').read()
for l in head.splitlines():
l = l.split()
if l[0] == 'ref:':
ref = l[1]
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['l']]
else:
ref = None
if ref:
rev = open(os.path.join(repopath, ref), 'rU').read()
rev = rev[:7]
if rev:
return rev # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except IOError:
pass # depends on [control=['except'], data=[]]
try:
rev = compat.exec_command('git', 'rev-parse', '--short', 'HEAD').strip()
if rev:
return rev # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except:
pass # depends on [control=['except'], data=[]]
return '' |
def get_classpath_entries_for_targets(self, targets, respect_excludes=True):
"""Gets the classpath products for the given targets.
Products are returned in order, optionally respecting target excludes.
:param targets: The targets to lookup classpath products for.
:param bool respect_excludes: `True` to respect excludes; `False` to ignore them.
:returns: The ordered (conf, classpath entry) tuples.
:rtype: list of (string, :class:`ClasspathEntry`)
"""
# remove the duplicate, preserve the ordering.
return list(OrderedSet([cp for cp, target in self.get_product_target_mappings_for_targets(
targets, respect_excludes)])) | def function[get_classpath_entries_for_targets, parameter[self, targets, respect_excludes]]:
constant[Gets the classpath products for the given targets.
Products are returned in order, optionally respecting target excludes.
:param targets: The targets to lookup classpath products for.
:param bool respect_excludes: `True` to respect excludes; `False` to ignore them.
:returns: The ordered (conf, classpath entry) tuples.
:rtype: list of (string, :class:`ClasspathEntry`)
]
return[call[name[list], parameter[call[name[OrderedSet], parameter[<ast.ListComp object at 0x7da1b22ad300>]]]]] | keyword[def] identifier[get_classpath_entries_for_targets] ( identifier[self] , identifier[targets] , identifier[respect_excludes] = keyword[True] ):
literal[string]
keyword[return] identifier[list] ( identifier[OrderedSet] ([ identifier[cp] keyword[for] identifier[cp] , identifier[target] keyword[in] identifier[self] . identifier[get_product_target_mappings_for_targets] (
identifier[targets] , identifier[respect_excludes] )])) | def get_classpath_entries_for_targets(self, targets, respect_excludes=True):
"""Gets the classpath products for the given targets.
Products are returned in order, optionally respecting target excludes.
:param targets: The targets to lookup classpath products for.
:param bool respect_excludes: `True` to respect excludes; `False` to ignore them.
:returns: The ordered (conf, classpath entry) tuples.
:rtype: list of (string, :class:`ClasspathEntry`)
"""
# remove the duplicate, preserve the ordering.
return list(OrderedSet([cp for (cp, target) in self.get_product_target_mappings_for_targets(targets, respect_excludes)])) |
def beacon(config):
'''
Check if installed packages are the latest versions
and fire an event for those that have upgrades.
.. code-block:: yaml
beacons:
pkg:
- pkgs:
- zsh
- apache2
- refresh: True
'''
ret = []
_refresh = False
pkgs = []
for config_item in config:
if 'pkgs' in config_item:
pkgs += config_item['pkgs']
if 'refresh' in config and config['refresh']:
_refresh = True
for pkg in pkgs:
_installed = __salt__['pkg.version'](pkg)
_latest = __salt__['pkg.latest_version'](pkg, refresh=_refresh)
if _installed and _latest:
_pkg = {'pkg': pkg,
'version': _latest
}
ret.append(_pkg)
return ret | def function[beacon, parameter[config]]:
constant[
Check if installed packages are the latest versions
and fire an event for those that have upgrades.
.. code-block:: yaml
beacons:
pkg:
- pkgs:
- zsh
- apache2
- refresh: True
]
variable[ret] assign[=] list[[]]
variable[_refresh] assign[=] constant[False]
variable[pkgs] assign[=] list[[]]
for taget[name[config_item]] in starred[name[config]] begin[:]
if compare[constant[pkgs] in name[config_item]] begin[:]
<ast.AugAssign object at 0x7da20c6aabf0>
if <ast.BoolOp object at 0x7da20c6aaf20> begin[:]
variable[_refresh] assign[=] constant[True]
for taget[name[pkg]] in starred[name[pkgs]] begin[:]
variable[_installed] assign[=] call[call[name[__salt__]][constant[pkg.version]], parameter[name[pkg]]]
variable[_latest] assign[=] call[call[name[__salt__]][constant[pkg.latest_version]], parameter[name[pkg]]]
if <ast.BoolOp object at 0x7da20c6a8700> begin[:]
variable[_pkg] assign[=] dictionary[[<ast.Constant object at 0x7da20c6a8790>, <ast.Constant object at 0x7da20c6a81f0>], [<ast.Name object at 0x7da20c6a9ba0>, <ast.Name object at 0x7da20c6a9b40>]]
call[name[ret].append, parameter[name[_pkg]]]
return[name[ret]] | keyword[def] identifier[beacon] ( identifier[config] ):
literal[string]
identifier[ret] =[]
identifier[_refresh] = keyword[False]
identifier[pkgs] =[]
keyword[for] identifier[config_item] keyword[in] identifier[config] :
keyword[if] literal[string] keyword[in] identifier[config_item] :
identifier[pkgs] += identifier[config_item] [ literal[string] ]
keyword[if] literal[string] keyword[in] identifier[config] keyword[and] identifier[config] [ literal[string] ]:
identifier[_refresh] = keyword[True]
keyword[for] identifier[pkg] keyword[in] identifier[pkgs] :
identifier[_installed] = identifier[__salt__] [ literal[string] ]( identifier[pkg] )
identifier[_latest] = identifier[__salt__] [ literal[string] ]( identifier[pkg] , identifier[refresh] = identifier[_refresh] )
keyword[if] identifier[_installed] keyword[and] identifier[_latest] :
identifier[_pkg] ={ literal[string] : identifier[pkg] ,
literal[string] : identifier[_latest]
}
identifier[ret] . identifier[append] ( identifier[_pkg] )
keyword[return] identifier[ret] | def beacon(config):
"""
Check if installed packages are the latest versions
and fire an event for those that have upgrades.
.. code-block:: yaml
beacons:
pkg:
- pkgs:
- zsh
- apache2
- refresh: True
"""
ret = []
_refresh = False
pkgs = []
for config_item in config:
if 'pkgs' in config_item:
pkgs += config_item['pkgs'] # depends on [control=['if'], data=['config_item']]
if 'refresh' in config and config['refresh']:
_refresh = True # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['config_item']]
for pkg in pkgs:
_installed = __salt__['pkg.version'](pkg)
_latest = __salt__['pkg.latest_version'](pkg, refresh=_refresh)
if _installed and _latest:
_pkg = {'pkg': pkg, 'version': _latest}
ret.append(_pkg) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['pkg']]
return ret |
def sorter(expr):
"""
This is a sorting function generator that takes an expression optionally
prefixed with a "+" (ascending, the default) or "-" (descending) character.
>>> sorted([{'a': 12}, {'a': 1}, {'a': 4}], sorter("+a"))
[{'a': 1}, {'a': 4}, {'a': 12}]
>>> sorted([{'a': 24}, {'a': 16}, {'a': 32}], sorter("-a"))
[{'a': 32}, {'a': 24}, {'a': 16}]
"""
order = ascending
if not callable(expr):
if ',' in expr:
sorts = map(sorter, expr.split(','))
return multisorter(*sorts)
if expr[0] == '-':
order = descending
expr = expr[1:]
elif expr[0] == '+':
expr = expr[1:]
expr = expression(expr)
def _sort(a, b):
return order(expr(a), expr(b))
return _sort | def function[sorter, parameter[expr]]:
constant[
This is a sorting function generator that takes an expression optionally
prefixed with a "+" (ascending, the default) or "-" (descending) character.
>>> sorted([{'a': 12}, {'a': 1}, {'a': 4}], sorter("+a"))
[{'a': 1}, {'a': 4}, {'a': 12}]
>>> sorted([{'a': 24}, {'a': 16}, {'a': 32}], sorter("-a"))
[{'a': 32}, {'a': 24}, {'a': 16}]
]
variable[order] assign[=] name[ascending]
if <ast.UnaryOp object at 0x7da1b09bb610> begin[:]
if compare[constant[,] in name[expr]] begin[:]
variable[sorts] assign[=] call[name[map], parameter[name[sorter], call[name[expr].split, parameter[constant[,]]]]]
return[call[name[multisorter], parameter[<ast.Starred object at 0x7da1b09bbd60>]]]
if compare[call[name[expr]][constant[0]] equal[==] constant[-]] begin[:]
variable[order] assign[=] name[descending]
variable[expr] assign[=] call[name[expr]][<ast.Slice object at 0x7da1b09bbb80>]
variable[expr] assign[=] call[name[expression], parameter[name[expr]]]
def function[_sort, parameter[a, b]]:
return[call[name[order], parameter[call[name[expr], parameter[name[a]]], call[name[expr], parameter[name[b]]]]]]
return[name[_sort]] | keyword[def] identifier[sorter] ( identifier[expr] ):
literal[string]
identifier[order] = identifier[ascending]
keyword[if] keyword[not] identifier[callable] ( identifier[expr] ):
keyword[if] literal[string] keyword[in] identifier[expr] :
identifier[sorts] = identifier[map] ( identifier[sorter] , identifier[expr] . identifier[split] ( literal[string] ))
keyword[return] identifier[multisorter] (* identifier[sorts] )
keyword[if] identifier[expr] [ literal[int] ]== literal[string] :
identifier[order] = identifier[descending]
identifier[expr] = identifier[expr] [ literal[int] :]
keyword[elif] identifier[expr] [ literal[int] ]== literal[string] :
identifier[expr] = identifier[expr] [ literal[int] :]
identifier[expr] = identifier[expression] ( identifier[expr] )
keyword[def] identifier[_sort] ( identifier[a] , identifier[b] ):
keyword[return] identifier[order] ( identifier[expr] ( identifier[a] ), identifier[expr] ( identifier[b] ))
keyword[return] identifier[_sort] | def sorter(expr):
"""
This is a sorting function generator that takes an expression optionally
prefixed with a "+" (ascending, the default) or "-" (descending) character.
>>> sorted([{'a': 12}, {'a': 1}, {'a': 4}], sorter("+a"))
[{'a': 1}, {'a': 4}, {'a': 12}]
>>> sorted([{'a': 24}, {'a': 16}, {'a': 32}], sorter("-a"))
[{'a': 32}, {'a': 24}, {'a': 16}]
"""
order = ascending
if not callable(expr):
if ',' in expr:
sorts = map(sorter, expr.split(','))
return multisorter(*sorts) # depends on [control=['if'], data=['expr']]
if expr[0] == '-':
order = descending
expr = expr[1:] # depends on [control=['if'], data=[]]
elif expr[0] == '+':
expr = expr[1:] # depends on [control=['if'], data=[]]
expr = expression(expr) # depends on [control=['if'], data=[]]
def _sort(a, b):
return order(expr(a), expr(b))
return _sort |
def beamcentery(self) -> ErrorValue:
"""Y (row) coordinate of the beam center, pixel units, 0-based."""
try:
return ErrorValue(self._data['geometry']['beamposx'],
self._data['geometry']['beamposx.err'])
except KeyError:
return ErrorValue(self._data['geometry']['beamposx'],
0.0) | def function[beamcentery, parameter[self]]:
constant[Y (row) coordinate of the beam center, pixel units, 0-based.]
<ast.Try object at 0x7da1b10ed0c0> | keyword[def] identifier[beamcentery] ( identifier[self] )-> identifier[ErrorValue] :
literal[string]
keyword[try] :
keyword[return] identifier[ErrorValue] ( identifier[self] . identifier[_data] [ literal[string] ][ literal[string] ],
identifier[self] . identifier[_data] [ literal[string] ][ literal[string] ])
keyword[except] identifier[KeyError] :
keyword[return] identifier[ErrorValue] ( identifier[self] . identifier[_data] [ literal[string] ][ literal[string] ],
literal[int] ) | def beamcentery(self) -> ErrorValue:
"""Y (row) coordinate of the beam center, pixel units, 0-based."""
try:
return ErrorValue(self._data['geometry']['beamposx'], self._data['geometry']['beamposx.err']) # depends on [control=['try'], data=[]]
except KeyError:
return ErrorValue(self._data['geometry']['beamposx'], 0.0) # depends on [control=['except'], data=[]] |
def Chen_Yang(self, T, full=True, quick=True):
r'''Method to calculate `a_alpha` and its first and second
derivatives according to Hamid and Yang (2017) [1]_. Returns `a_alpha`,
`da_alpha_dT`, and `d2a_alpha_dT2`. See `GCEOS.a_alpha_and_derivatives`
for more documentation. Seven coefficients needed.
.. math::
\alpha = e^{\left(- c_{3}^{\log{\left (\frac{T}{Tc} \right )}}
+ 1\right) \left(- \frac{T c_{2}}{Tc} + c_{1}\right)}
References
----------
.. [1] Chen, Zehua, and Daoyong Yang. "Optimization of the Reduced
Temperature Associated with Peng–Robinson Equation of State and
Soave-Redlich-Kwong Equation of State To Improve Vapor Pressure
Prediction for Heavy Hydrocarbon Compounds." Journal of Chemical &
Engineering Data, August 31, 2017. doi:10.1021/acs.jced.7b00496.
'''
c1, c2, c3, c4, c5, c6, c7 = self.alpha_function_coeffs
T, Tc, a = self.T, self.Tc, self.a
a_alpha = a*exp(c4*log((-sqrt(T/Tc) + 1)*(c5 + c6*omega + c7*omega**2) + 1)**2 + (-T/Tc + 1)*(c1 + c2*omega + c3*omega**2))
if not full:
return a_alpha
else:
da_alpha_dT = a*(-(c1 + c2*omega + c3*omega**2)/Tc - c4*sqrt(T/Tc)*(c5 + c6*omega + c7*omega**2)*log((-sqrt(T/Tc) + 1)*(c5 + c6*omega + c7*omega**2) + 1)/(T*((-sqrt(T/Tc) + 1)*(c5 + c6*omega + c7*omega**2) + 1)))*exp(c4*log((-sqrt(T/Tc) + 1)*(c5 + c6*omega + c7*omega**2) + 1)**2 + (-T/Tc + 1)*(c1 + c2*omega + c3*omega**2))
d2a_alpha_dT2 = a*(((c1 + c2*omega + c3*omega**2)/Tc - c4*sqrt(T/Tc)*(c5 + c6*omega + c7*omega**2)*log(-(sqrt(T/Tc) - 1)*(c5 + c6*omega + c7*omega**2) + 1)/(T*((sqrt(T/Tc) - 1)*(c5 + c6*omega + c7*omega**2) - 1)))**2 - c4*(c5 + c6*omega + c7*omega**2)*((c5 + c6*omega + c7*omega**2)*log(-(sqrt(T/Tc) - 1)*(c5 + c6*omega + c7*omega**2) + 1)/(Tc*((sqrt(T/Tc) - 1)*(c5 + c6*omega + c7*omega**2) - 1)) - (c5 + c6*omega + c7*omega**2)/(Tc*((sqrt(T/Tc) - 1)*(c5 + c6*omega + c7*omega**2) - 1)) + sqrt(T/Tc)*log(-(sqrt(T/Tc) - 1)*(c5 + c6*omega + c7*omega**2) + 1)/T)/(2*T*((sqrt(T/Tc) - 1)*(c5 + c6*omega + c7*omega**2) - 1)))*exp(c4*log(-(sqrt(T/Tc) - 1)*(c5 + c6*omega + c7*omega**2) + 1)**2 - (T/Tc - 1)*(c1 + c2*omega + c3*omega**2))
return a_alpha, da_alpha_dT, d2a_alpha_dT2 | def function[Chen_Yang, parameter[self, T, full, quick]]:
constant[Method to calculate `a_alpha` and its first and second
derivatives according to Hamid and Yang (2017) [1]_. Returns `a_alpha`,
`da_alpha_dT`, and `d2a_alpha_dT2`. See `GCEOS.a_alpha_and_derivatives`
for more documentation. Seven coefficients needed.
.. math::
\alpha = e^{\left(- c_{3}^{\log{\left (\frac{T}{Tc} \right )}}
+ 1\right) \left(- \frac{T c_{2}}{Tc} + c_{1}\right)}
References
----------
.. [1] Chen, Zehua, and Daoyong Yang. "Optimization of the Reduced
Temperature Associated with Peng–Robinson Equation of State and
Soave-Redlich-Kwong Equation of State To Improve Vapor Pressure
Prediction for Heavy Hydrocarbon Compounds." Journal of Chemical &
Engineering Data, August 31, 2017. doi:10.1021/acs.jced.7b00496.
]
<ast.Tuple object at 0x7da18f810880> assign[=] name[self].alpha_function_coeffs
<ast.Tuple object at 0x7da18f810a60> assign[=] tuple[[<ast.Attribute object at 0x7da18f813760>, <ast.Attribute object at 0x7da18f813610>, <ast.Attribute object at 0x7da18f812d10>]]
variable[a_alpha] assign[=] binary_operation[name[a] * call[name[exp], parameter[binary_operation[binary_operation[name[c4] * binary_operation[call[name[log], parameter[binary_operation[binary_operation[binary_operation[<ast.UnaryOp object at 0x7da18f812020> + constant[1]] * binary_operation[binary_operation[name[c5] + binary_operation[name[c6] * name[omega]]] + binary_operation[name[c7] * binary_operation[name[omega] ** constant[2]]]]] + constant[1]]]] ** constant[2]]] + binary_operation[binary_operation[binary_operation[<ast.UnaryOp object at 0x7da18f813d00> / name[Tc]] + constant[1]] * binary_operation[binary_operation[name[c1] + binary_operation[name[c2] * name[omega]]] + binary_operation[name[c3] * binary_operation[name[omega] ** constant[2]]]]]]]]]
if <ast.UnaryOp object at 0x7da18f810df0> begin[:]
return[name[a_alpha]] | keyword[def] identifier[Chen_Yang] ( identifier[self] , identifier[T] , identifier[full] = keyword[True] , identifier[quick] = keyword[True] ):
literal[string]
identifier[c1] , identifier[c2] , identifier[c3] , identifier[c4] , identifier[c5] , identifier[c6] , identifier[c7] = identifier[self] . identifier[alpha_function_coeffs]
identifier[T] , identifier[Tc] , identifier[a] = identifier[self] . identifier[T] , identifier[self] . identifier[Tc] , identifier[self] . identifier[a]
identifier[a_alpha] = identifier[a] * identifier[exp] ( identifier[c4] * identifier[log] ((- identifier[sqrt] ( identifier[T] / identifier[Tc] )+ literal[int] )*( identifier[c5] + identifier[c6] * identifier[omega] + identifier[c7] * identifier[omega] ** literal[int] )+ literal[int] )** literal[int] +(- identifier[T] / identifier[Tc] + literal[int] )*( identifier[c1] + identifier[c2] * identifier[omega] + identifier[c3] * identifier[omega] ** literal[int] ))
keyword[if] keyword[not] identifier[full] :
keyword[return] identifier[a_alpha]
keyword[else] :
identifier[da_alpha_dT] = identifier[a] *(-( identifier[c1] + identifier[c2] * identifier[omega] + identifier[c3] * identifier[omega] ** literal[int] )/ identifier[Tc] - identifier[c4] * identifier[sqrt] ( identifier[T] / identifier[Tc] )*( identifier[c5] + identifier[c6] * identifier[omega] + identifier[c7] * identifier[omega] ** literal[int] )* identifier[log] ((- identifier[sqrt] ( identifier[T] / identifier[Tc] )+ literal[int] )*( identifier[c5] + identifier[c6] * identifier[omega] + identifier[c7] * identifier[omega] ** literal[int] )+ literal[int] )/( identifier[T] *((- identifier[sqrt] ( identifier[T] / identifier[Tc] )+ literal[int] )*( identifier[c5] + identifier[c6] * identifier[omega] + identifier[c7] * identifier[omega] ** literal[int] )+ literal[int] )))* identifier[exp] ( identifier[c4] * identifier[log] ((- identifier[sqrt] ( identifier[T] / identifier[Tc] )+ literal[int] )*( identifier[c5] + identifier[c6] * identifier[omega] + identifier[c7] * identifier[omega] ** literal[int] )+ literal[int] )** literal[int] +(- identifier[T] / identifier[Tc] + literal[int] )*( identifier[c1] + identifier[c2] * identifier[omega] + identifier[c3] * identifier[omega] ** literal[int] ))
identifier[d2a_alpha_dT2] = identifier[a] *((( identifier[c1] + identifier[c2] * identifier[omega] + identifier[c3] * identifier[omega] ** literal[int] )/ identifier[Tc] - identifier[c4] * identifier[sqrt] ( identifier[T] / identifier[Tc] )*( identifier[c5] + identifier[c6] * identifier[omega] + identifier[c7] * identifier[omega] ** literal[int] )* identifier[log] (-( identifier[sqrt] ( identifier[T] / identifier[Tc] )- literal[int] )*( identifier[c5] + identifier[c6] * identifier[omega] + identifier[c7] * identifier[omega] ** literal[int] )+ literal[int] )/( identifier[T] *(( identifier[sqrt] ( identifier[T] / identifier[Tc] )- literal[int] )*( identifier[c5] + identifier[c6] * identifier[omega] + identifier[c7] * identifier[omega] ** literal[int] )- literal[int] )))** literal[int] - identifier[c4] *( identifier[c5] + identifier[c6] * identifier[omega] + identifier[c7] * identifier[omega] ** literal[int] )*(( identifier[c5] + identifier[c6] * identifier[omega] + identifier[c7] * identifier[omega] ** literal[int] )* identifier[log] (-( identifier[sqrt] ( identifier[T] / identifier[Tc] )- literal[int] )*( identifier[c5] + identifier[c6] * identifier[omega] + identifier[c7] * identifier[omega] ** literal[int] )+ literal[int] )/( identifier[Tc] *(( identifier[sqrt] ( identifier[T] / identifier[Tc] )- literal[int] )*( identifier[c5] + identifier[c6] * identifier[omega] + identifier[c7] * identifier[omega] ** literal[int] )- literal[int] ))-( identifier[c5] + identifier[c6] * identifier[omega] + identifier[c7] * identifier[omega] ** literal[int] )/( identifier[Tc] *(( identifier[sqrt] ( identifier[T] / identifier[Tc] )- literal[int] )*( identifier[c5] + identifier[c6] * identifier[omega] + identifier[c7] * identifier[omega] ** literal[int] )- literal[int] ))+ identifier[sqrt] ( identifier[T] / identifier[Tc] )* identifier[log] (-( identifier[sqrt] ( identifier[T] / identifier[Tc] )- literal[int] )*( identifier[c5] + identifier[c6] * identifier[omega] + identifier[c7] * identifier[omega] ** literal[int] )+ literal[int] )/ identifier[T] )/( literal[int] * identifier[T] *(( identifier[sqrt] ( identifier[T] / identifier[Tc] )- literal[int] )*( identifier[c5] + identifier[c6] * identifier[omega] + identifier[c7] * identifier[omega] ** literal[int] )- literal[int] )))* identifier[exp] ( identifier[c4] * identifier[log] (-( identifier[sqrt] ( identifier[T] / identifier[Tc] )- literal[int] )*( identifier[c5] + identifier[c6] * identifier[omega] + identifier[c7] * identifier[omega] ** literal[int] )+ literal[int] )** literal[int] -( identifier[T] / identifier[Tc] - literal[int] )*( identifier[c1] + identifier[c2] * identifier[omega] + identifier[c3] * identifier[omega] ** literal[int] ))
keyword[return] identifier[a_alpha] , identifier[da_alpha_dT] , identifier[d2a_alpha_dT2] | def Chen_Yang(self, T, full=True, quick=True):
"""Method to calculate `a_alpha` and its first and second
derivatives according to Hamid and Yang (2017) [1]_. Returns `a_alpha`,
`da_alpha_dT`, and `d2a_alpha_dT2`. See `GCEOS.a_alpha_and_derivatives`
for more documentation. Seven coefficients needed.
.. math::
\\alpha = e^{\\left(- c_{3}^{\\log{\\left (\\frac{T}{Tc} \\right )}}
+ 1\\right) \\left(- \\frac{T c_{2}}{Tc} + c_{1}\\right)}
References
----------
.. [1] Chen, Zehua, and Daoyong Yang. "Optimization of the Reduced
Temperature Associated with Peng–Robinson Equation of State and
Soave-Redlich-Kwong Equation of State To Improve Vapor Pressure
Prediction for Heavy Hydrocarbon Compounds." Journal of Chemical &
Engineering Data, August 31, 2017. doi:10.1021/acs.jced.7b00496.
"""
(c1, c2, c3, c4, c5, c6, c7) = self.alpha_function_coeffs
(T, Tc, a) = (self.T, self.Tc, self.a)
a_alpha = a * exp(c4 * log((-sqrt(T / Tc) + 1) * (c5 + c6 * omega + c7 * omega ** 2) + 1) ** 2 + (-T / Tc + 1) * (c1 + c2 * omega + c3 * omega ** 2))
if not full:
return a_alpha # depends on [control=['if'], data=[]]
else:
da_alpha_dT = a * (-(c1 + c2 * omega + c3 * omega ** 2) / Tc - c4 * sqrt(T / Tc) * (c5 + c6 * omega + c7 * omega ** 2) * log((-sqrt(T / Tc) + 1) * (c5 + c6 * omega + c7 * omega ** 2) + 1) / (T * ((-sqrt(T / Tc) + 1) * (c5 + c6 * omega + c7 * omega ** 2) + 1))) * exp(c4 * log((-sqrt(T / Tc) + 1) * (c5 + c6 * omega + c7 * omega ** 2) + 1) ** 2 + (-T / Tc + 1) * (c1 + c2 * omega + c3 * omega ** 2))
d2a_alpha_dT2 = a * (((c1 + c2 * omega + c3 * omega ** 2) / Tc - c4 * sqrt(T / Tc) * (c5 + c6 * omega + c7 * omega ** 2) * log(-(sqrt(T / Tc) - 1) * (c5 + c6 * omega + c7 * omega ** 2) + 1) / (T * ((sqrt(T / Tc) - 1) * (c5 + c6 * omega + c7 * omega ** 2) - 1))) ** 2 - c4 * (c5 + c6 * omega + c7 * omega ** 2) * ((c5 + c6 * omega + c7 * omega ** 2) * log(-(sqrt(T / Tc) - 1) * (c5 + c6 * omega + c7 * omega ** 2) + 1) / (Tc * ((sqrt(T / Tc) - 1) * (c5 + c6 * omega + c7 * omega ** 2) - 1)) - (c5 + c6 * omega + c7 * omega ** 2) / (Tc * ((sqrt(T / Tc) - 1) * (c5 + c6 * omega + c7 * omega ** 2) - 1)) + sqrt(T / Tc) * log(-(sqrt(T / Tc) - 1) * (c5 + c6 * omega + c7 * omega ** 2) + 1) / T) / (2 * T * ((sqrt(T / Tc) - 1) * (c5 + c6 * omega + c7 * omega ** 2) - 1))) * exp(c4 * log(-(sqrt(T / Tc) - 1) * (c5 + c6 * omega + c7 * omega ** 2) + 1) ** 2 - (T / Tc - 1) * (c1 + c2 * omega + c3 * omega ** 2))
return (a_alpha, da_alpha_dT, d2a_alpha_dT2) |
def diffusion_driver(self):
""" diffusion driver are the underlying `dW` of each process `X` in a SDE like `dX = m dt + s dW`
:return list(StochasticProcess):
"""
if self._diffusion_driver is None:
return self,
if isinstance(self._diffusion_driver, list):
return tuple(self._diffusion_driver)
if isinstance(self._diffusion_driver, tuple):
return self._diffusion_driver
return self._diffusion_driver, | def function[diffusion_driver, parameter[self]]:
constant[ diffusion driver are the underlying `dW` of each process `X` in a SDE like `dX = m dt + s dW`
:return list(StochasticProcess):
]
if compare[name[self]._diffusion_driver is constant[None]] begin[:]
return[tuple[[<ast.Name object at 0x7da1b28f02b0>]]]
if call[name[isinstance], parameter[name[self]._diffusion_driver, name[list]]] begin[:]
return[call[name[tuple], parameter[name[self]._diffusion_driver]]]
if call[name[isinstance], parameter[name[self]._diffusion_driver, name[tuple]]] begin[:]
return[name[self]._diffusion_driver]
return[tuple[[<ast.Attribute object at 0x7da1b28f2e60>]]] | keyword[def] identifier[diffusion_driver] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[_diffusion_driver] keyword[is] keyword[None] :
keyword[return] identifier[self] ,
keyword[if] identifier[isinstance] ( identifier[self] . identifier[_diffusion_driver] , identifier[list] ):
keyword[return] identifier[tuple] ( identifier[self] . identifier[_diffusion_driver] )
keyword[if] identifier[isinstance] ( identifier[self] . identifier[_diffusion_driver] , identifier[tuple] ):
keyword[return] identifier[self] . identifier[_diffusion_driver]
keyword[return] identifier[self] . identifier[_diffusion_driver] , | def diffusion_driver(self):
""" diffusion driver are the underlying `dW` of each process `X` in a SDE like `dX = m dt + s dW`
:return list(StochasticProcess):
"""
if self._diffusion_driver is None:
return (self,) # depends on [control=['if'], data=[]]
if isinstance(self._diffusion_driver, list):
return tuple(self._diffusion_driver) # depends on [control=['if'], data=[]]
if isinstance(self._diffusion_driver, tuple):
return self._diffusion_driver # depends on [control=['if'], data=[]]
return (self._diffusion_driver,) |
def start_numbered_list(self):
"""Start a numbered list."""
self._ordered = True
self.start_container(List, stylename='_numbered_list')
self.set_next_paragraph_style('numbered-list-paragraph'
if self._item_level <= 0
else 'sublist-paragraph') | def function[start_numbered_list, parameter[self]]:
constant[Start a numbered list.]
name[self]._ordered assign[=] constant[True]
call[name[self].start_container, parameter[name[List]]]
call[name[self].set_next_paragraph_style, parameter[<ast.IfExp object at 0x7da1b057b7c0>]] | keyword[def] identifier[start_numbered_list] ( identifier[self] ):
literal[string]
identifier[self] . identifier[_ordered] = keyword[True]
identifier[self] . identifier[start_container] ( identifier[List] , identifier[stylename] = literal[string] )
identifier[self] . identifier[set_next_paragraph_style] ( literal[string]
keyword[if] identifier[self] . identifier[_item_level] <= literal[int]
keyword[else] literal[string] ) | def start_numbered_list(self):
"""Start a numbered list."""
self._ordered = True
self.start_container(List, stylename='_numbered_list')
self.set_next_paragraph_style('numbered-list-paragraph' if self._item_level <= 0 else 'sublist-paragraph') |
def _set_dst_ip_any(self, v, load=False):
"""
Setter method for dst_ip_any, mapped from YANG variable /overlay/access_list/type/vxlan/extended/ext_seq/dst_ip_any (empty)
If this variable is read-only (config: false) in the
source YANG file, then _set_dst_ip_any is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_dst_ip_any() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="dst-ip-any", rest_name="dst-ip-any", parent=self, choice=(u'choice-dst-ip', u'case-dst-ip-any'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'dst ip address: any', u'display-when': u'(../tag)', u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-vxlan-visibility', defining_module='brocade-vxlan-visibility', yang_type='empty', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """dst_ip_any must be of a type compatible with empty""",
'defined-type': "empty",
'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="dst-ip-any", rest_name="dst-ip-any", parent=self, choice=(u'choice-dst-ip', u'case-dst-ip-any'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'dst ip address: any', u'display-when': u'(../tag)', u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-vxlan-visibility', defining_module='brocade-vxlan-visibility', yang_type='empty', is_config=True)""",
})
self.__dst_ip_any = t
if hasattr(self, '_set'):
self._set() | def function[_set_dst_ip_any, parameter[self, v, load]]:
constant[
Setter method for dst_ip_any, mapped from YANG variable /overlay/access_list/type/vxlan/extended/ext_seq/dst_ip_any (empty)
If this variable is read-only (config: false) in the
source YANG file, then _set_dst_ip_any is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_dst_ip_any() directly.
]
if call[name[hasattr], parameter[name[v], constant[_utype]]] begin[:]
variable[v] assign[=] call[name[v]._utype, parameter[name[v]]]
<ast.Try object at 0x7da18f00eda0>
name[self].__dst_ip_any assign[=] name[t]
if call[name[hasattr], parameter[name[self], constant[_set]]] begin[:]
call[name[self]._set, parameter[]] | keyword[def] identifier[_set_dst_ip_any] ( identifier[self] , identifier[v] , identifier[load] = keyword[False] ):
literal[string]
keyword[if] identifier[hasattr] ( identifier[v] , literal[string] ):
identifier[v] = identifier[v] . identifier[_utype] ( identifier[v] )
keyword[try] :
identifier[t] = identifier[YANGDynClass] ( identifier[v] , identifier[base] = identifier[YANGBool] , identifier[is_leaf] = keyword[True] , identifier[yang_name] = literal[string] , identifier[rest_name] = literal[string] , identifier[parent] = identifier[self] , identifier[choice] =( literal[string] , literal[string] ), identifier[path_helper] = identifier[self] . identifier[_path_helper] , identifier[extmethods] = identifier[self] . identifier[_extmethods] , identifier[register_paths] = keyword[True] , identifier[extensions] ={ literal[string] :{ literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : keyword[None] }}, identifier[namespace] = literal[string] , identifier[defining_module] = literal[string] , identifier[yang_type] = literal[string] , identifier[is_config] = keyword[True] )
keyword[except] ( identifier[TypeError] , identifier[ValueError] ):
keyword[raise] identifier[ValueError] ({
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
})
identifier[self] . identifier[__dst_ip_any] = identifier[t]
keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ):
identifier[self] . identifier[_set] () | def _set_dst_ip_any(self, v, load=False):
"""
Setter method for dst_ip_any, mapped from YANG variable /overlay/access_list/type/vxlan/extended/ext_seq/dst_ip_any (empty)
If this variable is read-only (config: false) in the
source YANG file, then _set_dst_ip_any is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_dst_ip_any() directly.
"""
if hasattr(v, '_utype'):
v = v._utype(v) # depends on [control=['if'], data=[]]
try:
t = YANGDynClass(v, base=YANGBool, is_leaf=True, yang_name='dst-ip-any', rest_name='dst-ip-any', parent=self, choice=(u'choice-dst-ip', u'case-dst-ip-any'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'dst ip address: any', u'display-when': u'(../tag)', u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-vxlan-visibility', defining_module='brocade-vxlan-visibility', yang_type='empty', is_config=True) # depends on [control=['try'], data=[]]
except (TypeError, ValueError):
raise ValueError({'error-string': 'dst_ip_any must be of a type compatible with empty', 'defined-type': 'empty', 'generated-type': 'YANGDynClass(base=YANGBool, is_leaf=True, yang_name="dst-ip-any", rest_name="dst-ip-any", parent=self, choice=(u\'choice-dst-ip\', u\'case-dst-ip-any\'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u\'tailf-common\': {u\'info\': u\'dst ip address: any\', u\'display-when\': u\'(../tag)\', u\'cli-incomplete-command\': None}}, namespace=\'urn:brocade.com:mgmt:brocade-vxlan-visibility\', defining_module=\'brocade-vxlan-visibility\', yang_type=\'empty\', is_config=True)'}) # depends on [control=['except'], data=[]]
self.__dst_ip_any = t
if hasattr(self, '_set'):
self._set() # depends on [control=['if'], data=[]] |
def get_attr_from_dict(inspected_obj: Any, attr_name: str) -> Any:
"""Ensures we get descriptor object instead of its return value.
"""
if inspect.isclass(inspected_obj):
obj_list = [inspected_obj] + list(inspected_obj.__mro__)
else:
obj_list = [inspected_obj] + list(inspected_obj.__class__.__mro__)
for obj in obj_list:
if hasattr(obj, '__dict__') and attr_name in obj.__dict__:
return obj.__dict__[attr_name]
# This happens when user-defined __dir__ returns something that's not
# in any __dict__. See test_override_dir.
# Returns attr_name so that it's treated as a normal property.
return attr_name | def function[get_attr_from_dict, parameter[inspected_obj, attr_name]]:
constant[Ensures we get descriptor object instead of its return value.
]
if call[name[inspect].isclass, parameter[name[inspected_obj]]] begin[:]
variable[obj_list] assign[=] binary_operation[list[[<ast.Name object at 0x7da18dc9ac80>]] + call[name[list], parameter[name[inspected_obj].__mro__]]]
for taget[name[obj]] in starred[name[obj_list]] begin[:]
if <ast.BoolOp object at 0x7da2041daad0> begin[:]
return[call[name[obj].__dict__][name[attr_name]]]
return[name[attr_name]] | keyword[def] identifier[get_attr_from_dict] ( identifier[inspected_obj] : identifier[Any] , identifier[attr_name] : identifier[str] )-> identifier[Any] :
literal[string]
keyword[if] identifier[inspect] . identifier[isclass] ( identifier[inspected_obj] ):
identifier[obj_list] =[ identifier[inspected_obj] ]+ identifier[list] ( identifier[inspected_obj] . identifier[__mro__] )
keyword[else] :
identifier[obj_list] =[ identifier[inspected_obj] ]+ identifier[list] ( identifier[inspected_obj] . identifier[__class__] . identifier[__mro__] )
keyword[for] identifier[obj] keyword[in] identifier[obj_list] :
keyword[if] identifier[hasattr] ( identifier[obj] , literal[string] ) keyword[and] identifier[attr_name] keyword[in] identifier[obj] . identifier[__dict__] :
keyword[return] identifier[obj] . identifier[__dict__] [ identifier[attr_name] ]
keyword[return] identifier[attr_name] | def get_attr_from_dict(inspected_obj: Any, attr_name: str) -> Any:
"""Ensures we get descriptor object instead of its return value.
"""
if inspect.isclass(inspected_obj):
obj_list = [inspected_obj] + list(inspected_obj.__mro__) # depends on [control=['if'], data=[]]
else:
obj_list = [inspected_obj] + list(inspected_obj.__class__.__mro__)
for obj in obj_list:
if hasattr(obj, '__dict__') and attr_name in obj.__dict__:
return obj.__dict__[attr_name] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['obj']]
# This happens when user-defined __dir__ returns something that's not
# in any __dict__. See test_override_dir.
# Returns attr_name so that it's treated as a normal property.
return attr_name |
def post(method, hmc, uri, uri_parms, body, logon_required,
wait_for_completion):
"""Operation: Mount ISO Image (requires DPM mode)."""
assert wait_for_completion is True # synchronous operation
partition_oid = uri_parms[0]
partition_uri = '/api/partitions/' + partition_oid
try:
partition = hmc.lookup_by_uri(partition_uri)
except KeyError:
raise InvalidResourceError(method, uri)
cpc = partition.manager.parent
assert cpc.dpm_enabled
check_valid_cpc_status(method, uri, cpc)
check_partition_status(method, uri, partition,
invalid_statuses=['starting', 'stopping'])
# Parse and check required query parameters
query_parms = parse_query_parms(method, uri, uri_parms[1])
try:
image_name = query_parms['image-name']
except KeyError:
raise BadRequestError(
method, uri, reason=1,
message="Missing required URI query parameter 'image-name'")
try:
ins_file_name = query_parms['ins-file-name']
except KeyError:
raise BadRequestError(
method, uri, reason=1,
message="Missing required URI query parameter 'ins-file-name'")
# Reflect the effect of mounting in the partition properties
partition.properties['boot-iso-image-name'] = image_name
partition.properties['boot-iso-ins-file'] = ins_file_name
return {} | def function[post, parameter[method, hmc, uri, uri_parms, body, logon_required, wait_for_completion]]:
constant[Operation: Mount ISO Image (requires DPM mode).]
assert[compare[name[wait_for_completion] is constant[True]]]
variable[partition_oid] assign[=] call[name[uri_parms]][constant[0]]
variable[partition_uri] assign[=] binary_operation[constant[/api/partitions/] + name[partition_oid]]
<ast.Try object at 0x7da1b05901c0>
variable[cpc] assign[=] name[partition].manager.parent
assert[name[cpc].dpm_enabled]
call[name[check_valid_cpc_status], parameter[name[method], name[uri], name[cpc]]]
call[name[check_partition_status], parameter[name[method], name[uri], name[partition]]]
variable[query_parms] assign[=] call[name[parse_query_parms], parameter[name[method], name[uri], call[name[uri_parms]][constant[1]]]]
<ast.Try object at 0x7da1b0591d50>
<ast.Try object at 0x7da1b26adba0>
call[name[partition].properties][constant[boot-iso-image-name]] assign[=] name[image_name]
call[name[partition].properties][constant[boot-iso-ins-file]] assign[=] name[ins_file_name]
return[dictionary[[], []]] | keyword[def] identifier[post] ( identifier[method] , identifier[hmc] , identifier[uri] , identifier[uri_parms] , identifier[body] , identifier[logon_required] ,
identifier[wait_for_completion] ):
literal[string]
keyword[assert] identifier[wait_for_completion] keyword[is] keyword[True]
identifier[partition_oid] = identifier[uri_parms] [ literal[int] ]
identifier[partition_uri] = literal[string] + identifier[partition_oid]
keyword[try] :
identifier[partition] = identifier[hmc] . identifier[lookup_by_uri] ( identifier[partition_uri] )
keyword[except] identifier[KeyError] :
keyword[raise] identifier[InvalidResourceError] ( identifier[method] , identifier[uri] )
identifier[cpc] = identifier[partition] . identifier[manager] . identifier[parent]
keyword[assert] identifier[cpc] . identifier[dpm_enabled]
identifier[check_valid_cpc_status] ( identifier[method] , identifier[uri] , identifier[cpc] )
identifier[check_partition_status] ( identifier[method] , identifier[uri] , identifier[partition] ,
identifier[invalid_statuses] =[ literal[string] , literal[string] ])
identifier[query_parms] = identifier[parse_query_parms] ( identifier[method] , identifier[uri] , identifier[uri_parms] [ literal[int] ])
keyword[try] :
identifier[image_name] = identifier[query_parms] [ literal[string] ]
keyword[except] identifier[KeyError] :
keyword[raise] identifier[BadRequestError] (
identifier[method] , identifier[uri] , identifier[reason] = literal[int] ,
identifier[message] = literal[string] )
keyword[try] :
identifier[ins_file_name] = identifier[query_parms] [ literal[string] ]
keyword[except] identifier[KeyError] :
keyword[raise] identifier[BadRequestError] (
identifier[method] , identifier[uri] , identifier[reason] = literal[int] ,
identifier[message] = literal[string] )
identifier[partition] . identifier[properties] [ literal[string] ]= identifier[image_name]
identifier[partition] . identifier[properties] [ literal[string] ]= identifier[ins_file_name]
keyword[return] {} | def post(method, hmc, uri, uri_parms, body, logon_required, wait_for_completion):
"""Operation: Mount ISO Image (requires DPM mode)."""
assert wait_for_completion is True # synchronous operation
partition_oid = uri_parms[0]
partition_uri = '/api/partitions/' + partition_oid
try:
partition = hmc.lookup_by_uri(partition_uri) # depends on [control=['try'], data=[]]
except KeyError:
raise InvalidResourceError(method, uri) # depends on [control=['except'], data=[]]
cpc = partition.manager.parent
assert cpc.dpm_enabled
check_valid_cpc_status(method, uri, cpc)
check_partition_status(method, uri, partition, invalid_statuses=['starting', 'stopping'])
# Parse and check required query parameters
query_parms = parse_query_parms(method, uri, uri_parms[1])
try:
image_name = query_parms['image-name'] # depends on [control=['try'], data=[]]
except KeyError:
raise BadRequestError(method, uri, reason=1, message="Missing required URI query parameter 'image-name'") # depends on [control=['except'], data=[]]
try:
ins_file_name = query_parms['ins-file-name'] # depends on [control=['try'], data=[]]
except KeyError:
raise BadRequestError(method, uri, reason=1, message="Missing required URI query parameter 'ins-file-name'") # depends on [control=['except'], data=[]]
# Reflect the effect of mounting in the partition properties
partition.properties['boot-iso-image-name'] = image_name
partition.properties['boot-iso-ins-file'] = ins_file_name
return {} |
def Fierz_to_Bern_lep(C, ddll):
"""From semileptonic Fierz basis to Bern semileptonic basis for Class V.
C should be the corresponding leptonic Fierz basis and
`ddll` should be of the form 'sbl_enu_tau', 'dbl_munu_e' etc."""
ind = ddll.replace('l_','').replace('nu_','')
dic = {
'1' + ind : 5 * C['F'+ ind + '10'] / 3 + C['F'+ ind + '9'],
'3' + ind : -C['F' + ind + '10'] / 6,
'5' + ind : C['F' + ind + 'S'] - 5 * C['F' + ind + 'P'] / 3,
'7' + ind : 2 * C['F' + ind + 'P'] / 3 + C['F' + ind + 'T']
+ C['F' + ind + 'T5'],
'9' + ind : C['F' + ind + 'P'] / 24,
'1p' + ind : C['F' + ind + '9p'] - 5 * C['F' + ind + '10p'] / 3,
'3p' + ind : C['F' + ind + '10p'] / 6,
'5p' + ind : 5 * C['F' + ind + 'Pp'] / 3 + C['F' + ind + 'Sp'],
'7p' + ind : -2 * C['F' + ind + 'Pp'] / 3 + C['F' + ind + 'T']
- C['F' + ind + 'T5'],
'9p' + ind : -C['F' + ind + 'Pp'] / 24,
}
return dic | def function[Fierz_to_Bern_lep, parameter[C, ddll]]:
constant[From semileptonic Fierz basis to Bern semileptonic basis for Class V.
C should be the corresponding leptonic Fierz basis and
`ddll` should be of the form 'sbl_enu_tau', 'dbl_munu_e' etc.]
variable[ind] assign[=] call[call[name[ddll].replace, parameter[constant[l_], constant[]]].replace, parameter[constant[nu_], constant[]]]
variable[dic] assign[=] dictionary[[<ast.BinOp object at 0x7da1b190d390>, <ast.BinOp object at 0x7da1b190d300>, <ast.BinOp object at 0x7da1b190d180>, <ast.BinOp object at 0x7da1b190d0f0>, <ast.BinOp object at 0x7da1b190d060>, <ast.BinOp object at 0x7da1b190cfd0>, <ast.BinOp object at 0x7da1b190cf40>, <ast.BinOp object at 0x7da1b190ceb0>, <ast.BinOp object at 0x7da1b190ce20>, <ast.BinOp object at 0x7da1b190cd90>], [<ast.BinOp object at 0x7da1b190cd00>, <ast.BinOp object at 0x7da1b190d450>, <ast.BinOp object at 0x7da1b190d630>, <ast.BinOp object at 0x7da1b1b68a60>, <ast.BinOp object at 0x7da1b19815a0>, <ast.BinOp object at 0x7da1b1909e10>, <ast.BinOp object at 0x7da1b190a200>, <ast.BinOp object at 0x7da1b190a470>, <ast.BinOp object at 0x7da1b190a800>, <ast.BinOp object at 0x7da1b1a9c610>]]
return[name[dic]] | keyword[def] identifier[Fierz_to_Bern_lep] ( identifier[C] , identifier[ddll] ):
literal[string]
identifier[ind] = identifier[ddll] . identifier[replace] ( literal[string] , literal[string] ). identifier[replace] ( literal[string] , literal[string] )
identifier[dic] ={
literal[string] + identifier[ind] : literal[int] * identifier[C] [ literal[string] + identifier[ind] + literal[string] ]/ literal[int] + identifier[C] [ literal[string] + identifier[ind] + literal[string] ],
literal[string] + identifier[ind] :- identifier[C] [ literal[string] + identifier[ind] + literal[string] ]/ literal[int] ,
literal[string] + identifier[ind] : identifier[C] [ literal[string] + identifier[ind] + literal[string] ]- literal[int] * identifier[C] [ literal[string] + identifier[ind] + literal[string] ]/ literal[int] ,
literal[string] + identifier[ind] : literal[int] * identifier[C] [ literal[string] + identifier[ind] + literal[string] ]/ literal[int] + identifier[C] [ literal[string] + identifier[ind] + literal[string] ]
+ identifier[C] [ literal[string] + identifier[ind] + literal[string] ],
literal[string] + identifier[ind] : identifier[C] [ literal[string] + identifier[ind] + literal[string] ]/ literal[int] ,
literal[string] + identifier[ind] : identifier[C] [ literal[string] + identifier[ind] + literal[string] ]- literal[int] * identifier[C] [ literal[string] + identifier[ind] + literal[string] ]/ literal[int] ,
literal[string] + identifier[ind] : identifier[C] [ literal[string] + identifier[ind] + literal[string] ]/ literal[int] ,
literal[string] + identifier[ind] : literal[int] * identifier[C] [ literal[string] + identifier[ind] + literal[string] ]/ literal[int] + identifier[C] [ literal[string] + identifier[ind] + literal[string] ],
literal[string] + identifier[ind] :- literal[int] * identifier[C] [ literal[string] + identifier[ind] + literal[string] ]/ literal[int] + identifier[C] [ literal[string] + identifier[ind] + literal[string] ]
- identifier[C] [ literal[string] + identifier[ind] + literal[string] ],
literal[string] + identifier[ind] :- identifier[C] [ literal[string] + identifier[ind] + literal[string] ]/ literal[int] ,
}
keyword[return] identifier[dic] | def Fierz_to_Bern_lep(C, ddll):
"""From semileptonic Fierz basis to Bern semileptonic basis for Class V.
C should be the corresponding leptonic Fierz basis and
`ddll` should be of the form 'sbl_enu_tau', 'dbl_munu_e' etc."""
ind = ddll.replace('l_', '').replace('nu_', '')
dic = {'1' + ind: 5 * C['F' + ind + '10'] / 3 + C['F' + ind + '9'], '3' + ind: -C['F' + ind + '10'] / 6, '5' + ind: C['F' + ind + 'S'] - 5 * C['F' + ind + 'P'] / 3, '7' + ind: 2 * C['F' + ind + 'P'] / 3 + C['F' + ind + 'T'] + C['F' + ind + 'T5'], '9' + ind: C['F' + ind + 'P'] / 24, '1p' + ind: C['F' + ind + '9p'] - 5 * C['F' + ind + '10p'] / 3, '3p' + ind: C['F' + ind + '10p'] / 6, '5p' + ind: 5 * C['F' + ind + 'Pp'] / 3 + C['F' + ind + 'Sp'], '7p' + ind: -2 * C['F' + ind + 'Pp'] / 3 + C['F' + ind + 'T'] - C['F' + ind + 'T5'], '9p' + ind: -C['F' + ind + 'Pp'] / 24}
return dic |
def unpack(endian, fmt, data):
"""Unpack a byte string to the given format. If the byte string
contains more bytes than required for the given format, the function
returns a tuple of values.
"""
if fmt == 's':
# read data as an array of chars
val = struct.unpack(''.join([endian, str(len(data)), 's']),
data)[0]
else:
# read a number of values
num = len(data) // struct.calcsize(fmt)
val = struct.unpack(''.join([endian, str(num), fmt]), data)
if len(val) == 1:
val = val[0]
return val | def function[unpack, parameter[endian, fmt, data]]:
constant[Unpack a byte string to the given format. If the byte string
contains more bytes than required for the given format, the function
returns a tuple of values.
]
if compare[name[fmt] equal[==] constant[s]] begin[:]
variable[val] assign[=] call[call[name[struct].unpack, parameter[call[constant[].join, parameter[list[[<ast.Name object at 0x7da1afe1ad70>, <ast.Call object at 0x7da1afe193f0>, <ast.Constant object at 0x7da1afe1b850>]]]], name[data]]]][constant[0]]
return[name[val]] | keyword[def] identifier[unpack] ( identifier[endian] , identifier[fmt] , identifier[data] ):
literal[string]
keyword[if] identifier[fmt] == literal[string] :
identifier[val] = identifier[struct] . identifier[unpack] ( literal[string] . identifier[join] ([ identifier[endian] , identifier[str] ( identifier[len] ( identifier[data] )), literal[string] ]),
identifier[data] )[ literal[int] ]
keyword[else] :
identifier[num] = identifier[len] ( identifier[data] )// identifier[struct] . identifier[calcsize] ( identifier[fmt] )
identifier[val] = identifier[struct] . identifier[unpack] ( literal[string] . identifier[join] ([ identifier[endian] , identifier[str] ( identifier[num] ), identifier[fmt] ]), identifier[data] )
keyword[if] identifier[len] ( identifier[val] )== literal[int] :
identifier[val] = identifier[val] [ literal[int] ]
keyword[return] identifier[val] | def unpack(endian, fmt, data):
"""Unpack a byte string to the given format. If the byte string
contains more bytes than required for the given format, the function
returns a tuple of values.
"""
if fmt == 's':
# read data as an array of chars
val = struct.unpack(''.join([endian, str(len(data)), 's']), data)[0] # depends on [control=['if'], data=[]]
else:
# read a number of values
num = len(data) // struct.calcsize(fmt)
val = struct.unpack(''.join([endian, str(num), fmt]), data)
if len(val) == 1:
val = val[0] # depends on [control=['if'], data=[]]
return val |
def heap_snapshot(self):
""" Return heap snapshot """
self.lock.acquire()
res = self.ext.mr_heap_snapshot(self.ctx)
self.lock.release()
python_value = res.contents.to_python()
self.free(res)
return python_value | def function[heap_snapshot, parameter[self]]:
constant[ Return heap snapshot ]
call[name[self].lock.acquire, parameter[]]
variable[res] assign[=] call[name[self].ext.mr_heap_snapshot, parameter[name[self].ctx]]
call[name[self].lock.release, parameter[]]
variable[python_value] assign[=] call[name[res].contents.to_python, parameter[]]
call[name[self].free, parameter[name[res]]]
return[name[python_value]] | keyword[def] identifier[heap_snapshot] ( identifier[self] ):
literal[string]
identifier[self] . identifier[lock] . identifier[acquire] ()
identifier[res] = identifier[self] . identifier[ext] . identifier[mr_heap_snapshot] ( identifier[self] . identifier[ctx] )
identifier[self] . identifier[lock] . identifier[release] ()
identifier[python_value] = identifier[res] . identifier[contents] . identifier[to_python] ()
identifier[self] . identifier[free] ( identifier[res] )
keyword[return] identifier[python_value] | def heap_snapshot(self):
""" Return heap snapshot """
self.lock.acquire()
res = self.ext.mr_heap_snapshot(self.ctx)
self.lock.release()
python_value = res.contents.to_python()
self.free(res)
return python_value |
def get_sub_commands(parser: argparse.ArgumentParser) -> List[str]:
"""Get a list of sub-commands for an ArgumentParser"""
sub_cmds = []
# Check if this is parser has sub-commands
if parser is not None and parser._subparsers is not None:
# Find the _SubParsersAction for the sub-commands of this parser
for action in parser._subparsers._actions:
if isinstance(action, argparse._SubParsersAction):
for sub_cmd, sub_cmd_parser in action.choices.items():
sub_cmds.append(sub_cmd)
# Look for nested sub-commands
for nested_sub_cmd in get_sub_commands(sub_cmd_parser):
sub_cmds.append('{} {}'.format(sub_cmd, nested_sub_cmd))
break
sub_cmds.sort()
return sub_cmds | def function[get_sub_commands, parameter[parser]]:
constant[Get a list of sub-commands for an ArgumentParser]
variable[sub_cmds] assign[=] list[[]]
if <ast.BoolOp object at 0x7da1b26ae200> begin[:]
for taget[name[action]] in starred[name[parser]._subparsers._actions] begin[:]
if call[name[isinstance], parameter[name[action], name[argparse]._SubParsersAction]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da20e957c70>, <ast.Name object at 0x7da20e9552a0>]]] in starred[call[name[action].choices.items, parameter[]]] begin[:]
call[name[sub_cmds].append, parameter[name[sub_cmd]]]
for taget[name[nested_sub_cmd]] in starred[call[name[get_sub_commands], parameter[name[sub_cmd_parser]]]] begin[:]
call[name[sub_cmds].append, parameter[call[constant[{} {}].format, parameter[name[sub_cmd], name[nested_sub_cmd]]]]]
break
call[name[sub_cmds].sort, parameter[]]
return[name[sub_cmds]] | keyword[def] identifier[get_sub_commands] ( identifier[parser] : identifier[argparse] . identifier[ArgumentParser] )-> identifier[List] [ identifier[str] ]:
literal[string]
identifier[sub_cmds] =[]
keyword[if] identifier[parser] keyword[is] keyword[not] keyword[None] keyword[and] identifier[parser] . identifier[_subparsers] keyword[is] keyword[not] keyword[None] :
keyword[for] identifier[action] keyword[in] identifier[parser] . identifier[_subparsers] . identifier[_actions] :
keyword[if] identifier[isinstance] ( identifier[action] , identifier[argparse] . identifier[_SubParsersAction] ):
keyword[for] identifier[sub_cmd] , identifier[sub_cmd_parser] keyword[in] identifier[action] . identifier[choices] . identifier[items] ():
identifier[sub_cmds] . identifier[append] ( identifier[sub_cmd] )
keyword[for] identifier[nested_sub_cmd] keyword[in] identifier[get_sub_commands] ( identifier[sub_cmd_parser] ):
identifier[sub_cmds] . identifier[append] ( literal[string] . identifier[format] ( identifier[sub_cmd] , identifier[nested_sub_cmd] ))
keyword[break]
identifier[sub_cmds] . identifier[sort] ()
keyword[return] identifier[sub_cmds] | def get_sub_commands(parser: argparse.ArgumentParser) -> List[str]:
"""Get a list of sub-commands for an ArgumentParser"""
sub_cmds = []
# Check if this is parser has sub-commands
if parser is not None and parser._subparsers is not None:
# Find the _SubParsersAction for the sub-commands of this parser
for action in parser._subparsers._actions:
if isinstance(action, argparse._SubParsersAction):
for (sub_cmd, sub_cmd_parser) in action.choices.items():
sub_cmds.append(sub_cmd)
# Look for nested sub-commands
for nested_sub_cmd in get_sub_commands(sub_cmd_parser):
sub_cmds.append('{} {}'.format(sub_cmd, nested_sub_cmd)) # depends on [control=['for'], data=['nested_sub_cmd']] # depends on [control=['for'], data=[]]
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['action']] # depends on [control=['if'], data=[]]
sub_cmds.sort()
return sub_cmds |
def get_backend_path(service):
"""Return the dotted path of the matching backend."""
for backend in _get_backends():
try:
if backend.service_allowed(service):
return "%s.%s" % (backend.__class__.__module__, backend.__class__.__name__)
except AttributeError:
raise NotImplementedError("%s.%s.service_allowed() not implemented" % (
backend.__class__.__module__, backend.__class__.__name__)
)
return None | def function[get_backend_path, parameter[service]]:
constant[Return the dotted path of the matching backend.]
for taget[name[backend]] in starred[call[name[_get_backends], parameter[]]] begin[:]
<ast.Try object at 0x7da1b12b9d80>
return[constant[None]] | keyword[def] identifier[get_backend_path] ( identifier[service] ):
literal[string]
keyword[for] identifier[backend] keyword[in] identifier[_get_backends] ():
keyword[try] :
keyword[if] identifier[backend] . identifier[service_allowed] ( identifier[service] ):
keyword[return] literal[string] %( identifier[backend] . identifier[__class__] . identifier[__module__] , identifier[backend] . identifier[__class__] . identifier[__name__] )
keyword[except] identifier[AttributeError] :
keyword[raise] identifier[NotImplementedError] ( literal[string] %(
identifier[backend] . identifier[__class__] . identifier[__module__] , identifier[backend] . identifier[__class__] . identifier[__name__] )
)
keyword[return] keyword[None] | def get_backend_path(service):
"""Return the dotted path of the matching backend."""
for backend in _get_backends():
try:
if backend.service_allowed(service):
return '%s.%s' % (backend.__class__.__module__, backend.__class__.__name__) # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except AttributeError:
raise NotImplementedError('%s.%s.service_allowed() not implemented' % (backend.__class__.__module__, backend.__class__.__name__)) # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['backend']]
return None |
def _identify_heterogeneity_blocks_shared(in_file, segment_fn, params, work_dir, somatic_info):
"""Identify heterogeneity blocks corresponding to segmentation from CNV input file.
"""
out_file = os.path.join(work_dir, "%s-hetblocks.bed" % utils.splitext_plus(os.path.basename(in_file))[0])
if not utils.file_uptodate(out_file, in_file):
with file_transaction(somatic_info.tumor_data, out_file) as tx_out_file:
with open(tx_out_file, "w") as out_handle:
for chrom, freqs, coords in _freqs_by_chromosome(in_file, params, somatic_info):
for start, end in segment_fn(chrom, freqs, coords):
out_handle.write("%s\t%s\t%s\n" % (chrom, start, end))
return out_file | def function[_identify_heterogeneity_blocks_shared, parameter[in_file, segment_fn, params, work_dir, somatic_info]]:
constant[Identify heterogeneity blocks corresponding to segmentation from CNV input file.
]
variable[out_file] assign[=] call[name[os].path.join, parameter[name[work_dir], binary_operation[constant[%s-hetblocks.bed] <ast.Mod object at 0x7da2590d6920> call[call[name[utils].splitext_plus, parameter[call[name[os].path.basename, parameter[name[in_file]]]]]][constant[0]]]]]
if <ast.UnaryOp object at 0x7da1b18bebf0> begin[:]
with call[name[file_transaction], parameter[name[somatic_info].tumor_data, name[out_file]]] begin[:]
with call[name[open], parameter[name[tx_out_file], constant[w]]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da1b18bdd50>, <ast.Name object at 0x7da1b18be800>, <ast.Name object at 0x7da1b18bd7e0>]]] in starred[call[name[_freqs_by_chromosome], parameter[name[in_file], name[params], name[somatic_info]]]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da1b1986a70>, <ast.Name object at 0x7da1b1985f30>]]] in starred[call[name[segment_fn], parameter[name[chrom], name[freqs], name[coords]]]] begin[:]
call[name[out_handle].write, parameter[binary_operation[constant[%s %s %s
] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b19872b0>, <ast.Name object at 0x7da1b1984370>, <ast.Name object at 0x7da1b19870d0>]]]]]
return[name[out_file]] | keyword[def] identifier[_identify_heterogeneity_blocks_shared] ( identifier[in_file] , identifier[segment_fn] , identifier[params] , identifier[work_dir] , identifier[somatic_info] ):
literal[string]
identifier[out_file] = identifier[os] . identifier[path] . identifier[join] ( identifier[work_dir] , literal[string] % identifier[utils] . identifier[splitext_plus] ( identifier[os] . identifier[path] . identifier[basename] ( identifier[in_file] ))[ literal[int] ])
keyword[if] keyword[not] identifier[utils] . identifier[file_uptodate] ( identifier[out_file] , identifier[in_file] ):
keyword[with] identifier[file_transaction] ( identifier[somatic_info] . identifier[tumor_data] , identifier[out_file] ) keyword[as] identifier[tx_out_file] :
keyword[with] identifier[open] ( identifier[tx_out_file] , literal[string] ) keyword[as] identifier[out_handle] :
keyword[for] identifier[chrom] , identifier[freqs] , identifier[coords] keyword[in] identifier[_freqs_by_chromosome] ( identifier[in_file] , identifier[params] , identifier[somatic_info] ):
keyword[for] identifier[start] , identifier[end] keyword[in] identifier[segment_fn] ( identifier[chrom] , identifier[freqs] , identifier[coords] ):
identifier[out_handle] . identifier[write] ( literal[string] %( identifier[chrom] , identifier[start] , identifier[end] ))
keyword[return] identifier[out_file] | def _identify_heterogeneity_blocks_shared(in_file, segment_fn, params, work_dir, somatic_info):
"""Identify heterogeneity blocks corresponding to segmentation from CNV input file.
"""
out_file = os.path.join(work_dir, '%s-hetblocks.bed' % utils.splitext_plus(os.path.basename(in_file))[0])
if not utils.file_uptodate(out_file, in_file):
with file_transaction(somatic_info.tumor_data, out_file) as tx_out_file:
with open(tx_out_file, 'w') as out_handle:
for (chrom, freqs, coords) in _freqs_by_chromosome(in_file, params, somatic_info):
for (start, end) in segment_fn(chrom, freqs, coords):
out_handle.write('%s\t%s\t%s\n' % (chrom, start, end)) # depends on [control=['for'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['with'], data=['out_handle']] # depends on [control=['with'], data=['tx_out_file']] # depends on [control=['if'], data=[]]
return out_file |
def _process_methods(self, req, resp, resource):
"""Adds the Access-Control-Allow-Methods header to the response,
using the cors settings to determine which methods are allowed.
"""
requested_method = self._get_requested_method(req)
if not requested_method:
return False
if self._cors_config['allow_all_methods']:
allowed_methods = self._get_resource_methods(resource)
self._set_allowed_methods(resp, allowed_methods)
if requested_method in allowed_methods:
return True
elif requested_method in self._cors_config['allow_methods_list']:
resource_methods = self._get_resource_methods(resource)
# Only list methods as allowed if they exist
# on the resource AND are in the allowed_methods_list
allowed_methods = [
method for method in resource_methods
if method in self._cors_config['allow_methods_list']
]
self._set_allowed_methods(resp, allowed_methods)
if requested_method in allowed_methods:
return True
return False | def function[_process_methods, parameter[self, req, resp, resource]]:
constant[Adds the Access-Control-Allow-Methods header to the response,
using the cors settings to determine which methods are allowed.
]
variable[requested_method] assign[=] call[name[self]._get_requested_method, parameter[name[req]]]
if <ast.UnaryOp object at 0x7da18eb551e0> begin[:]
return[constant[False]]
if call[name[self]._cors_config][constant[allow_all_methods]] begin[:]
variable[allowed_methods] assign[=] call[name[self]._get_resource_methods, parameter[name[resource]]]
call[name[self]._set_allowed_methods, parameter[name[resp], name[allowed_methods]]]
if compare[name[requested_method] in name[allowed_methods]] begin[:]
return[constant[True]]
return[constant[False]] | keyword[def] identifier[_process_methods] ( identifier[self] , identifier[req] , identifier[resp] , identifier[resource] ):
literal[string]
identifier[requested_method] = identifier[self] . identifier[_get_requested_method] ( identifier[req] )
keyword[if] keyword[not] identifier[requested_method] :
keyword[return] keyword[False]
keyword[if] identifier[self] . identifier[_cors_config] [ literal[string] ]:
identifier[allowed_methods] = identifier[self] . identifier[_get_resource_methods] ( identifier[resource] )
identifier[self] . identifier[_set_allowed_methods] ( identifier[resp] , identifier[allowed_methods] )
keyword[if] identifier[requested_method] keyword[in] identifier[allowed_methods] :
keyword[return] keyword[True]
keyword[elif] identifier[requested_method] keyword[in] identifier[self] . identifier[_cors_config] [ literal[string] ]:
identifier[resource_methods] = identifier[self] . identifier[_get_resource_methods] ( identifier[resource] )
identifier[allowed_methods] =[
identifier[method] keyword[for] identifier[method] keyword[in] identifier[resource_methods]
keyword[if] identifier[method] keyword[in] identifier[self] . identifier[_cors_config] [ literal[string] ]
]
identifier[self] . identifier[_set_allowed_methods] ( identifier[resp] , identifier[allowed_methods] )
keyword[if] identifier[requested_method] keyword[in] identifier[allowed_methods] :
keyword[return] keyword[True]
keyword[return] keyword[False] | def _process_methods(self, req, resp, resource):
"""Adds the Access-Control-Allow-Methods header to the response,
using the cors settings to determine which methods are allowed.
"""
requested_method = self._get_requested_method(req)
if not requested_method:
return False # depends on [control=['if'], data=[]]
if self._cors_config['allow_all_methods']:
allowed_methods = self._get_resource_methods(resource)
self._set_allowed_methods(resp, allowed_methods)
if requested_method in allowed_methods:
return True # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif requested_method in self._cors_config['allow_methods_list']:
resource_methods = self._get_resource_methods(resource)
# Only list methods as allowed if they exist
# on the resource AND are in the allowed_methods_list
allowed_methods = [method for method in resource_methods if method in self._cors_config['allow_methods_list']]
self._set_allowed_methods(resp, allowed_methods)
if requested_method in allowed_methods:
return True # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['requested_method']]
return False |
def popup(self, title, callfn, initialdir=None, filename=None):
"""Let user select and load file(s). This allows wildcards and
extensions, like in FBrowser.
Parameters
----------
title : str
Title for the file dialog.
callfn : func
Function used to open the file(s).
initialdir : str or `None`
Directory for file dialog.
filename : str
Filter for file dialog.
"""
self.cb = callfn
filenames = QtGui.QFileDialog.getOpenFileNames(
self.parent, title, initialdir, filename)
# Special handling for PyQt5, see
# https://www.reddit.com/r/learnpython/comments/2xhagb/pyqt5_trouble_with_openinggetting_the_name_of_the/
if ginga.toolkit.get_toolkit() == 'qt5':
filenames = filenames[0]
all_paths = []
for filename in filenames:
# Special handling for wildcard or extension.
# This is similar to open_files() in FBrowser plugin.
if '*' in filename or '[' in filename:
info = iohelper.get_fileinfo(filename)
ext = iohelper.get_hdu_suffix(info.numhdu)
files = glob.glob(info.filepath) # Expand wildcard
paths = ['{0}{1}'.format(f, ext) for f in files]
if self.all_at_once:
all_paths.extend(paths)
else:
for path in paths:
self.cb(path)
else:
# Normal load
if self.all_at_once:
all_paths.append(filename)
else:
self.cb(filename)
if self.all_at_once and len(all_paths) > 0:
self.cb(all_paths) | def function[popup, parameter[self, title, callfn, initialdir, filename]]:
constant[Let user select and load file(s). This allows wildcards and
extensions, like in FBrowser.
Parameters
----------
title : str
Title for the file dialog.
callfn : func
Function used to open the file(s).
initialdir : str or `None`
Directory for file dialog.
filename : str
Filter for file dialog.
]
name[self].cb assign[=] name[callfn]
variable[filenames] assign[=] call[name[QtGui].QFileDialog.getOpenFileNames, parameter[name[self].parent, name[title], name[initialdir], name[filename]]]
if compare[call[name[ginga].toolkit.get_toolkit, parameter[]] equal[==] constant[qt5]] begin[:]
variable[filenames] assign[=] call[name[filenames]][constant[0]]
variable[all_paths] assign[=] list[[]]
for taget[name[filename]] in starred[name[filenames]] begin[:]
if <ast.BoolOp object at 0x7da20e9545e0> begin[:]
variable[info] assign[=] call[name[iohelper].get_fileinfo, parameter[name[filename]]]
variable[ext] assign[=] call[name[iohelper].get_hdu_suffix, parameter[name[info].numhdu]]
variable[files] assign[=] call[name[glob].glob, parameter[name[info].filepath]]
variable[paths] assign[=] <ast.ListComp object at 0x7da20e956200>
if name[self].all_at_once begin[:]
call[name[all_paths].extend, parameter[name[paths]]]
if <ast.BoolOp object at 0x7da20e954c70> begin[:]
call[name[self].cb, parameter[name[all_paths]]] | keyword[def] identifier[popup] ( identifier[self] , identifier[title] , identifier[callfn] , identifier[initialdir] = keyword[None] , identifier[filename] = keyword[None] ):
literal[string]
identifier[self] . identifier[cb] = identifier[callfn]
identifier[filenames] = identifier[QtGui] . identifier[QFileDialog] . identifier[getOpenFileNames] (
identifier[self] . identifier[parent] , identifier[title] , identifier[initialdir] , identifier[filename] )
keyword[if] identifier[ginga] . identifier[toolkit] . identifier[get_toolkit] ()== literal[string] :
identifier[filenames] = identifier[filenames] [ literal[int] ]
identifier[all_paths] =[]
keyword[for] identifier[filename] keyword[in] identifier[filenames] :
keyword[if] literal[string] keyword[in] identifier[filename] keyword[or] literal[string] keyword[in] identifier[filename] :
identifier[info] = identifier[iohelper] . identifier[get_fileinfo] ( identifier[filename] )
identifier[ext] = identifier[iohelper] . identifier[get_hdu_suffix] ( identifier[info] . identifier[numhdu] )
identifier[files] = identifier[glob] . identifier[glob] ( identifier[info] . identifier[filepath] )
identifier[paths] =[ literal[string] . identifier[format] ( identifier[f] , identifier[ext] ) keyword[for] identifier[f] keyword[in] identifier[files] ]
keyword[if] identifier[self] . identifier[all_at_once] :
identifier[all_paths] . identifier[extend] ( identifier[paths] )
keyword[else] :
keyword[for] identifier[path] keyword[in] identifier[paths] :
identifier[self] . identifier[cb] ( identifier[path] )
keyword[else] :
keyword[if] identifier[self] . identifier[all_at_once] :
identifier[all_paths] . identifier[append] ( identifier[filename] )
keyword[else] :
identifier[self] . identifier[cb] ( identifier[filename] )
keyword[if] identifier[self] . identifier[all_at_once] keyword[and] identifier[len] ( identifier[all_paths] )> literal[int] :
identifier[self] . identifier[cb] ( identifier[all_paths] ) | def popup(self, title, callfn, initialdir=None, filename=None):
"""Let user select and load file(s). This allows wildcards and
extensions, like in FBrowser.
Parameters
----------
title : str
Title for the file dialog.
callfn : func
Function used to open the file(s).
initialdir : str or `None`
Directory for file dialog.
filename : str
Filter for file dialog.
"""
self.cb = callfn
filenames = QtGui.QFileDialog.getOpenFileNames(self.parent, title, initialdir, filename)
# Special handling for PyQt5, see
# https://www.reddit.com/r/learnpython/comments/2xhagb/pyqt5_trouble_with_openinggetting_the_name_of_the/
if ginga.toolkit.get_toolkit() == 'qt5':
filenames = filenames[0] # depends on [control=['if'], data=[]]
all_paths = []
for filename in filenames:
# Special handling for wildcard or extension.
# This is similar to open_files() in FBrowser plugin.
if '*' in filename or '[' in filename:
info = iohelper.get_fileinfo(filename)
ext = iohelper.get_hdu_suffix(info.numhdu)
files = glob.glob(info.filepath) # Expand wildcard
paths = ['{0}{1}'.format(f, ext) for f in files]
if self.all_at_once:
all_paths.extend(paths) # depends on [control=['if'], data=[]]
else:
for path in paths:
self.cb(path) # depends on [control=['for'], data=['path']] # depends on [control=['if'], data=[]]
# Normal load
elif self.all_at_once:
all_paths.append(filename) # depends on [control=['if'], data=[]]
else:
self.cb(filename) # depends on [control=['for'], data=['filename']]
if self.all_at_once and len(all_paths) > 0:
self.cb(all_paths) # depends on [control=['if'], data=[]] |
def _pixelsize(self, p):
"""Calculate line width necessary to cover at least one pixel on all axes."""
xpixelsize = 1./float(p.xdensity)
ypixelsize = 1./float(p.ydensity)
return max([xpixelsize,ypixelsize]) | def function[_pixelsize, parameter[self, p]]:
constant[Calculate line width necessary to cover at least one pixel on all axes.]
variable[xpixelsize] assign[=] binary_operation[constant[1.0] / call[name[float], parameter[name[p].xdensity]]]
variable[ypixelsize] assign[=] binary_operation[constant[1.0] / call[name[float], parameter[name[p].ydensity]]]
return[call[name[max], parameter[list[[<ast.Name object at 0x7da20c6a91e0>, <ast.Name object at 0x7da20c6a8610>]]]]] | keyword[def] identifier[_pixelsize] ( identifier[self] , identifier[p] ):
literal[string]
identifier[xpixelsize] = literal[int] / identifier[float] ( identifier[p] . identifier[xdensity] )
identifier[ypixelsize] = literal[int] / identifier[float] ( identifier[p] . identifier[ydensity] )
keyword[return] identifier[max] ([ identifier[xpixelsize] , identifier[ypixelsize] ]) | def _pixelsize(self, p):
"""Calculate line width necessary to cover at least one pixel on all axes."""
xpixelsize = 1.0 / float(p.xdensity)
ypixelsize = 1.0 / float(p.ydensity)
return max([xpixelsize, ypixelsize]) |
def handle_get_txn_req(self, request: Request, frm: str):
"""
Handle GET_TXN request
"""
ledger_id = request.operation.get(f.LEDGER_ID.nm, DOMAIN_LEDGER_ID)
if ledger_id not in self.ledger_to_req_handler:
self.send_nack_to_client((request.identifier, request.reqId),
'Invalid ledger id {}'.format(ledger_id),
frm)
return
seq_no = request.operation.get(DATA)
self.send_ack_to_client((request.identifier, request.reqId), frm)
ledger = self.getLedger(ledger_id)
try:
txn = self.getReplyFromLedger(ledger, seq_no)
except KeyError:
txn = None
if txn is None:
logger.debug(
"{} can not handle GET_TXN request: ledger doesn't "
"have txn with seqNo={}".format(self, str(seq_no)))
result = {
f.IDENTIFIER.nm: request.identifier,
f.REQ_ID.nm: request.reqId,
TXN_TYPE: request.operation[TXN_TYPE],
DATA: None
}
if txn:
result[DATA] = txn.result
result[f.SEQ_NO.nm] = get_seq_no(txn.result)
self.transmitToClient(Reply(result), frm) | def function[handle_get_txn_req, parameter[self, request, frm]]:
constant[
Handle GET_TXN request
]
variable[ledger_id] assign[=] call[name[request].operation.get, parameter[name[f].LEDGER_ID.nm, name[DOMAIN_LEDGER_ID]]]
if compare[name[ledger_id] <ast.NotIn object at 0x7da2590d7190> name[self].ledger_to_req_handler] begin[:]
call[name[self].send_nack_to_client, parameter[tuple[[<ast.Attribute object at 0x7da1b16c1b70>, <ast.Attribute object at 0x7da1b16c3550>]], call[constant[Invalid ledger id {}].format, parameter[name[ledger_id]]], name[frm]]]
return[None]
variable[seq_no] assign[=] call[name[request].operation.get, parameter[name[DATA]]]
call[name[self].send_ack_to_client, parameter[tuple[[<ast.Attribute object at 0x7da1b16c0b20>, <ast.Attribute object at 0x7da1b16c3b50>]], name[frm]]]
variable[ledger] assign[=] call[name[self].getLedger, parameter[name[ledger_id]]]
<ast.Try object at 0x7da1b16c14b0>
if compare[name[txn] is constant[None]] begin[:]
call[name[logger].debug, parameter[call[constant[{} can not handle GET_TXN request: ledger doesn't have txn with seqNo={}].format, parameter[name[self], call[name[str], parameter[name[seq_no]]]]]]]
variable[result] assign[=] dictionary[[<ast.Attribute object at 0x7da1b16c0a00>, <ast.Attribute object at 0x7da1b16c1f30>, <ast.Name object at 0x7da1b16c0c10>, <ast.Name object at 0x7da1b16c3d30>], [<ast.Attribute object at 0x7da1b16c3f70>, <ast.Attribute object at 0x7da1b16c23b0>, <ast.Subscript object at 0x7da1b16c1f90>, <ast.Constant object at 0x7da1b16c3eb0>]]
if name[txn] begin[:]
call[name[result]][name[DATA]] assign[=] name[txn].result
call[name[result]][name[f].SEQ_NO.nm] assign[=] call[name[get_seq_no], parameter[name[txn].result]]
call[name[self].transmitToClient, parameter[call[name[Reply], parameter[name[result]]], name[frm]]] | keyword[def] identifier[handle_get_txn_req] ( identifier[self] , identifier[request] : identifier[Request] , identifier[frm] : identifier[str] ):
literal[string]
identifier[ledger_id] = identifier[request] . identifier[operation] . identifier[get] ( identifier[f] . identifier[LEDGER_ID] . identifier[nm] , identifier[DOMAIN_LEDGER_ID] )
keyword[if] identifier[ledger_id] keyword[not] keyword[in] identifier[self] . identifier[ledger_to_req_handler] :
identifier[self] . identifier[send_nack_to_client] (( identifier[request] . identifier[identifier] , identifier[request] . identifier[reqId] ),
literal[string] . identifier[format] ( identifier[ledger_id] ),
identifier[frm] )
keyword[return]
identifier[seq_no] = identifier[request] . identifier[operation] . identifier[get] ( identifier[DATA] )
identifier[self] . identifier[send_ack_to_client] (( identifier[request] . identifier[identifier] , identifier[request] . identifier[reqId] ), identifier[frm] )
identifier[ledger] = identifier[self] . identifier[getLedger] ( identifier[ledger_id] )
keyword[try] :
identifier[txn] = identifier[self] . identifier[getReplyFromLedger] ( identifier[ledger] , identifier[seq_no] )
keyword[except] identifier[KeyError] :
identifier[txn] = keyword[None]
keyword[if] identifier[txn] keyword[is] keyword[None] :
identifier[logger] . identifier[debug] (
literal[string]
literal[string] . identifier[format] ( identifier[self] , identifier[str] ( identifier[seq_no] )))
identifier[result] ={
identifier[f] . identifier[IDENTIFIER] . identifier[nm] : identifier[request] . identifier[identifier] ,
identifier[f] . identifier[REQ_ID] . identifier[nm] : identifier[request] . identifier[reqId] ,
identifier[TXN_TYPE] : identifier[request] . identifier[operation] [ identifier[TXN_TYPE] ],
identifier[DATA] : keyword[None]
}
keyword[if] identifier[txn] :
identifier[result] [ identifier[DATA] ]= identifier[txn] . identifier[result]
identifier[result] [ identifier[f] . identifier[SEQ_NO] . identifier[nm] ]= identifier[get_seq_no] ( identifier[txn] . identifier[result] )
identifier[self] . identifier[transmitToClient] ( identifier[Reply] ( identifier[result] ), identifier[frm] ) | def handle_get_txn_req(self, request: Request, frm: str):
"""
Handle GET_TXN request
"""
ledger_id = request.operation.get(f.LEDGER_ID.nm, DOMAIN_LEDGER_ID)
if ledger_id not in self.ledger_to_req_handler:
self.send_nack_to_client((request.identifier, request.reqId), 'Invalid ledger id {}'.format(ledger_id), frm)
return # depends on [control=['if'], data=['ledger_id']]
seq_no = request.operation.get(DATA)
self.send_ack_to_client((request.identifier, request.reqId), frm)
ledger = self.getLedger(ledger_id)
try:
txn = self.getReplyFromLedger(ledger, seq_no) # depends on [control=['try'], data=[]]
except KeyError:
txn = None # depends on [control=['except'], data=[]]
if txn is None:
logger.debug("{} can not handle GET_TXN request: ledger doesn't have txn with seqNo={}".format(self, str(seq_no))) # depends on [control=['if'], data=[]]
result = {f.IDENTIFIER.nm: request.identifier, f.REQ_ID.nm: request.reqId, TXN_TYPE: request.operation[TXN_TYPE], DATA: None}
if txn:
result[DATA] = txn.result
result[f.SEQ_NO.nm] = get_seq_no(txn.result) # depends on [control=['if'], data=[]]
self.transmitToClient(Reply(result), frm) |
def print_file_results(file_result):
"""Print the results of validating a file.
Args:
file_result: A FileValidationResults instance.
"""
print_results_header(file_result.filepath, file_result.is_valid)
for object_result in file_result.object_results:
if object_result.warnings:
print_warning_results(object_result, 1)
if object_result.errors:
print_schema_results(object_result, 1)
if file_result.fatal:
print_fatal_results(file_result.fatal, 1) | def function[print_file_results, parameter[file_result]]:
constant[Print the results of validating a file.
Args:
file_result: A FileValidationResults instance.
]
call[name[print_results_header], parameter[name[file_result].filepath, name[file_result].is_valid]]
for taget[name[object_result]] in starred[name[file_result].object_results] begin[:]
if name[object_result].warnings begin[:]
call[name[print_warning_results], parameter[name[object_result], constant[1]]]
if name[object_result].errors begin[:]
call[name[print_schema_results], parameter[name[object_result], constant[1]]]
if name[file_result].fatal begin[:]
call[name[print_fatal_results], parameter[name[file_result].fatal, constant[1]]] | keyword[def] identifier[print_file_results] ( identifier[file_result] ):
literal[string]
identifier[print_results_header] ( identifier[file_result] . identifier[filepath] , identifier[file_result] . identifier[is_valid] )
keyword[for] identifier[object_result] keyword[in] identifier[file_result] . identifier[object_results] :
keyword[if] identifier[object_result] . identifier[warnings] :
identifier[print_warning_results] ( identifier[object_result] , literal[int] )
keyword[if] identifier[object_result] . identifier[errors] :
identifier[print_schema_results] ( identifier[object_result] , literal[int] )
keyword[if] identifier[file_result] . identifier[fatal] :
identifier[print_fatal_results] ( identifier[file_result] . identifier[fatal] , literal[int] ) | def print_file_results(file_result):
"""Print the results of validating a file.
Args:
file_result: A FileValidationResults instance.
"""
print_results_header(file_result.filepath, file_result.is_valid)
for object_result in file_result.object_results:
if object_result.warnings:
print_warning_results(object_result, 1) # depends on [control=['if'], data=[]]
if object_result.errors:
print_schema_results(object_result, 1) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['object_result']]
if file_result.fatal:
print_fatal_results(file_result.fatal, 1) # depends on [control=['if'], data=[]] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.