code stringlengths 75 104k | code_sememe stringlengths 47 309k | token_type stringlengths 215 214k | code_dependency stringlengths 75 155k |
|---|---|---|---|
def _create_extn_pattern(single_extn_symbols):
"""Helper initialiser method to create the regular-expression pattern to
match extensions, allowing the one-char extension symbols provided by
single_extn_symbols."""
# There are three regular expressions here. The first covers RFC 3966
# format, where the extension is added using ";ext=". The second more
# generic one starts with optional white space and ends with an optional
# full stop (.), followed by zero or more spaces/tabs/commas and then the
# numbers themselves. The other one covers the special case of American
# numbers where the extension is written with a hash at the end, such as
# "- 503#". Note that the only capturing groups should be around the
# digits that you want to capture as part of the extension, or else
# parsing will fail! Canonical-equivalence doesn't seem to be an option
# with Android java, so we allow two options for representing the accented
# o - the character itself, and one in the unicode decomposed form with
# the combining acute accent.
return (_RFC3966_EXTN_PREFIX + _CAPTURING_EXTN_DIGITS + u("|") +
u("[ \u00A0\\t,]*(?:e?xt(?:ensi(?:o\u0301?|\u00F3))?n?|") +
u("\uFF45?\uFF58\uFF54\uFF4E?|") +
u("\u0434\u043e\u0431|") + u("[") + single_extn_symbols + u("]|int|anexo|\uFF49\uFF4E\uFF54)") +
u("[:\\.\uFF0E]?[ \u00A0\\t,-]*") + _CAPTURING_EXTN_DIGITS + u("#?|") +
u("[- ]+(") + _DIGITS + u("{1,5})#")) | def function[_create_extn_pattern, parameter[single_extn_symbols]]:
constant[Helper initialiser method to create the regular-expression pattern to
match extensions, allowing the one-char extension symbols provided by
single_extn_symbols.]
return[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[name[_RFC3966_EXTN_PREFIX] + name[_CAPTURING_EXTN_DIGITS]] + call[name[u], parameter[constant[|]]]] + call[name[u], parameter[constant[[ \t,]*(?:e?xt(?:ensi(?:ó?|ó))?n?|]]]] + call[name[u], parameter[constant[e?xtn?|]]]] + call[name[u], parameter[constant[доб|]]]] + call[name[u], parameter[constant[[]]]] + name[single_extn_symbols]] + call[name[u], parameter[constant[]|int|anexo|int)]]]] + call[name[u], parameter[constant[[:\..]?[ \t,-]*]]]] + name[_CAPTURING_EXTN_DIGITS]] + call[name[u], parameter[constant[#?|]]]] + call[name[u], parameter[constant[[- ]+(]]]] + name[_DIGITS]] + call[name[u], parameter[constant[{1,5})#]]]]] | keyword[def] identifier[_create_extn_pattern] ( identifier[single_extn_symbols] ):
literal[string]
keyword[return] ( identifier[_RFC3966_EXTN_PREFIX] + identifier[_CAPTURING_EXTN_DIGITS] + identifier[u] ( literal[string] )+
identifier[u] ( literal[string] )+
identifier[u] ( literal[string] )+
identifier[u] ( literal[string] )+ identifier[u] ( literal[string] )+ identifier[single_extn_symbols] + identifier[u] ( literal[string] )+
identifier[u] ( literal[string] )+ identifier[_CAPTURING_EXTN_DIGITS] + identifier[u] ( literal[string] )+
identifier[u] ( literal[string] )+ identifier[_DIGITS] + identifier[u] ( literal[string] )) | def _create_extn_pattern(single_extn_symbols):
"""Helper initialiser method to create the regular-expression pattern to
match extensions, allowing the one-char extension symbols provided by
single_extn_symbols."""
# There are three regular expressions here. The first covers RFC 3966
# format, where the extension is added using ";ext=". The second more
# generic one starts with optional white space and ends with an optional
# full stop (.), followed by zero or more spaces/tabs/commas and then the
# numbers themselves. The other one covers the special case of American
# numbers where the extension is written with a hash at the end, such as
# "- 503#". Note that the only capturing groups should be around the
# digits that you want to capture as part of the extension, or else
# parsing will fail! Canonical-equivalence doesn't seem to be an option
# with Android java, so we allow two options for representing the accented
# o - the character itself, and one in the unicode decomposed form with
# the combining acute accent.
return _RFC3966_EXTN_PREFIX + _CAPTURING_EXTN_DIGITS + u('|') + u('[ \xa0\\t,]*(?:e?xt(?:ensi(?:ó?|ó))?n?|') + u('e?xtn?|') + u('доб|') + u('[') + single_extn_symbols + u(']|int|anexo|int)') + u('[:\\..]?[ \xa0\\t,-]*') + _CAPTURING_EXTN_DIGITS + u('#?|') + u('[- ]+(') + _DIGITS + u('{1,5})#') |
def zero_state(self, batch_size, dtype=LayersConfig.tf_dtype):
"""Return zero-filled state tensor(s).
Args:
batch_size: int, float, or unit Tensor representing the batch size.
Returns:
tensor of shape '[batch_size x shape[0] x shape[1] x num_features]
filled with zeros
"""
shape = self.shape
num_features = self.num_features
# TODO : TypeError: 'NoneType' object is not subscriptable
zeros = tf.zeros([batch_size, shape[0], shape[1], num_features * 2], dtype=dtype)
return zeros | def function[zero_state, parameter[self, batch_size, dtype]]:
constant[Return zero-filled state tensor(s).
Args:
batch_size: int, float, or unit Tensor representing the batch size.
Returns:
tensor of shape '[batch_size x shape[0] x shape[1] x num_features]
filled with zeros
]
variable[shape] assign[=] name[self].shape
variable[num_features] assign[=] name[self].num_features
variable[zeros] assign[=] call[name[tf].zeros, parameter[list[[<ast.Name object at 0x7da2044c07c0>, <ast.Subscript object at 0x7da2044c0340>, <ast.Subscript object at 0x7da2044c2f20>, <ast.BinOp object at 0x7da2044c1c60>]]]]
return[name[zeros]] | keyword[def] identifier[zero_state] ( identifier[self] , identifier[batch_size] , identifier[dtype] = identifier[LayersConfig] . identifier[tf_dtype] ):
literal[string]
identifier[shape] = identifier[self] . identifier[shape]
identifier[num_features] = identifier[self] . identifier[num_features]
identifier[zeros] = identifier[tf] . identifier[zeros] ([ identifier[batch_size] , identifier[shape] [ literal[int] ], identifier[shape] [ literal[int] ], identifier[num_features] * literal[int] ], identifier[dtype] = identifier[dtype] )
keyword[return] identifier[zeros] | def zero_state(self, batch_size, dtype=LayersConfig.tf_dtype):
"""Return zero-filled state tensor(s).
Args:
batch_size: int, float, or unit Tensor representing the batch size.
Returns:
tensor of shape '[batch_size x shape[0] x shape[1] x num_features]
filled with zeros
"""
shape = self.shape
num_features = self.num_features
# TODO : TypeError: 'NoneType' object is not subscriptable
zeros = tf.zeros([batch_size, shape[0], shape[1], num_features * 2], dtype=dtype)
return zeros |
def run_noCall_hetero_snps(in_prefix, in_type, out_prefix, base_dir, options):
"""Runs step 3 (clean no call and hetero).
:param in_prefix: the prefix of the input files.
:param in_type: the type of the input files.
:param out_prefix: the output prefix.
:param base_dir: the output directory.
:param options: the options needed.
:type in_prefix: str
:type in_type: str
:type out_prefix: str
:type base_dir: str
:type options: list
:returns: a tuple containing the prefix of the output files (the input
prefix for the next script) and the type of the output files
(``tfile``).
This function calls the
:py:mod:`pyGenClean.NoCallHetero.clean_noCall_hetero_snps` module. The
required file type for this module is ``tfile``, hence the need to use the
:py:func:`check_input_files` to check if the file input file type is the
good one, or to create it if needed.
"""
# Creating the output directory
os.mkdir(out_prefix)
# We know we need a tfile
required_type = "tfile"
check_input_files(in_prefix, in_type, required_type)
# We need to inject the name of the input file and the name of the output
# prefix
script_prefix = os.path.join(out_prefix, "clean_noCall_hetero")
options += ["--{}".format(required_type), in_prefix,
"--out", script_prefix]
# We run the script
try:
noCall_hetero_snps.main(options)
except noCall_hetero_snps.ProgramError as e:
msg = "noCall_hetero_snps: {}".format(e)
raise ProgramError(msg)
# We want to save in a file the markers and samples that were removed
# There are two files to look at, which contains only one row, the name of
# the markers:
# - prefix.allFailed
# - prefix.allHetero
nb_all_failed = 0
nb_all_hetero = 0
o_filename = os.path.join(base_dir, "excluded_markers.txt")
with open(o_filename, "a") as o_file:
# The first file
i_filename = script_prefix + ".allFailed"
if os.path.isfile(i_filename):
with open(i_filename, "r") as i_file:
for line in i_file:
nb_all_failed += 1
print >>o_file, line.rstrip("\r\n") + "\tall failed"
# The second file
i_filename = os.path.join(script_prefix + ".allHetero")
if os.path.isfile(i_filename):
with open(i_filename, "r") as i_file:
for line in i_file:
nb_all_hetero += 1
print >>o_file, line.rstrip("\r\n") + "\tall hetero"
# We write a LaTeX summary
latex_file = os.path.join(script_prefix + ".summary.tex")
try:
with open(latex_file, "w") as o_file:
print >>o_file, latex_template.subsection(
noCall_hetero_snps.pretty_name,
)
text = (
"After scrutiny, {:,d} marker{} {} excluded from the "
"dataset because of a call rate of 0. Also, {:,d} marker{} "
"{} excluded from the dataset because all samples were "
"heterozygous (excluding the mitochondrial "
"chromosome)".format(nb_all_failed,
"s" if nb_all_failed > 1 else "",
"were" if nb_all_failed > 1 else "was",
nb_all_hetero,
"s" if nb_all_hetero > 1 else "",
"were" if nb_all_hetero > 1 else "was")
)
print >>o_file, latex_template.wrap_lines(text, 80)
except IOError:
msg = "{}: cannot write LaTeX summary".format(latex_file)
raise ProgramError(msg)
# Writing the summary results
with open(os.path.join(base_dir, "results_summary.txt"), "a") as o_file:
print >>o_file, "# {}".format(script_prefix)
print >>o_file, ("Number of completely failed markers\t"
"{nb:,d}\t-{nb:,d}".format(nb=nb_all_failed))
print >>o_file, "---"
print >>o_file, ("Number of all heterozygous markers\t"
"{nb:,d}\t-{nb:,d}".format(nb=nb_all_hetero))
print >>o_file, "---"
# We know this step does produce a new data set (tfile), so we return it
# along with the report name
return _StepResult(
next_file=os.path.join(out_prefix, "clean_noCall_hetero"),
next_file_type="tfile",
latex_summary=latex_file,
description=noCall_hetero_snps.desc,
long_description=noCall_hetero_snps.long_desc,
graph_path=None,
) | def function[run_noCall_hetero_snps, parameter[in_prefix, in_type, out_prefix, base_dir, options]]:
constant[Runs step 3 (clean no call and hetero).
:param in_prefix: the prefix of the input files.
:param in_type: the type of the input files.
:param out_prefix: the output prefix.
:param base_dir: the output directory.
:param options: the options needed.
:type in_prefix: str
:type in_type: str
:type out_prefix: str
:type base_dir: str
:type options: list
:returns: a tuple containing the prefix of the output files (the input
prefix for the next script) and the type of the output files
(``tfile``).
This function calls the
:py:mod:`pyGenClean.NoCallHetero.clean_noCall_hetero_snps` module. The
required file type for this module is ``tfile``, hence the need to use the
:py:func:`check_input_files` to check if the file input file type is the
good one, or to create it if needed.
]
call[name[os].mkdir, parameter[name[out_prefix]]]
variable[required_type] assign[=] constant[tfile]
call[name[check_input_files], parameter[name[in_prefix], name[in_type], name[required_type]]]
variable[script_prefix] assign[=] call[name[os].path.join, parameter[name[out_prefix], constant[clean_noCall_hetero]]]
<ast.AugAssign object at 0x7da1b0a7a590>
<ast.Try object at 0x7da1b0a78d00>
variable[nb_all_failed] assign[=] constant[0]
variable[nb_all_hetero] assign[=] constant[0]
variable[o_filename] assign[=] call[name[os].path.join, parameter[name[base_dir], constant[excluded_markers.txt]]]
with call[name[open], parameter[name[o_filename], constant[a]]] begin[:]
variable[i_filename] assign[=] binary_operation[name[script_prefix] + constant[.allFailed]]
if call[name[os].path.isfile, parameter[name[i_filename]]] begin[:]
with call[name[open], parameter[name[i_filename], constant[r]]] begin[:]
for taget[name[line]] in starred[name[i_file]] begin[:]
<ast.AugAssign object at 0x7da1b094a4a0>
tuple[[<ast.BinOp object at 0x7da1b094ad10>, <ast.BinOp object at 0x7da1b094aec0>]]
variable[i_filename] assign[=] call[name[os].path.join, parameter[binary_operation[name[script_prefix] + constant[.allHetero]]]]
if call[name[os].path.isfile, parameter[name[i_filename]]] begin[:]
with call[name[open], parameter[name[i_filename], constant[r]]] begin[:]
for taget[name[line]] in starred[name[i_file]] begin[:]
<ast.AugAssign object at 0x7da1b09481f0>
tuple[[<ast.BinOp object at 0x7da1b09497b0>, <ast.BinOp object at 0x7da1b094a710>]]
variable[latex_file] assign[=] call[name[os].path.join, parameter[binary_operation[name[script_prefix] + constant[.summary.tex]]]]
<ast.Try object at 0x7da1b094b550>
with call[name[open], parameter[call[name[os].path.join, parameter[name[base_dir], constant[results_summary.txt]]], constant[a]]] begin[:]
tuple[[<ast.BinOp object at 0x7da1b09766e0>, <ast.Call object at 0x7da1b0976d10>]]
tuple[[<ast.BinOp object at 0x7da1b0976950>, <ast.Call object at 0x7da1b0976ad0>]]
tuple[[<ast.BinOp object at 0x7da1b0976a40>, <ast.Constant object at 0x7da1b0976ec0>]]
tuple[[<ast.BinOp object at 0x7da1b0976e30>, <ast.Call object at 0x7da1b0976da0>]]
tuple[[<ast.BinOp object at 0x7da1b0977550>, <ast.Constant object at 0x7da1b0977460>]]
return[call[name[_StepResult], parameter[]]] | keyword[def] identifier[run_noCall_hetero_snps] ( identifier[in_prefix] , identifier[in_type] , identifier[out_prefix] , identifier[base_dir] , identifier[options] ):
literal[string]
identifier[os] . identifier[mkdir] ( identifier[out_prefix] )
identifier[required_type] = literal[string]
identifier[check_input_files] ( identifier[in_prefix] , identifier[in_type] , identifier[required_type] )
identifier[script_prefix] = identifier[os] . identifier[path] . identifier[join] ( identifier[out_prefix] , literal[string] )
identifier[options] +=[ literal[string] . identifier[format] ( identifier[required_type] ), identifier[in_prefix] ,
literal[string] , identifier[script_prefix] ]
keyword[try] :
identifier[noCall_hetero_snps] . identifier[main] ( identifier[options] )
keyword[except] identifier[noCall_hetero_snps] . identifier[ProgramError] keyword[as] identifier[e] :
identifier[msg] = literal[string] . identifier[format] ( identifier[e] )
keyword[raise] identifier[ProgramError] ( identifier[msg] )
identifier[nb_all_failed] = literal[int]
identifier[nb_all_hetero] = literal[int]
identifier[o_filename] = identifier[os] . identifier[path] . identifier[join] ( identifier[base_dir] , literal[string] )
keyword[with] identifier[open] ( identifier[o_filename] , literal[string] ) keyword[as] identifier[o_file] :
identifier[i_filename] = identifier[script_prefix] + literal[string]
keyword[if] identifier[os] . identifier[path] . identifier[isfile] ( identifier[i_filename] ):
keyword[with] identifier[open] ( identifier[i_filename] , literal[string] ) keyword[as] identifier[i_file] :
keyword[for] identifier[line] keyword[in] identifier[i_file] :
identifier[nb_all_failed] += literal[int]
identifier[print] >> identifier[o_file] , identifier[line] . identifier[rstrip] ( literal[string] )+ literal[string]
identifier[i_filename] = identifier[os] . identifier[path] . identifier[join] ( identifier[script_prefix] + literal[string] )
keyword[if] identifier[os] . identifier[path] . identifier[isfile] ( identifier[i_filename] ):
keyword[with] identifier[open] ( identifier[i_filename] , literal[string] ) keyword[as] identifier[i_file] :
keyword[for] identifier[line] keyword[in] identifier[i_file] :
identifier[nb_all_hetero] += literal[int]
identifier[print] >> identifier[o_file] , identifier[line] . identifier[rstrip] ( literal[string] )+ literal[string]
identifier[latex_file] = identifier[os] . identifier[path] . identifier[join] ( identifier[script_prefix] + literal[string] )
keyword[try] :
keyword[with] identifier[open] ( identifier[latex_file] , literal[string] ) keyword[as] identifier[o_file] :
identifier[print] >> identifier[o_file] , identifier[latex_template] . identifier[subsection] (
identifier[noCall_hetero_snps] . identifier[pretty_name] ,
)
identifier[text] =(
literal[string]
literal[string]
literal[string]
literal[string]
literal[string] . identifier[format] ( identifier[nb_all_failed] ,
literal[string] keyword[if] identifier[nb_all_failed] > literal[int] keyword[else] literal[string] ,
literal[string] keyword[if] identifier[nb_all_failed] > literal[int] keyword[else] literal[string] ,
identifier[nb_all_hetero] ,
literal[string] keyword[if] identifier[nb_all_hetero] > literal[int] keyword[else] literal[string] ,
literal[string] keyword[if] identifier[nb_all_hetero] > literal[int] keyword[else] literal[string] )
)
identifier[print] >> identifier[o_file] , identifier[latex_template] . identifier[wrap_lines] ( identifier[text] , literal[int] )
keyword[except] identifier[IOError] :
identifier[msg] = literal[string] . identifier[format] ( identifier[latex_file] )
keyword[raise] identifier[ProgramError] ( identifier[msg] )
keyword[with] identifier[open] ( identifier[os] . identifier[path] . identifier[join] ( identifier[base_dir] , literal[string] ), literal[string] ) keyword[as] identifier[o_file] :
identifier[print] >> identifier[o_file] , literal[string] . identifier[format] ( identifier[script_prefix] )
identifier[print] >> identifier[o_file] ,( literal[string]
literal[string] . identifier[format] ( identifier[nb] = identifier[nb_all_failed] ))
identifier[print] >> identifier[o_file] , literal[string]
identifier[print] >> identifier[o_file] ,( literal[string]
literal[string] . identifier[format] ( identifier[nb] = identifier[nb_all_hetero] ))
identifier[print] >> identifier[o_file] , literal[string]
keyword[return] identifier[_StepResult] (
identifier[next_file] = identifier[os] . identifier[path] . identifier[join] ( identifier[out_prefix] , literal[string] ),
identifier[next_file_type] = literal[string] ,
identifier[latex_summary] = identifier[latex_file] ,
identifier[description] = identifier[noCall_hetero_snps] . identifier[desc] ,
identifier[long_description] = identifier[noCall_hetero_snps] . identifier[long_desc] ,
identifier[graph_path] = keyword[None] ,
) | def run_noCall_hetero_snps(in_prefix, in_type, out_prefix, base_dir, options):
"""Runs step 3 (clean no call and hetero).
:param in_prefix: the prefix of the input files.
:param in_type: the type of the input files.
:param out_prefix: the output prefix.
:param base_dir: the output directory.
:param options: the options needed.
:type in_prefix: str
:type in_type: str
:type out_prefix: str
:type base_dir: str
:type options: list
:returns: a tuple containing the prefix of the output files (the input
prefix for the next script) and the type of the output files
(``tfile``).
This function calls the
:py:mod:`pyGenClean.NoCallHetero.clean_noCall_hetero_snps` module. The
required file type for this module is ``tfile``, hence the need to use the
:py:func:`check_input_files` to check if the file input file type is the
good one, or to create it if needed.
"""
# Creating the output directory
os.mkdir(out_prefix)
# We know we need a tfile
required_type = 'tfile'
check_input_files(in_prefix, in_type, required_type)
# We need to inject the name of the input file and the name of the output
# prefix
script_prefix = os.path.join(out_prefix, 'clean_noCall_hetero')
options += ['--{}'.format(required_type), in_prefix, '--out', script_prefix]
# We run the script
try:
noCall_hetero_snps.main(options) # depends on [control=['try'], data=[]]
except noCall_hetero_snps.ProgramError as e:
msg = 'noCall_hetero_snps: {}'.format(e)
raise ProgramError(msg) # depends on [control=['except'], data=['e']]
# We want to save in a file the markers and samples that were removed
# There are two files to look at, which contains only one row, the name of
# the markers:
# - prefix.allFailed
# - prefix.allHetero
nb_all_failed = 0
nb_all_hetero = 0
o_filename = os.path.join(base_dir, 'excluded_markers.txt')
with open(o_filename, 'a') as o_file:
# The first file
i_filename = script_prefix + '.allFailed'
if os.path.isfile(i_filename):
with open(i_filename, 'r') as i_file:
for line in i_file:
nb_all_failed += 1
(print >> o_file, line.rstrip('\r\n') + '\tall failed') # depends on [control=['for'], data=['line']] # depends on [control=['with'], data=['i_file']] # depends on [control=['if'], data=[]]
# The second file
i_filename = os.path.join(script_prefix + '.allHetero')
if os.path.isfile(i_filename):
with open(i_filename, 'r') as i_file:
for line in i_file:
nb_all_hetero += 1
(print >> o_file, line.rstrip('\r\n') + '\tall hetero') # depends on [control=['for'], data=['line']] # depends on [control=['with'], data=['i_file']] # depends on [control=['if'], data=[]] # depends on [control=['with'], data=['open', 'o_file']]
# We write a LaTeX summary
latex_file = os.path.join(script_prefix + '.summary.tex')
try:
with open(latex_file, 'w') as o_file:
(print >> o_file, latex_template.subsection(noCall_hetero_snps.pretty_name))
text = 'After scrutiny, {:,d} marker{} {} excluded from the dataset because of a call rate of 0. Also, {:,d} marker{} {} excluded from the dataset because all samples were heterozygous (excluding the mitochondrial chromosome)'.format(nb_all_failed, 's' if nb_all_failed > 1 else '', 'were' if nb_all_failed > 1 else 'was', nb_all_hetero, 's' if nb_all_hetero > 1 else '', 'were' if nb_all_hetero > 1 else 'was')
(print >> o_file, latex_template.wrap_lines(text, 80)) # depends on [control=['with'], data=['o_file']] # depends on [control=['try'], data=[]]
except IOError:
msg = '{}: cannot write LaTeX summary'.format(latex_file)
raise ProgramError(msg) # depends on [control=['except'], data=[]]
# Writing the summary results
with open(os.path.join(base_dir, 'results_summary.txt'), 'a') as o_file:
(print >> o_file, '# {}'.format(script_prefix))
(print >> o_file, 'Number of completely failed markers\t{nb:,d}\t-{nb:,d}'.format(nb=nb_all_failed))
(print >> o_file, '---')
(print >> o_file, 'Number of all heterozygous markers\t{nb:,d}\t-{nb:,d}'.format(nb=nb_all_hetero))
(print >> o_file, '---') # depends on [control=['with'], data=['o_file']]
# We know this step does produce a new data set (tfile), so we return it
# along with the report name
return _StepResult(next_file=os.path.join(out_prefix, 'clean_noCall_hetero'), next_file_type='tfile', latex_summary=latex_file, description=noCall_hetero_snps.desc, long_description=noCall_hetero_snps.long_desc, graph_path=None) |
def get_erasure_profile(service, name):
"""
:param service: six.string_types. The Ceph user name to run the command under
:param name:
:return:
"""
try:
out = check_output(['ceph', '--id', service,
'osd', 'erasure-code-profile', 'get',
name, '--format=json'])
if six.PY3:
out = out.decode('UTF-8')
return json.loads(out)
except (CalledProcessError, OSError, ValueError):
return None | def function[get_erasure_profile, parameter[service, name]]:
constant[
:param service: six.string_types. The Ceph user name to run the command under
:param name:
:return:
]
<ast.Try object at 0x7da1b121b400> | keyword[def] identifier[get_erasure_profile] ( identifier[service] , identifier[name] ):
literal[string]
keyword[try] :
identifier[out] = identifier[check_output] ([ literal[string] , literal[string] , identifier[service] ,
literal[string] , literal[string] , literal[string] ,
identifier[name] , literal[string] ])
keyword[if] identifier[six] . identifier[PY3] :
identifier[out] = identifier[out] . identifier[decode] ( literal[string] )
keyword[return] identifier[json] . identifier[loads] ( identifier[out] )
keyword[except] ( identifier[CalledProcessError] , identifier[OSError] , identifier[ValueError] ):
keyword[return] keyword[None] | def get_erasure_profile(service, name):
"""
:param service: six.string_types. The Ceph user name to run the command under
:param name:
:return:
"""
try:
out = check_output(['ceph', '--id', service, 'osd', 'erasure-code-profile', 'get', name, '--format=json'])
if six.PY3:
out = out.decode('UTF-8') # depends on [control=['if'], data=[]]
return json.loads(out) # depends on [control=['try'], data=[]]
except (CalledProcessError, OSError, ValueError):
return None # depends on [control=['except'], data=[]] |
def _maybe_init_tags(self, run_id, tag_to_metadata):
"""Returns a tag-to-ID map for the given tags, creating rows if needed.
Args:
run_id: the ID of the run to which these tags belong.
tag_to_metadata: map of tag name to SummaryMetadata for the tag.
"""
cursor = self._db.cursor()
# TODO: for huge numbers of tags (e.g. 1000+), this is slower than just
# querying for the known tag names explicitly; find a better tradeoff.
cursor.execute('SELECT tag_name, tag_id FROM Tags WHERE run_id = ?',
(run_id,))
tag_to_id = {row[0]: row[1] for row in cursor.fetchall()
if row[0] in tag_to_metadata}
new_tag_data = []
for tag, metadata in six.iteritems(tag_to_metadata):
if tag not in tag_to_id:
tag_id = self._create_id()
tag_to_id[tag] = tag_id
new_tag_data.append((run_id, tag_id, tag, time.time(),
metadata.display_name,
metadata.plugin_data.plugin_name,
self._make_blob(metadata.plugin_data.content)))
cursor.executemany(
"""
INSERT INTO Tags (
run_id, tag_id, tag_name, inserted_time, display_name, plugin_name,
plugin_data
) VALUES (?, ?, ?, ?, ?, ?, ?)
""",
new_tag_data)
return tag_to_id | def function[_maybe_init_tags, parameter[self, run_id, tag_to_metadata]]:
constant[Returns a tag-to-ID map for the given tags, creating rows if needed.
Args:
run_id: the ID of the run to which these tags belong.
tag_to_metadata: map of tag name to SummaryMetadata for the tag.
]
variable[cursor] assign[=] call[name[self]._db.cursor, parameter[]]
call[name[cursor].execute, parameter[constant[SELECT tag_name, tag_id FROM Tags WHERE run_id = ?], tuple[[<ast.Name object at 0x7da1b216a260>]]]]
variable[tag_to_id] assign[=] <ast.DictComp object at 0x7da1b2168eb0>
variable[new_tag_data] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1b2168070>, <ast.Name object at 0x7da1b216b370>]]] in starred[call[name[six].iteritems, parameter[name[tag_to_metadata]]]] begin[:]
if compare[name[tag] <ast.NotIn object at 0x7da2590d7190> name[tag_to_id]] begin[:]
variable[tag_id] assign[=] call[name[self]._create_id, parameter[]]
call[name[tag_to_id]][name[tag]] assign[=] name[tag_id]
call[name[new_tag_data].append, parameter[tuple[[<ast.Name object at 0x7da1b2168700>, <ast.Name object at 0x7da1b2168850>, <ast.Name object at 0x7da1b2168940>, <ast.Call object at 0x7da1b2168640>, <ast.Attribute object at 0x7da1b2169120>, <ast.Attribute object at 0x7da1b21688e0>, <ast.Call object at 0x7da1b2168f10>]]]]
call[name[cursor].executemany, parameter[constant[
INSERT INTO Tags (
run_id, tag_id, tag_name, inserted_time, display_name, plugin_name,
plugin_data
) VALUES (?, ?, ?, ?, ?, ?, ?)
], name[new_tag_data]]]
return[name[tag_to_id]] | keyword[def] identifier[_maybe_init_tags] ( identifier[self] , identifier[run_id] , identifier[tag_to_metadata] ):
literal[string]
identifier[cursor] = identifier[self] . identifier[_db] . identifier[cursor] ()
identifier[cursor] . identifier[execute] ( literal[string] ,
( identifier[run_id] ,))
identifier[tag_to_id] ={ identifier[row] [ literal[int] ]: identifier[row] [ literal[int] ] keyword[for] identifier[row] keyword[in] identifier[cursor] . identifier[fetchall] ()
keyword[if] identifier[row] [ literal[int] ] keyword[in] identifier[tag_to_metadata] }
identifier[new_tag_data] =[]
keyword[for] identifier[tag] , identifier[metadata] keyword[in] identifier[six] . identifier[iteritems] ( identifier[tag_to_metadata] ):
keyword[if] identifier[tag] keyword[not] keyword[in] identifier[tag_to_id] :
identifier[tag_id] = identifier[self] . identifier[_create_id] ()
identifier[tag_to_id] [ identifier[tag] ]= identifier[tag_id]
identifier[new_tag_data] . identifier[append] (( identifier[run_id] , identifier[tag_id] , identifier[tag] , identifier[time] . identifier[time] (),
identifier[metadata] . identifier[display_name] ,
identifier[metadata] . identifier[plugin_data] . identifier[plugin_name] ,
identifier[self] . identifier[_make_blob] ( identifier[metadata] . identifier[plugin_data] . identifier[content] )))
identifier[cursor] . identifier[executemany] (
literal[string] ,
identifier[new_tag_data] )
keyword[return] identifier[tag_to_id] | def _maybe_init_tags(self, run_id, tag_to_metadata):
"""Returns a tag-to-ID map for the given tags, creating rows if needed.
Args:
run_id: the ID of the run to which these tags belong.
tag_to_metadata: map of tag name to SummaryMetadata for the tag.
"""
cursor = self._db.cursor()
# TODO: for huge numbers of tags (e.g. 1000+), this is slower than just
# querying for the known tag names explicitly; find a better tradeoff.
cursor.execute('SELECT tag_name, tag_id FROM Tags WHERE run_id = ?', (run_id,))
tag_to_id = {row[0]: row[1] for row in cursor.fetchall() if row[0] in tag_to_metadata}
new_tag_data = []
for (tag, metadata) in six.iteritems(tag_to_metadata):
if tag not in tag_to_id:
tag_id = self._create_id()
tag_to_id[tag] = tag_id
new_tag_data.append((run_id, tag_id, tag, time.time(), metadata.display_name, metadata.plugin_data.plugin_name, self._make_blob(metadata.plugin_data.content))) # depends on [control=['if'], data=['tag', 'tag_to_id']] # depends on [control=['for'], data=[]]
cursor.executemany('\n INSERT INTO Tags (\n run_id, tag_id, tag_name, inserted_time, display_name, plugin_name,\n plugin_data\n ) VALUES (?, ?, ?, ?, ?, ?, ?)\n ', new_tag_data)
return tag_to_id |
def download_and_expand(self):
"""Download and expand RPM Python binding."""
top_dir_name = None
if self.git_branch:
# Download a source by git clone.
top_dir_name = self._download_and_expand_by_git()
else:
# Download a source from the arcihve URL.
# Downloading the compressed archive is better than "git clone",
# because it is faster.
# If download failed due to URL not found, try "git clone".
try:
top_dir_name = self._download_and_expand_from_archive_url()
except RemoteFileNotFoundError:
Log.info('Try to download by git clone.')
top_dir_name = self._download_and_expand_by_git()
return top_dir_name | def function[download_and_expand, parameter[self]]:
constant[Download and expand RPM Python binding.]
variable[top_dir_name] assign[=] constant[None]
if name[self].git_branch begin[:]
variable[top_dir_name] assign[=] call[name[self]._download_and_expand_by_git, parameter[]]
return[name[top_dir_name]] | keyword[def] identifier[download_and_expand] ( identifier[self] ):
literal[string]
identifier[top_dir_name] = keyword[None]
keyword[if] identifier[self] . identifier[git_branch] :
identifier[top_dir_name] = identifier[self] . identifier[_download_and_expand_by_git] ()
keyword[else] :
keyword[try] :
identifier[top_dir_name] = identifier[self] . identifier[_download_and_expand_from_archive_url] ()
keyword[except] identifier[RemoteFileNotFoundError] :
identifier[Log] . identifier[info] ( literal[string] )
identifier[top_dir_name] = identifier[self] . identifier[_download_and_expand_by_git] ()
keyword[return] identifier[top_dir_name] | def download_and_expand(self):
"""Download and expand RPM Python binding."""
top_dir_name = None
if self.git_branch:
# Download a source by git clone.
top_dir_name = self._download_and_expand_by_git() # depends on [control=['if'], data=[]]
else:
# Download a source from the arcihve URL.
# Downloading the compressed archive is better than "git clone",
# because it is faster.
# If download failed due to URL not found, try "git clone".
try:
top_dir_name = self._download_and_expand_from_archive_url() # depends on [control=['try'], data=[]]
except RemoteFileNotFoundError:
Log.info('Try to download by git clone.')
top_dir_name = self._download_and_expand_by_git() # depends on [control=['except'], data=[]]
return top_dir_name |
def generate_canonical_request(method, parsed_url, headers, signed_headers, content_sha256):
    """
    Build the canonical request string used for request signing.
    :param method: HTTP method.
    :param parsed_url: Parsed url is input from :func:`urlsplit`
    :param headers: HTTP header dictionary (keys in Title-Case).
    :param signed_headers: ordered list of lower-case header names to sign.
    :param content_sha256: Content sha256 hexdigest string.
    :return: newline-joined canonical request string.
    """
    # Request line components: method, path, raw query string.
    parts = [method, parsed_url.path, parsed_url.query]

    # One "name:value" line per signed header, values stripped of
    # surrounding whitespace. Lookup is done via Title-Case keys.
    for header_name in signed_headers:
        header_value = str(headers[header_name.title()]).strip()
        parts.append('{0}:{1}'.format(header_name, header_value))

    # Blank separator line, then the signed-header list and payload hash.
    parts.append('')
    parts.append(';'.join(signed_headers))
    parts.append(content_sha256)
    return '\n'.join(parts)
constant[
Generate canonical request.
:param method: HTTP method.
:param parsed_url: Parsed url is input from :func:`urlsplit`
:param headers: HTTP header dictionary.
:param content_sha256: Content sha256 hexdigest string.
]
variable[lines] assign[=] list[[<ast.Name object at 0x7da1b1da10f0>, <ast.Attribute object at 0x7da1b22e8af0>, <ast.Attribute object at 0x7da1b22e8b20>]]
variable[header_lines] assign[=] list[[]]
for taget[name[header]] in starred[name[signed_headers]] begin[:]
variable[value] assign[=] call[name[headers]][call[name[header].title, parameter[]]]
variable[value] assign[=] call[call[name[str], parameter[name[value]]].strip, parameter[]]
call[name[header_lines].append, parameter[binary_operation[binary_operation[name[header] + constant[:]] + call[name[str], parameter[name[value]]]]]]
variable[lines] assign[=] binary_operation[name[lines] + name[header_lines]]
call[name[lines].append, parameter[constant[]]]
call[name[lines].append, parameter[call[constant[;].join, parameter[name[signed_headers]]]]]
call[name[lines].append, parameter[name[content_sha256]]]
return[call[constant[
].join, parameter[name[lines]]]] | keyword[def] identifier[generate_canonical_request] ( identifier[method] , identifier[parsed_url] , identifier[headers] , identifier[signed_headers] , identifier[content_sha256] ):
literal[string]
identifier[lines] =[ identifier[method] , identifier[parsed_url] . identifier[path] , identifier[parsed_url] . identifier[query] ]
identifier[header_lines] =[]
keyword[for] identifier[header] keyword[in] identifier[signed_headers] :
identifier[value] = identifier[headers] [ identifier[header] . identifier[title] ()]
identifier[value] = identifier[str] ( identifier[value] ). identifier[strip] ()
identifier[header_lines] . identifier[append] ( identifier[header] + literal[string] + identifier[str] ( identifier[value] ))
identifier[lines] = identifier[lines] + identifier[header_lines]
identifier[lines] . identifier[append] ( literal[string] )
identifier[lines] . identifier[append] ( literal[string] . identifier[join] ( identifier[signed_headers] ))
identifier[lines] . identifier[append] ( identifier[content_sha256] )
keyword[return] literal[string] . identifier[join] ( identifier[lines] ) | def generate_canonical_request(method, parsed_url, headers, signed_headers, content_sha256):
"""
Generate canonical request.
:param method: HTTP method.
:param parsed_url: Parsed url is input from :func:`urlsplit`
:param headers: HTTP header dictionary.
:param content_sha256: Content sha256 hexdigest string.
"""
lines = [method, parsed_url.path, parsed_url.query]
# Headers added to canonical request.
header_lines = []
for header in signed_headers:
value = headers[header.title()]
value = str(value).strip()
header_lines.append(header + ':' + str(value)) # depends on [control=['for'], data=['header']]
lines = lines + header_lines
lines.append('')
lines.append(';'.join(signed_headers))
lines.append(content_sha256)
return '\n'.join(lines) |
def get_subscriptions(self, limit=100, offset=0, params=None):
    """
    Get all subscriptions.
    :param limit: maximum number of subscriptions to return.
    :param offset: pagination offset into the result set.
    :param params: optional dict of extra query parameters; a value under
        the 'ids' key may be a sequence and is joined with commas.
    :return: result of the GET request against the subscriptions endpoint.
    """
    url = self.SUBSCRIPTIONS_URL + "?limit=%s&offset=%s" % (limit, offset)
    # BUGFIX: default was a shared mutable dict (params={}); use None sentinel.
    # BUGFIX: comparison used "key is 'ids'" (identity on a string literal,
    # which only worked by CPython interning accident); use == instead.
    for key, value in (params or {}).items():
        if key == 'ids':
            value = ",".join(value)
        url += '&%s=%s' % (key, value)
    connection = Connection(self.token)
    connection.set_url(self.production, url)
    return connection.get_request()
constant[
Get all subscriptions
]
variable[url] assign[=] binary_operation[name[self].SUBSCRIPTIONS_URL + binary_operation[constant[?limit=%s&offset=%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b12b55a0>, <ast.Name object at 0x7da1b12b4790>]]]]
for taget[tuple[[<ast.Name object at 0x7da1b12b5b70>, <ast.Name object at 0x7da1b12b61d0>]]] in starred[call[name[params].items, parameter[]]] begin[:]
if compare[name[key] is constant[ids]] begin[:]
variable[value] assign[=] call[constant[,].join, parameter[name[value]]]
<ast.AugAssign object at 0x7da1b1152590>
variable[connection] assign[=] call[name[Connection], parameter[name[self].token]]
call[name[connection].set_url, parameter[name[self].production, name[url]]]
return[call[name[connection].get_request, parameter[]]] | keyword[def] identifier[get_subscriptions] ( identifier[self] , identifier[limit] = literal[int] , identifier[offset] = literal[int] , identifier[params] ={}):
literal[string]
identifier[url] = identifier[self] . identifier[SUBSCRIPTIONS_URL] + literal[string] %( identifier[limit] , identifier[offset] )
keyword[for] identifier[key] , identifier[value] keyword[in] identifier[params] . identifier[items] ():
keyword[if] identifier[key] keyword[is] literal[string] :
identifier[value] = literal[string] . identifier[join] ( identifier[value] )
identifier[url] += literal[string] %( identifier[key] , identifier[value] )
identifier[connection] = identifier[Connection] ( identifier[self] . identifier[token] )
identifier[connection] . identifier[set_url] ( identifier[self] . identifier[production] , identifier[url] )
keyword[return] identifier[connection] . identifier[get_request] () | def get_subscriptions(self, limit=100, offset=0, params={}):
"""
Get all subscriptions
"""
url = self.SUBSCRIPTIONS_URL + '?limit=%s&offset=%s' % (limit, offset)
for (key, value) in params.items():
if key is 'ids':
value = ','.join(value) # depends on [control=['if'], data=[]]
url += '&%s=%s' % (key, value) # depends on [control=['for'], data=[]]
connection = Connection(self.token)
connection.set_url(self.production, url)
return connection.get_request() |
def check_registered_trademark_symbol(text):
    """Use the registered trademark symbol instead of (R)."""
    err = "typography.symbols.trademark"
    msg = u"(R) is a goofy alphabetic approximation, use the symbol ®."
    # BUGFIX: the pattern previously lived in a plain string ("\([rR]\)"),
    # where "\(" is an invalid escape sequence (SyntaxWarning on modern
    # Python). A raw string makes the regex escapes explicit and safe.
    regex = r"\([rR]\)"
    return existence_check(
        text, [regex], err, msg, max_errors=3, require_padding=False)
constant[Use the registered trademark symbol instead of (R).]
variable[err] assign[=] constant[typography.symbols.trademark]
variable[msg] assign[=] constant[(R) is a goofy alphabetic approximation, use the symbol ®.]
variable[regex] assign[=] constant[\([rR]\)]
return[call[name[existence_check], parameter[name[text], list[[<ast.Name object at 0x7da20e956320>]], name[err], name[msg]]]] | keyword[def] identifier[check_registered_trademark_symbol] ( identifier[text] ):
literal[string]
identifier[err] = literal[string]
identifier[msg] = literal[string]
identifier[regex] = literal[string]
keyword[return] identifier[existence_check] (
identifier[text] ,[ identifier[regex] ], identifier[err] , identifier[msg] , identifier[max_errors] = literal[int] , identifier[require_padding] = keyword[False] ) | def check_registered_trademark_symbol(text):
"""Use the registered trademark symbol instead of (R)."""
err = 'typography.symbols.trademark'
msg = u'(R) is a goofy alphabetic approximation, use the symbol ®.'
regex = '\\([rR]\\)'
return existence_check(text, [regex], err, msg, max_errors=3, require_padding=False) |
def take_nd(self, indexer, allow_fill=None, fill_value=None):
    """
    Take elements from the Categorical.
    Parameters
    ----------
    indexer : sequence of int
        The indices in `self` to take. The meaning of negative values in
        `indexer` depends on the value of `allow_fill`.
    allow_fill : bool, default None
        How to handle negative values in `indexer`.
        * False: negative values in `indices` indicate positional indices
          from the right. This is similar to
          :func:`numpy.take`.
        * True: negative values in `indices` indicate missing values
          (the default). These values are set to `fill_value`. Any other
          other negative values raise a ``ValueError``.
        .. versionchanged:: 0.23.0
           Deprecated the default value of `allow_fill`. The deprecated
           default is ``True``. In the future, this will change to
           ``False``.
    fill_value : object
        The value to use for `indices` that are missing (-1), when
        ``allow_fill=True``. This should be the category, i.e. a value
        in ``self.categories``, not a code.
    Returns
    -------
    Categorical
        This Categorical will have the same categories and ordered as
        `self`.
    See Also
    --------
    Series.take : Similar method for Series.
    numpy.ndarray.take : Similar method for NumPy arrays.
    Examples
    --------
    >>> cat = pd.Categorical(['a', 'a', 'b'])
    >>> cat
    [a, a, b]
    Categories (2, object): [a, b]
    Specify ``allow_fill==False`` to have negative indices mean indexing
    from the right.
    >>> cat.take([0, -1, -2], allow_fill=False)
    [a, b, a]
    Categories (2, object): [a, b]
    With ``allow_fill=True``, indices equal to ``-1`` mean "missing"
    values that should be filled with the `fill_value`, which is
    ``np.nan`` by default.
    >>> cat.take([0, -1, -1], allow_fill=True)
    [a, NaN, NaN]
    Categories (2, object): [a, b]
    The fill value can be specified.
    >>> cat.take([0, -1, -1], allow_fill=True, fill_value='a')
    [a, a, a]
    Categories (3, object): [a, b]
    Specifying a fill value that's not in ``self.categories``
    will raise a ``TypeError``.
    """
    # Normalize the indexer to a platform-int ndarray for the take kernel.
    indexer = np.asarray(indexer, dtype=np.intp)
    if allow_fill is None:
        # Deprecation path: only warn when the caller actually relies on
        # the old behavior, i.e. the indexer contains negative values.
        if (indexer < 0).any():
            warn(_take_msg, FutureWarning, stacklevel=2)
        allow_fill = True

    # Capture dtype before taking so the result keeps the same
    # categories/ordered metadata as `self`.
    dtype = self.dtype

    if isna(fill_value):
        # NA fill maps to the sentinel code -1 in the codes array.
        fill_value = -1
    elif allow_fill:
        # convert user-provided `fill_value` to codes
        if fill_value in self.categories:
            fill_value = self.categories.get_loc(fill_value)
        else:
            msg = (
                "'fill_value' ('{}') is not in this Categorical's "
                "categories."
            )
            raise TypeError(msg.format(fill_value))

    # Perform the take on the underlying integer codes, then rebuild a
    # Categorical with the original dtype (categories + orderedness).
    codes = take(self._codes, indexer, allow_fill=allow_fill,
                 fill_value=fill_value)
    result = type(self).from_codes(codes, dtype=dtype)
    return result
constant[
Take elements from the Categorical.
Parameters
----------
indexer : sequence of int
The indices in `self` to take. The meaning of negative values in
`indexer` depends on the value of `allow_fill`.
allow_fill : bool, default None
How to handle negative values in `indexer`.
* False: negative values in `indices` indicate positional indices
from the right. This is similar to
:func:`numpy.take`.
* True: negative values in `indices` indicate missing values
(the default). These values are set to `fill_value`. Any other
other negative values raise a ``ValueError``.
.. versionchanged:: 0.23.0
Deprecated the default value of `allow_fill`. The deprecated
default is ``True``. In the future, this will change to
``False``.
fill_value : object
The value to use for `indices` that are missing (-1), when
``allow_fill=True``. This should be the category, i.e. a value
in ``self.categories``, not a code.
Returns
-------
Categorical
This Categorical will have the same categories and ordered as
`self`.
See Also
--------
Series.take : Similar method for Series.
numpy.ndarray.take : Similar method for NumPy arrays.
Examples
--------
>>> cat = pd.Categorical(['a', 'a', 'b'])
>>> cat
[a, a, b]
Categories (2, object): [a, b]
Specify ``allow_fill==False`` to have negative indices mean indexing
from the right.
>>> cat.take([0, -1, -2], allow_fill=False)
[a, b, a]
Categories (2, object): [a, b]
With ``allow_fill=True``, indices equal to ``-1`` mean "missing"
values that should be filled with the `fill_value`, which is
``np.nan`` by default.
>>> cat.take([0, -1, -1], allow_fill=True)
[a, NaN, NaN]
Categories (2, object): [a, b]
The fill value can be specified.
>>> cat.take([0, -1, -1], allow_fill=True, fill_value='a')
[a, a, a]
Categories (3, object): [a, b]
Specifying a fill value that's not in ``self.categories``
will raise a ``TypeError``.
]
variable[indexer] assign[=] call[name[np].asarray, parameter[name[indexer]]]
if compare[name[allow_fill] is constant[None]] begin[:]
if call[compare[name[indexer] less[<] constant[0]].any, parameter[]] begin[:]
call[name[warn], parameter[name[_take_msg], name[FutureWarning]]]
variable[allow_fill] assign[=] constant[True]
variable[dtype] assign[=] name[self].dtype
if call[name[isna], parameter[name[fill_value]]] begin[:]
variable[fill_value] assign[=] <ast.UnaryOp object at 0x7da18c4cdcf0>
variable[codes] assign[=] call[name[take], parameter[name[self]._codes, name[indexer]]]
variable[result] assign[=] call[call[name[type], parameter[name[self]]].from_codes, parameter[name[codes]]]
return[name[result]] | keyword[def] identifier[take_nd] ( identifier[self] , identifier[indexer] , identifier[allow_fill] = keyword[None] , identifier[fill_value] = keyword[None] ):
literal[string]
identifier[indexer] = identifier[np] . identifier[asarray] ( identifier[indexer] , identifier[dtype] = identifier[np] . identifier[intp] )
keyword[if] identifier[allow_fill] keyword[is] keyword[None] :
keyword[if] ( identifier[indexer] < literal[int] ). identifier[any] ():
identifier[warn] ( identifier[_take_msg] , identifier[FutureWarning] , identifier[stacklevel] = literal[int] )
identifier[allow_fill] = keyword[True]
identifier[dtype] = identifier[self] . identifier[dtype]
keyword[if] identifier[isna] ( identifier[fill_value] ):
identifier[fill_value] =- literal[int]
keyword[elif] identifier[allow_fill] :
keyword[if] identifier[fill_value] keyword[in] identifier[self] . identifier[categories] :
identifier[fill_value] = identifier[self] . identifier[categories] . identifier[get_loc] ( identifier[fill_value] )
keyword[else] :
identifier[msg] =(
literal[string]
literal[string]
)
keyword[raise] identifier[TypeError] ( identifier[msg] . identifier[format] ( identifier[fill_value] ))
identifier[codes] = identifier[take] ( identifier[self] . identifier[_codes] , identifier[indexer] , identifier[allow_fill] = identifier[allow_fill] ,
identifier[fill_value] = identifier[fill_value] )
identifier[result] = identifier[type] ( identifier[self] ). identifier[from_codes] ( identifier[codes] , identifier[dtype] = identifier[dtype] )
keyword[return] identifier[result] | def take_nd(self, indexer, allow_fill=None, fill_value=None):
"""
Take elements from the Categorical.
Parameters
----------
indexer : sequence of int
The indices in `self` to take. The meaning of negative values in
`indexer` depends on the value of `allow_fill`.
allow_fill : bool, default None
How to handle negative values in `indexer`.
* False: negative values in `indices` indicate positional indices
from the right. This is similar to
:func:`numpy.take`.
* True: negative values in `indices` indicate missing values
(the default). These values are set to `fill_value`. Any other
other negative values raise a ``ValueError``.
.. versionchanged:: 0.23.0
Deprecated the default value of `allow_fill`. The deprecated
default is ``True``. In the future, this will change to
``False``.
fill_value : object
The value to use for `indices` that are missing (-1), when
``allow_fill=True``. This should be the category, i.e. a value
in ``self.categories``, not a code.
Returns
-------
Categorical
This Categorical will have the same categories and ordered as
`self`.
See Also
--------
Series.take : Similar method for Series.
numpy.ndarray.take : Similar method for NumPy arrays.
Examples
--------
>>> cat = pd.Categorical(['a', 'a', 'b'])
>>> cat
[a, a, b]
Categories (2, object): [a, b]
Specify ``allow_fill==False`` to have negative indices mean indexing
from the right.
>>> cat.take([0, -1, -2], allow_fill=False)
[a, b, a]
Categories (2, object): [a, b]
With ``allow_fill=True``, indices equal to ``-1`` mean "missing"
values that should be filled with the `fill_value`, which is
``np.nan`` by default.
>>> cat.take([0, -1, -1], allow_fill=True)
[a, NaN, NaN]
Categories (2, object): [a, b]
The fill value can be specified.
>>> cat.take([0, -1, -1], allow_fill=True, fill_value='a')
[a, a, a]
Categories (3, object): [a, b]
Specifying a fill value that's not in ``self.categories``
will raise a ``TypeError``.
"""
indexer = np.asarray(indexer, dtype=np.intp)
if allow_fill is None:
if (indexer < 0).any():
warn(_take_msg, FutureWarning, stacklevel=2)
allow_fill = True # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['allow_fill']]
dtype = self.dtype
if isna(fill_value):
fill_value = -1 # depends on [control=['if'], data=[]]
elif allow_fill:
# convert user-provided `fill_value` to codes
if fill_value in self.categories:
fill_value = self.categories.get_loc(fill_value) # depends on [control=['if'], data=['fill_value']]
else:
msg = "'fill_value' ('{}') is not in this Categorical's categories."
raise TypeError(msg.format(fill_value)) # depends on [control=['if'], data=[]]
codes = take(self._codes, indexer, allow_fill=allow_fill, fill_value=fill_value)
result = type(self).from_codes(codes, dtype=dtype)
return result |
def rectangle(left, top, width, height, filled=False, thickness=1):
    """
    Returns a generator that produces (x, y) tuples for a rectangle.
    The `left` and `top` arguments are the x and y coordinates for the topleft corner of the square.
    If `filled` is `True`, the interior points are also returned.
    NOTE: The `thickness` argument is not yet implemented.
    >>> list(rectangle(0, 0, 10, 4))
    [(0, 0), (1, 0), (2, 0), (3, 0), (4, 0), (5, 0), (6, 0), (7, 0), (8, 0), (9, 0), (9, 1), (9, 2), (9, 3), (8, 3), (7, 3), (6, 3), (5, 3), (4, 3), (3, 3), (2, 3), (1, 3), (0, 3), (0, 2), (0, 1)]
    >>> drawPoints(rectangle(0, 0, 10, 4))
    OOOOOOOOOO
    O,,,,,,,,O
    O,,,,,,,,O
    OOOOOOOOOO
    >>> drawPoints(rectangle(0, 0, 10, 4, filled=True))
    OOOOOOOOOO
    OOOOOOOOOO
    OOOOOOOOOO
    OOOOOOOOOO
    """
    # Points are generated directly (no call to line()) for performance.
    if thickness != 1:
        raise NotImplementedError('The pybresenham module is under development and the filled, thickness, and endcap parameters are not implemented. You can contribute at https://github.com/asweigart/pybresenham')

    # Validate and normalize the arguments to ints.
    for arg in (left, top, width, height):
        _checkForIntOrFloat(arg)
    left, top, width, height = int(left), int(top), int(width), int(height)
    if width < 1 or height < 1:
        raise PyBresenhamException('width and height must be positive integers')

    if filled:
        # Emit every interior point, row by row.
        for row in range(top, top + height):
            for col in range(left, left + width):
                yield (col, row)
        return

    # Border only. Each side omits one corner so no coordinate repeats:
    # the walk goes clockwise starting at the top-left corner.
    right = left + width - 1
    bottom = top + height - 1
    # Top edge, left to right (excluding the top-right corner).
    for col in range(left, right):
        yield (col, top)
    # Right edge, top to bottom (excluding the bottom-right corner).
    for row in range(top, bottom):
        yield (right, row)
    # Bottom edge, right to left (excluding the bottom-left corner).
    for col in range(right, left, -1):
        yield (col, bottom)
    # Left edge, bottom to top (excluding the top-left corner).
    for row in range(bottom, top, -1):
        yield (left, row)
constant[
Returns a generator that produces (x, y) tuples for a rectangle.
The `left` and `top` arguments are the x and y coordinates for the topleft corner of the square.
If `filled` is `True`, the interior points are also returned.
NOTE: The `thickness` argument is not yet implemented.
>>> list(rectangle(0, 0, 10, 4))
[(0, 0), (1, 0), (2, 0), (3, 0), (4, 0), (5, 0), (6, 0), (7, 0), (8, 0), (9, 0), (9, 1), (9, 2), (9, 3), (8, 3), (7, 3), (6, 3), (5, 3), (4, 3), (3, 3), (2, 3), (1, 3), (0, 3), (0, 2), (0, 1)]
>>> drawPoints(rectangle(0, 0, 10, 4))
OOOOOOOOOO
O,,,,,,,,O
O,,,,,,,,O
OOOOOOOOOO
>>> drawPoints(rectangle(0, 0, 10, 4, filled=True))
OOOOOOOOOO
OOOOOOOOOO
OOOOOOOOOO
OOOOOOOOOO
]
if compare[name[thickness] not_equal[!=] constant[1]] begin[:]
<ast.Raise object at 0x7da20c992200>
call[name[_checkForIntOrFloat], parameter[name[left]]]
call[name[_checkForIntOrFloat], parameter[name[top]]]
call[name[_checkForIntOrFloat], parameter[name[width]]]
call[name[_checkForIntOrFloat], parameter[name[height]]]
<ast.Tuple object at 0x7da20c991420> assign[=] tuple[[<ast.Call object at 0x7da20c9924a0>, <ast.Call object at 0x7da20c991060>, <ast.Call object at 0x7da20c9917b0>, <ast.Call object at 0x7da20c991cf0>]]
if <ast.BoolOp object at 0x7da20c991d50> begin[:]
<ast.Raise object at 0x7da20c991600>
if name[filled] begin[:]
for taget[name[y]] in starred[call[name[range], parameter[name[top], binary_operation[name[top] + name[height]]]]] begin[:]
for taget[name[x]] in starred[call[name[range], parameter[name[left], binary_operation[name[left] + name[width]]]]] begin[:]
<ast.Yield object at 0x7da20c990280> | keyword[def] identifier[rectangle] ( identifier[left] , identifier[top] , identifier[width] , identifier[height] , identifier[filled] = keyword[False] , identifier[thickness] = literal[int] ):
literal[string]
keyword[if] identifier[thickness] != literal[int] :
keyword[raise] identifier[NotImplementedError] ( literal[string] )
identifier[_checkForIntOrFloat] ( identifier[left] )
identifier[_checkForIntOrFloat] ( identifier[top] )
identifier[_checkForIntOrFloat] ( identifier[width] )
identifier[_checkForIntOrFloat] ( identifier[height] )
identifier[left] , identifier[top] , identifier[width] , identifier[height] = identifier[int] ( identifier[left] ), identifier[int] ( identifier[top] ), identifier[int] ( identifier[width] ), identifier[int] ( identifier[height] )
keyword[if] identifier[width] < literal[int] keyword[or] identifier[height] < literal[int] :
keyword[raise] identifier[PyBresenhamException] ( literal[string] )
keyword[if] identifier[filled] :
keyword[for] identifier[y] keyword[in] identifier[range] ( identifier[top] , identifier[top] + identifier[height] ):
keyword[for] identifier[x] keyword[in] identifier[range] ( identifier[left] , identifier[left] + identifier[width] ):
keyword[yield] ( identifier[x] , identifier[y] )
keyword[else] :
identifier[y] = identifier[top]
keyword[for] identifier[x] keyword[in] identifier[range] ( identifier[left] , identifier[left] + identifier[width] - literal[int] ):
keyword[yield] ( identifier[x] , identifier[y] )
identifier[x] = identifier[left] + identifier[width] - literal[int]
keyword[for] identifier[y] keyword[in] identifier[range] ( identifier[top] , identifier[top] + identifier[height] - literal[int] ):
keyword[yield] ( identifier[x] , identifier[y] )
identifier[y] = identifier[top] + identifier[height] - literal[int]
keyword[for] identifier[x] keyword[in] identifier[range] ( identifier[left] + identifier[width] - literal[int] , identifier[left] ,- literal[int] ):
keyword[yield] ( identifier[x] , identifier[y] )
identifier[x] = identifier[left]
keyword[for] identifier[y] keyword[in] identifier[range] ( identifier[top] + identifier[height] - literal[int] , identifier[top] ,- literal[int] ):
keyword[yield] ( identifier[x] , identifier[y] ) | def rectangle(left, top, width, height, filled=False, thickness=1):
"""
Returns a generator that produces (x, y) tuples for a rectangle.
The `left` and `top` arguments are the x and y coordinates for the topleft corner of the square.
If `filled` is `True`, the interior points are also returned.
NOTE: The `thickness` argument is not yet implemented.
>>> list(rectangle(0, 0, 10, 4))
[(0, 0), (1, 0), (2, 0), (3, 0), (4, 0), (5, 0), (6, 0), (7, 0), (8, 0), (9, 0), (9, 1), (9, 2), (9, 3), (8, 3), (7, 3), (6, 3), (5, 3), (4, 3), (3, 3), (2, 3), (1, 3), (0, 3), (0, 2), (0, 1)]
>>> drawPoints(rectangle(0, 0, 10, 4))
OOOOOOOOOO
O,,,,,,,,O
O,,,,,,,,O
OOOOOOOOOO
>>> drawPoints(rectangle(0, 0, 10, 4, filled=True))
OOOOOOOOOO
OOOOOOOOOO
OOOOOOOOOO
OOOOOOOOOO
"""
# Note: For perfomance, this function does not rely on line() to generate its points.
if thickness != 1:
# TODO - should the original left and top be for the thick border, or should thick borders go to the left and above of the left and top coordinates?
raise NotImplementedError('The pybresenham module is under development and the filled, thickness, and endcap parameters are not implemented. You can contribute at https://github.com/asweigart/pybresenham') # depends on [control=['if'], data=[]]
# Validate arguments
_checkForIntOrFloat(left)
_checkForIntOrFloat(top)
_checkForIntOrFloat(width)
_checkForIntOrFloat(height)
(left, top, width, height) = (int(left), int(top), int(width), int(height))
if width < 1 or height < 1:
raise PyBresenhamException('width and height must be positive integers') # depends on [control=['if'], data=[]]
# Generate all the points.
if filled:
for y in range(top, top + height):
for x in range(left, left + width):
yield (x, y) # depends on [control=['for'], data=['x']] # depends on [control=['for'], data=['y']] # depends on [control=['if'], data=[]]
else:
# Note: The `- 1` adjustments here are to prevent duplicate coordinates of the corners being returned.
# Top side.
y = top
for x in range(left, left + width - 1):
yield (x, y) # depends on [control=['for'], data=['x']]
# Right side.
x = left + width - 1
for y in range(top, top + height - 1):
yield (x, y) # depends on [control=['for'], data=['y']]
# Bottom side.
y = top + height - 1
for x in range(left + width - 1, left, -1):
yield (x, y) # depends on [control=['for'], data=['x']]
# Left side.
x = left
for y in range(top + height - 1, top, -1):
yield (x, y) # depends on [control=['for'], data=['y']] |
def _mine_flush(self, load, skip_verify=False):
'''
Allow the minion to delete all of its own mine contents
'''
if not skip_verify and 'id' not in load:
return False
if self.opts.get('minion_data_cache', False) or self.opts.get('enforce_mine_cache', False):
return self.cache.flush('minions/{0}'.format(load['id']), 'mine')
return True | def function[_mine_flush, parameter[self, load, skip_verify]]:
constant[
Allow the minion to delete all of its own mine contents
]
if <ast.BoolOp object at 0x7da207f03850> begin[:]
return[constant[False]]
if <ast.BoolOp object at 0x7da207f01150> begin[:]
return[call[name[self].cache.flush, parameter[call[constant[minions/{0}].format, parameter[call[name[load]][constant[id]]]], constant[mine]]]]
return[constant[True]] | keyword[def] identifier[_mine_flush] ( identifier[self] , identifier[load] , identifier[skip_verify] = keyword[False] ):
literal[string]
keyword[if] keyword[not] identifier[skip_verify] keyword[and] literal[string] keyword[not] keyword[in] identifier[load] :
keyword[return] keyword[False]
keyword[if] identifier[self] . identifier[opts] . identifier[get] ( literal[string] , keyword[False] ) keyword[or] identifier[self] . identifier[opts] . identifier[get] ( literal[string] , keyword[False] ):
keyword[return] identifier[self] . identifier[cache] . identifier[flush] ( literal[string] . identifier[format] ( identifier[load] [ literal[string] ]), literal[string] )
keyword[return] keyword[True] | def _mine_flush(self, load, skip_verify=False):
"""
Allow the minion to delete all of its own mine contents
"""
if not skip_verify and 'id' not in load:
return False # depends on [control=['if'], data=[]]
if self.opts.get('minion_data_cache', False) or self.opts.get('enforce_mine_cache', False):
return self.cache.flush('minions/{0}'.format(load['id']), 'mine') # depends on [control=['if'], data=[]]
return True |
def add_bookmark(self, name, time, chan=''):
    """Add a new bookmark under the currently selected rater.
    Parameters
    ----------
    name : str
        name of the bookmark
    time : (float, float)
        float with start and end time in s
    chan : str or sequence of str
        channel name(s); a sequence is joined with ', '
    Raises
    ------
    IndexError
        When there is no selected rater
    """
    # self.rater is None when no rater is selected; .find then raises
    # AttributeError, which we surface as an IndexError for callers.
    try:
        container = self.rater.find('bookmarks')
    except AttributeError:
        raise IndexError('You need to have at least one rater')

    marker = SubElement(container, 'bookmark')
    SubElement(marker, 'bookmark_name').text = name
    SubElement(marker, 'bookmark_start').text = str(time[0])
    SubElement(marker, 'bookmark_end').text = str(time[1])

    # Normalize a channel list/tuple into a single comma-separated string.
    if isinstance(chan, (tuple, list)):
        chan = ', '.join(chan)
    SubElement(marker, 'bookmark_chan').text = chan

    self.save()
self.save() | def function[add_bookmark, parameter[self, name, time, chan]]:
constant[Add a new bookmark
Parameters
----------
name : str
name of the bookmark
time : (float, float)
float with start and end time in s
Raises
------
IndexError
When there is no selected rater
]
<ast.Try object at 0x7da1b0d75a20>
variable[new_bookmark] assign[=] call[name[SubElement], parameter[name[bookmarks], constant[bookmark]]]
variable[bookmark_name] assign[=] call[name[SubElement], parameter[name[new_bookmark], constant[bookmark_name]]]
name[bookmark_name].text assign[=] name[name]
variable[bookmark_time] assign[=] call[name[SubElement], parameter[name[new_bookmark], constant[bookmark_start]]]
name[bookmark_time].text assign[=] call[name[str], parameter[call[name[time]][constant[0]]]]
variable[bookmark_time] assign[=] call[name[SubElement], parameter[name[new_bookmark], constant[bookmark_end]]]
name[bookmark_time].text assign[=] call[name[str], parameter[call[name[time]][constant[1]]]]
if call[name[isinstance], parameter[name[chan], tuple[[<ast.Name object at 0x7da1b0d758d0>, <ast.Name object at 0x7da1b0d772b0>]]]] begin[:]
variable[chan] assign[=] call[constant[, ].join, parameter[name[chan]]]
variable[event_chan] assign[=] call[name[SubElement], parameter[name[new_bookmark], constant[bookmark_chan]]]
name[event_chan].text assign[=] name[chan]
call[name[self].save, parameter[]] | keyword[def] identifier[add_bookmark] ( identifier[self] , identifier[name] , identifier[time] , identifier[chan] = literal[string] ):
literal[string]
keyword[try] :
identifier[bookmarks] = identifier[self] . identifier[rater] . identifier[find] ( literal[string] )
keyword[except] identifier[AttributeError] :
keyword[raise] identifier[IndexError] ( literal[string] )
identifier[new_bookmark] = identifier[SubElement] ( identifier[bookmarks] , literal[string] )
identifier[bookmark_name] = identifier[SubElement] ( identifier[new_bookmark] , literal[string] )
identifier[bookmark_name] . identifier[text] = identifier[name]
identifier[bookmark_time] = identifier[SubElement] ( identifier[new_bookmark] , literal[string] )
identifier[bookmark_time] . identifier[text] = identifier[str] ( identifier[time] [ literal[int] ])
identifier[bookmark_time] = identifier[SubElement] ( identifier[new_bookmark] , literal[string] )
identifier[bookmark_time] . identifier[text] = identifier[str] ( identifier[time] [ literal[int] ])
keyword[if] identifier[isinstance] ( identifier[chan] ,( identifier[tuple] , identifier[list] )):
identifier[chan] = literal[string] . identifier[join] ( identifier[chan] )
identifier[event_chan] = identifier[SubElement] ( identifier[new_bookmark] , literal[string] )
identifier[event_chan] . identifier[text] = identifier[chan]
identifier[self] . identifier[save] () | def add_bookmark(self, name, time, chan=''):
"""Add a new bookmark
Parameters
----------
name : str
name of the bookmark
time : (float, float)
float with start and end time in s
Raises
------
IndexError
When there is no selected rater
"""
try:
bookmarks = self.rater.find('bookmarks') # depends on [control=['try'], data=[]]
except AttributeError:
raise IndexError('You need to have at least one rater') # depends on [control=['except'], data=[]]
new_bookmark = SubElement(bookmarks, 'bookmark')
bookmark_name = SubElement(new_bookmark, 'bookmark_name')
bookmark_name.text = name
bookmark_time = SubElement(new_bookmark, 'bookmark_start')
bookmark_time.text = str(time[0])
bookmark_time = SubElement(new_bookmark, 'bookmark_end')
bookmark_time.text = str(time[1])
if isinstance(chan, (tuple, list)):
chan = ', '.join(chan) # depends on [control=['if'], data=[]]
event_chan = SubElement(new_bookmark, 'bookmark_chan')
event_chan.text = chan
self.save() |
def traits(args):
    """
    %prog traits directory

    Make HTML page that reports eye and skin color.
    """
    p = OptionParser(traits.__doc__)
    opts, args = p.parse_args(args)
    if len(args) < 1:
        sys.exit(not p.print_help())

    samples = []
    for folder in args:
        targets = iglob(folder, "*-traits.json")
        if not targets:
            continue
        filename = targets[0]
        # Close the JSON file deterministically instead of leaking the handle.
        with open(filename) as fp:
            js = json.load(fp)
        # Convert LAB color coordinates into displayable RGB values.
        js["skin_rgb"] = make_rgb(
            js["traits"]["skin-color"]["L"],
            js["traits"]["skin-color"]["A"],
            js["traits"]["skin-color"]["B"])
        js["eye_rgb"] = make_rgb(
            js["traits"]["eye-color"]["L"],
            js["traits"]["eye-color"]["A"],
            js["traits"]["eye-color"]["B"])
        samples.append(js)

    template = Template(traits_template)
    # Use a context manager so the report is flushed and closed even on error.
    with open("report.html", "w") as fw:
        print(template.render(samples=samples), file=fw)
    logging.debug("Report written to `{}`".format(fw.name))
constant[
%prog traits directory
Make HTML page that reports eye and skin color.
]
variable[p] assign[=] call[name[OptionParser], parameter[name[traits].__doc__]]
<ast.Tuple object at 0x7da20e954df0> assign[=] call[name[p].parse_args, parameter[name[args]]]
if compare[call[name[len], parameter[name[args]]] less[<] constant[1]] begin[:]
call[name[sys].exit, parameter[<ast.UnaryOp object at 0x7da20e9572b0>]]
variable[samples] assign[=] list[[]]
for taget[name[folder]] in starred[name[args]] begin[:]
variable[targets] assign[=] call[name[iglob], parameter[name[folder], constant[*-traits.json]]]
if <ast.UnaryOp object at 0x7da20e9563e0> begin[:]
continue
variable[filename] assign[=] call[name[targets]][constant[0]]
variable[js] assign[=] call[name[json].load, parameter[call[name[open], parameter[name[filename]]]]]
call[name[js]][constant[skin_rgb]] assign[=] call[name[make_rgb], parameter[call[call[call[name[js]][constant[traits]]][constant[skin-color]]][constant[L]], call[call[call[name[js]][constant[traits]]][constant[skin-color]]][constant[A]], call[call[call[name[js]][constant[traits]]][constant[skin-color]]][constant[B]]]]
call[name[js]][constant[eye_rgb]] assign[=] call[name[make_rgb], parameter[call[call[call[name[js]][constant[traits]]][constant[eye-color]]][constant[L]], call[call[call[name[js]][constant[traits]]][constant[eye-color]]][constant[A]], call[call[call[name[js]][constant[traits]]][constant[eye-color]]][constant[B]]]]
call[name[samples].append, parameter[name[js]]]
variable[template] assign[=] call[name[Template], parameter[name[traits_template]]]
variable[fw] assign[=] call[name[open], parameter[constant[report.html], constant[w]]]
call[name[print], parameter[call[name[template].render, parameter[]]]]
call[name[logging].debug, parameter[call[constant[Report written to `{}`].format, parameter[name[fw].name]]]]
call[name[fw].close, parameter[]] | keyword[def] identifier[traits] ( identifier[args] ):
literal[string]
identifier[p] = identifier[OptionParser] ( identifier[traits] . identifier[__doc__] )
identifier[opts] , identifier[args] = identifier[p] . identifier[parse_args] ( identifier[args] )
keyword[if] identifier[len] ( identifier[args] )< literal[int] :
identifier[sys] . identifier[exit] ( keyword[not] identifier[p] . identifier[print_help] ())
identifier[samples] =[]
keyword[for] identifier[folder] keyword[in] identifier[args] :
identifier[targets] = identifier[iglob] ( identifier[folder] , literal[string] )
keyword[if] keyword[not] identifier[targets] :
keyword[continue]
identifier[filename] = identifier[targets] [ literal[int] ]
identifier[js] = identifier[json] . identifier[load] ( identifier[open] ( identifier[filename] ))
identifier[js] [ literal[string] ]= identifier[make_rgb] (
identifier[js] [ literal[string] ][ literal[string] ][ literal[string] ],
identifier[js] [ literal[string] ][ literal[string] ][ literal[string] ],
identifier[js] [ literal[string] ][ literal[string] ][ literal[string] ])
identifier[js] [ literal[string] ]= identifier[make_rgb] (
identifier[js] [ literal[string] ][ literal[string] ][ literal[string] ],
identifier[js] [ literal[string] ][ literal[string] ][ literal[string] ],
identifier[js] [ literal[string] ][ literal[string] ][ literal[string] ])
identifier[samples] . identifier[append] ( identifier[js] )
identifier[template] = identifier[Template] ( identifier[traits_template] )
identifier[fw] = identifier[open] ( literal[string] , literal[string] )
identifier[print] ( identifier[template] . identifier[render] ( identifier[samples] = identifier[samples] ), identifier[file] = identifier[fw] )
identifier[logging] . identifier[debug] ( literal[string] . identifier[format] ( identifier[fw] . identifier[name] ))
identifier[fw] . identifier[close] () | def traits(args):
"""
%prog traits directory
Make HTML page that reports eye and skin color.
"""
p = OptionParser(traits.__doc__)
(opts, args) = p.parse_args(args)
if len(args) < 1:
sys.exit(not p.print_help()) # depends on [control=['if'], data=[]]
samples = []
for folder in args:
targets = iglob(folder, '*-traits.json')
if not targets:
continue # depends on [control=['if'], data=[]]
filename = targets[0]
js = json.load(open(filename))
js['skin_rgb'] = make_rgb(js['traits']['skin-color']['L'], js['traits']['skin-color']['A'], js['traits']['skin-color']['B'])
js['eye_rgb'] = make_rgb(js['traits']['eye-color']['L'], js['traits']['eye-color']['A'], js['traits']['eye-color']['B'])
samples.append(js) # depends on [control=['for'], data=['folder']]
template = Template(traits_template)
fw = open('report.html', 'w')
print(template.render(samples=samples), file=fw)
logging.debug('Report written to `{}`'.format(fw.name))
fw.close() |
async def state(gc: GroupControl):
    """Current group state."""
    # Query the device once and report it in both short and verbose form.
    current = await gc.state()
    click.echo(current)
    click.echo("Full state info: %s" % repr(current))
literal[string]
identifier[state] = keyword[await] identifier[gc] . identifier[state] ()
identifier[click] . identifier[echo] ( identifier[state] )
identifier[click] . identifier[echo] ( literal[string] % identifier[repr] ( identifier[state] )) | async def state(gc: GroupControl):
"""Current group state."""
state = await gc.state()
click.echo(state)
click.echo('Full state info: %s' % repr(state)) |
def ProduceEventSource(self, event_source):
"""Produces an event source.
Args:
event_source (EventSource): an event source.
Raises:
RuntimeError: when storage writer is not set.
"""
if not self._storage_writer:
raise RuntimeError('Storage writer not set.')
self._storage_writer.AddEventSource(event_source)
self._number_of_event_sources += 1
self.last_activity_timestamp = time.time() | def function[ProduceEventSource, parameter[self, event_source]]:
constant[Produces an event source.
Args:
event_source (EventSource): an event source.
Raises:
RuntimeError: when storage writer is not set.
]
if <ast.UnaryOp object at 0x7da1b1d75720> begin[:]
<ast.Raise object at 0x7da1b1d75750>
call[name[self]._storage_writer.AddEventSource, parameter[name[event_source]]]
<ast.AugAssign object at 0x7da1b1d766e0>
name[self].last_activity_timestamp assign[=] call[name[time].time, parameter[]] | keyword[def] identifier[ProduceEventSource] ( identifier[self] , identifier[event_source] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[_storage_writer] :
keyword[raise] identifier[RuntimeError] ( literal[string] )
identifier[self] . identifier[_storage_writer] . identifier[AddEventSource] ( identifier[event_source] )
identifier[self] . identifier[_number_of_event_sources] += literal[int]
identifier[self] . identifier[last_activity_timestamp] = identifier[time] . identifier[time] () | def ProduceEventSource(self, event_source):
"""Produces an event source.
Args:
event_source (EventSource): an event source.
Raises:
RuntimeError: when storage writer is not set.
"""
if not self._storage_writer:
raise RuntimeError('Storage writer not set.') # depends on [control=['if'], data=[]]
self._storage_writer.AddEventSource(event_source)
self._number_of_event_sources += 1
self.last_activity_timestamp = time.time() |
def LDRSB(self, params):
"""
LDRSB Ra, [Rb, Rc]
Load a byte from memory, sign extend, and put into Ra
Ra, Rb, and Rc must be low registers
"""
# TODO LDRSB cant use immediates
Ra, Rb, Rc = self.get_three_parameters(self.THREE_PARAMETER_WITH_BRACKETS, params)
self.check_arguments(low_registers=(Ra, Rb, Rc))
def LDRSB_func():
# TODO does memory read up?
self.register[Ra] = 0
self.register[Ra] |= self.memory[self.register[Rb] + self.register[Rc]]
if self.register[Ra] & (1 << 7):
self.register[Ra] |= (0xFFFFFF << 8)
return LDRSB_func | def function[LDRSB, parameter[self, params]]:
constant[
LDRSB Ra, [Rb, Rc]
Load a byte from memory, sign extend, and put into Ra
Ra, Rb, and Rc must be low registers
]
<ast.Tuple object at 0x7da20c992260> assign[=] call[name[self].get_three_parameters, parameter[name[self].THREE_PARAMETER_WITH_BRACKETS, name[params]]]
call[name[self].check_arguments, parameter[]]
def function[LDRSB_func, parameter[]]:
call[name[self].register][name[Ra]] assign[=] constant[0]
<ast.AugAssign object at 0x7da20c991180>
if binary_operation[call[name[self].register][name[Ra]] <ast.BitAnd object at 0x7da2590d6b60> binary_operation[constant[1] <ast.LShift object at 0x7da2590d69e0> constant[7]]] begin[:]
<ast.AugAssign object at 0x7da20c990250>
return[name[LDRSB_func]] | keyword[def] identifier[LDRSB] ( identifier[self] , identifier[params] ):
literal[string]
identifier[Ra] , identifier[Rb] , identifier[Rc] = identifier[self] . identifier[get_three_parameters] ( identifier[self] . identifier[THREE_PARAMETER_WITH_BRACKETS] , identifier[params] )
identifier[self] . identifier[check_arguments] ( identifier[low_registers] =( identifier[Ra] , identifier[Rb] , identifier[Rc] ))
keyword[def] identifier[LDRSB_func] ():
identifier[self] . identifier[register] [ identifier[Ra] ]= literal[int]
identifier[self] . identifier[register] [ identifier[Ra] ]|= identifier[self] . identifier[memory] [ identifier[self] . identifier[register] [ identifier[Rb] ]+ identifier[self] . identifier[register] [ identifier[Rc] ]]
keyword[if] identifier[self] . identifier[register] [ identifier[Ra] ]&( literal[int] << literal[int] ):
identifier[self] . identifier[register] [ identifier[Ra] ]|=( literal[int] << literal[int] )
keyword[return] identifier[LDRSB_func] | def LDRSB(self, params):
"""
LDRSB Ra, [Rb, Rc]
Load a byte from memory, sign extend, and put into Ra
Ra, Rb, and Rc must be low registers
"""
# TODO LDRSB cant use immediates
(Ra, Rb, Rc) = self.get_three_parameters(self.THREE_PARAMETER_WITH_BRACKETS, params)
self.check_arguments(low_registers=(Ra, Rb, Rc))
def LDRSB_func():
# TODO does memory read up?
self.register[Ra] = 0
self.register[Ra] |= self.memory[self.register[Rb] + self.register[Rc]]
if self.register[Ra] & 1 << 7:
self.register[Ra] |= 16777215 << 8 # depends on [control=['if'], data=[]]
return LDRSB_func |
def _maybe_parse_macro(self):
    """Try to parse a macro (%scope/name)."""
    if self._current_token.value != '%':
        return False, None

    # Remember where the macro reference starts for error reporting.
    macro_location = self._current_location()
    self._advance_one_token()
    name = self._parse_selector(allow_periods_in_scope=True)
    with utils.try_with_location(macro_location):
        resolved = self._delegate.macro(name)
    return True, resolved
constant[Try to parse an macro (%scope/name).]
if compare[name[self]._current_token.value not_equal[!=] constant[%]] begin[:]
return[tuple[[<ast.Constant object at 0x7da1b0314e80>, <ast.Constant object at 0x7da1b03166e0>]]]
variable[location] assign[=] call[name[self]._current_location, parameter[]]
call[name[self]._advance_one_token, parameter[]]
variable[scoped_name] assign[=] call[name[self]._parse_selector, parameter[]]
with call[name[utils].try_with_location, parameter[name[location]]] begin[:]
variable[macro] assign[=] call[name[self]._delegate.macro, parameter[name[scoped_name]]]
return[tuple[[<ast.Constant object at 0x7da1b0315660>, <ast.Name object at 0x7da1b0314e50>]]] | keyword[def] identifier[_maybe_parse_macro] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[_current_token] . identifier[value] != literal[string] :
keyword[return] keyword[False] , keyword[None]
identifier[location] = identifier[self] . identifier[_current_location] ()
identifier[self] . identifier[_advance_one_token] ()
identifier[scoped_name] = identifier[self] . identifier[_parse_selector] ( identifier[allow_periods_in_scope] = keyword[True] )
keyword[with] identifier[utils] . identifier[try_with_location] ( identifier[location] ):
identifier[macro] = identifier[self] . identifier[_delegate] . identifier[macro] ( identifier[scoped_name] )
keyword[return] keyword[True] , identifier[macro] | def _maybe_parse_macro(self):
"""Try to parse an macro (%scope/name)."""
if self._current_token.value != '%':
return (False, None) # depends on [control=['if'], data=[]]
location = self._current_location()
self._advance_one_token()
scoped_name = self._parse_selector(allow_periods_in_scope=True)
with utils.try_with_location(location):
macro = self._delegate.macro(scoped_name) # depends on [control=['with'], data=[]]
return (True, macro) |
def uvw(self, context):
""" Supply UVW antenna coordinates to montblanc """
# Shape (ntime, na, 3)
(lt, ut), (la, ua), (l, u) = context.array_extents(context.name)
# Create empty UVW coordinates
data = np.empty(context.shape, context.dtype)
data[:,:,0] = np.arange(la+1, ua+1) # U = antenna index
data[:,:,1] = 0 # V = 0
data[:,:,2] = 0 # W = 0
return data | def function[uvw, parameter[self, context]]:
constant[ Supply UVW antenna coordinates to montblanc ]
<ast.Tuple object at 0x7da1b0f99e40> assign[=] call[name[context].array_extents, parameter[name[context].name]]
variable[data] assign[=] call[name[np].empty, parameter[name[context].shape, name[context].dtype]]
call[name[data]][tuple[[<ast.Slice object at 0x7da1b0f9a110>, <ast.Slice object at 0x7da1b0f992a0>, <ast.Constant object at 0x7da1b0f99810>]]] assign[=] call[name[np].arange, parameter[binary_operation[name[la] + constant[1]], binary_operation[name[ua] + constant[1]]]]
call[name[data]][tuple[[<ast.Slice object at 0x7da1b0f99a20>, <ast.Slice object at 0x7da1b0f99ae0>, <ast.Constant object at 0x7da1b0f990c0>]]] assign[=] constant[0]
call[name[data]][tuple[[<ast.Slice object at 0x7da1b0f9a3e0>, <ast.Slice object at 0x7da1b0f9abc0>, <ast.Constant object at 0x7da1b0f9a500>]]] assign[=] constant[0]
return[name[data]] | keyword[def] identifier[uvw] ( identifier[self] , identifier[context] ):
literal[string]
( identifier[lt] , identifier[ut] ),( identifier[la] , identifier[ua] ),( identifier[l] , identifier[u] )= identifier[context] . identifier[array_extents] ( identifier[context] . identifier[name] )
identifier[data] = identifier[np] . identifier[empty] ( identifier[context] . identifier[shape] , identifier[context] . identifier[dtype] )
identifier[data] [:,:, literal[int] ]= identifier[np] . identifier[arange] ( identifier[la] + literal[int] , identifier[ua] + literal[int] )
identifier[data] [:,:, literal[int] ]= literal[int]
identifier[data] [:,:, literal[int] ]= literal[int]
keyword[return] identifier[data] | def uvw(self, context):
""" Supply UVW antenna coordinates to montblanc """
# Shape (ntime, na, 3)
((lt, ut), (la, ua), (l, u)) = context.array_extents(context.name)
# Create empty UVW coordinates
data = np.empty(context.shape, context.dtype)
data[:, :, 0] = np.arange(la + 1, ua + 1) # U = antenna index
data[:, :, 1] = 0 # V = 0
data[:, :, 2] = 0 # W = 0
return data |
def pop_prefix(string: str):
    """Split *string* on its prefix separator and return the parts.

    The string is first split on ``Naming.TYPE_PREFIX``; if that separator
    is absent, ``Naming.RESOURCE_PREFIX`` is tried instead.

    :throws IndexError: There is no prefix.
    :return: A list with two elements: 1- the prefix, 2- the type without it.
    """
    result = string.split(Naming.TYPE_PREFIX)
    if len(result) == 1:
        # No type prefix present; fall back to the resource prefix.
        result = string.split(Naming.RESOURCE_PREFIX)
        if len(result) == 1:
            # Neither separator occurs in the string.
            raise IndexError('{!r} contains no known prefix'.format(string))
    return result
constant[Erases the prefix and returns it.
:throws IndexError: There is no prefix.
:return A set with two elements: 1- the prefix, 2- the type without it.
]
variable[result] assign[=] call[name[string].split, parameter[name[Naming].TYPE_PREFIX]]
if compare[call[name[len], parameter[name[result]]] equal[==] constant[1]] begin[:]
variable[result] assign[=] call[name[string].split, parameter[name[Naming].RESOURCE_PREFIX]]
if compare[call[name[len], parameter[name[result]]] equal[==] constant[1]] begin[:]
<ast.Raise object at 0x7da18f720190>
return[name[result]] | keyword[def] identifier[pop_prefix] ( identifier[string] : identifier[str] ):
literal[string]
identifier[result] = identifier[string] . identifier[split] ( identifier[Naming] . identifier[TYPE_PREFIX] )
keyword[if] identifier[len] ( identifier[result] )== literal[int] :
identifier[result] = identifier[string] . identifier[split] ( identifier[Naming] . identifier[RESOURCE_PREFIX] )
keyword[if] identifier[len] ( identifier[result] )== literal[int] :
keyword[raise] identifier[IndexError] ()
keyword[return] identifier[result] | def pop_prefix(string: str):
"""Erases the prefix and returns it.
:throws IndexError: There is no prefix.
:return A set with two elements: 1- the prefix, 2- the type without it.
"""
result = string.split(Naming.TYPE_PREFIX)
if len(result) == 1:
result = string.split(Naming.RESOURCE_PREFIX)
if len(result) == 1:
raise IndexError() # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return result |
def _home_del(self):
""" Deletes the line content before the cursor """
text = self.edit_text[self.edit_pos:]
self.set_edit_text(text)
self._home() | def function[_home_del, parameter[self]]:
constant[ Deletes the line content before the cursor ]
variable[text] assign[=] call[name[self].edit_text][<ast.Slice object at 0x7da20c6c7700>]
call[name[self].set_edit_text, parameter[name[text]]]
call[name[self]._home, parameter[]] | keyword[def] identifier[_home_del] ( identifier[self] ):
literal[string]
identifier[text] = identifier[self] . identifier[edit_text] [ identifier[self] . identifier[edit_pos] :]
identifier[self] . identifier[set_edit_text] ( identifier[text] )
identifier[self] . identifier[_home] () | def _home_del(self):
""" Deletes the line content before the cursor """
text = self.edit_text[self.edit_pos:]
self.set_edit_text(text)
self._home() |
def version_info(self):
"""
Returns API version information for the HMC.
This operation does not require authentication.
Returns:
:term:`HMC API version`: The HMC API version supported by the HMC.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.ConnectionError`
"""
if self._api_version is None:
self.query_api_version()
return self._api_version['api-major-version'],\
self._api_version['api-minor-version'] | def function[version_info, parameter[self]]:
constant[
Returns API version information for the HMC.
This operation does not require authentication.
Returns:
:term:`HMC API version`: The HMC API version supported by the HMC.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.ConnectionError`
]
if compare[name[self]._api_version is constant[None]] begin[:]
call[name[self].query_api_version, parameter[]]
return[tuple[[<ast.Subscript object at 0x7da18f720730>, <ast.Subscript object at 0x7da18f7201f0>]]] | keyword[def] identifier[version_info] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[_api_version] keyword[is] keyword[None] :
identifier[self] . identifier[query_api_version] ()
keyword[return] identifier[self] . identifier[_api_version] [ literal[string] ], identifier[self] . identifier[_api_version] [ literal[string] ] | def version_info(self):
"""
Returns API version information for the HMC.
This operation does not require authentication.
Returns:
:term:`HMC API version`: The HMC API version supported by the HMC.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.ConnectionError`
"""
if self._api_version is None:
self.query_api_version() # depends on [control=['if'], data=[]]
return (self._api_version['api-major-version'], self._api_version['api-minor-version']) |
def set_global_config(path_dict_or_stream):
    '''Set the global configuration.

    Call this from `main()` with a file system path, stream
    object, or a dict.  Calling it repeatedly with the same path is
    safe.  Calling it with a different path or repeatedly with a
    stream or dict requires an explicit call to :func:`clear_global_config`.

    :param path_dict_or_stream: source of configuration

    '''
    path = None
    mapping = None
    stream = None
    global _config_file_path
    global _config_cache
    # `collections.Mapping` was removed from the `collections` top level in
    # Python 3.10; look the ABC up via `collections.abc` when available.
    abc_mapping = getattr(collections, 'abc', collections).Mapping
    if isinstance(path_dict_or_stream, string_types):
        path = path_dict_or_stream
        if _config_file_path and _config_file_path != path:
            raise Exception('set_global_config(%r) differs from %r, '
                            'consider calling clear_global_config first' %
                            (path, _config_file_path))
        _config_file_path = path
        stream = open(path)
    elif isinstance(path_dict_or_stream, abc_mapping):
        mapping = path_dict_or_stream
    elif hasattr(path_dict_or_stream, 'read'):
        stream = path_dict_or_stream
    else:
        raise Exception('set_global_config(%r) instead of a path, '
                        'mapping object, or stream open for reading' %
                        path_dict_or_stream)
    if stream is not None:
        try:
            mapping = yaml.load(stream, Loader)
        finally:
            if path is not None:
                # Only close streams this function opened itself; a stream
                # supplied by the caller remains the caller's responsibility.
                stream.close()
    _config_cache = mapping
    # TODO: convert to frozen dict?
    return _config_cache
constant[Set the global configuration.
Call this from `main()` with a file system path, stream
object, or a dict. Calling it repeatedly with the same path is
safe. Calling it with a different path or repeatedly with a
stream or dict requires an explicit call to :func:`clear_global_config`.
:param path_dict_or_stream: source of configuration
]
variable[path] assign[=] constant[None]
variable[mapping] assign[=] constant[None]
variable[stream] assign[=] constant[None]
<ast.Global object at 0x7da18eb547f0>
<ast.Global object at 0x7da18eb56980>
if call[name[isinstance], parameter[name[path_dict_or_stream], name[string_types]]] begin[:]
variable[path] assign[=] name[path_dict_or_stream]
if <ast.BoolOp object at 0x7da18eb56710> begin[:]
<ast.Raise object at 0x7da18eb54250>
variable[_config_file_path] assign[=] name[path]
variable[stream] assign[=] call[name[open], parameter[name[path]]]
if compare[name[stream] is_not constant[None]] begin[:]
variable[mapping] assign[=] call[name[yaml].load, parameter[name[stream], name[Loader]]]
variable[_config_cache] assign[=] name[mapping]
return[name[_config_cache]] | keyword[def] identifier[set_global_config] ( identifier[path_dict_or_stream] ):
literal[string]
identifier[path] = keyword[None]
identifier[mapping] = keyword[None]
identifier[stream] = keyword[None]
keyword[global] identifier[_config_file_path]
keyword[global] identifier[_config_cache]
keyword[if] identifier[isinstance] ( identifier[path_dict_or_stream] , identifier[string_types] ):
identifier[path] = identifier[path_dict_or_stream]
keyword[if] identifier[_config_file_path] keyword[and] identifier[_config_file_path] != identifier[path] :
keyword[raise] identifier[Exception] ( literal[string]
literal[string] %
( identifier[path] , identifier[_config_file_path] ))
identifier[_config_file_path] = identifier[path]
identifier[stream] = identifier[open] ( identifier[path] )
keyword[elif] identifier[isinstance] ( identifier[path_dict_or_stream] , identifier[collections] . identifier[Mapping] ):
identifier[mapping] = identifier[path_dict_or_stream]
keyword[elif] identifier[hasattr] ( identifier[path_dict_or_stream] , literal[string] ):
identifier[stream] = identifier[path_dict_or_stream]
keyword[else] :
keyword[raise] identifier[Exception] ( literal[string]
literal[string] %
identifier[path_dict_or_stream] )
keyword[if] identifier[stream] keyword[is] keyword[not] keyword[None] :
identifier[mapping] = identifier[yaml] . identifier[load] ( identifier[stream] , identifier[Loader] )
identifier[_config_cache] = identifier[mapping]
keyword[return] identifier[_config_cache] | def set_global_config(path_dict_or_stream):
"""Set the global configuration.
Call this from `main()` with a file system path, stream
object, or a dict. Calling it repeatedly with the same path is
safe. Calling it with a different path or repeatedly with a
stream or dict requires an explicit call to :func:`clear_global_config`.
:param path_dict_or_stream: source of configuration
"""
path = None
mapping = None
stream = None
global _config_file_path
global _config_cache
if isinstance(path_dict_or_stream, string_types):
path = path_dict_or_stream
if _config_file_path and _config_file_path != path:
raise Exception('set_global_config(%r) differs from %r, consider calling clear_global_config first' % (path, _config_file_path)) # depends on [control=['if'], data=[]]
_config_file_path = path
stream = open(path) # depends on [control=['if'], data=[]]
elif isinstance(path_dict_or_stream, collections.Mapping):
mapping = path_dict_or_stream # depends on [control=['if'], data=[]]
elif hasattr(path_dict_or_stream, 'read'):
stream = path_dict_or_stream # depends on [control=['if'], data=[]]
else:
raise Exception('set_global_config(%r) instead of a path, mapping object, or stream open for reading' % path_dict_or_stream)
if stream is not None:
mapping = yaml.load(stream, Loader) # depends on [control=['if'], data=['stream']]
_config_cache = mapping
# TODO: convert to frozen dict?
return _config_cache |
def assert_unique_multiindex_level_names(variables):
    """Check for uniqueness of MultiIndex level names in all given
    variables.

    Not public API. Used for checking consistency of DataArray and Dataset
    objects.
    """
    # Maps a level name to descriptions of every variable declaring it; an
    # entry with more than one description is a conflict.
    level_names = defaultdict(list)
    # Union of all level names seen, used for the level/dimension check below.
    all_level_names = set()
    for var_name, var in variables.items():
        # Only variables backed by a pandas index can carry MultiIndex levels.
        if isinstance(var._data, PandasIndexAdapter):
            idx_level_names = var.to_index_variable().level_names
            if idx_level_names is not None:
                for n in idx_level_names:
                    level_names[n].append('%r (%s)' % (n, var_name))
            if idx_level_names:
                all_level_names.update(idx_level_names)

    # A level name that is also a variable name counts as an extra occurrence.
    for k, v in level_names.items():
        if k in variables:
            v.append('(%s)' % k)
    duplicate_names = [v for v in level_names.values() if len(v) > 1]
    if duplicate_names:
        conflict_str = '\n'.join([', '.join(v) for v in duplicate_names])
        raise ValueError('conflicting MultiIndex level name(s):\n%s'
                         % conflict_str)
    # Check confliction between level names and dimensions GH:2299
    for k, v in variables.items():
        for d in v.dims:
            if d in all_level_names:
                raise ValueError('conflicting level / dimension names. {} '
                                 'already exists as a level name.'.format(d))
constant[Check for uniqueness of MultiIndex level names in all given
variables.
Not public API. Used for checking consistency of DataArray and Dataset
objects.
]
variable[level_names] assign[=] call[name[defaultdict], parameter[name[list]]]
variable[all_level_names] assign[=] call[name[set], parameter[]]
for taget[tuple[[<ast.Name object at 0x7da204345840>, <ast.Name object at 0x7da2043443d0>]]] in starred[call[name[variables].items, parameter[]]] begin[:]
if call[name[isinstance], parameter[name[var]._data, name[PandasIndexAdapter]]] begin[:]
variable[idx_level_names] assign[=] call[name[var].to_index_variable, parameter[]].level_names
if compare[name[idx_level_names] is_not constant[None]] begin[:]
for taget[name[n]] in starred[name[idx_level_names]] begin[:]
call[call[name[level_names]][name[n]].append, parameter[binary_operation[constant[%r (%s)] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da204344340>, <ast.Name object at 0x7da204344190>]]]]]
if name[idx_level_names] begin[:]
call[name[all_level_names].update, parameter[name[idx_level_names]]]
for taget[tuple[[<ast.Name object at 0x7da2043450f0>, <ast.Name object at 0x7da204347550>]]] in starred[call[name[level_names].items, parameter[]]] begin[:]
if compare[name[k] in name[variables]] begin[:]
call[name[v].append, parameter[binary_operation[constant[(%s)] <ast.Mod object at 0x7da2590d6920> name[k]]]]
variable[duplicate_names] assign[=] <ast.ListComp object at 0x7da204347010>
if name[duplicate_names] begin[:]
variable[conflict_str] assign[=] call[constant[
].join, parameter[<ast.ListComp object at 0x7da204347070>]]
<ast.Raise object at 0x7da204346e90>
for taget[tuple[[<ast.Name object at 0x7da204344460>, <ast.Name object at 0x7da204344670>]]] in starred[call[name[variables].items, parameter[]]] begin[:]
for taget[name[d]] in starred[name[v].dims] begin[:]
if compare[name[d] in name[all_level_names]] begin[:]
<ast.Raise object at 0x7da204346230> | keyword[def] identifier[assert_unique_multiindex_level_names] ( identifier[variables] ):
literal[string]
identifier[level_names] = identifier[defaultdict] ( identifier[list] )
identifier[all_level_names] = identifier[set] ()
keyword[for] identifier[var_name] , identifier[var] keyword[in] identifier[variables] . identifier[items] ():
keyword[if] identifier[isinstance] ( identifier[var] . identifier[_data] , identifier[PandasIndexAdapter] ):
identifier[idx_level_names] = identifier[var] . identifier[to_index_variable] (). identifier[level_names]
keyword[if] identifier[idx_level_names] keyword[is] keyword[not] keyword[None] :
keyword[for] identifier[n] keyword[in] identifier[idx_level_names] :
identifier[level_names] [ identifier[n] ]. identifier[append] ( literal[string] %( identifier[n] , identifier[var_name] ))
keyword[if] identifier[idx_level_names] :
identifier[all_level_names] . identifier[update] ( identifier[idx_level_names] )
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[level_names] . identifier[items] ():
keyword[if] identifier[k] keyword[in] identifier[variables] :
identifier[v] . identifier[append] ( literal[string] % identifier[k] )
identifier[duplicate_names] =[ identifier[v] keyword[for] identifier[v] keyword[in] identifier[level_names] . identifier[values] () keyword[if] identifier[len] ( identifier[v] )> literal[int] ]
keyword[if] identifier[duplicate_names] :
identifier[conflict_str] = literal[string] . identifier[join] ([ literal[string] . identifier[join] ( identifier[v] ) keyword[for] identifier[v] keyword[in] identifier[duplicate_names] ])
keyword[raise] identifier[ValueError] ( literal[string]
% identifier[conflict_str] )
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[variables] . identifier[items] ():
keyword[for] identifier[d] keyword[in] identifier[v] . identifier[dims] :
keyword[if] identifier[d] keyword[in] identifier[all_level_names] :
keyword[raise] identifier[ValueError] ( literal[string]
literal[string] . identifier[format] ( identifier[d] )) | def assert_unique_multiindex_level_names(variables):
"""Check for uniqueness of MultiIndex level names in all given
variables.
Not public API. Used for checking consistency of DataArray and Dataset
objects.
"""
level_names = defaultdict(list)
all_level_names = set()
for (var_name, var) in variables.items():
if isinstance(var._data, PandasIndexAdapter):
idx_level_names = var.to_index_variable().level_names
if idx_level_names is not None:
for n in idx_level_names:
level_names[n].append('%r (%s)' % (n, var_name)) # depends on [control=['for'], data=['n']] # depends on [control=['if'], data=['idx_level_names']]
if idx_level_names:
all_level_names.update(idx_level_names) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
for (k, v) in level_names.items():
if k in variables:
v.append('(%s)' % k) # depends on [control=['if'], data=['k']] # depends on [control=['for'], data=[]]
duplicate_names = [v for v in level_names.values() if len(v) > 1]
if duplicate_names:
conflict_str = '\n'.join([', '.join(v) for v in duplicate_names])
raise ValueError('conflicting MultiIndex level name(s):\n%s' % conflict_str) # depends on [control=['if'], data=[]]
# Check confliction between level names and dimensions GH:2299
for (k, v) in variables.items():
for d in v.dims:
if d in all_level_names:
raise ValueError('conflicting level / dimension names. {} already exists as a level name.'.format(d)) # depends on [control=['if'], data=['d']] # depends on [control=['for'], data=['d']] # depends on [control=['for'], data=[]] |
def analyse_cache_dir(self, jobhandler=None, batchsize=1, **kwargs):
    """
    Scan the cache directory and launch analysis for all unscored alignments
    using associated task handler. KWargs are passed to the tree calculating
    task managed by the TaskInterface in self.task_interface.

    :param jobhandler: handler used to run the analysis tasks; defaults to a
        SequentialJobHandler when None.
    :param batchsize: number of tasks handed to the job handler per batch.
    :param kwargs: forwarded to self.task_interface.scrape_args. Examples:
        TreeCollectionTaskInterface: scale=1, guide_tree=None,
                                     niters=10, keep_topology=False
        RaxmlTaskInterface: -------- partition_files=None, model=None, threads=1
        FastTreeTaskInterface: ----- No kwargs
    :return: [] when every cached alignment already has a result file,
        otherwise the list of results produced by the job handler.
    """
    if jobhandler is None:
        jobhandler = SequentialJobHandler()
    # Cached alignments are stored as phylip files in the cache directory
    files = glob.glob(os.path.join(self.cache_dir, '*.phy'))
    #logger.debug('Files - {}'.format(files))
    records = []
    outfiles = []
    dna = self.collection[0].is_dna() # THIS IS ONLY A GUESS AT SEQ TYPE!!
    # NOTE(review): `dna` is computed but never used below -- Alignment(...)
    # hard-codes True for its third argument. Confirm whether `dna` was
    # meant to be passed there instead.
    for infile in files:
        id_ = fileIO.strip_extensions(infile)
        outfile = self.get_result_file(id_)
        #logger.debug('Looking for {}: {}'.format(outfile, os.path.exists(outfile)))
        # Only queue alignments that do not yet have a result file
        if not os.path.exists(outfile):
            record = Alignment(infile, 'phylip', True)
            records.append(record)
            outfiles.append(outfile)
    if len(records) == 0:
        return []
    args, to_delete = self.task_interface.scrape_args(records, outfiles=outfiles, **kwargs)
    # logger.debug('Args - {}'.format(args))
    # Temporary files created by scrape_args are removed when the block exits
    with fileIO.TempFileList(to_delete):
        result = jobhandler(self.task_interface.get_task(), args, 'Cache dir analysis', batchsize)
        # Cache each (truthy) result as JSON next to its alignment
        for (out, res) in zip(outfiles, result):
            if not os.path.exists(out) and res:
                with open(out, 'w') as outfl:
                    json.dump(res, outfl)
    return result
constant[
Scan the cache directory and launch analysis for all unscored alignments
using associated task handler. KWargs are passed to the tree calculating
task managed by the TaskInterface in self.task_interface.
Example kwargs:
TreeCollectionTaskInterface: scale=1, guide_tree=None,
niters=10, keep_topology=False
RaxmlTaskInterface: -------- partition_files=None, model=None, threads=1
FastTreeTaskInterface: ----- No kwargs
]
if compare[name[jobhandler] is constant[None]] begin[:]
variable[jobhandler] assign[=] call[name[SequentialJobHandler], parameter[]]
variable[files] assign[=] call[name[glob].glob, parameter[call[name[os].path.join, parameter[name[self].cache_dir, constant[*.phy]]]]]
variable[records] assign[=] list[[]]
variable[outfiles] assign[=] list[[]]
variable[dna] assign[=] call[call[name[self].collection][constant[0]].is_dna, parameter[]]
for taget[name[infile]] in starred[name[files]] begin[:]
variable[id_] assign[=] call[name[fileIO].strip_extensions, parameter[name[infile]]]
variable[outfile] assign[=] call[name[self].get_result_file, parameter[name[id_]]]
if <ast.UnaryOp object at 0x7da20c6aa260> begin[:]
variable[record] assign[=] call[name[Alignment], parameter[name[infile], constant[phylip], constant[True]]]
call[name[records].append, parameter[name[record]]]
call[name[outfiles].append, parameter[name[outfile]]]
if compare[call[name[len], parameter[name[records]]] equal[==] constant[0]] begin[:]
return[list[[]]]
<ast.Tuple object at 0x7da20c6a88e0> assign[=] call[name[self].task_interface.scrape_args, parameter[name[records]]]
with call[name[fileIO].TempFileList, parameter[name[to_delete]]] begin[:]
variable[result] assign[=] call[name[jobhandler], parameter[call[name[self].task_interface.get_task, parameter[]], name[args], constant[Cache dir analysis], name[batchsize]]]
for taget[tuple[[<ast.Name object at 0x7da20c6aa860>, <ast.Name object at 0x7da20c6aa920>]]] in starred[call[name[zip], parameter[name[outfiles], name[result]]]] begin[:]
if <ast.BoolOp object at 0x7da20c6a8550> begin[:]
with call[name[open], parameter[name[out], constant[w]]] begin[:]
call[name[json].dump, parameter[name[res], name[outfl]]]
return[name[result]] | keyword[def] identifier[analyse_cache_dir] ( identifier[self] , identifier[jobhandler] = keyword[None] , identifier[batchsize] = literal[int] ,** identifier[kwargs] ):
literal[string]
keyword[if] identifier[jobhandler] keyword[is] keyword[None] :
identifier[jobhandler] = identifier[SequentialJobHandler] ()
identifier[files] = identifier[glob] . identifier[glob] ( identifier[os] . identifier[path] . identifier[join] ( identifier[self] . identifier[cache_dir] , literal[string] ))
identifier[records] =[]
identifier[outfiles] =[]
identifier[dna] = identifier[self] . identifier[collection] [ literal[int] ]. identifier[is_dna] ()
keyword[for] identifier[infile] keyword[in] identifier[files] :
identifier[id_] = identifier[fileIO] . identifier[strip_extensions] ( identifier[infile] )
identifier[outfile] = identifier[self] . identifier[get_result_file] ( identifier[id_] )
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[outfile] ):
identifier[record] = identifier[Alignment] ( identifier[infile] , literal[string] , keyword[True] )
identifier[records] . identifier[append] ( identifier[record] )
identifier[outfiles] . identifier[append] ( identifier[outfile] )
keyword[if] identifier[len] ( identifier[records] )== literal[int] :
keyword[return] []
identifier[args] , identifier[to_delete] = identifier[self] . identifier[task_interface] . identifier[scrape_args] ( identifier[records] , identifier[outfiles] = identifier[outfiles] ,** identifier[kwargs] )
keyword[with] identifier[fileIO] . identifier[TempFileList] ( identifier[to_delete] ):
identifier[result] = identifier[jobhandler] ( identifier[self] . identifier[task_interface] . identifier[get_task] (), identifier[args] , literal[string] , identifier[batchsize] )
keyword[for] ( identifier[out] , identifier[res] ) keyword[in] identifier[zip] ( identifier[outfiles] , identifier[result] ):
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[out] ) keyword[and] identifier[res] :
keyword[with] identifier[open] ( identifier[out] , literal[string] ) keyword[as] identifier[outfl] :
identifier[json] . identifier[dump] ( identifier[res] , identifier[outfl] )
keyword[return] identifier[result] | def analyse_cache_dir(self, jobhandler=None, batchsize=1, **kwargs):
"""
Scan the cache directory and launch analysis for all unscored alignments
using associated task handler. KWargs are passed to the tree calculating
task managed by the TaskInterface in self.task_interface.
Example kwargs:
TreeCollectionTaskInterface: scale=1, guide_tree=None,
niters=10, keep_topology=False
RaxmlTaskInterface: -------- partition_files=None, model=None, threads=1
FastTreeTaskInterface: ----- No kwargs
"""
if jobhandler is None:
jobhandler = SequentialJobHandler() # depends on [control=['if'], data=['jobhandler']]
files = glob.glob(os.path.join(self.cache_dir, '*.phy'))
#logger.debug('Files - {}'.format(files))
records = []
outfiles = []
dna = self.collection[0].is_dna() # THIS IS ONLY A GUESS AT SEQ TYPE!!
for infile in files:
id_ = fileIO.strip_extensions(infile)
outfile = self.get_result_file(id_)
#logger.debug('Looking for {}: {}'.format(outfile, os.path.exists(outfile)))
if not os.path.exists(outfile):
record = Alignment(infile, 'phylip', True)
records.append(record)
outfiles.append(outfile) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['infile']]
if len(records) == 0:
return [] # depends on [control=['if'], data=[]]
(args, to_delete) = self.task_interface.scrape_args(records, outfiles=outfiles, **kwargs)
# logger.debug('Args - {}'.format(args))
with fileIO.TempFileList(to_delete):
result = jobhandler(self.task_interface.get_task(), args, 'Cache dir analysis', batchsize)
for (out, res) in zip(outfiles, result):
if not os.path.exists(out) and res:
with open(out, 'w') as outfl:
json.dump(res, outfl) # depends on [control=['with'], data=['outfl']] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['with'], data=[]]
return result |
def safe_better_repr(
    self, obj, context=None, html=True, level=0, full=False
):
    """Return a repr of *obj*, with an HTML inspect link when *html* is set.

    Tries self.better_repr first (tracking already-seen objects in
    *context* to detect recursion); on failure or recursion falls back
    to self.safe_repr, caching the object in self.obj_cache.
    """
    ctx = dict(context) if context else {}
    obj_id = id(obj)
    seen_before = obj_id in ctx
    if not seen_before:
        # First visit: remember the object, then attempt the rich repr.
        ctx[obj_id] = obj
        try:
            rich = self.better_repr(obj, ctx, html, level + 1, full)
        except Exception:
            rich = None
        if rich:
            return rich
    # Fallback path: cache the object so the inspect link can resolve it.
    self.obj_cache[obj_id] = obj
    prefix = 'Recursion of ' if seen_before else ''
    if html:
        return '<a href="%d" class="inspect">%s%s</a>' % (
            obj_id, prefix, escape(self.safe_repr(obj))
        )
    return '%s%s' % (prefix, self.safe_repr(obj))
constant[Repr with inspect links on objects]
variable[context] assign[=] <ast.BoolOp object at 0x7da20c6a90f0>
variable[recursion] assign[=] compare[call[name[id], parameter[name[obj]]] in name[context]]
if <ast.UnaryOp object at 0x7da20c6ab460> begin[:]
call[name[context]][call[name[id], parameter[name[obj]]]] assign[=] name[obj]
<ast.Try object at 0x7da20c6ab580>
if name[rv] begin[:]
return[name[rv]]
call[name[self].obj_cache][call[name[id], parameter[name[obj]]]] assign[=] name[obj]
if name[html] begin[:]
return[binary_operation[constant[<a href="%d" class="inspect">%s%s</a>] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Call object at 0x7da207f019c0>, <ast.IfExp object at 0x7da207f02350>, <ast.Call object at 0x7da207f03520>]]]]
return[binary_operation[constant[%s%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.IfExp object at 0x7da207f02950>, <ast.Call object at 0x7da207f00340>]]]] | keyword[def] identifier[safe_better_repr] (
identifier[self] , identifier[obj] , identifier[context] = keyword[None] , identifier[html] = keyword[True] , identifier[level] = literal[int] , identifier[full] = keyword[False]
):
literal[string]
identifier[context] = identifier[context] keyword[and] identifier[dict] ( identifier[context] ) keyword[or] {}
identifier[recursion] = identifier[id] ( identifier[obj] ) keyword[in] identifier[context]
keyword[if] keyword[not] identifier[recursion] :
identifier[context] [ identifier[id] ( identifier[obj] )]= identifier[obj]
keyword[try] :
identifier[rv] = identifier[self] . identifier[better_repr] ( identifier[obj] , identifier[context] , identifier[html] , identifier[level] + literal[int] , identifier[full] )
keyword[except] identifier[Exception] :
identifier[rv] = keyword[None]
keyword[if] identifier[rv] :
keyword[return] identifier[rv]
identifier[self] . identifier[obj_cache] [ identifier[id] ( identifier[obj] )]= identifier[obj]
keyword[if] identifier[html] :
keyword[return] literal[string] %(
identifier[id] ( identifier[obj] ), literal[string]
keyword[if] identifier[recursion] keyword[else] literal[string] , identifier[escape] ( identifier[self] . identifier[safe_repr] ( identifier[obj] ))
)
keyword[return] literal[string] %(
literal[string] keyword[if] identifier[recursion] keyword[else] literal[string] , identifier[self] . identifier[safe_repr] ( identifier[obj] )
) | def safe_better_repr(self, obj, context=None, html=True, level=0, full=False):
"""Repr with inspect links on objects"""
context = context and dict(context) or {}
recursion = id(obj) in context
if not recursion:
context[id(obj)] = obj
try:
rv = self.better_repr(obj, context, html, level + 1, full) # depends on [control=['try'], data=[]]
except Exception:
rv = None # depends on [control=['except'], data=[]]
if rv:
return rv # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
self.obj_cache[id(obj)] = obj
if html:
return '<a href="%d" class="inspect">%s%s</a>' % (id(obj), 'Recursion of ' if recursion else '', escape(self.safe_repr(obj))) # depends on [control=['if'], data=[]]
return '%s%s' % ('Recursion of ' if recursion else '', self.safe_repr(obj)) |
def data_recognise(self, data=None):
    """Return a unicode string naming the data type of *data*.

    Falls back to self.data when *data* is falsy. Classification is by
    case-insensitive URI-scheme prefix; anything unrecognised is 'text'.
    """
    payload = data or self.data
    lowered = payload.lower()
    if lowered.startswith((u"http://", u"https://")):
        return u'url'
    # Ordered prefix -> type table for the remaining schemes.
    prefix_types = (
        (u"mailto:", u'email'),
        (u"matmsg:to:", u'emailmessage'),
        (u"tel:", u'telephone'),
        (u"smsto:", u'sms'),
        (u"mmsto:", u'mms'),
        (u"geo:", u'geo'),
        (u"mebkm:title:", u'bookmark'),
        (u"mecard:", u'phonebook'),
    )
    for prefix, kind in prefix_types:
        if lowered.startswith(prefix):
            return kind
    return u'text'
constant[Returns an unicode string indicating the data type of the data paramater]
variable[data] assign[=] <ast.BoolOp object at 0x7da18bc70280>
variable[data_lower] assign[=] call[name[data].lower, parameter[]]
if <ast.BoolOp object at 0x7da18bc70910> begin[:]
return[constant[url]] | keyword[def] identifier[data_recognise] ( identifier[self] , identifier[data] = keyword[None] ):
literal[string]
identifier[data] = identifier[data] keyword[or] identifier[self] . identifier[data]
identifier[data_lower] = identifier[data] . identifier[lower] ()
keyword[if] identifier[data_lower] . identifier[startswith] ( literal[string] ) keyword[or] identifier[data_lower] . identifier[startswith] ( literal[string] ):
keyword[return] literal[string]
keyword[elif] identifier[data_lower] . identifier[startswith] ( literal[string] ):
keyword[return] literal[string]
keyword[elif] identifier[data_lower] . identifier[startswith] ( literal[string] ):
keyword[return] literal[string]
keyword[elif] identifier[data_lower] . identifier[startswith] ( literal[string] ):
keyword[return] literal[string]
keyword[elif] identifier[data_lower] . identifier[startswith] ( literal[string] ):
keyword[return] literal[string]
keyword[elif] identifier[data_lower] . identifier[startswith] ( literal[string] ):
keyword[return] literal[string]
keyword[elif] identifier[data_lower] . identifier[startswith] ( literal[string] ):
keyword[return] literal[string]
keyword[elif] identifier[data_lower] . identifier[startswith] ( literal[string] ):
keyword[return] literal[string]
keyword[elif] identifier[data_lower] . identifier[startswith] ( literal[string] ):
keyword[return] literal[string]
keyword[else] :
keyword[return] literal[string] | def data_recognise(self, data=None):
"""Returns an unicode string indicating the data type of the data paramater"""
data = data or self.data
data_lower = data.lower()
if data_lower.startswith(u'http://') or data_lower.startswith(u'https://'):
return u'url' # depends on [control=['if'], data=[]]
elif data_lower.startswith(u'mailto:'):
return u'email' # depends on [control=['if'], data=[]]
elif data_lower.startswith(u'matmsg:to:'):
return u'emailmessage' # depends on [control=['if'], data=[]]
elif data_lower.startswith(u'tel:'):
return u'telephone' # depends on [control=['if'], data=[]]
elif data_lower.startswith(u'smsto:'):
return u'sms' # depends on [control=['if'], data=[]]
elif data_lower.startswith(u'mmsto:'):
return u'mms' # depends on [control=['if'], data=[]]
elif data_lower.startswith(u'geo:'):
return u'geo' # depends on [control=['if'], data=[]]
elif data_lower.startswith(u'mebkm:title:'):
return u'bookmark' # depends on [control=['if'], data=[]]
elif data_lower.startswith(u'mecard:'):
return u'phonebook' # depends on [control=['if'], data=[]]
else:
return u'text' |
def add_chassis(self, chassis):
    """Open a socket to *chassis*, register it, and log on.

    :param chassis: chassis object exposing ip, port, password and owner
    """
    sock = XenaSocket(self.logger, chassis.ip, chassis.port)
    self.chassis_list[chassis] = sock
    sock.connect()
    # Keep the connection alive in the background.
    KeepAliveThread(sock).start()
    self.send_command(chassis, 'c_logon', '"{}"'.format(chassis.password))
    self.send_command(chassis, 'c_owner', '"{}"'.format(chassis.owner))
constant[
:param ip: chassis object
]
call[name[self].chassis_list][name[chassis]] assign[=] call[name[XenaSocket], parameter[name[self].logger, name[chassis].ip, name[chassis].port]]
call[call[name[self].chassis_list][name[chassis]].connect, parameter[]]
call[call[name[KeepAliveThread], parameter[call[name[self].chassis_list][name[chassis]]]].start, parameter[]]
call[name[self].send_command, parameter[name[chassis], constant[c_logon], call[constant["{}"].format, parameter[name[chassis].password]]]]
call[name[self].send_command, parameter[name[chassis], constant[c_owner], call[constant["{}"].format, parameter[name[chassis].owner]]]] | keyword[def] identifier[add_chassis] ( identifier[self] , identifier[chassis] ):
literal[string]
identifier[self] . identifier[chassis_list] [ identifier[chassis] ]= identifier[XenaSocket] ( identifier[self] . identifier[logger] , identifier[chassis] . identifier[ip] , identifier[chassis] . identifier[port] )
identifier[self] . identifier[chassis_list] [ identifier[chassis] ]. identifier[connect] ()
identifier[KeepAliveThread] ( identifier[self] . identifier[chassis_list] [ identifier[chassis] ]). identifier[start] ()
identifier[self] . identifier[send_command] ( identifier[chassis] , literal[string] , literal[string] . identifier[format] ( identifier[chassis] . identifier[password] ))
identifier[self] . identifier[send_command] ( identifier[chassis] , literal[string] , literal[string] . identifier[format] ( identifier[chassis] . identifier[owner] )) | def add_chassis(self, chassis):
"""
:param ip: chassis object
"""
self.chassis_list[chassis] = XenaSocket(self.logger, chassis.ip, chassis.port)
self.chassis_list[chassis].connect()
KeepAliveThread(self.chassis_list[chassis]).start()
self.send_command(chassis, 'c_logon', '"{}"'.format(chassis.password))
self.send_command(chassis, 'c_owner', '"{}"'.format(chassis.owner)) |
def register(self, task_spec):
    """Register a module specification with the LSID authority.

    Validates that the spec carries an LSID assigned by this authority,
    records it, persists the updated authority file and reloads it.

    :param task_spec: module specification exposing ``lsid`` and ``name``
    :return: True if registration was successful
    :raises RuntimeError: if the module's LSID fails validation
    """
    if not self.validate(task_spec.lsid):
        raise RuntimeError("Module LSID id not valid: " + str(task_spec.lsid))
    # Record the module and bump the count.
    self.registered_modules[task_spec.lsid] = task_spec.name
    self.module_count += 1
    # Persist the updated authority state, then reload it.
    authority_state = {
        'base_lsid': self.base_lsid,
        'module_count': self.module_count,
        'registered_modules': self.registered_modules,
    }
    with open(self.authority, 'w') as authority_file:
        json.dump(authority_state, authority_file,
                  sort_keys=True, indent=4, separators=(',', ': '))
    self._load_lsid_authority()
    return True
constant[
Registers a module specification with the LSID authority.
Validates that it possesses an LSID assigned by the authority.
Raises an exception if registration wasn't successful.
:param task_spec:
:return: boolean - True if registration was successful
]
if call[name[self].validate, parameter[name[task_spec].lsid]] begin[:]
call[name[self].registered_modules][name[task_spec].lsid] assign[=] name[task_spec].name
<ast.AugAssign object at 0x7da204622c20>
with call[name[open], parameter[name[self].authority, constant[w]]] begin[:]
call[name[json].dump, parameter[dictionary[[<ast.Constant object at 0x7da204621990>, <ast.Constant object at 0x7da204620640>, <ast.Constant object at 0x7da204620a00>], [<ast.Attribute object at 0x7da2046208e0>, <ast.Attribute object at 0x7da204623a30>, <ast.Attribute object at 0x7da204621ab0>]], name[authority_file]]]
call[name[self]._load_lsid_authority, parameter[]]
return[constant[True]] | keyword[def] identifier[register] ( identifier[self] , identifier[task_spec] ):
literal[string]
keyword[if] identifier[self] . identifier[validate] ( identifier[task_spec] . identifier[lsid] ):
identifier[self] . identifier[registered_modules] [ identifier[task_spec] . identifier[lsid] ]= identifier[task_spec] . identifier[name]
identifier[self] . identifier[module_count] += literal[int]
keyword[with] identifier[open] ( identifier[self] . identifier[authority] , literal[string] ) keyword[as] identifier[authority_file] :
identifier[json] . identifier[dump] ({
literal[string] : identifier[self] . identifier[base_lsid] ,
literal[string] : identifier[self] . identifier[module_count] ,
literal[string] : identifier[self] . identifier[registered_modules] ,
}, identifier[authority_file] , identifier[sort_keys] = keyword[True] , identifier[indent] = literal[int] , identifier[separators] =( literal[string] , literal[string] ))
identifier[self] . identifier[_load_lsid_authority] ()
keyword[else] :
keyword[raise] identifier[RuntimeError] ( literal[string] + identifier[str] ( identifier[task_spec] . identifier[lsid] ))
keyword[return] keyword[True] | def register(self, task_spec):
"""
Registers a module specification with the LSID authority.
Validates that it possesses an LSID assigned by the authority.
Raises an exception if registration wasn't successful.
:param task_spec:
:return: boolean - True if registration was successful
"""
if self.validate(task_spec.lsid):
# Add the module name to the map
self.registered_modules[task_spec.lsid] = task_spec.name
# Increment module count
self.module_count += 1
# Write the updated LSID authority file and reload
with open(self.authority, 'w') as authority_file:
json.dump({'base_lsid': self.base_lsid, 'module_count': self.module_count, 'registered_modules': self.registered_modules}, authority_file, sort_keys=True, indent=4, separators=(',', ': ')) # depends on [control=['with'], data=['authority_file']]
self._load_lsid_authority() # depends on [control=['if'], data=[]]
else:
raise RuntimeError('Module LSID id not valid: ' + str(task_spec.lsid))
return True |
def login(self, login, password, set_auth=False):
    """Attempt a login against the remote server.

    On success returns the user id and session pair reported by the
    server in its 'result' field.
    Warning: Do not depend on this. This will be deprecated with SSO.

    :param set_auth: when True, install the resulting session auth on
        this client.
    """
    payload = dumps({
        "method": "common.db.login",
        "params": [login, password]
    })
    response = self.session.post(self.host, payload)
    result = loads(response.content)['result']
    if set_auth:
        self.set_auth(SessionAuth(login, *result))
    return result
constant[
Attempts a login to the remote server
and on success returns user id and session
or None
Warning: Do not depend on this. This will be deprecated
with SSO.
param set_auth: sets the authentication on the client
]
variable[rv] assign[=] call[name[self].session.post, parameter[name[self].host, call[name[dumps], parameter[dictionary[[<ast.Constant object at 0x7da1b1a5c0a0>, <ast.Constant object at 0x7da1b1a5d180>], [<ast.Constant object at 0x7da1b1a5c3a0>, <ast.List object at 0x7da1b1a5cc70>]]]]]]
variable[rv] assign[=] call[call[name[loads], parameter[name[rv].content]]][constant[result]]
if name[set_auth] begin[:]
call[name[self].set_auth, parameter[call[name[SessionAuth], parameter[name[login], <ast.Starred object at 0x7da18ede6950>]]]]
return[name[rv]] | keyword[def] identifier[login] ( identifier[self] , identifier[login] , identifier[password] , identifier[set_auth] = keyword[False] ):
literal[string]
identifier[rv] = identifier[self] . identifier[session] . identifier[post] (
identifier[self] . identifier[host] ,
identifier[dumps] ({
literal[string] : literal[string] ,
literal[string] :[ identifier[login] , identifier[password] ]
}),
)
identifier[rv] = identifier[loads] ( identifier[rv] . identifier[content] )[ literal[string] ]
keyword[if] identifier[set_auth] :
identifier[self] . identifier[set_auth] (
identifier[SessionAuth] ( identifier[login] ,* identifier[rv] )
)
keyword[return] identifier[rv] | def login(self, login, password, set_auth=False):
"""
Attempts a login to the remote server
and on success returns user id and session
or None
Warning: Do not depend on this. This will be deprecated
with SSO.
param set_auth: sets the authentication on the client
"""
rv = self.session.post(self.host, dumps({'method': 'common.db.login', 'params': [login, password]}))
rv = loads(rv.content)['result']
if set_auth:
self.set_auth(SessionAuth(login, *rv)) # depends on [control=['if'], data=[]]
return rv |
def profile_different_methods(search_file, screen_file, method_list, dir_path, file_name):
    """Profile each matching method's performance on the given image pair.

    Loads the search/screen images, profiles every method in
    *method_list*, and dumps the timing data as JSON under *dir_path*.
    """
    recorder = ProfileRecorder(0.05)
    # Load both images once, shared by every profiled method.
    recorder.load_images(search_file, screen_file)
    # Run the profiling pass over each candidate method.
    recorder.profile_methods(method_list)
    # Persist the collected performance data as JSON.
    recorder.wite_to_json(dir_path, file_name)
constant[对指定的图片进行性能测试.]
variable[profiler] assign[=] call[name[ProfileRecorder], parameter[constant[0.05]]]
call[name[profiler].load_images, parameter[name[search_file], name[screen_file]]]
call[name[profiler].profile_methods, parameter[name[method_list]]]
call[name[profiler].wite_to_json, parameter[name[dir_path], name[file_name]]] | keyword[def] identifier[profile_different_methods] ( identifier[search_file] , identifier[screen_file] , identifier[method_list] , identifier[dir_path] , identifier[file_name] ):
literal[string]
identifier[profiler] = identifier[ProfileRecorder] ( literal[int] )
identifier[profiler] . identifier[load_images] ( identifier[search_file] , identifier[screen_file] )
identifier[profiler] . identifier[profile_methods] ( identifier[method_list] )
identifier[profiler] . identifier[wite_to_json] ( identifier[dir_path] , identifier[file_name] ) | def profile_different_methods(search_file, screen_file, method_list, dir_path, file_name):
"""对指定的图片进行性能测试."""
profiler = ProfileRecorder(0.05)
# 加载图片
profiler.load_images(search_file, screen_file)
# 传入待测试的方法列表
profiler.profile_methods(method_list)
# 将性能数据写入文件
profiler.wite_to_json(dir_path, file_name) |
def flow_tuple(data):
    """Return the flow 5-tuple (src, dst, sport, dport, proto) for *data*."""
    packet = data['packet']
    transport = data.get('transport')
    raw_src = packet.get('src')
    raw_dst = packet.get('dst')
    # Addresses are converted to strings only when present.
    src = net_utils.inet_to_str(raw_src) if raw_src else None
    dst = net_utils.inet_to_str(raw_dst) if raw_dst else None
    if transport:
        sport = transport.get('sport')
        dport = transport.get('dport')
        proto = transport.get('type')
    else:
        # No transport layer: ports are unknown, protocol comes from the
        # packet layer.
        sport = None
        dport = None
        proto = packet['type']
    return (src, dst, sport, dport, proto)
constant[Tuple for flow (src, dst, sport, dport, proto)]
variable[src] assign[=] <ast.IfExp object at 0x7da1b1a3cee0>
variable[dst] assign[=] <ast.IfExp object at 0x7da1b1a3d600>
variable[sport] assign[=] <ast.IfExp object at 0x7da1b1a3fee0>
variable[dport] assign[=] <ast.IfExp object at 0x7da1b1a3fbe0>
variable[proto] assign[=] <ast.IfExp object at 0x7da1b1a3e800>
return[tuple[[<ast.Name object at 0x7da1b1a3f940>, <ast.Name object at 0x7da1b1a3d810>, <ast.Name object at 0x7da1b1a3e9e0>, <ast.Name object at 0x7da1b1a3f1f0>, <ast.Name object at 0x7da1b1a3f0d0>]]] | keyword[def] identifier[flow_tuple] ( identifier[data] ):
literal[string]
identifier[src] = identifier[net_utils] . identifier[inet_to_str] ( identifier[data] [ literal[string] ]. identifier[get] ( literal[string] )) keyword[if] identifier[data] [ literal[string] ]. identifier[get] ( literal[string] ) keyword[else] keyword[None]
identifier[dst] = identifier[net_utils] . identifier[inet_to_str] ( identifier[data] [ literal[string] ]. identifier[get] ( literal[string] )) keyword[if] identifier[data] [ literal[string] ]. identifier[get] ( literal[string] ) keyword[else] keyword[None]
identifier[sport] = identifier[data] [ literal[string] ]. identifier[get] ( literal[string] ) keyword[if] identifier[data] . identifier[get] ( literal[string] ) keyword[else] keyword[None]
identifier[dport] = identifier[data] [ literal[string] ]. identifier[get] ( literal[string] ) keyword[if] identifier[data] . identifier[get] ( literal[string] ) keyword[else] keyword[None]
identifier[proto] = identifier[data] [ literal[string] ]. identifier[get] ( literal[string] ) keyword[if] identifier[data] . identifier[get] ( literal[string] ) keyword[else] identifier[data] [ literal[string] ][ literal[string] ]
keyword[return] ( identifier[src] , identifier[dst] , identifier[sport] , identifier[dport] , identifier[proto] ) | def flow_tuple(data):
"""Tuple for flow (src, dst, sport, dport, proto)"""
src = net_utils.inet_to_str(data['packet'].get('src')) if data['packet'].get('src') else None
dst = net_utils.inet_to_str(data['packet'].get('dst')) if data['packet'].get('dst') else None
sport = data['transport'].get('sport') if data.get('transport') else None
dport = data['transport'].get('dport') if data.get('transport') else None
proto = data['transport'].get('type') if data.get('transport') else data['packet']['type']
return (src, dst, sport, dport, proto) |
def get_branches(self, project, repository, base=None, filter=None, start=0, limit=99999, details=True,
                 order_by='MODIFICATION'):
    """
    Retrieve the branches matching the supplied filterText param.
    The authenticated user must have REPO_READ permission for the specified
    repository to call this resource.

    :param project: project key
    :param repository: repository slug
    :param base: base branch/tag to compare each branch to (for the metadata
        providers that use that information)
    :param filter: filterText used to match branch names
    :param start: index of the first branch to return (omitted when falsy)
    :param limit: OPTIONAL: The limit of the number of branches to return,
        this may be restricted by fixed system limits. Default by built-in
        method: 99999
    :param details: whether to retrieve plugin-provided metadata about
        each branch
    :param order_by: OPTIONAL: ordering of refs either ALPHABETICAL (by name)
        or MODIFICATION (last updated)
    :return: the 'values' entry of the response, or None when absent
    """
    url = 'rest/api/1.0/projects/{project}/repos/{repository}/branches'.format(
        project=project, repository=repository)
    # Forward only truthy optional parameters, in the same insertion order
    # as the original request layout; 'details' is always sent.
    params = {}
    for key, val in (('start', start),
                     ('limit', limit),
                     ('filterText', filter),
                     ('base', base),
                     ('orderBy', order_by)):
        if val:
            params[key] = val
    params['details'] = details
    response = self.get(url, params=params) or {}
    return response.get('values')
constant[
Retrieve the branches matching the supplied filterText param.
The authenticated user must have REPO_READ permission for the specified repository to call this resource.
:param start:
:param project:
:param repository:
:param base: base branch/tag to compare each branch to (for the metadata providers that uses that information)
:param filter:
:param limit: OPTIONAL: The limit of the number of branches to return, this may be restricted by
fixed system limits. Default by built-in method: 99999
:param details: whether to retrieve plugin-provided metadata about each branch
:param order_by: OPTIONAL: ordering of refs either ALPHABETICAL (by name) or MODIFICATION (last updated)
:return:
]
variable[url] assign[=] call[constant[rest/api/1.0/projects/{project}/repos/{repository}/branches].format, parameter[]]
variable[params] assign[=] dictionary[[], []]
if name[start] begin[:]
call[name[params]][constant[start]] assign[=] name[start]
if name[limit] begin[:]
call[name[params]][constant[limit]] assign[=] name[limit]
if name[filter] begin[:]
call[name[params]][constant[filterText]] assign[=] name[filter]
if name[base] begin[:]
call[name[params]][constant[base]] assign[=] name[base]
if name[order_by] begin[:]
call[name[params]][constant[orderBy]] assign[=] name[order_by]
call[name[params]][constant[details]] assign[=] name[details]
return[call[<ast.BoolOp object at 0x7da20e957c10>.get, parameter[constant[values]]]] | keyword[def] identifier[get_branches] ( identifier[self] , identifier[project] , identifier[repository] , identifier[base] = keyword[None] , identifier[filter] = keyword[None] , identifier[start] = literal[int] , identifier[limit] = literal[int] , identifier[details] = keyword[True] ,
identifier[order_by] = literal[string] ):
literal[string]
identifier[url] = literal[string] . identifier[format] ( identifier[project] = identifier[project] ,
identifier[repository] = identifier[repository] )
identifier[params] ={}
keyword[if] identifier[start] :
identifier[params] [ literal[string] ]= identifier[start]
keyword[if] identifier[limit] :
identifier[params] [ literal[string] ]= identifier[limit]
keyword[if] identifier[filter] :
identifier[params] [ literal[string] ]= identifier[filter]
keyword[if] identifier[base] :
identifier[params] [ literal[string] ]= identifier[base]
keyword[if] identifier[order_by] :
identifier[params] [ literal[string] ]= identifier[order_by]
identifier[params] [ literal[string] ]= identifier[details]
keyword[return] ( identifier[self] . identifier[get] ( identifier[url] , identifier[params] = identifier[params] ) keyword[or] {}). identifier[get] ( literal[string] ) | def get_branches(self, project, repository, base=None, filter=None, start=0, limit=99999, details=True, order_by='MODIFICATION'):
"""
Retrieve the branches matching the supplied filterText param.
The authenticated user must have REPO_READ permission for the specified repository to call this resource.
:param start:
:param project:
:param repository:
:param base: base branch/tag to compare each branch to (for the metadata providers that uses that information)
:param filter:
:param limit: OPTIONAL: The limit of the number of branches to return, this may be restricted by
fixed system limits. Default by built-in method: 99999
:param details: whether to retrieve plugin-provided metadata about each branch
:param order_by: OPTIONAL: ordering of refs either ALPHABETICAL (by name) or MODIFICATION (last updated)
:return:
"""
url = 'rest/api/1.0/projects/{project}/repos/{repository}/branches'.format(project=project, repository=repository)
params = {}
if start:
params['start'] = start # depends on [control=['if'], data=[]]
if limit:
params['limit'] = limit # depends on [control=['if'], data=[]]
if filter:
params['filterText'] = filter # depends on [control=['if'], data=[]]
if base:
params['base'] = base # depends on [control=['if'], data=[]]
if order_by:
params['orderBy'] = order_by # depends on [control=['if'], data=[]]
params['details'] = details
return (self.get(url, params=params) or {}).get('values') |
def attrput(self, groupname, attrname, rownr, value, unit=[], meas=[]):
"""Put the value and optionally unit and measinfo
of an attribute in a row in a group."""
return self._attrput(groupname, attrname, rownr, value, unit, meas) | def function[attrput, parameter[self, groupname, attrname, rownr, value, unit, meas]]:
constant[Put the value and optionally unit and measinfo
of an attribute in a row in a group.]
return[call[name[self]._attrput, parameter[name[groupname], name[attrname], name[rownr], name[value], name[unit], name[meas]]]] | keyword[def] identifier[attrput] ( identifier[self] , identifier[groupname] , identifier[attrname] , identifier[rownr] , identifier[value] , identifier[unit] =[], identifier[meas] =[]):
literal[string]
keyword[return] identifier[self] . identifier[_attrput] ( identifier[groupname] , identifier[attrname] , identifier[rownr] , identifier[value] , identifier[unit] , identifier[meas] ) | def attrput(self, groupname, attrname, rownr, value, unit=[], meas=[]):
"""Put the value and optionally unit and measinfo
of an attribute in a row in a group."""
return self._attrput(groupname, attrname, rownr, value, unit, meas) |
def _make_header_body_handler(end_body_regex,
node_factory,
has_footer=True):
"""Utility function to make a handler for header-body node.
A header-body node is any node which has a single function-call
header and a body of statements inside of it
"""
def handler(tokens, tokens_len, body_index, function_call):
"""Handler function."""
def _end_header_body_definition(token_index, tokens):
"""Header body termination function."""
if end_body_regex.match(tokens[token_index].content):
try:
if tokens[token_index + 1].type == TokenType.LeftParen:
return True
except IndexError:
raise RuntimeError("Syntax Error")
return False
token_index, body = _ast_worker(tokens, tokens_len, body_index,
_end_header_body_definition)
extra_kwargs = {}
if has_footer:
# Handle footer
token_index, footer = _handle_function_call(tokens,
tokens_len,
token_index)
extra_kwargs = {"footer": footer}
return (token_index,
node_factory(header=function_call,
body=body.statements,
line=tokens[body_index].line,
col=tokens[body_index].col,
index=body_index,
**extra_kwargs))
return handler | def function[_make_header_body_handler, parameter[end_body_regex, node_factory, has_footer]]:
constant[Utility function to make a handler for header-body node.
A header-body node is any node which has a single function-call
header and a body of statements inside of it
]
def function[handler, parameter[tokens, tokens_len, body_index, function_call]]:
constant[Handler function.]
def function[_end_header_body_definition, parameter[token_index, tokens]]:
constant[Header body termination function.]
if call[name[end_body_regex].match, parameter[call[name[tokens]][name[token_index]].content]] begin[:]
<ast.Try object at 0x7da18bc70e50>
return[constant[False]]
<ast.Tuple object at 0x7da18bc706d0> assign[=] call[name[_ast_worker], parameter[name[tokens], name[tokens_len], name[body_index], name[_end_header_body_definition]]]
variable[extra_kwargs] assign[=] dictionary[[], []]
if name[has_footer] begin[:]
<ast.Tuple object at 0x7da18bc73010> assign[=] call[name[_handle_function_call], parameter[name[tokens], name[tokens_len], name[token_index]]]
variable[extra_kwargs] assign[=] dictionary[[<ast.Constant object at 0x7da18fe90160>], [<ast.Name object at 0x7da18fe91630>]]
return[tuple[[<ast.Name object at 0x7da18fe93940>, <ast.Call object at 0x7da18fe90490>]]]
return[name[handler]] | keyword[def] identifier[_make_header_body_handler] ( identifier[end_body_regex] ,
identifier[node_factory] ,
identifier[has_footer] = keyword[True] ):
literal[string]
keyword[def] identifier[handler] ( identifier[tokens] , identifier[tokens_len] , identifier[body_index] , identifier[function_call] ):
literal[string]
keyword[def] identifier[_end_header_body_definition] ( identifier[token_index] , identifier[tokens] ):
literal[string]
keyword[if] identifier[end_body_regex] . identifier[match] ( identifier[tokens] [ identifier[token_index] ]. identifier[content] ):
keyword[try] :
keyword[if] identifier[tokens] [ identifier[token_index] + literal[int] ]. identifier[type] == identifier[TokenType] . identifier[LeftParen] :
keyword[return] keyword[True]
keyword[except] identifier[IndexError] :
keyword[raise] identifier[RuntimeError] ( literal[string] )
keyword[return] keyword[False]
identifier[token_index] , identifier[body] = identifier[_ast_worker] ( identifier[tokens] , identifier[tokens_len] , identifier[body_index] ,
identifier[_end_header_body_definition] )
identifier[extra_kwargs] ={}
keyword[if] identifier[has_footer] :
identifier[token_index] , identifier[footer] = identifier[_handle_function_call] ( identifier[tokens] ,
identifier[tokens_len] ,
identifier[token_index] )
identifier[extra_kwargs] ={ literal[string] : identifier[footer] }
keyword[return] ( identifier[token_index] ,
identifier[node_factory] ( identifier[header] = identifier[function_call] ,
identifier[body] = identifier[body] . identifier[statements] ,
identifier[line] = identifier[tokens] [ identifier[body_index] ]. identifier[line] ,
identifier[col] = identifier[tokens] [ identifier[body_index] ]. identifier[col] ,
identifier[index] = identifier[body_index] ,
** identifier[extra_kwargs] ))
keyword[return] identifier[handler] | def _make_header_body_handler(end_body_regex, node_factory, has_footer=True):
"""Utility function to make a handler for header-body node.
A header-body node is any node which has a single function-call
header and a body of statements inside of it
"""
def handler(tokens, tokens_len, body_index, function_call):
"""Handler function."""
def _end_header_body_definition(token_index, tokens):
"""Header body termination function."""
if end_body_regex.match(tokens[token_index].content):
try:
if tokens[token_index + 1].type == TokenType.LeftParen:
return True # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except IndexError:
raise RuntimeError('Syntax Error') # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
return False
(token_index, body) = _ast_worker(tokens, tokens_len, body_index, _end_header_body_definition)
extra_kwargs = {}
if has_footer:
# Handle footer
(token_index, footer) = _handle_function_call(tokens, tokens_len, token_index)
extra_kwargs = {'footer': footer} # depends on [control=['if'], data=[]]
return (token_index, node_factory(header=function_call, body=body.statements, line=tokens[body_index].line, col=tokens[body_index].col, index=body_index, **extra_kwargs))
return handler |
def name(self, value):
"""Key name can be set by Key.key or Key.name. Key.key sets Key.name
internally, so just handle this property. When changing the key
name, try to load it's metadata from MimicDB. If it's not available,
the key hasn't been uploaded, downloaded or synced so don't add it to
the bucket set (it also might have just been deleted,
see boto.s3.bucket.py#785)
"""
self._name = value
if value:
meta = mimicdb.backend.hgetall(tpl.key % (self.bucket.name, value))
if meta:
mimicdb.backend.sadd(tpl.bucket % self.bucket.name, value)
self._load_meta(meta['size'], meta['md5']) | def function[name, parameter[self, value]]:
constant[Key name can be set by Key.key or Key.name. Key.key sets Key.name
internally, so just handle this property. When changing the key
name, try to load it's metadata from MimicDB. If it's not available,
the key hasn't been uploaded, downloaded or synced so don't add it to
the bucket set (it also might have just been deleted,
see boto.s3.bucket.py#785)
]
name[self]._name assign[=] name[value]
if name[value] begin[:]
variable[meta] assign[=] call[name[mimicdb].backend.hgetall, parameter[binary_operation[name[tpl].key <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da1b00889d0>, <ast.Name object at 0x7da1b0088a30>]]]]]
if name[meta] begin[:]
call[name[mimicdb].backend.sadd, parameter[binary_operation[name[tpl].bucket <ast.Mod object at 0x7da2590d6920> name[self].bucket.name], name[value]]]
call[name[self]._load_meta, parameter[call[name[meta]][constant[size]], call[name[meta]][constant[md5]]]] | keyword[def] identifier[name] ( identifier[self] , identifier[value] ):
literal[string]
identifier[self] . identifier[_name] = identifier[value]
keyword[if] identifier[value] :
identifier[meta] = identifier[mimicdb] . identifier[backend] . identifier[hgetall] ( identifier[tpl] . identifier[key] %( identifier[self] . identifier[bucket] . identifier[name] , identifier[value] ))
keyword[if] identifier[meta] :
identifier[mimicdb] . identifier[backend] . identifier[sadd] ( identifier[tpl] . identifier[bucket] % identifier[self] . identifier[bucket] . identifier[name] , identifier[value] )
identifier[self] . identifier[_load_meta] ( identifier[meta] [ literal[string] ], identifier[meta] [ literal[string] ]) | def name(self, value):
"""Key name can be set by Key.key or Key.name. Key.key sets Key.name
internally, so just handle this property. When changing the key
name, try to load it's metadata from MimicDB. If it's not available,
the key hasn't been uploaded, downloaded or synced so don't add it to
the bucket set (it also might have just been deleted,
see boto.s3.bucket.py#785)
"""
self._name = value
if value:
meta = mimicdb.backend.hgetall(tpl.key % (self.bucket.name, value))
if meta:
mimicdb.backend.sadd(tpl.bucket % self.bucket.name, value)
self._load_meta(meta['size'], meta['md5']) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] |
def download_sdk(self, rest_api_id, output_dir,
api_gateway_stage=DEFAULT_STAGE_NAME,
sdk_type='javascript'):
# type: (str, str, str, str) -> None
"""Download an SDK to a directory.
This will generate an SDK and download it to the provided
``output_dir``. If you're using ``get_sdk_download_stream()``,
you have to handle downloading the stream and unzipping the
contents yourself. This method handles that for you.
"""
zip_stream = self.get_sdk_download_stream(
rest_api_id, api_gateway_stage=api_gateway_stage,
sdk_type=sdk_type)
tmpdir = tempfile.mkdtemp()
with open(os.path.join(tmpdir, 'sdk.zip'), 'wb') as f:
f.write(zip_stream.read())
tmp_extract = os.path.join(tmpdir, 'extracted')
with zipfile.ZipFile(os.path.join(tmpdir, 'sdk.zip')) as z:
z.extractall(tmp_extract)
# The extract zip dir will have a single directory:
# ['apiGateway-js-sdk']
dirnames = os.listdir(tmp_extract)
if len(dirnames) == 1:
full_dirname = os.path.join(tmp_extract, dirnames[0])
if os.path.isdir(full_dirname):
final_dirname = 'chalice-%s-sdk' % sdk_type
full_renamed_name = os.path.join(tmp_extract, final_dirname)
os.rename(full_dirname, full_renamed_name)
shutil.move(full_renamed_name, output_dir)
return
raise RuntimeError(
"The downloaded SDK had an unexpected directory structure: %s" %
(', '.join(dirnames))) | def function[download_sdk, parameter[self, rest_api_id, output_dir, api_gateway_stage, sdk_type]]:
constant[Download an SDK to a directory.
This will generate an SDK and download it to the provided
``output_dir``. If you're using ``get_sdk_download_stream()``,
you have to handle downloading the stream and unzipping the
contents yourself. This method handles that for you.
]
variable[zip_stream] assign[=] call[name[self].get_sdk_download_stream, parameter[name[rest_api_id]]]
variable[tmpdir] assign[=] call[name[tempfile].mkdtemp, parameter[]]
with call[name[open], parameter[call[name[os].path.join, parameter[name[tmpdir], constant[sdk.zip]]], constant[wb]]] begin[:]
call[name[f].write, parameter[call[name[zip_stream].read, parameter[]]]]
variable[tmp_extract] assign[=] call[name[os].path.join, parameter[name[tmpdir], constant[extracted]]]
with call[name[zipfile].ZipFile, parameter[call[name[os].path.join, parameter[name[tmpdir], constant[sdk.zip]]]]] begin[:]
call[name[z].extractall, parameter[name[tmp_extract]]]
variable[dirnames] assign[=] call[name[os].listdir, parameter[name[tmp_extract]]]
if compare[call[name[len], parameter[name[dirnames]]] equal[==] constant[1]] begin[:]
variable[full_dirname] assign[=] call[name[os].path.join, parameter[name[tmp_extract], call[name[dirnames]][constant[0]]]]
if call[name[os].path.isdir, parameter[name[full_dirname]]] begin[:]
variable[final_dirname] assign[=] binary_operation[constant[chalice-%s-sdk] <ast.Mod object at 0x7da2590d6920> name[sdk_type]]
variable[full_renamed_name] assign[=] call[name[os].path.join, parameter[name[tmp_extract], name[final_dirname]]]
call[name[os].rename, parameter[name[full_dirname], name[full_renamed_name]]]
call[name[shutil].move, parameter[name[full_renamed_name], name[output_dir]]]
return[None]
<ast.Raise object at 0x7da2054a6830> | keyword[def] identifier[download_sdk] ( identifier[self] , identifier[rest_api_id] , identifier[output_dir] ,
identifier[api_gateway_stage] = identifier[DEFAULT_STAGE_NAME] ,
identifier[sdk_type] = literal[string] ):
literal[string]
identifier[zip_stream] = identifier[self] . identifier[get_sdk_download_stream] (
identifier[rest_api_id] , identifier[api_gateway_stage] = identifier[api_gateway_stage] ,
identifier[sdk_type] = identifier[sdk_type] )
identifier[tmpdir] = identifier[tempfile] . identifier[mkdtemp] ()
keyword[with] identifier[open] ( identifier[os] . identifier[path] . identifier[join] ( identifier[tmpdir] , literal[string] ), literal[string] ) keyword[as] identifier[f] :
identifier[f] . identifier[write] ( identifier[zip_stream] . identifier[read] ())
identifier[tmp_extract] = identifier[os] . identifier[path] . identifier[join] ( identifier[tmpdir] , literal[string] )
keyword[with] identifier[zipfile] . identifier[ZipFile] ( identifier[os] . identifier[path] . identifier[join] ( identifier[tmpdir] , literal[string] )) keyword[as] identifier[z] :
identifier[z] . identifier[extractall] ( identifier[tmp_extract] )
identifier[dirnames] = identifier[os] . identifier[listdir] ( identifier[tmp_extract] )
keyword[if] identifier[len] ( identifier[dirnames] )== literal[int] :
identifier[full_dirname] = identifier[os] . identifier[path] . identifier[join] ( identifier[tmp_extract] , identifier[dirnames] [ literal[int] ])
keyword[if] identifier[os] . identifier[path] . identifier[isdir] ( identifier[full_dirname] ):
identifier[final_dirname] = literal[string] % identifier[sdk_type]
identifier[full_renamed_name] = identifier[os] . identifier[path] . identifier[join] ( identifier[tmp_extract] , identifier[final_dirname] )
identifier[os] . identifier[rename] ( identifier[full_dirname] , identifier[full_renamed_name] )
identifier[shutil] . identifier[move] ( identifier[full_renamed_name] , identifier[output_dir] )
keyword[return]
keyword[raise] identifier[RuntimeError] (
literal[string] %
( literal[string] . identifier[join] ( identifier[dirnames] ))) | def download_sdk(self, rest_api_id, output_dir, api_gateway_stage=DEFAULT_STAGE_NAME, sdk_type='javascript'):
# type: (str, str, str, str) -> None
"Download an SDK to a directory.\n\n This will generate an SDK and download it to the provided\n ``output_dir``. If you're using ``get_sdk_download_stream()``,\n you have to handle downloading the stream and unzipping the\n contents yourself. This method handles that for you.\n\n "
zip_stream = self.get_sdk_download_stream(rest_api_id, api_gateway_stage=api_gateway_stage, sdk_type=sdk_type)
tmpdir = tempfile.mkdtemp()
with open(os.path.join(tmpdir, 'sdk.zip'), 'wb') as f:
f.write(zip_stream.read()) # depends on [control=['with'], data=['f']]
tmp_extract = os.path.join(tmpdir, 'extracted')
with zipfile.ZipFile(os.path.join(tmpdir, 'sdk.zip')) as z:
z.extractall(tmp_extract) # depends on [control=['with'], data=['z']]
# The extract zip dir will have a single directory:
# ['apiGateway-js-sdk']
dirnames = os.listdir(tmp_extract)
if len(dirnames) == 1:
full_dirname = os.path.join(tmp_extract, dirnames[0])
if os.path.isdir(full_dirname):
final_dirname = 'chalice-%s-sdk' % sdk_type
full_renamed_name = os.path.join(tmp_extract, final_dirname)
os.rename(full_dirname, full_renamed_name)
shutil.move(full_renamed_name, output_dir)
return # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
raise RuntimeError('The downloaded SDK had an unexpected directory structure: %s' % ', '.join(dirnames)) |
def check_for_executable(supernova_args, env_vars):
"""
It's possible that a user might set their custom executable via an
environment variable. If we detect one, we should add it to supernova's
arguments ONLY IF an executable wasn't set on the command line. The
command line executable must take priority.
"""
exe = supernova_args.get('executable', 'default')
if exe != 'default':
return supernova_args
if 'OS_EXECUTABLE' in env_vars.keys():
supernova_args['executable'] = env_vars['OS_EXECUTABLE']
return supernova_args
supernova_args['executable'] = 'nova'
return supernova_args | def function[check_for_executable, parameter[supernova_args, env_vars]]:
constant[
It's possible that a user might set their custom executable via an
environment variable. If we detect one, we should add it to supernova's
arguments ONLY IF an executable wasn't set on the command line. The
command line executable must take priority.
]
variable[exe] assign[=] call[name[supernova_args].get, parameter[constant[executable], constant[default]]]
if compare[name[exe] not_equal[!=] constant[default]] begin[:]
return[name[supernova_args]]
if compare[constant[OS_EXECUTABLE] in call[name[env_vars].keys, parameter[]]] begin[:]
call[name[supernova_args]][constant[executable]] assign[=] call[name[env_vars]][constant[OS_EXECUTABLE]]
return[name[supernova_args]]
call[name[supernova_args]][constant[executable]] assign[=] constant[nova]
return[name[supernova_args]] | keyword[def] identifier[check_for_executable] ( identifier[supernova_args] , identifier[env_vars] ):
literal[string]
identifier[exe] = identifier[supernova_args] . identifier[get] ( literal[string] , literal[string] )
keyword[if] identifier[exe] != literal[string] :
keyword[return] identifier[supernova_args]
keyword[if] literal[string] keyword[in] identifier[env_vars] . identifier[keys] ():
identifier[supernova_args] [ literal[string] ]= identifier[env_vars] [ literal[string] ]
keyword[return] identifier[supernova_args]
identifier[supernova_args] [ literal[string] ]= literal[string]
keyword[return] identifier[supernova_args] | def check_for_executable(supernova_args, env_vars):
"""
It's possible that a user might set their custom executable via an
environment variable. If we detect one, we should add it to supernova's
arguments ONLY IF an executable wasn't set on the command line. The
command line executable must take priority.
"""
exe = supernova_args.get('executable', 'default')
if exe != 'default':
return supernova_args # depends on [control=['if'], data=[]]
if 'OS_EXECUTABLE' in env_vars.keys():
supernova_args['executable'] = env_vars['OS_EXECUTABLE']
return supernova_args # depends on [control=['if'], data=[]]
supernova_args['executable'] = 'nova'
return supernova_args |
def getResponse(self, http_request, request):
"""
Processes the AMF request, returning an AMF response.
@param http_request: The underlying HTTP Request.
@type http_request: U{HTTPRequest<http://docs.djangoproject.com
/en/dev/ref/request-response/#httprequest-objects>}
@param request: The AMF Request.
@type request: L{Envelope<pyamf.remoting.Envelope>}
@rtype: L{Envelope<pyamf.remoting.Envelope>}
"""
response = remoting.Envelope(request.amfVersion)
for name, message in request:
http_request.amf_request = message
processor = self.getProcessor(message)
response[name] = processor(message, http_request=http_request)
return response | def function[getResponse, parameter[self, http_request, request]]:
constant[
Processes the AMF request, returning an AMF response.
@param http_request: The underlying HTTP Request.
@type http_request: U{HTTPRequest<http://docs.djangoproject.com
/en/dev/ref/request-response/#httprequest-objects>}
@param request: The AMF Request.
@type request: L{Envelope<pyamf.remoting.Envelope>}
@rtype: L{Envelope<pyamf.remoting.Envelope>}
]
variable[response] assign[=] call[name[remoting].Envelope, parameter[name[request].amfVersion]]
for taget[tuple[[<ast.Name object at 0x7da1b137cd00>, <ast.Name object at 0x7da1b137ece0>]]] in starred[name[request]] begin[:]
name[http_request].amf_request assign[=] name[message]
variable[processor] assign[=] call[name[self].getProcessor, parameter[name[message]]]
call[name[response]][name[name]] assign[=] call[name[processor], parameter[name[message]]]
return[name[response]] | keyword[def] identifier[getResponse] ( identifier[self] , identifier[http_request] , identifier[request] ):
literal[string]
identifier[response] = identifier[remoting] . identifier[Envelope] ( identifier[request] . identifier[amfVersion] )
keyword[for] identifier[name] , identifier[message] keyword[in] identifier[request] :
identifier[http_request] . identifier[amf_request] = identifier[message]
identifier[processor] = identifier[self] . identifier[getProcessor] ( identifier[message] )
identifier[response] [ identifier[name] ]= identifier[processor] ( identifier[message] , identifier[http_request] = identifier[http_request] )
keyword[return] identifier[response] | def getResponse(self, http_request, request):
"""
Processes the AMF request, returning an AMF response.
@param http_request: The underlying HTTP Request.
@type http_request: U{HTTPRequest<http://docs.djangoproject.com
/en/dev/ref/request-response/#httprequest-objects>}
@param request: The AMF Request.
@type request: L{Envelope<pyamf.remoting.Envelope>}
@rtype: L{Envelope<pyamf.remoting.Envelope>}
"""
response = remoting.Envelope(request.amfVersion)
for (name, message) in request:
http_request.amf_request = message
processor = self.getProcessor(message)
response[name] = processor(message, http_request=http_request) # depends on [control=['for'], data=[]]
return response |
def _generate_union_tag_access_signatures(self, union):
"""Emits the is<TAG_NAME> methods and tagName method signatures for
determining tag state and retrieving human-readable value of tag
state, respectively."""
for field in union.all_fields:
self.emit(comment_prefix)
base_str = 'Retrieves whether the union\'s current tag state has value "{}".'
self.emit_wrapped_text(
base_str.format(field.name), prefix=comment_prefix)
self.emit(comment_prefix)
if not is_void_type(field.data_type):
warning_str = (
'@note Call this method and ensure it returns true before '
'accessing the `{}` property, otherwise a runtime exception '
'will be thrown.')
self.emit_wrapped_text(
warning_str.format(fmt_var(field.name)),
prefix=comment_prefix)
self.emit(comment_prefix)
base_str = '@return Whether the union\'s current tag state has value "{}".'
self.emit_wrapped_text(
base_str.format(field.name), prefix=comment_prefix)
self.emit(comment_prefix)
is_tag_signature = fmt_signature(
func='is{}'.format(fmt_camel_upper(field.name)),
args=[],
return_type='BOOL')
self.emit('{};'.format(is_tag_signature))
self.emit()
get_tag_name_signature = fmt_signature(
func='tagName', args=None, return_type='NSString *')
self.emit(comment_prefix)
self.emit_wrapped_text(
"Retrieves string value of union's current tag state.",
prefix=comment_prefix)
self.emit(comment_prefix)
base_str = "@return A human-readable string representing the union's current tag state."
self.emit_wrapped_text(base_str, prefix=comment_prefix)
self.emit(comment_prefix)
self.emit('{};'.format(get_tag_name_signature))
self.emit() | def function[_generate_union_tag_access_signatures, parameter[self, union]]:
constant[Emits the is<TAG_NAME> methods and tagName method signatures for
determining tag state and retrieving human-readable value of tag
state, respectively.]
for taget[name[field]] in starred[name[union].all_fields] begin[:]
call[name[self].emit, parameter[name[comment_prefix]]]
variable[base_str] assign[=] constant[Retrieves whether the union's current tag state has value "{}".]
call[name[self].emit_wrapped_text, parameter[call[name[base_str].format, parameter[name[field].name]]]]
call[name[self].emit, parameter[name[comment_prefix]]]
if <ast.UnaryOp object at 0x7da204963cd0> begin[:]
variable[warning_str] assign[=] constant[@note Call this method and ensure it returns true before accessing the `{}` property, otherwise a runtime exception will be thrown.]
call[name[self].emit_wrapped_text, parameter[call[name[warning_str].format, parameter[call[name[fmt_var], parameter[name[field].name]]]]]]
call[name[self].emit, parameter[name[comment_prefix]]]
variable[base_str] assign[=] constant[@return Whether the union's current tag state has value "{}".]
call[name[self].emit_wrapped_text, parameter[call[name[base_str].format, parameter[name[field].name]]]]
call[name[self].emit, parameter[name[comment_prefix]]]
variable[is_tag_signature] assign[=] call[name[fmt_signature], parameter[]]
call[name[self].emit, parameter[call[constant[{};].format, parameter[name[is_tag_signature]]]]]
call[name[self].emit, parameter[]]
variable[get_tag_name_signature] assign[=] call[name[fmt_signature], parameter[]]
call[name[self].emit, parameter[name[comment_prefix]]]
call[name[self].emit_wrapped_text, parameter[constant[Retrieves string value of union's current tag state.]]]
call[name[self].emit, parameter[name[comment_prefix]]]
variable[base_str] assign[=] constant[@return A human-readable string representing the union's current tag state.]
call[name[self].emit_wrapped_text, parameter[name[base_str]]]
call[name[self].emit, parameter[name[comment_prefix]]]
call[name[self].emit, parameter[call[constant[{};].format, parameter[name[get_tag_name_signature]]]]]
call[name[self].emit, parameter[]] | keyword[def] identifier[_generate_union_tag_access_signatures] ( identifier[self] , identifier[union] ):
literal[string]
keyword[for] identifier[field] keyword[in] identifier[union] . identifier[all_fields] :
identifier[self] . identifier[emit] ( identifier[comment_prefix] )
identifier[base_str] = literal[string]
identifier[self] . identifier[emit_wrapped_text] (
identifier[base_str] . identifier[format] ( identifier[field] . identifier[name] ), identifier[prefix] = identifier[comment_prefix] )
identifier[self] . identifier[emit] ( identifier[comment_prefix] )
keyword[if] keyword[not] identifier[is_void_type] ( identifier[field] . identifier[data_type] ):
identifier[warning_str] =(
literal[string]
literal[string]
literal[string] )
identifier[self] . identifier[emit_wrapped_text] (
identifier[warning_str] . identifier[format] ( identifier[fmt_var] ( identifier[field] . identifier[name] )),
identifier[prefix] = identifier[comment_prefix] )
identifier[self] . identifier[emit] ( identifier[comment_prefix] )
identifier[base_str] = literal[string]
identifier[self] . identifier[emit_wrapped_text] (
identifier[base_str] . identifier[format] ( identifier[field] . identifier[name] ), identifier[prefix] = identifier[comment_prefix] )
identifier[self] . identifier[emit] ( identifier[comment_prefix] )
identifier[is_tag_signature] = identifier[fmt_signature] (
identifier[func] = literal[string] . identifier[format] ( identifier[fmt_camel_upper] ( identifier[field] . identifier[name] )),
identifier[args] =[],
identifier[return_type] = literal[string] )
identifier[self] . identifier[emit] ( literal[string] . identifier[format] ( identifier[is_tag_signature] ))
identifier[self] . identifier[emit] ()
identifier[get_tag_name_signature] = identifier[fmt_signature] (
identifier[func] = literal[string] , identifier[args] = keyword[None] , identifier[return_type] = literal[string] )
identifier[self] . identifier[emit] ( identifier[comment_prefix] )
identifier[self] . identifier[emit_wrapped_text] (
literal[string] ,
identifier[prefix] = identifier[comment_prefix] )
identifier[self] . identifier[emit] ( identifier[comment_prefix] )
identifier[base_str] = literal[string]
identifier[self] . identifier[emit_wrapped_text] ( identifier[base_str] , identifier[prefix] = identifier[comment_prefix] )
identifier[self] . identifier[emit] ( identifier[comment_prefix] )
identifier[self] . identifier[emit] ( literal[string] . identifier[format] ( identifier[get_tag_name_signature] ))
identifier[self] . identifier[emit] () | def _generate_union_tag_access_signatures(self, union):
"""Emits the is<TAG_NAME> methods and tagName method signatures for
determining tag state and retrieving human-readable value of tag
state, respectively."""
for field in union.all_fields:
self.emit(comment_prefix)
base_str = 'Retrieves whether the union\'s current tag state has value "{}".'
self.emit_wrapped_text(base_str.format(field.name), prefix=comment_prefix)
self.emit(comment_prefix)
if not is_void_type(field.data_type):
warning_str = '@note Call this method and ensure it returns true before accessing the `{}` property, otherwise a runtime exception will be thrown.'
self.emit_wrapped_text(warning_str.format(fmt_var(field.name)), prefix=comment_prefix)
self.emit(comment_prefix) # depends on [control=['if'], data=[]]
base_str = '@return Whether the union\'s current tag state has value "{}".'
self.emit_wrapped_text(base_str.format(field.name), prefix=comment_prefix)
self.emit(comment_prefix)
is_tag_signature = fmt_signature(func='is{}'.format(fmt_camel_upper(field.name)), args=[], return_type='BOOL')
self.emit('{};'.format(is_tag_signature))
self.emit() # depends on [control=['for'], data=['field']]
get_tag_name_signature = fmt_signature(func='tagName', args=None, return_type='NSString *')
self.emit(comment_prefix)
self.emit_wrapped_text("Retrieves string value of union's current tag state.", prefix=comment_prefix)
self.emit(comment_prefix)
base_str = "@return A human-readable string representing the union's current tag state."
self.emit_wrapped_text(base_str, prefix=comment_prefix)
self.emit(comment_prefix)
self.emit('{};'.format(get_tag_name_signature))
self.emit() |
def _pcca_connected(P, n, return_rot=False):
    """
    PCCA+ spectral clustering method with optimized memberships [1]_

    Clusters the first n eigenvectors of a transition matrix in order to
    cluster the states. This function assumes that the transition matrix
    is fully connected.

    Parameters
    ----------
    P : ndarray (n,n)
        Transition matrix.
    n : int
        Number of clusters to group to.
    return_rot : bool, optional
        Present for interface compatibility; the rotation matrix is not
        returned by this implementation.

    Returns
    -------
    memberships : ndarray (n x m)
        A matrix containing the probability or membership of each state to be
        assigned to each cluster. The rows sum to 1.

    Raises
    ------
    ValueError
        If the transition matrix is disconnected or does not fulfill
        detailed balance.

    References
    ----------
    [1] S. Roeblitz and M. Weber, Fuzzy spectral clustering by PCCA+:
        application to Markov state models and data classification.
        Adv Data Anal Classif 7, 147-179 (2013).
    """
    # test connectivity: PCCA+ is only defined on a single strongly
    # connected component
    from msmtools.estimation import connected_sets

    labels = connected_sets(P)
    n_components = len(labels)  # (n_components, labels) = connected_components(P, connection='strong')
    if n_components > 1:
        raise ValueError("Transition matrix is disconnected. Cannot use pcca_connected.")

    from msmtools.analysis import stationary_distribution

    pi = stationary_distribution(P)

    from msmtools.analysis import is_reversible

    if not is_reversible(P, mu=pi):
        raise ValueError("Transition matrix does not fulfill detailed balance. "
                         "Make sure to call pcca with a reversible transition matrix estimate")
    # TODO: Susanna mentioned that she has a potential fix for nonreversible matrices by replacing each complex conjugate
    # pair by the real and imaginary components of one of the two vectors. We could use this but would then need to
    # orthonormalize all eigenvectors e.g. using Gram-Schmidt orthonormalization. Currently there is no theoretical
    # foundation for this, so I'll skip it for now.

    # right eigenvectors, ordered
    from msmtools.analysis import eigenvectors

    evecs = eigenvectors(P, n)
    # orthonormalize with respect to the pi-weighted inner product
    for i in range(n):
        evecs[:, i] /= math.sqrt(np.dot(evecs[:, i] * pi, evecs[:, i]))
    # make first eigenvector positive
    evecs[:, 0] = np.abs(evecs[:, 0])
    # Is there a significant complex component?
    # BUGFIX: np.alltrue was deprecated and removed in NumPy 2.0; np.all is
    # the supported equivalent.
    if not np.all(np.isreal(evecs)):
        warnings.warn(
            "The given transition matrix has complex eigenvectors, so it doesn't exactly fulfill detailed balance "
            + "forcing eigenvectors to be real and continuing. Be aware that this is not theoretically solid.")
    evecs = np.real(evecs)

    # create initial solution using PCCA+. This could have negative memberships
    (chi, rot_matrix) = _pcca_connected_isa(evecs, n)

    # optimize the rotation matrix with PCCA++.
    rot_matrix = _opt_soft(evecs, rot_matrix, n)

    # These memberships should be nonnegative
    memberships = np.dot(evecs[:, :], rot_matrix)

    # We might still have numerical errors. Force memberships to be in [0,1]
    memberships = np.maximum(0.0, memberships)
    memberships = np.minimum(1.0, memberships)

    # renormalize each row so the memberships of a state sum to 1
    for i in range(0, np.shape(memberships)[0]):
        memberships[i] /= np.sum(memberships[i])

    return memberships
constant[
PCCA+ spectral clustering method with optimized memberships [1]_
Clusters the first n_cluster eigenvectors of a transition matrix in order to cluster the states.
This function assumes that the transition matrix is fully connected.
Parameters
----------
P : ndarray (n,n)
Transition matrix.
n : int
Number of clusters to group to.
Returns
-------
chi by default, or (chi,rot) if return_rot = True
chi : ndarray (n x m)
A matrix containing the probability or membership of each state to be assigned to each cluster.
The rows sum to 1.
rot_mat : ndarray (m x m)
A rotation matrix that rotates the dominant eigenvectors to yield the PCCA memberships, i.e.:
chi = np.dot(evec, rot_matrix
References
----------
[1] S. Roeblitz and M. Weber, Fuzzy spectral clustering by PCCA+:
application to Markov state models and data classification.
Adv Data Anal Classif 7, 147-179 (2013).
]
from relative_module[msmtools.estimation] import module[connected_sets]
variable[labels] assign[=] call[name[connected_sets], parameter[name[P]]]
variable[n_components] assign[=] call[name[len], parameter[name[labels]]]
if compare[name[n_components] greater[>] constant[1]] begin[:]
<ast.Raise object at 0x7da1b2546890>
from relative_module[msmtools.analysis] import module[stationary_distribution]
variable[pi] assign[=] call[name[stationary_distribution], parameter[name[P]]]
from relative_module[msmtools.analysis] import module[is_reversible]
if <ast.UnaryOp object at 0x7da1b2545ff0> begin[:]
<ast.Raise object at 0x7da1b2547c40>
from relative_module[msmtools.analysis] import module[eigenvectors]
variable[evecs] assign[=] call[name[eigenvectors], parameter[name[P], name[n]]]
for taget[name[i]] in starred[call[name[range], parameter[name[n]]]] begin[:]
<ast.AugAssign object at 0x7da1b25468f0>
call[name[evecs]][tuple[[<ast.Slice object at 0x7da1b25473d0>, <ast.Constant object at 0x7da1b2545cc0>]]] assign[=] call[name[np].abs, parameter[call[name[evecs]][tuple[[<ast.Slice object at 0x7da1b25455d0>, <ast.Constant object at 0x7da1b2545810>]]]]]
if <ast.UnaryOp object at 0x7da1b2547e80> begin[:]
call[name[warnings].warn, parameter[binary_operation[constant[The given transition matrix has complex eigenvectors, so it doesn't exactly fulfill detailed balance ] + constant[forcing eigenvectors to be real and continuing. Be aware that this is not theoretically solid.]]]]
variable[evecs] assign[=] call[name[np].real, parameter[name[evecs]]]
<ast.Tuple object at 0x7da1b2547af0> assign[=] call[name[_pcca_connected_isa], parameter[name[evecs], name[n]]]
variable[rot_matrix] assign[=] call[name[_opt_soft], parameter[name[evecs], name[rot_matrix], name[n]]]
variable[memberships] assign[=] call[name[np].dot, parameter[call[name[evecs]][tuple[[<ast.Slice object at 0x7da1b2546920>, <ast.Slice object at 0x7da1b2546260>]]], name[rot_matrix]]]
variable[memberships] assign[=] call[name[np].maximum, parameter[constant[0.0], name[memberships]]]
variable[memberships] assign[=] call[name[np].minimum, parameter[constant[1.0], name[memberships]]]
for taget[name[i]] in starred[call[name[range], parameter[constant[0], call[call[name[np].shape, parameter[name[memberships]]]][constant[0]]]]] begin[:]
<ast.AugAssign object at 0x7da1b2547820>
return[name[memberships]] | keyword[def] identifier[_pcca_connected] ( identifier[P] , identifier[n] , identifier[return_rot] = keyword[False] ):
literal[string]
keyword[from] identifier[msmtools] . identifier[estimation] keyword[import] identifier[connected_sets]
identifier[labels] = identifier[connected_sets] ( identifier[P] )
identifier[n_components] = identifier[len] ( identifier[labels] )
keyword[if] ( identifier[n_components] > literal[int] ):
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[from] identifier[msmtools] . identifier[analysis] keyword[import] identifier[stationary_distribution]
identifier[pi] = identifier[stationary_distribution] ( identifier[P] )
keyword[from] identifier[msmtools] . identifier[analysis] keyword[import] identifier[is_reversible]
keyword[if] keyword[not] identifier[is_reversible] ( identifier[P] , identifier[mu] = identifier[pi] ):
keyword[raise] identifier[ValueError] ( literal[string]
literal[string] )
keyword[from] identifier[msmtools] . identifier[analysis] keyword[import] identifier[eigenvectors]
identifier[evecs] = identifier[eigenvectors] ( identifier[P] , identifier[n] )
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[n] ):
identifier[evecs] [:, identifier[i] ]/= identifier[math] . identifier[sqrt] ( identifier[np] . identifier[dot] ( identifier[evecs] [:, identifier[i] ]* identifier[pi] , identifier[evecs] [:, identifier[i] ]))
identifier[evecs] [:, literal[int] ]= identifier[np] . identifier[abs] ( identifier[evecs] [:, literal[int] ])
keyword[if] keyword[not] identifier[np] . identifier[alltrue] ( identifier[np] . identifier[isreal] ( identifier[evecs] )):
identifier[warnings] . identifier[warn] (
literal[string]
+ literal[string] )
identifier[evecs] = identifier[np] . identifier[real] ( identifier[evecs] )
( identifier[chi] , identifier[rot_matrix] )= identifier[_pcca_connected_isa] ( identifier[evecs] , identifier[n] )
identifier[rot_matrix] = identifier[_opt_soft] ( identifier[evecs] , identifier[rot_matrix] , identifier[n] )
identifier[memberships] = identifier[np] . identifier[dot] ( identifier[evecs] [:,:], identifier[rot_matrix] )
identifier[memberships] = identifier[np] . identifier[maximum] ( literal[int] , identifier[memberships] )
identifier[memberships] = identifier[np] . identifier[minimum] ( literal[int] , identifier[memberships] )
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[np] . identifier[shape] ( identifier[memberships] )[ literal[int] ]):
identifier[memberships] [ identifier[i] ]/= identifier[np] . identifier[sum] ( identifier[memberships] [ identifier[i] ])
keyword[return] identifier[memberships] | def _pcca_connected(P, n, return_rot=False):
"""
PCCA+ spectral clustering method with optimized memberships [1]_
Clusters the first n_cluster eigenvectors of a transition matrix in order to cluster the states.
This function assumes that the transition matrix is fully connected.
Parameters
----------
P : ndarray (n,n)
Transition matrix.
n : int
Number of clusters to group to.
Returns
-------
chi by default, or (chi,rot) if return_rot = True
chi : ndarray (n x m)
A matrix containing the probability or membership of each state to be assigned to each cluster.
The rows sum to 1.
rot_mat : ndarray (m x m)
A rotation matrix that rotates the dominant eigenvectors to yield the PCCA memberships, i.e.:
chi = np.dot(evec, rot_matrix
References
----------
[1] S. Roeblitz and M. Weber, Fuzzy spectral clustering by PCCA+:
application to Markov state models and data classification.
Adv Data Anal Classif 7, 147-179 (2013).
"""
# test connectivity
from msmtools.estimation import connected_sets
labels = connected_sets(P)
n_components = len(labels) # (n_components, labels) = connected_components(P, connection='strong')
if n_components > 1:
raise ValueError('Transition matrix is disconnected. Cannot use pcca_connected.') # depends on [control=['if'], data=[]]
from msmtools.analysis import stationary_distribution
pi = stationary_distribution(P)
# print "statdist = ",pi
from msmtools.analysis import is_reversible
if not is_reversible(P, mu=pi):
raise ValueError('Transition matrix does not fulfill detailed balance. Make sure to call pcca with a reversible transition matrix estimate') # depends on [control=['if'], data=[]]
# TODO: Susanna mentioned that she has a potential fix for nonreversible matrices by replacing each complex conjugate
# pair by the real and imaginary components of one of the two vectors. We could use this but would then need to
# orthonormalize all eigenvectors e.g. using Gram-Schmidt orthonormalization. Currently there is no theoretical
# foundation for this, so I'll skip it for now.
# right eigenvectors, ordered
from msmtools.analysis import eigenvectors
evecs = eigenvectors(P, n)
# orthonormalize
for i in range(n):
evecs[:, i] /= math.sqrt(np.dot(evecs[:, i] * pi, evecs[:, i])) # depends on [control=['for'], data=['i']]
# make first eigenvector positive
evecs[:, 0] = np.abs(evecs[:, 0])
# Is there a significant complex component?
if not np.alltrue(np.isreal(evecs)):
warnings.warn("The given transition matrix has complex eigenvectors, so it doesn't exactly fulfill detailed balance " + 'forcing eigenvectors to be real and continuing. Be aware that this is not theoretically solid.') # depends on [control=['if'], data=[]]
evecs = np.real(evecs)
# create initial solution using PCCA+. This could have negative memberships
(chi, rot_matrix) = _pcca_connected_isa(evecs, n)
#print "initial chi = \n",chi
# optimize the rotation matrix with PCCA++.
rot_matrix = _opt_soft(evecs, rot_matrix, n)
# These memberships should be nonnegative
memberships = np.dot(evecs[:, :], rot_matrix)
# We might still have numerical errors. Force memberships to be in [0,1]
# print "memberships unnormalized: ",memberships
memberships = np.maximum(0.0, memberships)
memberships = np.minimum(1.0, memberships)
# print "memberships unnormalized: ",memberships
for i in range(0, np.shape(memberships)[0]):
memberships[i] /= np.sum(memberships[i]) # depends on [control=['for'], data=['i']]
# print "final chi = \n",chi
return memberships |
def petl(self, *args, **kwargs):
    """Return a PETL source object for this resource's target file.

    Positional and keyword arguments are passed through to the underlying
    ``petl.fromtext`` / ``petl.fromcsv`` constructor.

    :raises ValueError: if the target format is neither ``'txt'`` nor ``'csv'``.
    """
    t = self.resolved_url.get_resource().get_target()

    if t.target_format == 'txt':
        import petl  # local import keeps petl an optional dependency
        return petl.fromtext(str(t.fspath), *args, **kwargs)
    elif t.target_format == 'csv':
        import petl  # local import keeps petl an optional dependency
        return petl.fromcsv(str(t.fspath), *args, **kwargs)
    else:
        # BUGFIX: the original bare Exception("Can't handle") named neither
        # the offending format nor the supported ones. ValueError is a
        # subclass of Exception, so existing handlers still catch it.
        raise ValueError(
            "Can't handle target format {!r}; expected 'txt' or 'csv'".format(t.target_format))
constant[Return a PETL source object]
import module[petl]
variable[t] assign[=] call[call[name[self].resolved_url.get_resource, parameter[]].get_target, parameter[]]
if compare[name[t].target_format equal[==] constant[txt]] begin[:]
return[call[name[petl].fromtext, parameter[call[name[str], parameter[name[t].fspath]], <ast.Starred object at 0x7da18f58dea0>]]] | keyword[def] identifier[petl] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
keyword[import] identifier[petl]
identifier[t] = identifier[self] . identifier[resolved_url] . identifier[get_resource] (). identifier[get_target] ()
keyword[if] identifier[t] . identifier[target_format] == literal[string] :
keyword[return] identifier[petl] . identifier[fromtext] ( identifier[str] ( identifier[t] . identifier[fspath] ),* identifier[args] ,** identifier[kwargs] )
keyword[elif] identifier[t] . identifier[target_format] == literal[string] :
keyword[return] identifier[petl] . identifier[fromcsv] ( identifier[str] ( identifier[t] . identifier[fspath] ),* identifier[args] ,** identifier[kwargs] )
keyword[else] :
keyword[raise] identifier[Exception] ( literal[string] ) | def petl(self, *args, **kwargs):
"""Return a PETL source object"""
import petl
t = self.resolved_url.get_resource().get_target()
if t.target_format == 'txt':
return petl.fromtext(str(t.fspath), *args, **kwargs) # depends on [control=['if'], data=[]]
elif t.target_format == 'csv':
return petl.fromcsv(str(t.fspath), *args, **kwargs) # depends on [control=['if'], data=[]]
else:
raise Exception("Can't handle") |
def quitter():
    """Override the psiTurk quitter route."""
    current_exp = experiment(session)
    current_exp.log("Quitter route was hit.")
    payload = dumps({"status": "success"})
    return Response(payload, status=200, mimetype='application/json')
constant[Overide the psiTurk quitter route.]
variable[exp] assign[=] call[name[experiment], parameter[name[session]]]
call[name[exp].log, parameter[constant[Quitter route was hit.]]]
return[call[name[Response], parameter[call[name[dumps], parameter[dictionary[[<ast.Constant object at 0x7da18c4cfeb0>], [<ast.Constant object at 0x7da18c4cd7e0>]]]]]]] | keyword[def] identifier[quitter] ():
literal[string]
identifier[exp] = identifier[experiment] ( identifier[session] )
identifier[exp] . identifier[log] ( literal[string] )
keyword[return] identifier[Response] (
identifier[dumps] ({ literal[string] : literal[string] }),
identifier[status] = literal[int] ,
identifier[mimetype] = literal[string] ) | def quitter():
"""Overide the psiTurk quitter route."""
exp = experiment(session)
exp.log('Quitter route was hit.')
return Response(dumps({'status': 'success'}), status=200, mimetype='application/json') |
def repo_name(self):
    """
    Returns a DataFrame of the repo names present in this project directory

    :return: DataFrame
    """
    rows = [[repo.repo_name] for repo in self.repos]
    return pd.DataFrame(rows, columns=['repository'])
constant[
Returns a DataFrame of the repo names present in this project directory
:return: DataFrame
]
variable[ds] assign[=] <ast.ListComp object at 0x7da20c6c7be0>
variable[df] assign[=] call[name[pd].DataFrame, parameter[name[ds]]]
return[name[df]] | keyword[def] identifier[repo_name] ( identifier[self] ):
literal[string]
identifier[ds] =[[ identifier[x] . identifier[repo_name] ] keyword[for] identifier[x] keyword[in] identifier[self] . identifier[repos] ]
identifier[df] = identifier[pd] . identifier[DataFrame] ( identifier[ds] , identifier[columns] =[ literal[string] ])
keyword[return] identifier[df] | def repo_name(self):
"""
Returns a DataFrame of the repo names present in this project directory
:return: DataFrame
"""
ds = [[x.repo_name] for x in self.repos]
df = pd.DataFrame(ds, columns=['repository'])
return df |
def term_to_binary(term, compressed=False):
    """
    Encode Python types into Erlang terms in binary data
    """
    payload = _term_to_binary(term)
    if compressed is False:
        # uncompressed external term format: version tag + payload
        return b_chr(_TAG_VERSION) + payload
    # compressed is True selects the default zlib level of 6
    level = 6 if compressed is True else compressed
    if level < 0 or level > 9:
        raise InputException('compressed in [0..9]')
    deflated = zlib.compress(payload, level)
    original_size = len(payload)
    if original_size > 4294967295:
        raise OutputException('uint32 overflow')
    header = b_chr(_TAG_VERSION) + b_chr(_TAG_COMPRESSED_ZLIB)
    return header + struct.pack(b'>I', original_size) + deflated
constant[
Encode Python types into Erlang terms in binary data
]
variable[data_uncompressed] assign[=] call[name[_term_to_binary], parameter[name[term]]]
if compare[name[compressed] is constant[False]] begin[:]
return[binary_operation[call[name[b_chr], parameter[name[_TAG_VERSION]]] + name[data_uncompressed]]] | keyword[def] identifier[term_to_binary] ( identifier[term] , identifier[compressed] = keyword[False] ):
literal[string]
identifier[data_uncompressed] = identifier[_term_to_binary] ( identifier[term] )
keyword[if] identifier[compressed] keyword[is] keyword[False] :
keyword[return] identifier[b_chr] ( identifier[_TAG_VERSION] )+ identifier[data_uncompressed]
keyword[else] :
keyword[if] identifier[compressed] keyword[is] keyword[True] :
identifier[compressed] = literal[int]
keyword[if] identifier[compressed] < literal[int] keyword[or] identifier[compressed] > literal[int] :
keyword[raise] identifier[InputException] ( literal[string] )
identifier[data_compressed] = identifier[zlib] . identifier[compress] ( identifier[data_uncompressed] , identifier[compressed] )
identifier[size_uncompressed] = identifier[len] ( identifier[data_uncompressed] )
keyword[if] identifier[size_uncompressed] > literal[int] :
keyword[raise] identifier[OutputException] ( literal[string] )
keyword[return] (
identifier[b_chr] ( identifier[_TAG_VERSION] )+ identifier[b_chr] ( identifier[_TAG_COMPRESSED_ZLIB] )+
identifier[struct] . identifier[pack] ( literal[string] , identifier[size_uncompressed] )+ identifier[data_compressed]
) | def term_to_binary(term, compressed=False):
"""
Encode Python types into Erlang terms in binary data
"""
data_uncompressed = _term_to_binary(term)
if compressed is False:
return b_chr(_TAG_VERSION) + data_uncompressed # depends on [control=['if'], data=[]]
else:
if compressed is True:
compressed = 6 # depends on [control=['if'], data=['compressed']]
if compressed < 0 or compressed > 9:
raise InputException('compressed in [0..9]') # depends on [control=['if'], data=[]]
data_compressed = zlib.compress(data_uncompressed, compressed)
size_uncompressed = len(data_uncompressed)
if size_uncompressed > 4294967295:
raise OutputException('uint32 overflow') # depends on [control=['if'], data=[]]
return b_chr(_TAG_VERSION) + b_chr(_TAG_COMPRESSED_ZLIB) + struct.pack(b'>I', size_uncompressed) + data_compressed |
def plotline(plt, alpha, taus, style, label=""):
    """ plot a line with the slope alpha """
    ys = [tau ** alpha for tau in taus]
    plt.loglog(taus, ys, style, label=label)
constant[ plot a line with the slope alpha ]
variable[y] assign[=] <ast.ListComp object at 0x7da1b1503610>
call[name[plt].loglog, parameter[name[taus], name[y], name[style]]] | keyword[def] identifier[plotline] ( identifier[plt] , identifier[alpha] , identifier[taus] , identifier[style] , identifier[label] = literal[string] ):
literal[string]
identifier[y] =[ identifier[pow] ( identifier[tt] , identifier[alpha] ) keyword[for] identifier[tt] keyword[in] identifier[taus] ]
identifier[plt] . identifier[loglog] ( identifier[taus] , identifier[y] , identifier[style] , identifier[label] = identifier[label] ) | def plotline(plt, alpha, taus, style, label=''):
""" plot a line with the slope alpha """
y = [pow(tt, alpha) for tt in taus]
plt.loglog(taus, y, style, label=label) |
def load_or_create_vocab(data: str, vocab_path: Optional[str], num_words: int, word_min_count: int,
                         pad_to_multiple_of: Optional[int] = None) -> Vocab:
    """
    If the vocabulary path is defined, the vocabulary is loaded from the path.
    Otherwise, it is built from the data file. No writing to disk occurs.
    """
    if vocab_path is not None:
        # a pre-built vocabulary on disk wins over rebuilding from data
        return vocab_from_json(vocab_path)
    return build_from_paths(paths=[data], num_words=num_words, min_count=word_min_count,
                            pad_to_multiple_of=pad_to_multiple_of)
constant[
If the vocabulary path is defined, the vocabulary is loaded from the path.
Otherwise, it is built from the data file. No writing to disk occurs.
]
if compare[name[vocab_path] is constant[None]] begin[:]
return[call[name[build_from_paths], parameter[]]] | keyword[def] identifier[load_or_create_vocab] ( identifier[data] : identifier[str] , identifier[vocab_path] : identifier[Optional] [ identifier[str] ], identifier[num_words] : identifier[int] , identifier[word_min_count] : identifier[int] ,
identifier[pad_to_multiple_of] : identifier[Optional] [ identifier[int] ]= keyword[None] )-> identifier[Vocab] :
literal[string]
keyword[if] identifier[vocab_path] keyword[is] keyword[None] :
keyword[return] identifier[build_from_paths] ( identifier[paths] =[ identifier[data] ], identifier[num_words] = identifier[num_words] , identifier[min_count] = identifier[word_min_count] ,
identifier[pad_to_multiple_of] = identifier[pad_to_multiple_of] )
keyword[else] :
keyword[return] identifier[vocab_from_json] ( identifier[vocab_path] ) | def load_or_create_vocab(data: str, vocab_path: Optional[str], num_words: int, word_min_count: int, pad_to_multiple_of: Optional[int]=None) -> Vocab:
"""
If the vocabulary path is defined, the vocabulary is loaded from the path.
Otherwise, it is built from the data file. No writing to disk occurs.
"""
if vocab_path is None:
return build_from_paths(paths=[data], num_words=num_words, min_count=word_min_count, pad_to_multiple_of=pad_to_multiple_of) # depends on [control=['if'], data=[]]
else:
return vocab_from_json(vocab_path) |
def groupBy(self, f, numPartitions=None, partitionFunc=portable_hash):
    """
    Return an RDD of grouped items.
    >>> rdd = sc.parallelize([1, 1, 2, 3, 5, 8])
    >>> result = rdd.groupBy(lambda x: x % 2).collect()
    >>> sorted([(x, sorted(y)) for (x, y) in result])
    [(0, [2, 8]), (1, [1, 1, 3, 5])]
    """
    # pair every element with its group key, then group on that key
    keyed = self.map(lambda item: (f(item), item))
    return keyed.groupByKey(numPartitions, partitionFunc)
constant[
Return an RDD of grouped items.
>>> rdd = sc.parallelize([1, 1, 2, 3, 5, 8])
>>> result = rdd.groupBy(lambda x: x % 2).collect()
>>> sorted([(x, sorted(y)) for (x, y) in result])
[(0, [2, 8]), (1, [1, 1, 3, 5])]
]
return[call[call[name[self].map, parameter[<ast.Lambda object at 0x7da1b20a9a50>]].groupByKey, parameter[name[numPartitions], name[partitionFunc]]]] | keyword[def] identifier[groupBy] ( identifier[self] , identifier[f] , identifier[numPartitions] = keyword[None] , identifier[partitionFunc] = identifier[portable_hash] ):
literal[string]
keyword[return] identifier[self] . identifier[map] ( keyword[lambda] identifier[x] :( identifier[f] ( identifier[x] ), identifier[x] )). identifier[groupByKey] ( identifier[numPartitions] , identifier[partitionFunc] ) | def groupBy(self, f, numPartitions=None, partitionFunc=portable_hash):
"""
Return an RDD of grouped items.
>>> rdd = sc.parallelize([1, 1, 2, 3, 5, 8])
>>> result = rdd.groupBy(lambda x: x % 2).collect()
>>> sorted([(x, sorted(y)) for (x, y) in result])
[(0, [2, 8]), (1, [1, 1, 3, 5])]
"""
return self.map(lambda x: (f(x), x)).groupByKey(numPartitions, partitionFunc) |
def query_dqsegdb(cls, flag, *args, **kwargs):
    """Query the advanced LIGO DQSegDB for the given flag
    Parameters
    ----------
    flag : `str`
        The name of the flag for which to query
    *args
        Either, two `float`-like numbers indicating the
        GPS [start, stop) interval, or a `SegmentList`
        defining a number of summary segments
    url : `str`, optional
        URL of the segment database, defaults to
        ``$DEFAULT_SEGMENT_SERVER`` environment variable, or
        ``'https://segments.ligo.org'``
    Returns
    -------
    flag : `DataQualityFlag`
        A new `DataQualityFlag`, with the `known` and `active` lists
        filled appropriately.
    """
    # parse arguments into a list of (start, end) GPS query intervals
    qsegs = _parse_query_segments(args, cls.query_dqsegdb)
    # get server (``url`` is a keyword-only option here)
    url = kwargs.pop('url', DEFAULT_SEGMENT_SERVER)
    # parse flag name into ifo/tag components; both are required to query
    out = cls(name=flag)
    if out.ifo is None or out.tag is None:
        raise ValueError("Cannot parse ifo or tag (name) for flag %r"
                         % flag)
    # process query one interval at a time, accumulating results in ``out``
    for start, end in qsegs:
        # handle infinities: an open-ended interval is truncated at "now"
        if float(end) == +inf:
            end = to_gps('now').seconds
        # query
        try:
            data = query_segments(flag, int(start), int(end), host=url)
        except HTTPError as exc:
            if exc.code == 404:  # if not found, annotate flag name
                exc.msg += ' [{0}]'.format(flag)
            raise
        # read from json buffer
        new = cls.read(
            BytesIO(json.dumps(data).encode('utf-8')),
            format='json',
        )
        # restrict to query segments
        segl = SegmentList([Segment(start, end)])
        new.known &= segl
        new.active &= segl
        out += new
    # replace metadata with that of the last interval queried
    # NOTE(review): if ``qsegs`` is empty, ``new`` is unbound here and this
    # raises NameError -- confirm _parse_query_segments always yields >= 1
    out.description = new.description
    out.isgood = new.isgood
    return out
constant[Query the advanced LIGO DQSegDB for the given flag
Parameters
----------
flag : `str`
The name of the flag for which to query
*args
Either, two `float`-like numbers indicating the
GPS [start, stop) interval, or a `SegmentList`
defining a number of summary segments
url : `str`, optional
URL of the segment database, defaults to
``$DEFAULT_SEGMENT_SERVER`` environment variable, or
``'https://segments.ligo.org'``
Returns
-------
flag : `DataQualityFlag`
A new `DataQualityFlag`, with the `known` and `active` lists
filled appropriately.
]
variable[qsegs] assign[=] call[name[_parse_query_segments], parameter[name[args], name[cls].query_dqsegdb]]
variable[url] assign[=] call[name[kwargs].pop, parameter[constant[url], name[DEFAULT_SEGMENT_SERVER]]]
variable[out] assign[=] call[name[cls], parameter[]]
if <ast.BoolOp object at 0x7da204621f90> begin[:]
<ast.Raise object at 0x7da204621b10>
for taget[tuple[[<ast.Name object at 0x7da2041d9540>, <ast.Name object at 0x7da2041d8580>]]] in starred[name[qsegs]] begin[:]
if compare[call[name[float], parameter[name[end]]] equal[==] <ast.UnaryOp object at 0x7da2041d9870>] begin[:]
variable[end] assign[=] call[name[to_gps], parameter[constant[now]]].seconds
<ast.Try object at 0x7da2041db2e0>
variable[new] assign[=] call[name[cls].read, parameter[call[name[BytesIO], parameter[call[call[name[json].dumps, parameter[name[data]]].encode, parameter[constant[utf-8]]]]]]]
variable[segl] assign[=] call[name[SegmentList], parameter[list[[<ast.Call object at 0x7da204346410>]]]]
<ast.AugAssign object at 0x7da204345f00>
<ast.AugAssign object at 0x7da204344730>
<ast.AugAssign object at 0x7da204347490>
name[out].description assign[=] name[new].description
name[out].isgood assign[=] name[new].isgood
return[name[out]] | keyword[def] identifier[query_dqsegdb] ( identifier[cls] , identifier[flag] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
identifier[qsegs] = identifier[_parse_query_segments] ( identifier[args] , identifier[cls] . identifier[query_dqsegdb] )
identifier[url] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[DEFAULT_SEGMENT_SERVER] )
identifier[out] = identifier[cls] ( identifier[name] = identifier[flag] )
keyword[if] identifier[out] . identifier[ifo] keyword[is] keyword[None] keyword[or] identifier[out] . identifier[tag] keyword[is] keyword[None] :
keyword[raise] identifier[ValueError] ( literal[string]
% identifier[flag] )
keyword[for] identifier[start] , identifier[end] keyword[in] identifier[qsegs] :
keyword[if] identifier[float] ( identifier[end] )==+ identifier[inf] :
identifier[end] = identifier[to_gps] ( literal[string] ). identifier[seconds]
keyword[try] :
identifier[data] = identifier[query_segments] ( identifier[flag] , identifier[int] ( identifier[start] ), identifier[int] ( identifier[end] ), identifier[host] = identifier[url] )
keyword[except] identifier[HTTPError] keyword[as] identifier[exc] :
keyword[if] identifier[exc] . identifier[code] == literal[int] :
identifier[exc] . identifier[msg] += literal[string] . identifier[format] ( identifier[flag] )
keyword[raise]
identifier[new] = identifier[cls] . identifier[read] (
identifier[BytesIO] ( identifier[json] . identifier[dumps] ( identifier[data] ). identifier[encode] ( literal[string] )),
identifier[format] = literal[string] ,
)
identifier[segl] = identifier[SegmentList] ([ identifier[Segment] ( identifier[start] , identifier[end] )])
identifier[new] . identifier[known] &= identifier[segl]
identifier[new] . identifier[active] &= identifier[segl]
identifier[out] += identifier[new]
identifier[out] . identifier[description] = identifier[new] . identifier[description]
identifier[out] . identifier[isgood] = identifier[new] . identifier[isgood]
keyword[return] identifier[out] | def query_dqsegdb(cls, flag, *args, **kwargs):
"""Query the advanced LIGO DQSegDB for the given flag
Parameters
----------
flag : `str`
The name of the flag for which to query
*args
Either, two `float`-like numbers indicating the
GPS [start, stop) interval, or a `SegmentList`
defining a number of summary segments
url : `str`, optional
URL of the segment database, defaults to
``$DEFAULT_SEGMENT_SERVER`` environment variable, or
``'https://segments.ligo.org'``
Returns
-------
flag : `DataQualityFlag`
A new `DataQualityFlag`, with the `known` and `active` lists
filled appropriately.
"""
# parse arguments
qsegs = _parse_query_segments(args, cls.query_dqsegdb)
# get server
url = kwargs.pop('url', DEFAULT_SEGMENT_SERVER)
# parse flag
out = cls(name=flag)
if out.ifo is None or out.tag is None:
raise ValueError('Cannot parse ifo or tag (name) for flag %r' % flag) # depends on [control=['if'], data=[]]
# process query
for (start, end) in qsegs:
# handle infinities
if float(end) == +inf:
end = to_gps('now').seconds # depends on [control=['if'], data=[]]
# query
try:
data = query_segments(flag, int(start), int(end), host=url) # depends on [control=['try'], data=[]]
except HTTPError as exc:
if exc.code == 404: # if not found, annotate flag name
exc.msg += ' [{0}]'.format(flag) # depends on [control=['if'], data=[]]
raise # depends on [control=['except'], data=['exc']]
# read from json buffer
new = cls.read(BytesIO(json.dumps(data).encode('utf-8')), format='json')
# restrict to query segments
segl = SegmentList([Segment(start, end)])
new.known &= segl
new.active &= segl
out += new
# replace metadata
out.description = new.description
out.isgood = new.isgood # depends on [control=['for'], data=[]]
return out |
def run(self):
    """Start thread run here.

    For the ``"pxer"`` command, first request a PXE boot; if that left
    ``self.status`` at 0 or unset (None), follow up with a ``"reboot"``,
    otherwise stop. Any failure is recorded on ``self.error`` instead of
    raised, since an exception escaping a thread's run() would be lost.
    """
    try:
        if self.command == "pxer":
            self.ipmi_method(command="pxe")
            # BUGFIX: compare to None by identity (PEP 8 E711), not ``==``
            if self.status == 0 or self.status is None:
                self.command = "reboot"
            else:
                return
        self.ipmi_method(self.command)
    except Exception as e:
        # best-effort: surface the failure to the owner via self.error
        self.error = str(e)
constant[Start thread run here
]
<ast.Try object at 0x7da1b0bd30a0> | keyword[def] identifier[run] ( identifier[self] ):
literal[string]
keyword[try] :
keyword[if] identifier[self] . identifier[command] == literal[string] :
identifier[self] . identifier[ipmi_method] ( identifier[command] = literal[string] )
keyword[if] identifier[self] . identifier[status] == literal[int] keyword[or] identifier[self] . identifier[status] == keyword[None] :
identifier[self] . identifier[command] = literal[string]
keyword[else] :
keyword[return]
identifier[self] . identifier[ipmi_method] ( identifier[self] . identifier[command] )
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[self] . identifier[error] = identifier[str] ( identifier[e] ) | def run(self):
"""Start thread run here
"""
try:
if self.command == 'pxer':
self.ipmi_method(command='pxe')
if self.status == 0 or self.status == None:
self.command = 'reboot' # depends on [control=['if'], data=[]]
else:
return # depends on [control=['if'], data=[]]
self.ipmi_method(self.command) # depends on [control=['try'], data=[]]
except Exception as e:
self.error = str(e) # depends on [control=['except'], data=['e']] |
def color_palette(name=None, n_colors=6, desat=None):
    """Return a list of colors defining a color palette.

    Recognized seaborn palette names are ``deep``, ``muted``,
    ``bright``, ``pastel``, ``dark`` and ``colorblind``; ``hls`` and
    ``husl`` generate evenly-spaced palettes, and any matplotlib
    colormap name is accepted as well (optionally suffixed with ``_r``
    for reversed or ``_d`` for dark variants).  The returned object can
    also be used as a context manager to temporarily set the color
    cycle for a plot or set of plots.

    Parameters
    ----------
    name : None, string, or sequence
        Name of palette, or None to return the current palette.  A
        sequence of colors is used directly (possibly cycled and
        desaturated).
    n_colors : int
        Number of colors in the palette; cycles when larger than the
        palette itself.
    desat : float
        Value to desaturate each color by.

    Returns
    -------
    palette : list of RGB tuples.
        Color palette.

    See Also
    --------
    set_palette : set the default color cycle for all plots.
    axes_style : define parameters to set the style of plots
    plotting_context : define parameters to scale plot elements
    """
    named_palettes = {
        "deep": ["#4C72B0", "#55A868", "#C44E52",
                 "#8172B2", "#CCB974", "#64B5CD"],
        "muted": ["#4878CF", "#6ACC65", "#D65F5F",
                  "#B47CC7", "#C4AD66", "#77BEDB"],
        "pastel": ["#92C6FF", "#97F0AA", "#FF9F9A",
                   "#D0BBFF", "#FFFEA3", "#B0E0E6"],
        "bright": ["#003FFF", "#03ED3A", "#E8000B",
                   "#8A2BE2", "#FFC400", "#00D7FF"],
        "dark": ["#001C7F", "#017517", "#8C0900",
                 "#7600A1", "#B8860B", "#006374"],
        "colorblind": ["#0072B2", "#009E73", "#D55E00",
                       "#CC79A7", "#F0E442", "#56B4E9"],
    }
    # Resolve the palette source in priority order: current rc cycle,
    # explicit color sequence, generated (hls/husl), named seaborn
    # palette, then matplotlib colormaps (plain or "_r"/"_d" suffixed).
    if name is None:
        colors = mpl.rcParams["axes.color_cycle"]
    elif not isinstance(name, string_types):
        colors = name
    elif name == "hls":
        colors = hls_palette(n_colors)
    elif name == "husl":
        colors = husl_palette(n_colors)
    elif name in named_palettes:
        colors = named_palettes[name]
    elif name in dir(mpl.cm) or name[:-2] in dir(mpl.cm):
        colors = mpl_palette(name, n_colors)
    else:
        raise ValueError("%s is not a valid palette name" % name)
    if desat is not None:
        colors = [desaturate(color, desat) for color in colors]
    # Always return as many colors as we asked for, cycling if needed.
    color_source = cycle(colors)
    colors = [next(color_source) for _ in range(n_colors)]
    # Always return in (r, g, b) tuple format.
    try:
        palette = _ColorPalette(
            map(mpl.colors.colorConverter.to_rgb, colors))
    except ValueError:
        raise ValueError("Could not generate a palette for %s" % str(name))
    return palette
constant[Return a list of colors defining a color palette.
Availible seaborn palette names:
deep, muted, bright, pastel, dark, colorblind
Other options:
hls, husl, any matplotlib palette
Matplotlib paletes can be specified as reversed palettes by appending
"_r" to the name or as dark palettes by appending "_d" to the name.
This function can also be used in a ``with`` statement to temporarily
set the color cycle for a plot or set of plots.
Parameters
----------
name: None, string, or sequence
Name of palette or None to return current palette. If a
sequence, input colors are used but possibly cycled and
desaturated.
n_colors : int
Number of colors in the palette. If larger than the number of
colors in the palette, they will cycle.
desat : float
Value to desaturate each color by.
Returns
-------
palette : list of RGB tuples.
Color palette.
Examples
--------
>>> p = color_palette("muted")
>>> p = color_palette("Blues_d", 10)
>>> p = color_palette("Set1", desat=.7)
>>> import matplotlib.pyplot as plt
>>> with color_palette("husl", 8):
... f, ax = plt.subplots()
... ax.plot(x, y) # doctest: +SKIP
See Also
--------
set_palette : set the default color cycle for all plots.
axes_style : define parameters to set the style of plots
plotting_context : define parameters to scale plot elements
]
variable[seaborn_palettes] assign[=] call[name[dict], parameter[]]
if compare[name[name] is constant[None]] begin[:]
variable[palette] assign[=] call[name[mpl].rcParams][constant[axes.color_cycle]]
if compare[name[desat] is_not constant[None]] begin[:]
variable[palette] assign[=] <ast.ListComp object at 0x7da18fe90f70>
variable[pal_cycle] assign[=] call[name[cycle], parameter[name[palette]]]
variable[palette] assign[=] <ast.ListComp object at 0x7da18fe91330>
<ast.Try object at 0x7da18fe92e60>
return[name[palette]] | keyword[def] identifier[color_palette] ( identifier[name] = keyword[None] , identifier[n_colors] = literal[int] , identifier[desat] = keyword[None] ):
literal[string]
identifier[seaborn_palettes] = identifier[dict] (
identifier[deep] =[ literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] , literal[string] ],
identifier[muted] =[ literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] , literal[string] ],
identifier[pastel] =[ literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] , literal[string] ],
identifier[bright] =[ literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] , literal[string] ],
identifier[dark] =[ literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] , literal[string] ],
identifier[colorblind] =[ literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] , literal[string] ],
)
keyword[if] identifier[name] keyword[is] keyword[None] :
identifier[palette] = identifier[mpl] . identifier[rcParams] [ literal[string] ]
keyword[elif] keyword[not] identifier[isinstance] ( identifier[name] , identifier[string_types] ):
identifier[palette] = identifier[name]
keyword[elif] identifier[name] == literal[string] :
identifier[palette] = identifier[hls_palette] ( identifier[n_colors] )
keyword[elif] identifier[name] == literal[string] :
identifier[palette] = identifier[husl_palette] ( identifier[n_colors] )
keyword[elif] identifier[name] keyword[in] identifier[seaborn_palettes] :
identifier[palette] = identifier[seaborn_palettes] [ identifier[name] ]
keyword[elif] identifier[name] keyword[in] identifier[dir] ( identifier[mpl] . identifier[cm] ):
identifier[palette] = identifier[mpl_palette] ( identifier[name] , identifier[n_colors] )
keyword[elif] identifier[name] [:- literal[int] ] keyword[in] identifier[dir] ( identifier[mpl] . identifier[cm] ):
identifier[palette] = identifier[mpl_palette] ( identifier[name] , identifier[n_colors] )
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] % identifier[name] )
keyword[if] identifier[desat] keyword[is] keyword[not] keyword[None] :
identifier[palette] =[ identifier[desaturate] ( identifier[c] , identifier[desat] ) keyword[for] identifier[c] keyword[in] identifier[palette] ]
identifier[pal_cycle] = identifier[cycle] ( identifier[palette] )
identifier[palette] =[ identifier[next] ( identifier[pal_cycle] ) keyword[for] identifier[_] keyword[in] identifier[range] ( identifier[n_colors] )]
keyword[try] :
identifier[palette] = identifier[map] ( identifier[mpl] . identifier[colors] . identifier[colorConverter] . identifier[to_rgb] , identifier[palette] )
identifier[palette] = identifier[_ColorPalette] ( identifier[palette] )
keyword[except] identifier[ValueError] :
keyword[raise] identifier[ValueError] ( literal[string] % identifier[str] ( identifier[name] ))
keyword[return] identifier[palette] | def color_palette(name=None, n_colors=6, desat=None):
"""Return a list of colors defining a color palette.
Availible seaborn palette names:
deep, muted, bright, pastel, dark, colorblind
Other options:
hls, husl, any matplotlib palette
Matplotlib paletes can be specified as reversed palettes by appending
"_r" to the name or as dark palettes by appending "_d" to the name.
This function can also be used in a ``with`` statement to temporarily
set the color cycle for a plot or set of plots.
Parameters
----------
name: None, string, or sequence
Name of palette or None to return current palette. If a
sequence, input colors are used but possibly cycled and
desaturated.
n_colors : int
Number of colors in the palette. If larger than the number of
colors in the palette, they will cycle.
desat : float
Value to desaturate each color by.
Returns
-------
palette : list of RGB tuples.
Color palette.
Examples
--------
>>> p = color_palette("muted")
>>> p = color_palette("Blues_d", 10)
>>> p = color_palette("Set1", desat=.7)
>>> import matplotlib.pyplot as plt
>>> with color_palette("husl", 8):
... f, ax = plt.subplots()
... ax.plot(x, y) # doctest: +SKIP
See Also
--------
set_palette : set the default color cycle for all plots.
axes_style : define parameters to set the style of plots
plotting_context : define parameters to scale plot elements
"""
seaborn_palettes = dict(deep=['#4C72B0', '#55A868', '#C44E52', '#8172B2', '#CCB974', '#64B5CD'], muted=['#4878CF', '#6ACC65', '#D65F5F', '#B47CC7', '#C4AD66', '#77BEDB'], pastel=['#92C6FF', '#97F0AA', '#FF9F9A', '#D0BBFF', '#FFFEA3', '#B0E0E6'], bright=['#003FFF', '#03ED3A', '#E8000B', '#8A2BE2', '#FFC400', '#00D7FF'], dark=['#001C7F', '#017517', '#8C0900', '#7600A1', '#B8860B', '#006374'], colorblind=['#0072B2', '#009E73', '#D55E00', '#CC79A7', '#F0E442', '#56B4E9'])
if name is None:
palette = mpl.rcParams['axes.color_cycle'] # depends on [control=['if'], data=[]]
elif not isinstance(name, string_types):
palette = name # depends on [control=['if'], data=[]]
elif name == 'hls':
palette = hls_palette(n_colors) # depends on [control=['if'], data=[]]
elif name == 'husl':
palette = husl_palette(n_colors) # depends on [control=['if'], data=[]]
elif name in seaborn_palettes:
palette = seaborn_palettes[name] # depends on [control=['if'], data=['name', 'seaborn_palettes']]
elif name in dir(mpl.cm):
palette = mpl_palette(name, n_colors) # depends on [control=['if'], data=['name']]
elif name[:-2] in dir(mpl.cm):
palette = mpl_palette(name, n_colors) # depends on [control=['if'], data=[]]
else:
raise ValueError('%s is not a valid palette name' % name)
if desat is not None:
palette = [desaturate(c, desat) for c in palette] # depends on [control=['if'], data=['desat']]
# Always return as many colors as we asked for
pal_cycle = cycle(palette)
palette = [next(pal_cycle) for _ in range(n_colors)]
# Always return in r, g, b tuple format
try:
palette = map(mpl.colors.colorConverter.to_rgb, palette)
palette = _ColorPalette(palette) # depends on [control=['try'], data=[]]
except ValueError:
raise ValueError('Could not generate a palette for %s' % str(name)) # depends on [control=['except'], data=[]]
return palette |
def validate_labels(known_classes, passed_labels, argument_name):
    """Validate labels passed to true_labels/pred_labels arguments.

    Raises ValueError when *passed_labels* contains duplicates or any
    label missing from *known_classes*; returns None when valid.

    Args:
        known_classes (array-like):
            The classes that are known to appear in the data.
        passed_labels (array-like):
            The labels that were passed in through the argument.
        argument_name (str):
            The name of the argument being validated.

    Example:
        >>> known_classes = ["A", "B", "C"]
        >>> passed_labels = ["A", "B"]
        >>> validate_labels(known_classes, passed_labels, "true_labels")
    """
    known = np.array(known_classes)
    passed = np.array(passed_labels)
    # First occurrence index of each distinct label; fewer uniques than
    # labels means at least one label repeats.
    _, first_seen = np.unique(passed, return_index=True)
    if len(first_seen) != len(passed):
        positions = np.arange(0, len(passed))
        repeat_positions = positions[~np.in1d(positions, first_seen)]
        repeated = [str(label) for label in passed[repeat_positions]]
        raise ValueError(
            "The following duplicate labels were passed into {0}: {1}"
            .format(argument_name, ", ".join(repeated)))
    unknown_mask = ~np.in1d(passed, known)
    if np.any(unknown_mask):
        unknown = [str(label) for label in passed[unknown_mask]]
        raise ValueError(
            ("The following labels "
             "were passed into {0}, "
             "but were not found in "
             "labels: {1}").format(argument_name, ", ".join(unknown)))
    return
constant[Validates the labels passed into the true_labels or pred_labels
arguments in the plot_confusion_matrix function.
Raises a ValueError exception if any of the passed labels are not in the
set of known classes or if there are duplicate labels. Otherwise returns
None.
Args:
known_classes (array-like):
The classes that are known to appear in the data.
passed_labels (array-like):
The labels that were passed in through the argument.
argument_name (str):
The name of the argument being validated.
Example:
>>> known_classes = ["A", "B", "C"]
>>> passed_labels = ["A", "B"]
>>> validate_labels(known_classes, passed_labels, "true_labels")
]
variable[known_classes] assign[=] call[name[np].array, parameter[name[known_classes]]]
variable[passed_labels] assign[=] call[name[np].array, parameter[name[passed_labels]]]
<ast.Tuple object at 0x7da1b17d7100> assign[=] call[name[np].unique, parameter[name[passed_labels]]]
if compare[call[name[len], parameter[name[passed_labels]]] not_equal[!=] call[name[len], parameter[name[unique_labels]]]] begin[:]
variable[indexes] assign[=] call[name[np].arange, parameter[constant[0], call[name[len], parameter[name[passed_labels]]]]]
variable[duplicate_indexes] assign[=] call[name[indexes]][<ast.UnaryOp object at 0x7da1b17d53f0>]
variable[duplicate_labels] assign[=] <ast.ListComp object at 0x7da1b17d5570>
variable[msg] assign[=] call[constant[The following duplicate labels were passed into {0}: {1}].format, parameter[name[argument_name], call[constant[, ].join, parameter[name[duplicate_labels]]]]]
<ast.Raise object at 0x7da1b180d3f0>
variable[passed_labels_absent] assign[=] <ast.UnaryOp object at 0x7da1b180d090>
if call[name[np].any, parameter[name[passed_labels_absent]]] begin[:]
variable[absent_labels] assign[=] <ast.ListComp object at 0x7da1b16472b0>
variable[msg] assign[=] call[constant[The following labels were passed into {0}, but were not found in labels: {1}].format, parameter[name[argument_name], call[constant[, ].join, parameter[name[absent_labels]]]]]
<ast.Raise object at 0x7da1b1645870>
return[None] | keyword[def] identifier[validate_labels] ( identifier[known_classes] , identifier[passed_labels] , identifier[argument_name] ):
literal[string]
identifier[known_classes] = identifier[np] . identifier[array] ( identifier[known_classes] )
identifier[passed_labels] = identifier[np] . identifier[array] ( identifier[passed_labels] )
identifier[unique_labels] , identifier[unique_indexes] = identifier[np] . identifier[unique] ( identifier[passed_labels] , identifier[return_index] = keyword[True] )
keyword[if] identifier[len] ( identifier[passed_labels] )!= identifier[len] ( identifier[unique_labels] ):
identifier[indexes] = identifier[np] . identifier[arange] ( literal[int] , identifier[len] ( identifier[passed_labels] ))
identifier[duplicate_indexes] = identifier[indexes] [~ identifier[np] . identifier[in1d] ( identifier[indexes] , identifier[unique_indexes] )]
identifier[duplicate_labels] =[ identifier[str] ( identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[passed_labels] [ identifier[duplicate_indexes] ]]
identifier[msg] = literal[string] . identifier[format] ( identifier[argument_name] , literal[string] . identifier[join] ( identifier[duplicate_labels] ))
keyword[raise] identifier[ValueError] ( identifier[msg] )
identifier[passed_labels_absent] =~ identifier[np] . identifier[in1d] ( identifier[passed_labels] , identifier[known_classes] )
keyword[if] identifier[np] . identifier[any] ( identifier[passed_labels_absent] ):
identifier[absent_labels] =[ identifier[str] ( identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[passed_labels] [ identifier[passed_labels_absent] ]]
identifier[msg] =( literal[string]
literal[string]
literal[string]
literal[string] ). identifier[format] ( identifier[argument_name] , literal[string] . identifier[join] ( identifier[absent_labels] ))
keyword[raise] identifier[ValueError] ( identifier[msg] )
keyword[return] | def validate_labels(known_classes, passed_labels, argument_name):
"""Validates the labels passed into the true_labels or pred_labels
arguments in the plot_confusion_matrix function.
Raises a ValueError exception if any of the passed labels are not in the
set of known classes or if there are duplicate labels. Otherwise returns
None.
Args:
known_classes (array-like):
The classes that are known to appear in the data.
passed_labels (array-like):
The labels that were passed in through the argument.
argument_name (str):
The name of the argument being validated.
Example:
>>> known_classes = ["A", "B", "C"]
>>> passed_labels = ["A", "B"]
>>> validate_labels(known_classes, passed_labels, "true_labels")
"""
known_classes = np.array(known_classes)
passed_labels = np.array(passed_labels)
(unique_labels, unique_indexes) = np.unique(passed_labels, return_index=True)
if len(passed_labels) != len(unique_labels):
indexes = np.arange(0, len(passed_labels))
duplicate_indexes = indexes[~np.in1d(indexes, unique_indexes)]
duplicate_labels = [str(x) for x in passed_labels[duplicate_indexes]]
msg = 'The following duplicate labels were passed into {0}: {1}'.format(argument_name, ', '.join(duplicate_labels))
raise ValueError(msg) # depends on [control=['if'], data=[]]
passed_labels_absent = ~np.in1d(passed_labels, known_classes)
if np.any(passed_labels_absent):
absent_labels = [str(x) for x in passed_labels[passed_labels_absent]]
msg = 'The following labels were passed into {0}, but were not found in labels: {1}'.format(argument_name, ', '.join(absent_labels))
raise ValueError(msg) # depends on [control=['if'], data=[]]
return |
def vbkv_dumps(obj):
    """
    Serialize ``obj`` to a VBKV formatted ``bytes``.

    Layout: the magic ``b'VBKV'``, a little-endian 32-bit CRC32 of the
    payload, then the alt-format binary dump of *obj* itself.
    """
    data = b''.join(_binary_dump_gen(obj, alt_format=True))
    # crc32() returns an unsigned 32-bit value on Python 3; packing it
    # with the signed '<i' format raises struct.error for checksums
    # >= 2**31.  Mask and pack as unsigned '<I' instead -- the byte
    # layout is identical for values that fit a signed int, so output
    # is unchanged where the old code worked.
    checksum = crc32(data) & 0xFFFFFFFF
    return b'VBKV' + struct.pack('<I', checksum) + data
constant[
Serialize ``obj`` to a VBKV formatted ``bytes``.
]
variable[data] assign[=] call[constant[b''].join, parameter[call[name[_binary_dump_gen], parameter[name[obj]]]]]
variable[checksum] assign[=] call[name[crc32], parameter[name[data]]]
return[binary_operation[binary_operation[constant[b'VBKV'] + call[name[struct].pack, parameter[constant[<i], name[checksum]]]] + name[data]]] | keyword[def] identifier[vbkv_dumps] ( identifier[obj] ):
literal[string]
identifier[data] = literal[string] . identifier[join] ( identifier[_binary_dump_gen] ( identifier[obj] , identifier[alt_format] = keyword[True] ))
identifier[checksum] = identifier[crc32] ( identifier[data] )
keyword[return] literal[string] + identifier[struct] . identifier[pack] ( literal[string] , identifier[checksum] )+ identifier[data] | def vbkv_dumps(obj):
"""
Serialize ``obj`` to a VBKV formatted ``bytes``.
"""
data = b''.join(_binary_dump_gen(obj, alt_format=True))
checksum = crc32(data)
return b'VBKV' + struct.pack('<i', checksum) + data |
def main(argv, stdin, stdout):
    """Dispatch the turingmarkov command-line interface.

    Supported invocations: ``compile markov``, ``run markov``,
    ``compile turing``, ``run turing``, ``test``, ``version`` and
    ``help``; anything else prints usage and exits with status 1.
    """
    argc = len(argv)
    if argc > 1 and argv[1:3] == ["compile", "markov"]:
        print(load_markov(argv, stdin).compile(), file=stdout)
    elif argc == 4 and argv[1:3] == ["run", "markov"]:
        algo = load_markov(argv, stdin)
        # Feed every remaining stdin line (whitespace stripped) to the
        # Markov algorithm.
        for line in stdin:
            print(algo.execute(''.join(line.split())), file=stdout)
    elif argc > 1 and argv[1:3] == ["compile", "turing"]:
        print(load_turing(argv, stdin).compile(), file=stdout)
    elif argc == 4 and argv[1:3] == ["run", "turing"]:
        machine = load_turing(argv, stdin)
        for line in stdin:
            print(machine.execute(line), file=stdout)
    elif argc == 2 and argv[1] == "test":
        # Point argv at the package directory and let pytest discover.
        argv[1] = os.path.abspath(os.path.dirname(__file__))
        pytest.main()
    elif argc == 2 and argv[1] == "version":
        print("TuringMarkov", VERSION, file=stdout)
    else:
        print(USAGE, file=stdout)
        # "help" is the only unmatched invocation that exits cleanly.
        if argc != 2 or argv[1] != "help":
            exit(1)
constant[Execute, when user call turingmarkov.]
if <ast.BoolOp object at 0x7da1b23491e0> begin[:]
variable[algo] assign[=] call[name[load_markov], parameter[name[argv], name[stdin]]]
call[name[print], parameter[call[name[algo].compile, parameter[]]]] | keyword[def] identifier[main] ( identifier[argv] , identifier[stdin] , identifier[stdout] ):
literal[string]
keyword[if] identifier[len] ( identifier[argv] )> literal[int] keyword[and] identifier[argv] [ literal[int] : literal[int] ]==[ literal[string] , literal[string] ]:
identifier[algo] = identifier[load_markov] ( identifier[argv] , identifier[stdin] )
identifier[print] ( identifier[algo] . identifier[compile] (), identifier[file] = identifier[stdout] )
keyword[elif] identifier[len] ( identifier[argv] )== literal[int] keyword[and] identifier[argv] [ literal[int] : literal[int] ]==[ literal[string] , literal[string] ]:
identifier[algo] = identifier[load_markov] ( identifier[argv] , identifier[stdin] )
keyword[for] identifier[line] keyword[in] identifier[stdin] :
identifier[print] ( identifier[algo] . identifier[execute] ( literal[string] . identifier[join] ( identifier[line] . identifier[split] ())), identifier[file] = identifier[stdout] )
keyword[elif] identifier[len] ( identifier[argv] )> literal[int] keyword[and] identifier[argv] [ literal[int] : literal[int] ]==[ literal[string] , literal[string] ]:
identifier[machine] = identifier[load_turing] ( identifier[argv] , identifier[stdin] )
identifier[print] ( identifier[machine] . identifier[compile] (), identifier[file] = identifier[stdout] )
keyword[elif] identifier[len] ( identifier[argv] )== literal[int] keyword[and] identifier[argv] [ literal[int] : literal[int] ]==[ literal[string] , literal[string] ]:
identifier[machine] = identifier[load_turing] ( identifier[argv] , identifier[stdin] )
keyword[for] identifier[line] keyword[in] identifier[stdin] :
identifier[print] ( identifier[machine] . identifier[execute] ( identifier[line] ), identifier[file] = identifier[stdout] )
keyword[elif] identifier[len] ( identifier[argv] )== literal[int] keyword[and] identifier[argv] [ literal[int] ]== literal[string] :
identifier[path] = identifier[os] . identifier[path] . identifier[abspath] ( identifier[os] . identifier[path] . identifier[dirname] ( identifier[__file__] ))
identifier[argv] [ literal[int] ]= identifier[path]
identifier[pytest] . identifier[main] ()
keyword[elif] identifier[len] ( identifier[argv] )== literal[int] keyword[and] identifier[argv] [ literal[int] ]== literal[string] :
identifier[print] ( literal[string] , identifier[VERSION] , identifier[file] = identifier[stdout] )
keyword[else] :
identifier[print] ( identifier[USAGE] , identifier[file] = identifier[stdout] )
keyword[if] keyword[not] ( identifier[len] ( identifier[argv] )== literal[int] keyword[and] identifier[argv] [ literal[int] ]== literal[string] ):
identifier[exit] ( literal[int] ) | def main(argv, stdin, stdout):
"""Execute, when user call turingmarkov."""
if len(argv) > 1 and argv[1:3] == ['compile', 'markov']:
algo = load_markov(argv, stdin)
print(algo.compile(), file=stdout) # depends on [control=['if'], data=[]]
elif len(argv) == 4 and argv[1:3] == ['run', 'markov']:
algo = load_markov(argv, stdin)
for line in stdin:
print(algo.execute(''.join(line.split())), file=stdout) # depends on [control=['for'], data=['line']] # depends on [control=['if'], data=[]]
elif len(argv) > 1 and argv[1:3] == ['compile', 'turing']:
machine = load_turing(argv, stdin)
print(machine.compile(), file=stdout) # depends on [control=['if'], data=[]]
elif len(argv) == 4 and argv[1:3] == ['run', 'turing']:
machine = load_turing(argv, stdin)
for line in stdin:
print(machine.execute(line), file=stdout) # depends on [control=['for'], data=['line']] # depends on [control=['if'], data=[]]
elif len(argv) == 2 and argv[1] == 'test':
path = os.path.abspath(os.path.dirname(__file__))
argv[1] = path
pytest.main() # depends on [control=['if'], data=[]]
elif len(argv) == 2 and argv[1] == 'version':
print('TuringMarkov', VERSION, file=stdout) # depends on [control=['if'], data=[]]
else:
print(USAGE, file=stdout)
if not (len(argv) == 2 and argv[1] == 'help'):
exit(1) # depends on [control=['if'], data=[]] |
def get_authorization_url(self):
    """Build the OAuth authorization URL to redirect the user to.

    Combines the provider's ``authenticate`` endpoint with the client
    id, the ``code`` response type and the configured callback URI.
    """
    base = self._get_oauth_url('authenticate')
    params = {
        'client_id': self._client_id,
        'response_type': 'code',
        'redirect_uri': self.callback,
    }
    return '{0}?{1}'.format(base, self.urlencode(params))
constant[Get the authorization URL to redirect the user]
variable[url] assign[=] call[name[self]._get_oauth_url, parameter[constant[authenticate]]]
variable[query] assign[=] dictionary[[<ast.Constant object at 0x7da1b255dd50>, <ast.Constant object at 0x7da1b255c8e0>, <ast.Constant object at 0x7da1b255da50>], [<ast.Attribute object at 0x7da1b255d510>, <ast.Constant object at 0x7da1b255dba0>, <ast.Attribute object at 0x7da1b255d180>]]
variable[query_str] assign[=] call[name[self].urlencode, parameter[name[query]]]
return[binary_operation[binary_operation[name[url] + constant[?]] + name[query_str]]] | keyword[def] identifier[get_authorization_url] ( identifier[self] ):
literal[string]
identifier[url] = identifier[self] . identifier[_get_oauth_url] ( literal[string] )
identifier[query] ={
literal[string] : identifier[self] . identifier[_client_id] ,
literal[string] : literal[string] ,
literal[string] : identifier[self] . identifier[callback]
}
identifier[query_str] = identifier[self] . identifier[urlencode] ( identifier[query] )
keyword[return] identifier[url] + literal[string] + identifier[query_str] | def get_authorization_url(self):
"""Get the authorization URL to redirect the user"""
url = self._get_oauth_url('authenticate')
query = {'client_id': self._client_id, 'response_type': 'code', 'redirect_uri': self.callback}
query_str = self.urlencode(query)
return url + '?' + query_str |
def select_distinct_column(engine, *columns):
    """
    Select distinct value(s) of the given column(s).

    Syntactic-sugar wrapper around a ``SELECT DISTINCT`` statement.

    :returns: for a single column, a flat list of values; for multiple
        columns, a list of row tuples (a matrix).
    """
    first = columns[0]
    if isinstance(first, Column):
        pass
    elif isinstance(first, (list, tuple)):  # pragma: no cover
        # A single list/tuple of columns was passed instead of varargs.
        columns = first
    stmt = select(columns).distinct()
    rows = engine.execute(stmt)
    if len(columns) == 1:
        return [record[0] for record in rows]
    return [tuple(record) for record in rows]
constant[
Select distinct column(columns).
:returns: if single column, return list, if multiple column, return matrix.
**中文文档**
distinct语句的语法糖函数。
]
if call[name[isinstance], parameter[call[name[columns]][constant[0]], name[Column]]] begin[:]
pass
variable[s] assign[=] call[call[name[select], parameter[name[columns]]].distinct, parameter[]]
if compare[call[name[len], parameter[name[columns]]] equal[==] constant[1]] begin[:]
return[<ast.ListComp object at 0x7da207f03610>] | keyword[def] identifier[select_distinct_column] ( identifier[engine] ,* identifier[columns] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[columns] [ literal[int] ], identifier[Column] ):
keyword[pass]
keyword[elif] identifier[isinstance] ( identifier[columns] [ literal[int] ],( identifier[list] , identifier[tuple] )):
identifier[columns] = identifier[columns] [ literal[int] ]
identifier[s] = identifier[select] ( identifier[columns] ). identifier[distinct] ()
keyword[if] identifier[len] ( identifier[columns] )== literal[int] :
keyword[return] [ identifier[row] [ literal[int] ] keyword[for] identifier[row] keyword[in] identifier[engine] . identifier[execute] ( identifier[s] )]
keyword[else] :
keyword[return] [ identifier[tuple] ( identifier[row] ) keyword[for] identifier[row] keyword[in] identifier[engine] . identifier[execute] ( identifier[s] )] | def select_distinct_column(engine, *columns):
"""
Select distinct column(columns).
:returns: if single column, return list, if multiple column, return matrix.
**中文文档**
distinct语句的语法糖函数。
"""
if isinstance(columns[0], Column):
pass # depends on [control=['if'], data=[]]
elif isinstance(columns[0], (list, tuple)): # pragma: no cover
columns = columns[0] # depends on [control=['if'], data=[]]
s = select(columns).distinct()
if len(columns) == 1:
return [row[0] for row in engine.execute(s)] # depends on [control=['if'], data=[]]
else:
return [tuple(row) for row in engine.execute(s)] |
def _get_mvar(cls, df, column, windows):
""" get moving variance
:param df: data
:param column: column to calculate
:param windows: collection of window of moving variance
:return: None
"""
window = cls.get_only_one_positive_int(windows)
column_name = '{}_{}_mvar'.format(column, window)
df[column_name] = df[column].rolling(
min_periods=1, window=window, center=False).var() | def function[_get_mvar, parameter[cls, df, column, windows]]:
constant[ get moving variance
:param df: data
:param column: column to calculate
:param windows: collection of window of moving variance
:return: None
]
variable[window] assign[=] call[name[cls].get_only_one_positive_int, parameter[name[windows]]]
variable[column_name] assign[=] call[constant[{}_{}_mvar].format, parameter[name[column], name[window]]]
call[name[df]][name[column_name]] assign[=] call[call[call[name[df]][name[column]].rolling, parameter[]].var, parameter[]] | keyword[def] identifier[_get_mvar] ( identifier[cls] , identifier[df] , identifier[column] , identifier[windows] ):
literal[string]
identifier[window] = identifier[cls] . identifier[get_only_one_positive_int] ( identifier[windows] )
identifier[column_name] = literal[string] . identifier[format] ( identifier[column] , identifier[window] )
identifier[df] [ identifier[column_name] ]= identifier[df] [ identifier[column] ]. identifier[rolling] (
identifier[min_periods] = literal[int] , identifier[window] = identifier[window] , identifier[center] = keyword[False] ). identifier[var] () | def _get_mvar(cls, df, column, windows):
""" get moving variance
:param df: data
:param column: column to calculate
:param windows: collection of window of moving variance
:return: None
"""
window = cls.get_only_one_positive_int(windows)
column_name = '{}_{}_mvar'.format(column, window)
df[column_name] = df[column].rolling(min_periods=1, window=window, center=False).var() |
def retention_policy_exists(database, name, **client_args):
    '''
    Check if retention policy with given name exists.

    database
        Name of the database for which the retention policy was
        defined.

    name
        Name of the retention policy to check.

    CLI Example:

    .. code-block:: bash

        salt '*' influxdb.retention_policy_exists metrics default
    '''
    # get_retention_policy() returns a falsy value when the policy is
    # missing, so its truthiness is the answer; bool() replaces the
    # redundant if/return True/return False chain.
    return bool(get_retention_policy(database, name, **client_args))
constant[
Check if retention policy with given name exists.
database
Name of the database for which the retention policy was
defined.
name
Name of the retention policy to check.
CLI Example:
.. code-block:: bash
salt '*' influxdb.retention_policy_exists metrics default
]
if call[name[get_retention_policy], parameter[name[database], name[name]]] begin[:]
return[constant[True]]
return[constant[False]] | keyword[def] identifier[retention_policy_exists] ( identifier[database] , identifier[name] ,** identifier[client_args] ):
literal[string]
keyword[if] identifier[get_retention_policy] ( identifier[database] , identifier[name] ,** identifier[client_args] ):
keyword[return] keyword[True]
keyword[return] keyword[False] | def retention_policy_exists(database, name, **client_args):
"""
Check if retention policy with given name exists.
database
Name of the database for which the retention policy was
defined.
name
Name of the retention policy to check.
CLI Example:
.. code-block:: bash
salt '*' influxdb.retention_policy_exists metrics default
"""
if get_retention_policy(database, name, **client_args):
return True # depends on [control=['if'], data=[]]
return False |
def to_bel(graph, file: Optional[TextIO] = None) -> None:
    """Write the BEL graph as a canonical BEL script to a stream.

    :param BELGraph graph: the BEL Graph to serialize as a BEL Script
    :param file: A writable file-like object. If None, defaults to standard out.
    """
    # to_bel_lines yields the script line by line; stream each one out.
    bel_script_lines = to_bel_lines(graph)
    for bel_line in bel_script_lines:
        print(bel_line, file=file)
print(line, file=file) | def function[to_bel, parameter[graph, file]]:
constant[Output the BEL graph as canonical BEL to the given file/file-like/stream.
:param BELGraph graph: the BEL Graph to output as a BEL Script
:param file: A writable file-like object. If None, defaults to standard out.
]
for taget[name[line]] in starred[call[name[to_bel_lines], parameter[name[graph]]]] begin[:]
call[name[print], parameter[name[line]]] | keyword[def] identifier[to_bel] ( identifier[graph] , identifier[file] : identifier[Optional] [ identifier[TextIO] ]= keyword[None] )-> keyword[None] :
literal[string]
keyword[for] identifier[line] keyword[in] identifier[to_bel_lines] ( identifier[graph] ):
identifier[print] ( identifier[line] , identifier[file] = identifier[file] ) | def to_bel(graph, file: Optional[TextIO]=None) -> None:
"""Output the BEL graph as canonical BEL to the given file/file-like/stream.
:param BELGraph graph: the BEL Graph to output as a BEL Script
:param file: A writable file-like object. If None, defaults to standard out.
"""
for line in to_bel_lines(graph):
print(line, file=file) # depends on [control=['for'], data=['line']] |
def description(self):
    """Attribute that returns the plugin description from its docstring.

    Skips the first two lines of the docstring (summary and blank line),
    strips each remaining line, and joins the non-empty ones with spaces.
    """
    doc_lines = self.__doc__.split('\n')[2:]
    stripped = [doc_line.strip() for doc_line in doc_lines]
    return ' '.join(doc_line for doc_line in stripped if doc_line)
return ' '.join(lines) | def function[description, parameter[self]]:
constant[Attribute that returns the plugin description from its docstring.]
variable[lines] assign[=] list[[]]
for taget[name[line]] in starred[call[call[name[self].__doc__.split, parameter[constant[
]]]][<ast.Slice object at 0x7da2041d9e40>]] begin[:]
variable[line] assign[=] call[name[line].strip, parameter[]]
if name[line] begin[:]
call[name[lines].append, parameter[name[line]]]
return[call[constant[ ].join, parameter[name[lines]]]] | keyword[def] identifier[description] ( identifier[self] ):
literal[string]
identifier[lines] =[]
keyword[for] identifier[line] keyword[in] identifier[self] . identifier[__doc__] . identifier[split] ( literal[string] )[ literal[int] :]:
identifier[line] = identifier[line] . identifier[strip] ()
keyword[if] identifier[line] :
identifier[lines] . identifier[append] ( identifier[line] )
keyword[return] literal[string] . identifier[join] ( identifier[lines] ) | def description(self):
"""Attribute that returns the plugin description from its docstring."""
lines = []
for line in self.__doc__.split('\n')[2:]:
line = line.strip()
if line:
lines.append(line) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['line']]
return ' '.join(lines) |
def get_issns_for_journal(nlm_id):
    """Get a list of the ISSN numbers for a journal given its NLM ID.

    Information on NLM XML DTDs is available at
    https://www.nlm.nih.gov/databases/dtd/
    """
    params = {'db': 'nlmcatalog',
              'retmode': 'xml',
              'id': nlm_id}
    tree = send_request(pubmed_fetch, params)
    if tree is None:
        return None
    # Collect both plain ISSN entries and the linking ISSN entries.
    issns = tree.findall('.//ISSN') + tree.findall('.//ISSNLinking')
    if not issns:
        # No ISSNs found!
        return None
    return [issn.text for issn in issns]
return [issn.text for issn in issns] | def function[get_issns_for_journal, parameter[nlm_id]]:
constant[Get a list of the ISSN numbers for a journal given its NLM ID.
Information on NLM XML DTDs is available at
https://www.nlm.nih.gov/databases/dtd/
]
variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da2044c3d90>, <ast.Constant object at 0x7da2044c22f0>, <ast.Constant object at 0x7da2044c3130>], [<ast.Constant object at 0x7da2044c1030>, <ast.Constant object at 0x7da2044c1ae0>, <ast.Name object at 0x7da2044c01f0>]]
variable[tree] assign[=] call[name[send_request], parameter[name[pubmed_fetch], name[params]]]
if compare[name[tree] is constant[None]] begin[:]
return[constant[None]]
variable[issn_list] assign[=] call[name[tree].findall, parameter[constant[.//ISSN]]]
variable[issn_linking] assign[=] call[name[tree].findall, parameter[constant[.//ISSNLinking]]]
variable[issns] assign[=] binary_operation[name[issn_list] + name[issn_linking]]
if <ast.UnaryOp object at 0x7da2044c0550> begin[:]
return[constant[None]] | keyword[def] identifier[get_issns_for_journal] ( identifier[nlm_id] ):
literal[string]
identifier[params] ={ literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : identifier[nlm_id] }
identifier[tree] = identifier[send_request] ( identifier[pubmed_fetch] , identifier[params] )
keyword[if] identifier[tree] keyword[is] keyword[None] :
keyword[return] keyword[None]
identifier[issn_list] = identifier[tree] . identifier[findall] ( literal[string] )
identifier[issn_linking] = identifier[tree] . identifier[findall] ( literal[string] )
identifier[issns] = identifier[issn_list] + identifier[issn_linking]
keyword[if] keyword[not] identifier[issns] :
keyword[return] keyword[None]
keyword[else] :
keyword[return] [ identifier[issn] . identifier[text] keyword[for] identifier[issn] keyword[in] identifier[issns] ] | def get_issns_for_journal(nlm_id):
"""Get a list of the ISSN numbers for a journal given its NLM ID.
Information on NLM XML DTDs is available at
https://www.nlm.nih.gov/databases/dtd/
"""
params = {'db': 'nlmcatalog', 'retmode': 'xml', 'id': nlm_id}
tree = send_request(pubmed_fetch, params)
if tree is None:
return None # depends on [control=['if'], data=[]]
issn_list = tree.findall('.//ISSN')
issn_linking = tree.findall('.//ISSNLinking')
issns = issn_list + issn_linking
# No ISSNs found!
if not issns:
return None # depends on [control=['if'], data=[]]
else:
return [issn.text for issn in issns] |
def sync(self):
    """
    Synchronize the registered IP with the detected IP (if needed).

    This can be expensive, mostly depending on the detector, but also
    because updating the dynamic ip in itself is costly. Therefore, this
    method should usually only be called on startup or when the state changes.
    """
    detected_ip = self.detector.detect()
    if detected_ip is None:
        # Detection failed: we have no value to push upstream, so skip the
        # update entirely.  NOTE(review): self.status is left unchanged on
        # this path -- confirm that is intentional.
        LOG.debug("Couldn't detect the current IP using detector %r", self.detector.names()[-1])
        # we don't have a value to set it to, so don't update! Still shouldn't happen though
    elif self.dns.detect() != detected_ip:
        # DNS record is stale: push the detected IP via the updater and let
        # registered plugins react to the change.  updater.update() returns a
        # status code that is stored on self.status.
        LOG.info("%s: dns IP '%s' does not match detected IP '%s', updating",
                 self.updater.hostname, self.dns.get_current_value(), detected_ip)
        self.status = self.updater.update(detected_ip)
        self.plugins.after_remote_ip_update(detected_ip, self.status)
    else:
        # DNS already matches the detected IP: record success (0) and log.
        self.status = 0
        LOG.debug("%s: nothing to do, dns '%s' equals detection '%s'",
                  self.updater.hostname,
                  self.dns.get_current_value(),
self.detector.get_current_value()) | def function[sync, parameter[self]]:
constant[
Synchronize the registered IP with the detected IP (if needed).
This can be expensive, mostly depending on the detector, but also
because updating the dynamic ip in itself is costly. Therefore, this
method should usually only be called on startup or when the state changes.
]
variable[detected_ip] assign[=] call[name[self].detector.detect, parameter[]]
if compare[name[detected_ip] is constant[None]] begin[:]
call[name[LOG].debug, parameter[constant[Couldn't detect the current IP using detector %r], call[call[name[self].detector.names, parameter[]]][<ast.UnaryOp object at 0x7da1b19132b0>]]] | keyword[def] identifier[sync] ( identifier[self] ):
literal[string]
identifier[detected_ip] = identifier[self] . identifier[detector] . identifier[detect] ()
keyword[if] identifier[detected_ip] keyword[is] keyword[None] :
identifier[LOG] . identifier[debug] ( literal[string] , identifier[self] . identifier[detector] . identifier[names] ()[- literal[int] ])
keyword[elif] identifier[self] . identifier[dns] . identifier[detect] ()!= identifier[detected_ip] :
identifier[LOG] . identifier[info] ( literal[string] ,
identifier[self] . identifier[updater] . identifier[hostname] , identifier[self] . identifier[dns] . identifier[get_current_value] (), identifier[detected_ip] )
identifier[self] . identifier[status] = identifier[self] . identifier[updater] . identifier[update] ( identifier[detected_ip] )
identifier[self] . identifier[plugins] . identifier[after_remote_ip_update] ( identifier[detected_ip] , identifier[self] . identifier[status] )
keyword[else] :
identifier[self] . identifier[status] = literal[int]
identifier[LOG] . identifier[debug] ( literal[string] ,
identifier[self] . identifier[updater] . identifier[hostname] ,
identifier[self] . identifier[dns] . identifier[get_current_value] (),
identifier[self] . identifier[detector] . identifier[get_current_value] ()) | def sync(self):
"""
Synchronize the registered IP with the detected IP (if needed).
This can be expensive, mostly depending on the detector, but also
because updating the dynamic ip in itself is costly. Therefore, this
method should usually only be called on startup or when the state changes.
"""
detected_ip = self.detector.detect()
if detected_ip is None:
LOG.debug("Couldn't detect the current IP using detector %r", self.detector.names()[-1]) # depends on [control=['if'], data=[]]
# we don't have a value to set it to, so don't update! Still shouldn't happen though
elif self.dns.detect() != detected_ip:
LOG.info("%s: dns IP '%s' does not match detected IP '%s', updating", self.updater.hostname, self.dns.get_current_value(), detected_ip)
self.status = self.updater.update(detected_ip)
self.plugins.after_remote_ip_update(detected_ip, self.status) # depends on [control=['if'], data=['detected_ip']]
else:
self.status = 0
LOG.debug("%s: nothing to do, dns '%s' equals detection '%s'", self.updater.hostname, self.dns.get_current_value(), self.detector.get_current_value()) |
def initQApplication():
    """ Initializes the QtWidgets.QApplication instance. Creates one if it doesn't exist.

        Sets Argos specific attributes, such as the OrganizationName, so that the application
        persistent settings are read/written to the correct settings file/winreg. It is therefore
        important to call this function at startup. The ArgosApplication constructor does this.

        Returns the application.
    """
    # PyQtGraph recommends raster graphics system for OS-X.
    if 'darwin' in sys.platform:
        graphicsSystem = "raster" # raster, native or opengl
        # setdefault: respect a QT_GRAPHICSSYSTEM value the user already set
        # in the environment; only fill it in when absent.
        os.environ.setdefault('QT_GRAPHICSSYSTEM', graphicsSystem)
        logger.info("Setting QT_GRAPHICSSYSTEM to: {}".format(graphicsSystem))
    app = QtWidgets.QApplication(sys.argv)
    # Apply Argos-specific application/organization names so QSettings
    # resolves to the right persistent-settings location.
    initArgosApplicationSettings(app)
initArgosApplicationSettings(app)
return app | def function[initQApplication, parameter[]]:
constant[ Initializes the QtWidgets.QApplication instance. Creates one if it doesn't exist.
Sets Argos specific attributes, such as the OrganizationName, so that the application
persistent settings are read/written to the correct settings file/winreg. It is therefore
important to call this function at startup. The ArgosApplication constructor does this.
Returns the application.
]
if compare[constant[darwin] in name[sys].platform] begin[:]
variable[graphicsSystem] assign[=] constant[raster]
call[name[os].environ.setdefault, parameter[constant[QT_GRAPHICSSYSTEM], name[graphicsSystem]]]
call[name[logger].info, parameter[call[constant[Setting QT_GRAPHICSSYSTEM to: {}].format, parameter[name[graphicsSystem]]]]]
variable[app] assign[=] call[name[QtWidgets].QApplication, parameter[name[sys].argv]]
call[name[initArgosApplicationSettings], parameter[name[app]]]
return[name[app]] | keyword[def] identifier[initQApplication] ():
literal[string]
keyword[if] literal[string] keyword[in] identifier[sys] . identifier[platform] :
identifier[graphicsSystem] = literal[string]
identifier[os] . identifier[environ] . identifier[setdefault] ( literal[string] , identifier[graphicsSystem] )
identifier[logger] . identifier[info] ( literal[string] . identifier[format] ( identifier[graphicsSystem] ))
identifier[app] = identifier[QtWidgets] . identifier[QApplication] ( identifier[sys] . identifier[argv] )
identifier[initArgosApplicationSettings] ( identifier[app] )
keyword[return] identifier[app] | def initQApplication():
""" Initializes the QtWidgets.QApplication instance. Creates one if it doesn't exist.
Sets Argos specific attributes, such as the OrganizationName, so that the application
persistent settings are read/written to the correct settings file/winreg. It is therefore
important to call this function at startup. The ArgosApplication constructor does this.
Returns the application.
"""
# PyQtGraph recommends raster graphics system for OS-X.
if 'darwin' in sys.platform:
graphicsSystem = 'raster' # raster, native or opengl
os.environ.setdefault('QT_GRAPHICSSYSTEM', graphicsSystem)
logger.info('Setting QT_GRAPHICSSYSTEM to: {}'.format(graphicsSystem)) # depends on [control=['if'], data=[]]
app = QtWidgets.QApplication(sys.argv)
initArgosApplicationSettings(app)
return app |
def timer_tick(self):
    """Callback executed every self.base_interval_msec to check timer
    expirations.

    After processing expirations it re-schedules itself on the current
    Tornado IOLoop, so ticking continues until the loop stops or the
    stored timeout handle (self._timeout) is cancelled elsewhere.
    """
    # TODO: should exceptions thrown from this be caught and ignored
    self.process_timers()
    # Re-arm: schedule the next tick one base interval from now.
    delta = datetime.timedelta(milliseconds=self.base_interval_msec)
self._timeout = IOLoop.current().add_timeout(delta, self.timer_tick) | def function[timer_tick, parameter[self]]:
constant[Callback executed every self.base_interval_msec to check timer
expirations.
]
call[name[self].process_timers, parameter[]]
variable[delta] assign[=] call[name[datetime].timedelta, parameter[]]
name[self]._timeout assign[=] call[call[name[IOLoop].current, parameter[]].add_timeout, parameter[name[delta], name[self].timer_tick]] | keyword[def] identifier[timer_tick] ( identifier[self] ):
literal[string]
identifier[self] . identifier[process_timers] ()
identifier[delta] = identifier[datetime] . identifier[timedelta] ( identifier[milliseconds] = identifier[self] . identifier[base_interval_msec] )
identifier[self] . identifier[_timeout] = identifier[IOLoop] . identifier[current] (). identifier[add_timeout] ( identifier[delta] , identifier[self] . identifier[timer_tick] ) | def timer_tick(self):
"""Callback executed every self.base_interval_msec to check timer
expirations.
"""
# TODO: should exceptions thrown from this be caught and ignored
self.process_timers()
delta = datetime.timedelta(milliseconds=self.base_interval_msec)
self._timeout = IOLoop.current().add_timeout(delta, self.timer_tick) |
def _parse_seqs(self, LOS):
"""
m._parse_seqs(LOS) -- [utility] Build a matrix of counts from a list of sequences
"""
self.nseqs = len(LOS)
self.width = len(LOS[0])
for i in range(self.width):
Dc = {'A': 0, 'C': 0, 'T': 0, 'G': 0, 'N': 0}
for seq in LOS:
key = seq[i]
Dc[key] = Dc[key] + 1
del(Dc['N'])
self.counts.append(Dc) | def function[_parse_seqs, parameter[self, LOS]]:
constant[
m._parse_seqs(LOS) -- [utility] Build a matrix of counts from a list of sequences
]
name[self].nseqs assign[=] call[name[len], parameter[name[LOS]]]
name[self].width assign[=] call[name[len], parameter[call[name[LOS]][constant[0]]]]
for taget[name[i]] in starred[call[name[range], parameter[name[self].width]]] begin[:]
variable[Dc] assign[=] dictionary[[<ast.Constant object at 0x7da1b2767970>, <ast.Constant object at 0x7da1b2767790>, <ast.Constant object at 0x7da1b2767c10>, <ast.Constant object at 0x7da1b2766a40>, <ast.Constant object at 0x7da1b2767250>], [<ast.Constant object at 0x7da1b2767490>, <ast.Constant object at 0x7da1b27649d0>, <ast.Constant object at 0x7da1b2766ec0>, <ast.Constant object at 0x7da1b27668c0>, <ast.Constant object at 0x7da1b2766950>]]
for taget[name[seq]] in starred[name[LOS]] begin[:]
variable[key] assign[=] call[name[seq]][name[i]]
call[name[Dc]][name[key]] assign[=] binary_operation[call[name[Dc]][name[key]] + constant[1]]
<ast.Delete object at 0x7da1b2765db0>
call[name[self].counts.append, parameter[name[Dc]]] | keyword[def] identifier[_parse_seqs] ( identifier[self] , identifier[LOS] ):
literal[string]
identifier[self] . identifier[nseqs] = identifier[len] ( identifier[LOS] )
identifier[self] . identifier[width] = identifier[len] ( identifier[LOS] [ literal[int] ])
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[self] . identifier[width] ):
identifier[Dc] ={ literal[string] : literal[int] , literal[string] : literal[int] , literal[string] : literal[int] , literal[string] : literal[int] , literal[string] : literal[int] }
keyword[for] identifier[seq] keyword[in] identifier[LOS] :
identifier[key] = identifier[seq] [ identifier[i] ]
identifier[Dc] [ identifier[key] ]= identifier[Dc] [ identifier[key] ]+ literal[int]
keyword[del] ( identifier[Dc] [ literal[string] ])
identifier[self] . identifier[counts] . identifier[append] ( identifier[Dc] ) | def _parse_seqs(self, LOS):
"""
m._parse_seqs(LOS) -- [utility] Build a matrix of counts from a list of sequences
"""
self.nseqs = len(LOS)
self.width = len(LOS[0])
for i in range(self.width):
Dc = {'A': 0, 'C': 0, 'T': 0, 'G': 0, 'N': 0}
for seq in LOS:
key = seq[i]
Dc[key] = Dc[key] + 1 # depends on [control=['for'], data=['seq']]
del Dc['N']
self.counts.append(Dc) # depends on [control=['for'], data=['i']] |
def ctox(self):
    """Main method for the environment.

    Parse the tox.ini config, install the dependancies and run the
    commands. The output of the commands is printed.

    Returns 0 if they ran successfully, 1 if there was an error
    (either in setup or whilst running the commands), 2 if the build
    was skipped.
    """
    # TODO make this less of a hack e.g. using basepython from config
    # if it exists (and use an attribute directly).
    # Only the first four characters of the env name (e.g. "py27") are
    # compared against the supported list.
    if self.name[:4] not in SUPPORTED_ENVS:
        from colorama import Style
        cprint(Style.BRIGHT +
               "Skipping unsupported python version %s\n" % self.name,
               'warn')
        return 2
    # TODO don't remove env if there's a dependancy mis-match
    # rather "clean" it to the empty state (the hope being to keep
    # the dist build around - so not all files need to be rebuilt)
    # TODO extract this as a method (for readability)
    # (Re)create the virtualenv and install deps when it is missing or
    # cannot be reused; otherwise keep the cached env as-is.
    if not self.env_exists() or self.reusableable():
        cprint("%s create: %s" % (self.name, self.envdir))
        self.create_env(force_remove=True)
        cprint("%s installdeps: %s" % (self.name, ', '.join(self.deps)))
        if not self.install_deps():
            cprint("    deps installation failed, aborted.\n", 'err')
            return 1
    else:
        cprint("%s cached (deps unchanged): %s" % (self.name, self.envdir))
    # install the project from the zipped file
    # TODO think more carefully about where it should be installed
    # specifically we want to be able this to include the test files (which
    # are not always unpacked when installed so as to run the tests there)
    # if there are build files (e.g. cython) then tests must run where
    # the build was. Also, reinstalling should not overwrite the builds
    # e.g. setup.py will skip rebuilding cython files if they are unchanged
    cprint("%s inst: %s" % (self.name, self.envdistdir))
    if not self.install_dist():
        cprint("    install failed.\n", 'err')
        return 1
    cprint("%s runtests" % self.name)
    # return False if all commands were successfully run
    # otherwise returns True if at least one command exited badly
return self.run_commands() | def function[ctox, parameter[self]]:
constant[Main method for the environment.
Parse the tox.ini config, install the dependancies and run the
commands. The output of the commands is printed.
Returns 0 if they ran successfully, 1 if there was an error
(either in setup or whilst running the commands), 2 if the build
was skipped.
]
if compare[call[name[self].name][<ast.Slice object at 0x7da20e9b38e0>] <ast.NotIn object at 0x7da2590d7190> name[SUPPORTED_ENVS]] begin[:]
from relative_module[colorama] import module[Style]
call[name[cprint], parameter[binary_operation[name[Style].BRIGHT + binary_operation[constant[Skipping unsupported python version %s
] <ast.Mod object at 0x7da2590d6920> name[self].name]], constant[warn]]]
return[constant[2]]
if <ast.BoolOp object at 0x7da18ede5870> begin[:]
call[name[cprint], parameter[binary_operation[constant[%s create: %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da18ede5cf0>, <ast.Attribute object at 0x7da18ede4520>]]]]]
call[name[self].create_env, parameter[]]
call[name[cprint], parameter[binary_operation[constant[%s installdeps: %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da18ede4c40>, <ast.Call object at 0x7da18ede78b0>]]]]]
if <ast.UnaryOp object at 0x7da18ede5690> begin[:]
call[name[cprint], parameter[constant[ deps installation failed, aborted.
], constant[err]]]
return[constant[1]]
call[name[cprint], parameter[binary_operation[constant[%s inst: %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da18ede7f70>, <ast.Attribute object at 0x7da18ede4a90>]]]]]
if <ast.UnaryOp object at 0x7da18ede4f40> begin[:]
call[name[cprint], parameter[constant[ install failed.
], constant[err]]]
return[constant[1]]
call[name[cprint], parameter[binary_operation[constant[%s runtests] <ast.Mod object at 0x7da2590d6920> name[self].name]]]
return[call[name[self].run_commands, parameter[]]] | keyword[def] identifier[ctox] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[name] [: literal[int] ] keyword[not] keyword[in] identifier[SUPPORTED_ENVS] :
keyword[from] identifier[colorama] keyword[import] identifier[Style]
identifier[cprint] ( identifier[Style] . identifier[BRIGHT] +
literal[string] % identifier[self] . identifier[name] ,
literal[string] )
keyword[return] literal[int]
keyword[if] keyword[not] identifier[self] . identifier[env_exists] () keyword[or] identifier[self] . identifier[reusableable] ():
identifier[cprint] ( literal[string] %( identifier[self] . identifier[name] , identifier[self] . identifier[envdir] ))
identifier[self] . identifier[create_env] ( identifier[force_remove] = keyword[True] )
identifier[cprint] ( literal[string] %( identifier[self] . identifier[name] , literal[string] . identifier[join] ( identifier[self] . identifier[deps] )))
keyword[if] keyword[not] identifier[self] . identifier[install_deps] ():
identifier[cprint] ( literal[string] , literal[string] )
keyword[return] literal[int]
keyword[else] :
identifier[cprint] ( literal[string] %( identifier[self] . identifier[name] , identifier[self] . identifier[envdir] ))
identifier[cprint] ( literal[string] %( identifier[self] . identifier[name] , identifier[self] . identifier[envdistdir] ))
keyword[if] keyword[not] identifier[self] . identifier[install_dist] ():
identifier[cprint] ( literal[string] , literal[string] )
keyword[return] literal[int]
identifier[cprint] ( literal[string] % identifier[self] . identifier[name] )
keyword[return] identifier[self] . identifier[run_commands] () | def ctox(self):
"""Main method for the environment.
Parse the tox.ini config, install the dependancies and run the
commands. The output of the commands is printed.
Returns 0 if they ran successfully, 1 if there was an error
(either in setup or whilst running the commands), 2 if the build
was skipped.
"""
# TODO make this less of a hack e.g. using basepython from config
# if it exists (and use an attribute directly).
if self.name[:4] not in SUPPORTED_ENVS:
from colorama import Style
cprint(Style.BRIGHT + 'Skipping unsupported python version %s\n' % self.name, 'warn')
return 2 # depends on [control=['if'], data=[]]
# TODO don't remove env if there's a dependancy mis-match
# rather "clean" it to the empty state (the hope being to keep
# the dist build around - so not all files need to be rebuilt)
# TODO extract this as a method (for readability)
if not self.env_exists() or self.reusableable():
cprint('%s create: %s' % (self.name, self.envdir))
self.create_env(force_remove=True)
cprint('%s installdeps: %s' % (self.name, ', '.join(self.deps)))
if not self.install_deps():
cprint(' deps installation failed, aborted.\n', 'err')
return 1 # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
cprint('%s cached (deps unchanged): %s' % (self.name, self.envdir))
# install the project from the zipped file
# TODO think more carefully about where it should be installed
# specifically we want to be able this to include the test files (which
# are not always unpacked when installed so as to run the tests there)
# if there are build files (e.g. cython) then tests must run where
# the build was. Also, reinstalling should not overwrite the builds
# e.g. setup.py will skip rebuilding cython files if they are unchanged
cprint('%s inst: %s' % (self.name, self.envdistdir))
if not self.install_dist():
cprint(' install failed.\n', 'err')
return 1 # depends on [control=['if'], data=[]]
cprint('%s runtests' % self.name)
# return False if all commands were successfully run
# otherwise returns True if at least one command exited badly
return self.run_commands() |
def get_class_instance_key(cls, args, kwargs):
    """
    Return a unique, hashable identifier for a class instantiation.

    The key combines id(cls), the ids of the positional arguments, and
    (name, id(value)) pairs for the keyword arguments, sorted into a
    deterministic order (insensitive to argument ordering, as before).

    Bug fix: the original ``sorted(l)`` mixed ints and tuples, which
    raises TypeError under Python 3 whenever kwargs are present.  Sorting
    by repr keeps the ordering deterministic without requiring the
    entries to be mutually comparable.
    """
    entries = [id(cls)]
    for arg in args:
        entries.append(id(arg))
    entries.extend((k, id(v)) for k, v in kwargs.items())
    # key=repr: deterministic total order over the mixed int/tuple entries.
    return tuple(sorted(entries, key=repr))
return tuple(sorted(l)) | def function[get_class_instance_key, parameter[cls, args, kwargs]]:
constant[
Returns a unique identifier for a class instantiation.
]
variable[l] assign[=] list[[<ast.Call object at 0x7da1b01c2f50>]]
for taget[name[arg]] in starred[name[args]] begin[:]
call[name[l].append, parameter[call[name[id], parameter[name[arg]]]]]
call[name[l].extend, parameter[<ast.GeneratorExp object at 0x7da1b01c32b0>]]
return[call[name[tuple], parameter[call[name[sorted], parameter[name[l]]]]]] | keyword[def] identifier[get_class_instance_key] ( identifier[cls] , identifier[args] , identifier[kwargs] ):
literal[string]
identifier[l] =[ identifier[id] ( identifier[cls] )]
keyword[for] identifier[arg] keyword[in] identifier[args] :
identifier[l] . identifier[append] ( identifier[id] ( identifier[arg] ))
identifier[l] . identifier[extend] (( identifier[k] , identifier[id] ( identifier[v] )) keyword[for] identifier[k] , identifier[v] keyword[in] identifier[kwargs] . identifier[items] ())
keyword[return] identifier[tuple] ( identifier[sorted] ( identifier[l] )) | def get_class_instance_key(cls, args, kwargs):
"""
Returns a unique identifier for a class instantiation.
"""
l = [id(cls)]
for arg in args:
l.append(id(arg)) # depends on [control=['for'], data=['arg']]
l.extend(((k, id(v)) for (k, v) in kwargs.items()))
return tuple(sorted(l)) |
def normalize(num, lower=0, upper=360, b=False):
    """Normalize number to range [lower, upper) or [lower, upper].

    Parameters
    ----------
    num : float
        The number to be normalized.
    lower : int
        Lower limit of range. Default is 0.
    upper : int
        Upper limit of range. Default is 360.
    b : bool
        Type of normalization. Default is False. See notes.

        When b=True, the range must be symmetric about 0.
        When b=False, the range must be symmetric about 0 or ``lower`` must
        be equal to 0.

    Returns
    -------
    n : float
        A number in the range [lower, upper) or [lower, upper].

    Raises
    ------
    ValueError
        If lower >= upper.

    Notes
    -----
    If the keyword `b == False`, then the normalization is done in the
    following way. Consider the numbers to be arranged in a circle,
    with the lower and upper ends sitting on top of each other. Moving
    past one limit, takes the number into the beginning of the other
    end. For example, if range is [0 - 360), then 361 becomes 1 and 360
    becomes 0. Negative numbers move from higher to lower numbers. So,
    -1 normalized to [0 - 360) becomes 359.
    When b=False range must be symmetric about 0 or lower=0.

    If the keyword `b == True`, then the given number is considered to
    "bounce" between the two limits. So, -91 normalized to [-90, 90],
    becomes -89, instead of 89. In this case the range is [lower,
    upper]. This code is based on the function `fmt_delta` of `TPM`.
    When b=True range must be symmetric about 0.

    Examples
    --------
    >>> normalize(-270,-180,180)
    90.0
    >>> import math
    >>> math.degrees(normalize(-2*math.pi,-math.pi,math.pi))
    0.0
    >>> normalize(-180, -180, 180)
    -180.0
    >>> normalize(180, -180, 180)
    -180.0
    >>> normalize(180, -180, 180, b=True)
    180.0
    >>> normalize(181,-180,180)
    -179.0
    >>> normalize(181, -180, 180, b=True)
    179.0
    >>> normalize(-180,0,360)
    180.0
    >>> normalize(36,0,24)
    12.0
    >>> normalize(368.5,-180,180)
    8.5
    >>> normalize(-100, -90, 90)
    80.0
    >>> normalize(-100, -90, 90, b=True)
    -80.0
    >>> normalize(100, -90, 90, b=True)
    80.0
    >>> normalize(181, -90, 90, b=True)
    -1.0
    >>> normalize(270, -90, 90, b=True)
    -90.0
    >>> normalize(271, -90, 90, b=True)
    -89.0
    """
    from math import floor, ceil

    # Bug fix: the original built this ValueError but never raised it,
    # silently accepting an inverted range despite the documented contract.
    if lower >= upper:
        raise ValueError("lower must be lesser than upper")
    if not b:
        if not ((lower + upper == 0) or (lower == 0)):
            raise ValueError('When b=False lower=0 or range must be symmetric about 0.')
    else:
        if not (lower + upper == 0):
            raise ValueError('When b=True range must be symmetric about 0.')

    # abs(num + upper) and abs(num - lower) are needed, instead of
    # abs(num), since the lower and upper limits need not be 0. We need
    # to add half size of the range, so that the final result is lower +
    # <value> or upper - <value>, respectively.
    if not b:
        # Wrap-around normalization: values past one limit re-enter at the
        # other limit, like positions on a circle.
        if num > upper or num == lower:
            num = lower + abs(num + upper) % (abs(lower) + abs(upper))
        if num < lower or num == upper:
            num = upper - abs(num - lower) % (abs(lower) + abs(upper))
        res = lower if num == upper else num
    else:
        # "Bouncing" normalization: values reflect off the limits instead of
        # wrapping, so the result lies in the closed interval [lower, upper].
        total_length = abs(lower) + abs(upper)
        if num < -total_length:
            num += ceil(num / (-2 * total_length)) * 2 * total_length
        if num > total_length:
            num -= floor(num / (2 * total_length)) * 2 * total_length
        if num > upper:
            num = total_length - num
        if num < lower:
            num = -total_length - num
        res = num
    return res * 1.0  # Make all numbers float, to be consistent
return res | def function[normalize, parameter[num, lower, upper, b]]:
constant[Normalize number to range [lower, upper) or [lower, upper].
Parameters
----------
num : float
The number to be normalized.
lower : int
Lower limit of range. Default is 0.
upper : int
Upper limit of range. Default is 360.
b : bool
Type of normalization. Default is False. See notes.
When b=True, the range must be symmetric about 0.
When b=False, the range must be symmetric about 0 or ``lower`` must
be equal to 0.
Returns
-------
n : float
A number in the range [lower, upper) or [lower, upper].
Raises
------
ValueError
If lower >= upper.
Notes
-----
If the keyword `b == False`, then the normalization is done in the
following way. Consider the numbers to be arranged in a circle,
with the lower and upper ends sitting on top of each other. Moving
past one limit, takes the number into the beginning of the other
end. For example, if range is [0 - 360), then 361 becomes 1 and 360
becomes 0. Negative numbers move from higher to lower numbers. So,
-1 normalized to [0 - 360) becomes 359.
When b=False range must be symmetric about 0 or lower=0.
If the keyword `b == True`, then the given number is considered to
"bounce" between the two limits. So, -91 normalized to [-90, 90],
becomes -89, instead of 89. In this case the range is [lower,
upper]. This code is based on the function `fmt_delta` of `TPM`.
When b=True range must be symmetric about 0.
Examples
--------
>>> normalize(-270,-180,180)
90.0
>>> import math
>>> math.degrees(normalize(-2*math.pi,-math.pi,math.pi))
0.0
>>> normalize(-180, -180, 180)
-180.0
>>> normalize(180, -180, 180)
-180.0
>>> normalize(180, -180, 180, b=True)
180.0
>>> normalize(181,-180,180)
-179.0
>>> normalize(181, -180, 180, b=True)
179.0
>>> normalize(-180,0,360)
180.0
>>> normalize(36,0,24)
12.0
>>> normalize(368.5,-180,180)
8.5
>>> normalize(-100, -90, 90)
80.0
>>> normalize(-100, -90, 90, b=True)
-80.0
>>> normalize(100, -90, 90, b=True)
80.0
>>> normalize(181, -90, 90, b=True)
-1.0
>>> normalize(270, -90, 90, b=True)
-90.0
>>> normalize(271, -90, 90, b=True)
-89.0
]
if compare[name[lower] greater_or_equal[>=] name[upper]] begin[:]
call[name[ValueError], parameter[constant[lower must be lesser than upper]]]
if <ast.UnaryOp object at 0x7da1b23e8910> begin[:]
if <ast.UnaryOp object at 0x7da1b23eabf0> begin[:]
<ast.Raise object at 0x7da1b23e9060>
from relative_module[math] import module[floor], module[ceil]
variable[res] assign[=] name[num]
if <ast.UnaryOp object at 0x7da1b23e96c0> begin[:]
variable[res] assign[=] name[num]
if <ast.BoolOp object at 0x7da1b23e9000> begin[:]
variable[num] assign[=] binary_operation[name[lower] + binary_operation[call[name[abs], parameter[binary_operation[name[num] + name[upper]]]] <ast.Mod object at 0x7da2590d6920> binary_operation[call[name[abs], parameter[name[lower]]] + call[name[abs], parameter[name[upper]]]]]]
if <ast.BoolOp object at 0x7da1b23ea020> begin[:]
variable[num] assign[=] binary_operation[name[upper] - binary_operation[call[name[abs], parameter[binary_operation[name[num] - name[lower]]]] <ast.Mod object at 0x7da2590d6920> binary_operation[call[name[abs], parameter[name[lower]]] + call[name[abs], parameter[name[upper]]]]]]
variable[res] assign[=] <ast.IfExp object at 0x7da1b23e9750>
<ast.AugAssign object at 0x7da1b23e9c90>
return[name[res]] | keyword[def] identifier[normalize] ( identifier[num] , identifier[lower] = literal[int] , identifier[upper] = literal[int] , identifier[b] = keyword[False] ):
literal[string]
keyword[if] identifier[lower] >= identifier[upper] :
identifier[ValueError] ( literal[string] )
keyword[if] keyword[not] identifier[b] :
keyword[if] keyword[not] (( identifier[lower] + identifier[upper] == literal[int] ) keyword[or] ( identifier[lower] == literal[int] )):
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[else] :
keyword[if] keyword[not] ( identifier[lower] + identifier[upper] == literal[int] ):
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[from] identifier[math] keyword[import] identifier[floor] , identifier[ceil]
identifier[res] = identifier[num]
keyword[if] keyword[not] identifier[b] :
identifier[res] = identifier[num]
keyword[if] identifier[num] > identifier[upper] keyword[or] identifier[num] == identifier[lower] :
identifier[num] = identifier[lower] + identifier[abs] ( identifier[num] + identifier[upper] )%( identifier[abs] ( identifier[lower] )+ identifier[abs] ( identifier[upper] ))
keyword[if] identifier[num] < identifier[lower] keyword[or] identifier[num] == identifier[upper] :
identifier[num] = identifier[upper] - identifier[abs] ( identifier[num] - identifier[lower] )%( identifier[abs] ( identifier[lower] )+ identifier[abs] ( identifier[upper] ))
identifier[res] = identifier[lower] keyword[if] identifier[num] == identifier[upper] keyword[else] identifier[num]
keyword[else] :
identifier[total_length] = identifier[abs] ( identifier[lower] )+ identifier[abs] ( identifier[upper] )
keyword[if] identifier[num] <- identifier[total_length] :
identifier[num] += identifier[ceil] ( identifier[num] /(- literal[int] * identifier[total_length] ))* literal[int] * identifier[total_length]
keyword[if] identifier[num] > identifier[total_length] :
identifier[num] -= identifier[floor] ( identifier[num] /( literal[int] * identifier[total_length] ))* literal[int] * identifier[total_length]
keyword[if] identifier[num] > identifier[upper] :
identifier[num] = identifier[total_length] - identifier[num]
keyword[if] identifier[num] < identifier[lower] :
identifier[num] =- identifier[total_length] - identifier[num]
identifier[res] = identifier[num]
identifier[res] *= literal[int]
keyword[return] identifier[res] | def normalize(num, lower=0, upper=360, b=False):
"""Normalize number to range [lower, upper) or [lower, upper].
Parameters
----------
num : float
The number to be normalized.
lower : int
Lower limit of range. Default is 0.
upper : int
Upper limit of range. Default is 360.
b : bool
Type of normalization. Default is False. See notes.
When b=True, the range must be symmetric about 0.
When b=False, the range must be symmetric about 0 or ``lower`` must
be equal to 0.
Returns
-------
n : float
A number in the range [lower, upper) or [lower, upper].
Raises
------
ValueError
If lower >= upper.
Notes
-----
If the keyword `b == False`, then the normalization is done in the
following way. Consider the numbers to be arranged in a circle,
with the lower and upper ends sitting on top of each other. Moving
past one limit, takes the number into the beginning of the other
end. For example, if range is [0 - 360), then 361 becomes 1 and 360
becomes 0. Negative numbers move from higher to lower numbers. So,
-1 normalized to [0 - 360) becomes 359.
When b=False range must be symmetric about 0 or lower=0.
If the keyword `b == True`, then the given number is considered to
"bounce" between the two limits. So, -91 normalized to [-90, 90],
becomes -89, instead of 89. In this case the range is [lower,
upper]. This code is based on the function `fmt_delta` of `TPM`.
When b=True range must be symmetric about 0.
Examples
--------
>>> normalize(-270,-180,180)
90.0
>>> import math
>>> math.degrees(normalize(-2*math.pi,-math.pi,math.pi))
0.0
>>> normalize(-180, -180, 180)
-180.0
>>> normalize(180, -180, 180)
-180.0
>>> normalize(180, -180, 180, b=True)
180.0
>>> normalize(181,-180,180)
-179.0
>>> normalize(181, -180, 180, b=True)
179.0
>>> normalize(-180,0,360)
180.0
>>> normalize(36,0,24)
12.0
>>> normalize(368.5,-180,180)
8.5
>>> normalize(-100, -90, 90)
80.0
>>> normalize(-100, -90, 90, b=True)
-80.0
>>> normalize(100, -90, 90, b=True)
80.0
>>> normalize(181, -90, 90, b=True)
-1.0
>>> normalize(270, -90, 90, b=True)
-90.0
>>> normalize(271, -90, 90, b=True)
-89.0
"""
if lower >= upper:
ValueError('lower must be lesser than upper') # depends on [control=['if'], data=[]]
if not b:
if not (lower + upper == 0 or lower == 0):
raise ValueError('When b=False lower=0 or range must be symmetric about 0.') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif not lower + upper == 0:
raise ValueError('When b=True range must be symmetric about 0.') # depends on [control=['if'], data=[]]
from math import floor, ceil
# abs(num + upper) and abs(num - lower) are needed, instead of
# abs(num), since the lower and upper limits need not be 0. We need
# to add half size of the range, so that the final result is lower +
# <value> or upper - <value>, respectively.
res = num
if not b:
res = num
if num > upper or num == lower:
num = lower + abs(num + upper) % (abs(lower) + abs(upper)) # depends on [control=['if'], data=[]]
if num < lower or num == upper:
num = upper - abs(num - lower) % (abs(lower) + abs(upper)) # depends on [control=['if'], data=[]]
res = lower if num == upper else num # depends on [control=['if'], data=[]]
else:
total_length = abs(lower) + abs(upper)
if num < -total_length:
num += ceil(num / (-2 * total_length)) * 2 * total_length # depends on [control=['if'], data=['num']]
if num > total_length:
num -= floor(num / (2 * total_length)) * 2 * total_length # depends on [control=['if'], data=['num', 'total_length']]
if num > upper:
num = total_length - num # depends on [control=['if'], data=['num']]
if num < lower:
num = -total_length - num # depends on [control=['if'], data=['num']]
res = num
res *= 1.0 # Make all numbers float, to be consistent
return res |
def bind(self, destination='', source='', routing_key='',
         arguments=None):
    """Bind an Exchange.

    :param str destination: Exchange name
    :param str source: Exchange to bind to
    :param str routing_key: The routing key to use
    :param dict arguments: Bind key/value arguments

    :raises AMQPInvalidArgument: Invalid Parameters
    :raises AMQPChannelError: Raises if the channel encountered an error.
    :raises AMQPConnectionError: Raises if the connection
                                 encountered an error.

    :rtype: dict
    """
    # Validate every parameter up front with guard clauses.
    if not compatibility.is_string(destination):
        raise AMQPInvalidArgument('destination should be a string')
    if not compatibility.is_string(source):
        raise AMQPInvalidArgument('source should be a string')
    if not compatibility.is_string(routing_key):
        raise AMQPInvalidArgument('routing_key should be a string')
    if arguments is not None and not isinstance(arguments, dict):
        raise AMQPInvalidArgument('arguments should be a dict or None')

    # Build the AMQP Exchange.Bind frame and issue it as an RPC request.
    frame = pamqp_exchange.Bind(destination=destination,
                                source=source,
                                routing_key=routing_key,
                                arguments=arguments)
    return self._channel.rpc_request(frame)
constant[Bind an Exchange.
:param str destination: Exchange name
:param str source: Exchange to bind to
:param str routing_key: The routing key to use
:param dict arguments: Bind key/value arguments
:raises AMQPInvalidArgument: Invalid Parameters
:raises AMQPChannelError: Raises if the channel encountered an error.
:raises AMQPConnectionError: Raises if the connection
encountered an error.
:rtype: dict
]
if <ast.UnaryOp object at 0x7da18f722f20> begin[:]
<ast.Raise object at 0x7da18f722680>
variable[bind_frame] assign[=] call[name[pamqp_exchange].Bind, parameter[]]
return[call[name[self]._channel.rpc_request, parameter[name[bind_frame]]]] | keyword[def] identifier[bind] ( identifier[self] , identifier[destination] = literal[string] , identifier[source] = literal[string] , identifier[routing_key] = literal[string] ,
identifier[arguments] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[compatibility] . identifier[is_string] ( identifier[destination] ):
keyword[raise] identifier[AMQPInvalidArgument] ( literal[string] )
keyword[elif] keyword[not] identifier[compatibility] . identifier[is_string] ( identifier[source] ):
keyword[raise] identifier[AMQPInvalidArgument] ( literal[string] )
keyword[elif] keyword[not] identifier[compatibility] . identifier[is_string] ( identifier[routing_key] ):
keyword[raise] identifier[AMQPInvalidArgument] ( literal[string] )
keyword[elif] identifier[arguments] keyword[is] keyword[not] keyword[None] keyword[and] keyword[not] identifier[isinstance] ( identifier[arguments] , identifier[dict] ):
keyword[raise] identifier[AMQPInvalidArgument] ( literal[string] )
identifier[bind_frame] = identifier[pamqp_exchange] . identifier[Bind] ( identifier[destination] = identifier[destination] ,
identifier[source] = identifier[source] ,
identifier[routing_key] = identifier[routing_key] ,
identifier[arguments] = identifier[arguments] )
keyword[return] identifier[self] . identifier[_channel] . identifier[rpc_request] ( identifier[bind_frame] ) | def bind(self, destination='', source='', routing_key='', arguments=None):
"""Bind an Exchange.
:param str destination: Exchange name
:param str source: Exchange to bind to
:param str routing_key: The routing key to use
:param dict arguments: Bind key/value arguments
:raises AMQPInvalidArgument: Invalid Parameters
:raises AMQPChannelError: Raises if the channel encountered an error.
:raises AMQPConnectionError: Raises if the connection
encountered an error.
:rtype: dict
"""
if not compatibility.is_string(destination):
raise AMQPInvalidArgument('destination should be a string') # depends on [control=['if'], data=[]]
elif not compatibility.is_string(source):
raise AMQPInvalidArgument('source should be a string') # depends on [control=['if'], data=[]]
elif not compatibility.is_string(routing_key):
raise AMQPInvalidArgument('routing_key should be a string') # depends on [control=['if'], data=[]]
elif arguments is not None and (not isinstance(arguments, dict)):
raise AMQPInvalidArgument('arguments should be a dict or None') # depends on [control=['if'], data=[]]
bind_frame = pamqp_exchange.Bind(destination=destination, source=source, routing_key=routing_key, arguments=arguments)
return self._channel.rpc_request(bind_frame) |
def random_string_lower_numeric(size):
    """
    Generate a random string of *size* length consisting of lowercase letters
    and numbers. This function is not meant for cryptographic purposes.

    :param int size: The length of the string to return.
    :return: A string consisting of random characters.
    :rtype: str
    """
    # requirements = random, string
    alphabet = string.ascii_lowercase + string.digits
    return ''.join(random.choice(alphabet) for _ in range(size))
constant[
Generate a random string of *size* length consisting of lowercase letters
and numbers. This function is not meant for cryptographic purposes.
:param int size: The length of the string to return.
:return: A string consisting of random characters.
:rtype: str
]
return[call[constant[].join, parameter[<ast.GeneratorExp object at 0x7da20e955c00>]]] | keyword[def] identifier[random_string_lower_numeric] ( identifier[size] ):
literal[string]
keyword[return] literal[string] . identifier[join] ( identifier[random] . identifier[choice] ( identifier[string] . identifier[ascii_lowercase] + identifier[string] . identifier[digits] ) keyword[for] identifier[x] keyword[in] identifier[range] ( identifier[size] )) | def random_string_lower_numeric(size):
"""
Generate a random string of *size* length consisting of lowercase letters
and numbers. This function is not meant for cryptographic purposes.
:param int size: The length of the string to return.
:return: A string consisting of random characters.
:rtype: str
""" # requirements = random, string
return ''.join((random.choice(string.ascii_lowercase + string.digits) for x in range(size))) |
def naturalize_person(self, string):
    """
    Attempt to make a version of the string that has the surname, if any,
    at the start.

    'John, Brown' to 'Brown, John'
    'Sir John Brown Jr' to 'Brown, Sir John Jr'
    'Prince' to 'Prince'

    string -- The string to change.
    """
    suffixes = [
        'Jr', 'Jr.', 'Sr', 'Sr.',
        'I', 'II', 'III', 'IV', 'V',
    ]
    # Add lowercase versions:
    suffixes = suffixes + [s.lower() for s in suffixes]

    # If a name has a capitalised particle in we use that to sort.
    # So 'Le Carre, John' but 'Carre, John le'.
    particles = [
        'Le', 'La',
        'Von', 'Van',
        'Du', 'De',
    ]

    # (Unused 'surname'/'names' locals from an earlier revision removed.)
    suffix = ''  # e.g. 'Jr'; stripped during sorting, re-appended at the end.
    sort_string = string
    parts = string.split(' ')

    if parts[-1] in suffixes:
        # Remove suffixes entirely, as we'll add them back on the end.
        suffix = parts[-1]
        parts = parts[0:-1]  # Remove suffix from parts
        sort_string = ' '.join(parts)

    if len(parts) > 1:
        if parts[-2] in particles:
            # From ['Alan', 'Barry', 'Le', 'Carré']
            # to ['Alan', 'Barry', 'Le Carré']:
            parts = parts[0:-2] + [' '.join(parts[-2:])]

        # From 'David Foster Wallace' to 'Wallace, David Foster':
        sort_string = '{}, {}'.format(parts[-1], ' '.join(parts[:-1]))

        if suffix:
            # Add it back on.
            sort_string = '{} {}'.format(sort_string, suffix)

    # In case this name has any numbers in it.
    sort_string = self._naturalize_numbers(sort_string)

    return sort_string
constant[
Attempt to make a version of the string that has the surname, if any,
at the start.
'John, Brown' to 'Brown, John'
'Sir John Brown Jr' to 'Brown, Sir John Jr'
'Prince' to 'Prince'
string -- The string to change.
]
variable[suffixes] assign[=] list[[<ast.Constant object at 0x7da207f9b8e0>, <ast.Constant object at 0x7da207f99c90>, <ast.Constant object at 0x7da20e9b3160>, <ast.Constant object at 0x7da20e9b1750>, <ast.Constant object at 0x7da20e9b09a0>, <ast.Constant object at 0x7da20e9b22f0>, <ast.Constant object at 0x7da20e9b1150>, <ast.Constant object at 0x7da20e9b3c40>, <ast.Constant object at 0x7da20e9b0fd0>]]
variable[suffixes] assign[=] binary_operation[name[suffixes] + <ast.ListComp object at 0x7da20e9b2d70>]
variable[particles] assign[=] list[[<ast.Constant object at 0x7da20e9b2c80>, <ast.Constant object at 0x7da20e9b06a0>, <ast.Constant object at 0x7da20e9b39d0>, <ast.Constant object at 0x7da20e9b2680>, <ast.Constant object at 0x7da20e9b3e20>, <ast.Constant object at 0x7da20e9b1420>]]
variable[surname] assign[=] constant[]
variable[names] assign[=] constant[]
variable[suffix] assign[=] constant[]
variable[sort_string] assign[=] name[string]
variable[parts] assign[=] call[name[string].split, parameter[constant[ ]]]
if compare[call[name[parts]][<ast.UnaryOp object at 0x7da207f9af80>] in name[suffixes]] begin[:]
variable[suffix] assign[=] call[name[parts]][<ast.UnaryOp object at 0x7da207f9b640>]
variable[parts] assign[=] call[name[parts]][<ast.Slice object at 0x7da207f9af20>]
variable[sort_string] assign[=] call[constant[ ].join, parameter[name[parts]]]
if compare[call[name[len], parameter[name[parts]]] greater[>] constant[1]] begin[:]
if compare[call[name[parts]][<ast.UnaryOp object at 0x7da207f99a80>] in name[particles]] begin[:]
variable[parts] assign[=] binary_operation[call[name[parts]][<ast.Slice object at 0x7da207f9b880>] + list[[<ast.Call object at 0x7da207f9be50>]]]
variable[sort_string] assign[=] call[constant[{}, {}].format, parameter[call[name[parts]][<ast.UnaryOp object at 0x7da207f99c00>], call[constant[ ].join, parameter[call[name[parts]][<ast.Slice object at 0x7da207f99db0>]]]]]
if name[suffix] begin[:]
variable[sort_string] assign[=] call[constant[{} {}].format, parameter[name[sort_string], name[suffix]]]
variable[sort_string] assign[=] call[name[self]._naturalize_numbers, parameter[name[sort_string]]]
return[name[sort_string]] | keyword[def] identifier[naturalize_person] ( identifier[self] , identifier[string] ):
literal[string]
identifier[suffixes] =[
literal[string] , literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ,
]
identifier[suffixes] = identifier[suffixes] +[ identifier[s] . identifier[lower] () keyword[for] identifier[s] keyword[in] identifier[suffixes] ]
identifier[particles] =[
literal[string] , literal[string] ,
literal[string] , literal[string] ,
literal[string] , literal[string] ,
]
identifier[surname] = literal[string]
identifier[names] = literal[string]
identifier[suffix] = literal[string]
identifier[sort_string] = identifier[string]
identifier[parts] = identifier[string] . identifier[split] ( literal[string] )
keyword[if] identifier[parts] [- literal[int] ] keyword[in] identifier[suffixes] :
identifier[suffix] = identifier[parts] [- literal[int] ]
identifier[parts] = identifier[parts] [ literal[int] :- literal[int] ]
identifier[sort_string] = literal[string] . identifier[join] ( identifier[parts] )
keyword[if] identifier[len] ( identifier[parts] )> literal[int] :
keyword[if] identifier[parts] [- literal[int] ] keyword[in] identifier[particles] :
identifier[parts] = identifier[parts] [ literal[int] :- literal[int] ]+[ literal[string] . identifier[join] ( identifier[parts] [- literal[int] :])]
identifier[sort_string] = literal[string] . identifier[format] ( identifier[parts] [- literal[int] ], literal[string] . identifier[join] ( identifier[parts] [:- literal[int] ]))
keyword[if] identifier[suffix] :
identifier[sort_string] = literal[string] . identifier[format] ( identifier[sort_string] , identifier[suffix] )
identifier[sort_string] = identifier[self] . identifier[_naturalize_numbers] ( identifier[sort_string] )
keyword[return] identifier[sort_string] | def naturalize_person(self, string):
"""
Attempt to make a version of the string that has the surname, if any,
at the start.
'John, Brown' to 'Brown, John'
'Sir John Brown Jr' to 'Brown, Sir John Jr'
'Prince' to 'Prince'
string -- The string to change.
"""
suffixes = ['Jr', 'Jr.', 'Sr', 'Sr.', 'I', 'II', 'III', 'IV', 'V']
# Add lowercase versions:
suffixes = suffixes + [s.lower() for s in suffixes]
# If a name has a capitalised particle in we use that to sort.
# So 'Le Carre, John' but 'Carre, John le'.
particles = ['Le', 'La', 'Von', 'Van', 'Du', 'De']
surname = '' # Smith
names = '' # Fred James
suffix = '' # Jr
sort_string = string
parts = string.split(' ')
if parts[-1] in suffixes:
# Remove suffixes entirely, as we'll add them back on the end.
suffix = parts[-1]
parts = parts[0:-1] # Remove suffix from parts
sort_string = ' '.join(parts) # depends on [control=['if'], data=[]]
if len(parts) > 1:
if parts[-2] in particles:
# From ['Alan', 'Barry', 'Le', 'Carré']
# to ['Alan', 'Barry', 'Le Carré']:
parts = parts[0:-2] + [' '.join(parts[-2:])] # depends on [control=['if'], data=[]]
# From 'David Foster Wallace' to 'Wallace, David Foster':
sort_string = '{}, {}'.format(parts[-1], ' '.join(parts[:-1])) # depends on [control=['if'], data=[]]
if suffix:
# Add it back on.
sort_string = '{} {}'.format(sort_string, suffix) # depends on [control=['if'], data=[]]
# In case this name has any numbers in it.
sort_string = self._naturalize_numbers(sort_string)
return sort_string |
def jaccard(seq1, seq2):
    """Compute the Jaccard distance between the two sequences `seq1` and `seq2`.

    They should contain hashable items.
    The return value is a float between 0 and 1, where 0 means equal, and 1 totally different.

    Two empty sequences are considered equal (distance 0.0); the original
    implementation raised ZeroDivisionError in that case.
    """
    set1, set2 = set(seq1), set(seq2)
    union = set1 | set2
    if not union:
        # Both inputs are empty: they are identical, so the distance is 0.
        return 0.0
    return 1 - len(set1 & set2) / float(len(union))
constant[Compute the Jaccard distance between the two sequences `seq1` and `seq2`.
They should contain hashable items.
The return value is a float between 0 and 1, where 0 means equal, and 1 totally different.
]
<ast.Tuple object at 0x7da2054a4730> assign[=] tuple[[<ast.Call object at 0x7da2054a5390>, <ast.Call object at 0x7da2054a6470>]]
return[binary_operation[constant[1] - binary_operation[call[name[len], parameter[binary_operation[name[set1] <ast.BitAnd object at 0x7da2590d6b60> name[set2]]]] / call[name[float], parameter[call[name[len], parameter[binary_operation[name[set1] <ast.BitOr object at 0x7da2590d6aa0> name[set2]]]]]]]]] | keyword[def] identifier[jaccard] ( identifier[seq1] , identifier[seq2] ):
literal[string]
identifier[set1] , identifier[set2] = identifier[set] ( identifier[seq1] ), identifier[set] ( identifier[seq2] )
keyword[return] literal[int] - identifier[len] ( identifier[set1] & identifier[set2] )/ identifier[float] ( identifier[len] ( identifier[set1] | identifier[set2] )) | def jaccard(seq1, seq2):
"""Compute the Jaccard distance between the two sequences `seq1` and `seq2`.
They should contain hashable items.
The return value is a float between 0 and 1, where 0 means equal, and 1 totally different.
"""
(set1, set2) = (set(seq1), set(seq2))
return 1 - len(set1 & set2) / float(len(set1 | set2)) |
def schema_key_for(self, seq_no: int) -> SchemaKey:
    """
    Get schema key for schema by sequence number if known, None for no such schema in cache.

    :param seq_no: sequence number
    :return: corresponding schema key or None
    """
    LOGGER.debug('SchemaCache.schema_key_for >>> seq_no: %s', seq_no)

    # dict.get() already defaults to None for a missing sequence number.
    result = self._seq_no2schema_key.get(seq_no)

    LOGGER.debug('SchemaCache.schema_key_for <<< %s', result)
    return result
constant[
Get schema key for schema by sequence number if known, None for no such schema in cache.
:param seq_no: sequence number
:return: corresponding schema key or None
]
call[name[LOGGER].debug, parameter[constant[SchemaCache.schema_key_for >>> seq_no: %s], name[seq_no]]]
variable[rv] assign[=] call[name[self]._seq_no2schema_key.get, parameter[name[seq_no], constant[None]]]
call[name[LOGGER].debug, parameter[constant[SchemaCache.schema_key_for <<< %s], name[rv]]]
return[name[rv]] | keyword[def] identifier[schema_key_for] ( identifier[self] , identifier[seq_no] : identifier[int] )-> identifier[SchemaKey] :
literal[string]
identifier[LOGGER] . identifier[debug] ( literal[string] , identifier[seq_no] )
identifier[rv] = identifier[self] . identifier[_seq_no2schema_key] . identifier[get] ( identifier[seq_no] , keyword[None] )
identifier[LOGGER] . identifier[debug] ( literal[string] , identifier[rv] )
keyword[return] identifier[rv] | def schema_key_for(self, seq_no: int) -> SchemaKey:
"""
Get schema key for schema by sequence number if known, None for no such schema in cache.
:param seq_no: sequence number
:return: corresponding schema key or None
"""
LOGGER.debug('SchemaCache.schema_key_for >>> seq_no: %s', seq_no)
rv = self._seq_no2schema_key.get(seq_no, None)
LOGGER.debug('SchemaCache.schema_key_for <<< %s', rv)
return rv |
def cxxRecordDecl(*args):
    """Matches C++ class declarations.

    >>> from glud import *
    >>> config = '''
    ... class W;
    ... template<typename T> class X {};
    ... struct Y {};
    ... union Z {};
    ... '''
    >>> m = cxxRecordDecl()
    >>> for c in walk(m, parse_string(config).cursor):
    ...     print(c.spelling)
    W
    X
    """
    # Match plain classes and class templates, but not structs/unions.
    kind_matchers = [
        PredMatcher(is_kind(kind))
        for kind in (CursorKind.CLASS_DECL, CursorKind.CLASS_TEMPLATE)
    ]
    return allOf(anyOf(*kind_matchers), *args)
constant[Matches C++ class declarations.
>>> from glud import *
>>> config = '''
... class W;
... template<typename T> class X {};
... struct Y {};
... union Z {};
... '''
>>> m = cxxRecordDecl()
>>> for c in walk(m, parse_string(config).cursor):
... print(c.spelling)
W
X
]
variable[kinds] assign[=] list[[<ast.Attribute object at 0x7da1b190c520>, <ast.Attribute object at 0x7da1b190eb90>]]
variable[inner] assign[=] <ast.ListComp object at 0x7da1b190e0b0>
return[call[name[allOf], parameter[call[name[anyOf], parameter[<ast.Starred object at 0x7da1b190cee0>]], <ast.Starred object at 0x7da1b190c3a0>]]] | keyword[def] identifier[cxxRecordDecl] (* identifier[args] ):
literal[string]
identifier[kinds] =[
identifier[CursorKind] . identifier[CLASS_DECL] ,
identifier[CursorKind] . identifier[CLASS_TEMPLATE] ,
]
identifier[inner] =[ identifier[PredMatcher] ( identifier[is_kind] ( identifier[k] )) keyword[for] identifier[k] keyword[in] identifier[kinds] ]
keyword[return] identifier[allOf] ( identifier[anyOf] (* identifier[inner] ),* identifier[args] ) | def cxxRecordDecl(*args):
"""Matches C++ class declarations.
>>> from glud import *
>>> config = '''
... class W;
... template<typename T> class X {};
... struct Y {};
... union Z {};
... '''
>>> m = cxxRecordDecl()
>>> for c in walk(m, parse_string(config).cursor):
... print(c.spelling)
W
X
"""
kinds = [CursorKind.CLASS_DECL, CursorKind.CLASS_TEMPLATE]
inner = [PredMatcher(is_kind(k)) for k in kinds]
return allOf(anyOf(*inner), *args) |
def write(self, filename="mesh.vtk", binary=True):
    """
    Write actor's polydata in its current transformation to file.
    """
    # Imported lazily so that merely constructing actors does not pull in
    # the I/O machinery.
    from vtkplotter import vtkio
    return vtkio.write(self, filename, binary)
constant[
Write actor's polydata in its current transformation to file.
]
import module[vtkplotter.vtkio] as alias[vtkio]
return[call[name[vtkio].write, parameter[name[self], name[filename], name[binary]]]] | keyword[def] identifier[write] ( identifier[self] , identifier[filename] = literal[string] , identifier[binary] = keyword[True] ):
literal[string]
keyword[import] identifier[vtkplotter] . identifier[vtkio] keyword[as] identifier[vtkio]
keyword[return] identifier[vtkio] . identifier[write] ( identifier[self] , identifier[filename] , identifier[binary] ) | def write(self, filename='mesh.vtk', binary=True):
"""
Write actor's polydata in its current transformation to file.
"""
import vtkplotter.vtkio as vtkio
return vtkio.write(self, filename, binary) |
def get_core(self):
    """
    Extract unsatisfiable core. The result of the procedure is
    stored in variable ``self.core``. If necessary, core
    trimming and also heuristic core reduction is applied
    depending on the command-line options. A *minimum weight*
    of the core is computed and stored in ``self.minw``.

    Finally, the core is divided into two parts:

    1. clause selectors (``self.core_sels``),
    2. sum assumptions (``self.core_sums``).
    """
    # extracting the core
    self.core = self.oracle.get_core()

    if self.core:
        # try to reduce the core by trimming
        self.trim_core()
        # and by heuristic minimization
        self.minimize_core()

        # core weight: the minimum weight among the core literals
        self.minw = min(self.wght[lit] for lit in self.core)

        # single-pass partition of the core into selectors vs. sums
        selectors, sums = [], []
        for lit in self.core:
            (selectors if lit in self.sels_set else sums).append(lit)
        self.core_sels = selectors
        self.core_sums = sums
constant[
Extract unsatisfiable core. The result of the procedure is
stored in variable ``self.core``. If necessary, core
trimming and also heuristic core reduction is applied
depending on the command-line options. A *minimum weight*
of the core is computed and stored in ``self.minw``.
Finally, the core is divided into two parts:
1. clause selectors (``self.core_sels``),
2. sum assumptions (``self.core_sums``).
]
name[self].core assign[=] call[name[self].oracle.get_core, parameter[]]
if name[self].core begin[:]
call[name[self].trim_core, parameter[]]
call[name[self].minimize_core, parameter[]]
name[self].minw assign[=] call[name[min], parameter[call[name[map], parameter[<ast.Lambda object at 0x7da1b11a2110>, name[self].core]]]]
<ast.Tuple object at 0x7da1b11a0e80> assign[=] call[name[itertools].tee, parameter[name[self].core]]
name[self].core_sels assign[=] call[name[list], parameter[<ast.GeneratorExp object at 0x7da1b11a13c0>]]
name[self].core_sums assign[=] call[name[list], parameter[<ast.GeneratorExp object at 0x7da1b11a0610>]] | keyword[def] identifier[get_core] ( identifier[self] ):
literal[string]
identifier[self] . identifier[core] = identifier[self] . identifier[oracle] . identifier[get_core] ()
keyword[if] identifier[self] . identifier[core] :
identifier[self] . identifier[trim_core] ()
identifier[self] . identifier[minimize_core] ()
identifier[self] . identifier[minw] = identifier[min] ( identifier[map] ( keyword[lambda] identifier[l] : identifier[self] . identifier[wght] [ identifier[l] ], identifier[self] . identifier[core] ))
identifier[iter1] , identifier[iter2] = identifier[itertools] . identifier[tee] ( identifier[self] . identifier[core] )
identifier[self] . identifier[core_sels] = identifier[list] ( identifier[l] keyword[for] identifier[l] keyword[in] identifier[iter1] keyword[if] identifier[l] keyword[in] identifier[self] . identifier[sels_set] )
identifier[self] . identifier[core_sums] = identifier[list] ( identifier[l] keyword[for] identifier[l] keyword[in] identifier[iter2] keyword[if] identifier[l] keyword[not] keyword[in] identifier[self] . identifier[sels_set] ) | def get_core(self):
"""
Extract unsatisfiable core. The result of the procedure is
stored in variable ``self.core``. If necessary, core
trimming and also heuristic core reduction is applied
depending on the command-line options. A *minimum weight*
of the core is computed and stored in ``self.minw``.
Finally, the core is divided into two parts:
1. clause selectors (``self.core_sels``),
2. sum assumptions (``self.core_sums``).
"""
# extracting the core
self.core = self.oracle.get_core()
if self.core:
# try to reduce the core by trimming
self.trim_core()
# and by heuristic minimization
self.minimize_core()
# core weight
self.minw = min(map(lambda l: self.wght[l], self.core))
# dividing the core into two parts
(iter1, iter2) = itertools.tee(self.core)
self.core_sels = list((l for l in iter1 if l in self.sels_set))
self.core_sums = list((l for l in iter2 if l not in self.sels_set)) # depends on [control=['if'], data=[]] |
def _delete_resource(name, name_param, desc, res_type, wait=0, status_param=None,
                     status_gone='deleted', region=None, key=None, keyid=None, profile=None,
                     **args):
    '''
    Delete a generic Elasticache resource.

    name
        Resource name (may be overridden by an explicit ``name_param`` kwarg
        in ``args``).
    name_param
        Name of the boto keyword argument that carries the resource name.
    desc
        Human-readable description of the resource, used in log messages.
    res_type
        Boto resource type suffix; ``'delete_' + res_type`` must be a method
        on the connection, and when waiting, ``'describe_' + res_type + 's'``
        must be a function in this module.
    wait
        Seconds to wait for the deletion to complete (0 = fire and forget).
    status_param / status_gone
        Key and value in the describe result indicating deletion finished.

    Returns True on success (or successful deletion request), False on error.

    :raises SaltInvocationError: on a non-integer ``wait`` or an unknown
        ``res_type``.
    '''
    try:
        wait = int(wait)
    # BUG FIX: catch only conversion errors instead of a blanket Exception.
    except (TypeError, ValueError):
        raise SaltInvocationError("Bad value ('{0}') passed for 'wait' param - must be an "
                                  "int or boolean.".format(wait))
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    if name_param in args:
        log.info(
            "'name: %s' param being overridden by explicitly provided '%s: %s'",
            name, name_param, args[name_param]
        )
        name = args[name_param]
    else:
        args[name_param] = name
    # Strip salt-internal kwargs (those starting with an underscore).
    args = {k: v for k, v in args.items() if not k.startswith('_')}
    try:
        func = 'delete_' + res_type
        f = getattr(conn, func)
        if wait:
            # The describe helper lives in this module, not on the connection.
            func = 'describe_' + res_type + 's'
            s = globals()[func]
    except (AttributeError, KeyError) as e:
        # BUG FIX: Python 3 exceptions have no '.message' attribute; format
        # the exception object itself instead.
        raise SaltInvocationError("No function '{0}()' found: {1}".format(func, e))
    try:
        f(**args)
        if not wait:
            log.info('%s %s deletion requested.', desc.title(), name)
            return True
        log.info('Waiting up to %s seconds for %s %s to be deleted.', wait, desc, name)
        orig_wait = wait
        while wait > 0:
            r = s(name=name, conn=conn)
            # Deletion is complete once the resource vanishes or its status
            # reaches the "gone" value.
            if not r or r[0].get(status_param) == status_gone:
                log.info('%s %s deleted.', desc.title(), name)
                return True
            # Poll at most once per minute.
            sleep = wait if wait % 60 == wait else 60
            log.info('Sleeping %s seconds for %s %s to be deleted.',
                     sleep, desc, name)
            time.sleep(sleep)
            wait -= sleep
        log.error('%s %s not deleted after %s seconds!', desc.title(), name, orig_wait)
        return False
    except botocore.exceptions.ClientError as e:
        log.error('Failed to delete %s %s: %s', desc, name, e)
        return False
constant[
Delete a generic Elasticache resource.
]
<ast.Try object at 0x7da2046221d0>
variable[conn] assign[=] call[name[_get_conn], parameter[]]
if compare[name[name_param] in name[args]] begin[:]
call[name[log].info, parameter[constant['name: %s' param being overridden by explicitly provided '%s: %s'], name[name], name[name_param], call[name[args]][name[name_param]]]]
variable[name] assign[=] call[name[args]][name[name_param]]
variable[args] assign[=] call[name[dict], parameter[<ast.ListComp object at 0x7da204622800>]]
<ast.Try object at 0x7da2046210f0>
<ast.Try object at 0x7da204620eb0> | keyword[def] identifier[_delete_resource] ( identifier[name] , identifier[name_param] , identifier[desc] , identifier[res_type] , identifier[wait] = literal[int] , identifier[status_param] = keyword[None] ,
identifier[status_gone] = literal[string] , identifier[region] = keyword[None] , identifier[key] = keyword[None] , identifier[keyid] = keyword[None] , identifier[profile] = keyword[None] ,
** identifier[args] ):
literal[string]
keyword[try] :
identifier[wait] = identifier[int] ( identifier[wait] )
keyword[except] identifier[Exception] :
keyword[raise] identifier[SaltInvocationError] ( literal[string]
literal[string] . identifier[format] ( identifier[wait] ))
identifier[conn] = identifier[_get_conn] ( identifier[region] = identifier[region] , identifier[key] = identifier[key] , identifier[keyid] = identifier[keyid] , identifier[profile] = identifier[profile] )
keyword[if] identifier[name_param] keyword[in] identifier[args] :
identifier[log] . identifier[info] (
literal[string] ,
identifier[name] , identifier[name_param] , identifier[args] [ identifier[name_param] ]
)
identifier[name] = identifier[args] [ identifier[name_param] ]
keyword[else] :
identifier[args] [ identifier[name_param] ]= identifier[name]
identifier[args] = identifier[dict] ([( identifier[k] , identifier[v] ) keyword[for] identifier[k] , identifier[v] keyword[in] identifier[args] . identifier[items] () keyword[if] keyword[not] identifier[k] . identifier[startswith] ( literal[string] )])
keyword[try] :
identifier[func] = literal[string] + identifier[res_type]
identifier[f] = identifier[getattr] ( identifier[conn] , identifier[func] )
keyword[if] identifier[wait] :
identifier[func] = literal[string] + identifier[res_type] + literal[string]
identifier[s] = identifier[globals] ()[ identifier[func] ]
keyword[except] ( identifier[AttributeError] , identifier[KeyError] ) keyword[as] identifier[e] :
keyword[raise] identifier[SaltInvocationError] ( literal[string] . identifier[format] ( identifier[func] , identifier[e] . identifier[message] ))
keyword[try] :
identifier[f] (** identifier[args] )
keyword[if] keyword[not] identifier[wait] :
identifier[log] . identifier[info] ( literal[string] , identifier[desc] . identifier[title] (), identifier[name] )
keyword[return] keyword[True]
identifier[log] . identifier[info] ( literal[string] , identifier[wait] , identifier[desc] , identifier[name] )
identifier[orig_wait] = identifier[wait]
keyword[while] identifier[wait] > literal[int] :
identifier[r] = identifier[s] ( identifier[name] = identifier[name] , identifier[conn] = identifier[conn] )
keyword[if] keyword[not] identifier[r] keyword[or] identifier[r] [ literal[int] ]. identifier[get] ( identifier[status_param] )== identifier[status_gone] :
identifier[log] . identifier[info] ( literal[string] , identifier[desc] . identifier[title] (), identifier[name] )
keyword[return] keyword[True]
identifier[sleep] = identifier[wait] keyword[if] identifier[wait] % literal[int] == identifier[wait] keyword[else] literal[int]
identifier[log] . identifier[info] ( literal[string] ,
identifier[sleep] , identifier[desc] , identifier[name] )
identifier[time] . identifier[sleep] ( identifier[sleep] )
identifier[wait] -= identifier[sleep]
identifier[log] . identifier[error] ( literal[string] , identifier[desc] . identifier[title] (), identifier[name] , identifier[orig_wait] )
keyword[return] keyword[False]
keyword[except] identifier[botocore] . identifier[exceptions] . identifier[ClientError] keyword[as] identifier[e] :
identifier[log] . identifier[error] ( literal[string] , identifier[desc] , identifier[name] , identifier[e] )
keyword[return] keyword[False] | def _delete_resource(name, name_param, desc, res_type, wait=0, status_param=None, status_gone='deleted', region=None, key=None, keyid=None, profile=None, **args):
"""
Delete a generic Elasticache resource.
"""
try:
wait = int(wait) # depends on [control=['try'], data=[]]
except Exception:
raise SaltInvocationError("Bad value ('{0}') passed for 'wait' param - must be an int or boolean.".format(wait)) # depends on [control=['except'], data=[]]
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if name_param in args:
log.info("'name: %s' param being overridden by explicitly provided '%s: %s'", name, name_param, args[name_param])
name = args[name_param] # depends on [control=['if'], data=['name_param', 'args']]
else:
args[name_param] = name
args = dict([(k, v) for (k, v) in args.items() if not k.startswith('_')])
try:
func = 'delete_' + res_type
f = getattr(conn, func)
if wait:
func = 'describe_' + res_type + 's'
s = globals()[func] # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except (AttributeError, KeyError) as e:
raise SaltInvocationError("No function '{0}()' found: {1}".format(func, e.message)) # depends on [control=['except'], data=['e']]
try:
f(**args)
if not wait:
log.info('%s %s deletion requested.', desc.title(), name)
return True # depends on [control=['if'], data=[]]
log.info('Waiting up to %s seconds for %s %s to be deleted.', wait, desc, name)
orig_wait = wait
while wait > 0:
r = s(name=name, conn=conn)
if not r or r[0].get(status_param) == status_gone:
log.info('%s %s deleted.', desc.title(), name)
return True # depends on [control=['if'], data=[]]
sleep = wait if wait % 60 == wait else 60
log.info('Sleeping %s seconds for %s %s to be deleted.', sleep, desc, name)
time.sleep(sleep)
wait -= sleep # depends on [control=['while'], data=['wait']]
log.error('%s %s not deleted after %s seconds!', desc.title(), name, orig_wait)
return False # depends on [control=['try'], data=[]]
except botocore.exceptions.ClientError as e:
log.error('Failed to delete %s %s: %s', desc, name, e)
return False # depends on [control=['except'], data=['e']] |
def get_theme(self):
"""
Gets theme settings from settings service. Falls back to default (LMS) theme
if settings service is not available, xblock theme settings are not set or does
contain mentoring theme settings.
"""
xblock_settings = self.get_xblock_settings(default={})
if xblock_settings and self.theme_key in xblock_settings:
return xblock_settings[self.theme_key]
return self.default_theme_config | def function[get_theme, parameter[self]]:
constant[
Gets theme settings from settings service. Falls back to default (LMS) theme
if settings service is not available, xblock theme settings are not set or does
contain mentoring theme settings.
]
variable[xblock_settings] assign[=] call[name[self].get_xblock_settings, parameter[]]
if <ast.BoolOp object at 0x7da1b0ef6860> begin[:]
return[call[name[xblock_settings]][name[self].theme_key]]
return[name[self].default_theme_config] | keyword[def] identifier[get_theme] ( identifier[self] ):
literal[string]
identifier[xblock_settings] = identifier[self] . identifier[get_xblock_settings] ( identifier[default] ={})
keyword[if] identifier[xblock_settings] keyword[and] identifier[self] . identifier[theme_key] keyword[in] identifier[xblock_settings] :
keyword[return] identifier[xblock_settings] [ identifier[self] . identifier[theme_key] ]
keyword[return] identifier[self] . identifier[default_theme_config] | def get_theme(self):
"""
Gets theme settings from settings service. Falls back to default (LMS) theme
if settings service is not available, xblock theme settings are not set or does
contain mentoring theme settings.
"""
xblock_settings = self.get_xblock_settings(default={})
if xblock_settings and self.theme_key in xblock_settings:
return xblock_settings[self.theme_key] # depends on [control=['if'], data=[]]
return self.default_theme_config |
def init_with_brain(self, brain):
"""Initialize with a catalog brain
"""
self._uid = api.get_uid(brain)
self._brain = brain
self._catalog = self.get_catalog_for(brain)
self._instance = None | def function[init_with_brain, parameter[self, brain]]:
constant[Initialize with a catalog brain
]
name[self]._uid assign[=] call[name[api].get_uid, parameter[name[brain]]]
name[self]._brain assign[=] name[brain]
name[self]._catalog assign[=] call[name[self].get_catalog_for, parameter[name[brain]]]
name[self]._instance assign[=] constant[None] | keyword[def] identifier[init_with_brain] ( identifier[self] , identifier[brain] ):
literal[string]
identifier[self] . identifier[_uid] = identifier[api] . identifier[get_uid] ( identifier[brain] )
identifier[self] . identifier[_brain] = identifier[brain]
identifier[self] . identifier[_catalog] = identifier[self] . identifier[get_catalog_for] ( identifier[brain] )
identifier[self] . identifier[_instance] = keyword[None] | def init_with_brain(self, brain):
"""Initialize with a catalog brain
"""
self._uid = api.get_uid(brain)
self._brain = brain
self._catalog = self.get_catalog_for(brain)
self._instance = None |
def run_display_app_output(self, out):
"""Print any App output.
Args:
out (str): One or more lines of output messages.
"""
if not self.profile.get('quiet') and not self.args.quiet:
print('App Output:')
for o in out.decode('utf-8').split('\n'):
print(' {}{}{}'.format(c.Style.BRIGHT, c.Fore.CYAN, o))
self.log.debug('[tcrun] App output: {}'.format(o)) | def function[run_display_app_output, parameter[self, out]]:
constant[Print any App output.
Args:
out (str): One or more lines of output messages.
]
if <ast.BoolOp object at 0x7da20c990580> begin[:]
call[name[print], parameter[constant[App Output:]]]
for taget[name[o]] in starred[call[call[name[out].decode, parameter[constant[utf-8]]].split, parameter[constant[
]]]] begin[:]
call[name[print], parameter[call[constant[ {}{}{}].format, parameter[name[c].Style.BRIGHT, name[c].Fore.CYAN, name[o]]]]]
call[name[self].log.debug, parameter[call[constant[[tcrun] App output: {}].format, parameter[name[o]]]]] | keyword[def] identifier[run_display_app_output] ( identifier[self] , identifier[out] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[profile] . identifier[get] ( literal[string] ) keyword[and] keyword[not] identifier[self] . identifier[args] . identifier[quiet] :
identifier[print] ( literal[string] )
keyword[for] identifier[o] keyword[in] identifier[out] . identifier[decode] ( literal[string] ). identifier[split] ( literal[string] ):
identifier[print] ( literal[string] . identifier[format] ( identifier[c] . identifier[Style] . identifier[BRIGHT] , identifier[c] . identifier[Fore] . identifier[CYAN] , identifier[o] ))
identifier[self] . identifier[log] . identifier[debug] ( literal[string] . identifier[format] ( identifier[o] )) | def run_display_app_output(self, out):
"""Print any App output.
Args:
out (str): One or more lines of output messages.
"""
if not self.profile.get('quiet') and (not self.args.quiet):
print('App Output:')
for o in out.decode('utf-8').split('\n'):
print(' {}{}{}'.format(c.Style.BRIGHT, c.Fore.CYAN, o))
self.log.debug('[tcrun] App output: {}'.format(o)) # depends on [control=['for'], data=['o']] # depends on [control=['if'], data=[]] |
def remove(self, models):
""" Removed the passed model(s) from the selection"""
models = self._check_model_types(models)
for model in models:
if model in self._selected:
self._selected.remove(model) | def function[remove, parameter[self, models]]:
constant[ Removed the passed model(s) from the selection]
variable[models] assign[=] call[name[self]._check_model_types, parameter[name[models]]]
for taget[name[model]] in starred[name[models]] begin[:]
if compare[name[model] in name[self]._selected] begin[:]
call[name[self]._selected.remove, parameter[name[model]]] | keyword[def] identifier[remove] ( identifier[self] , identifier[models] ):
literal[string]
identifier[models] = identifier[self] . identifier[_check_model_types] ( identifier[models] )
keyword[for] identifier[model] keyword[in] identifier[models] :
keyword[if] identifier[model] keyword[in] identifier[self] . identifier[_selected] :
identifier[self] . identifier[_selected] . identifier[remove] ( identifier[model] ) | def remove(self, models):
""" Removed the passed model(s) from the selection"""
models = self._check_model_types(models)
for model in models:
if model in self._selected:
self._selected.remove(model) # depends on [control=['if'], data=['model']] # depends on [control=['for'], data=['model']] |
def _from_dict(cls, _dict):
"""Initialize a MessageRequest object from a json dictionary."""
args = {}
if 'input' in _dict:
args['input'] = MessageInput._from_dict(_dict.get('input'))
if 'intents' in _dict:
args['intents'] = [
RuntimeIntent._from_dict(x) for x in (_dict.get('intents'))
]
if 'entities' in _dict:
args['entities'] = [
RuntimeEntity._from_dict(x) for x in (_dict.get('entities'))
]
if 'alternate_intents' in _dict:
args['alternate_intents'] = _dict.get('alternate_intents')
if 'context' in _dict:
args['context'] = Context._from_dict(_dict.get('context'))
if 'output' in _dict:
args['output'] = OutputData._from_dict(_dict.get('output'))
if 'actions' in _dict:
args['actions'] = [
DialogNodeAction._from_dict(x) for x in (_dict.get('actions'))
]
return cls(**args) | def function[_from_dict, parameter[cls, _dict]]:
constant[Initialize a MessageRequest object from a json dictionary.]
variable[args] assign[=] dictionary[[], []]
if compare[constant[input] in name[_dict]] begin[:]
call[name[args]][constant[input]] assign[=] call[name[MessageInput]._from_dict, parameter[call[name[_dict].get, parameter[constant[input]]]]]
if compare[constant[intents] in name[_dict]] begin[:]
call[name[args]][constant[intents]] assign[=] <ast.ListComp object at 0x7da18f09ea40>
if compare[constant[entities] in name[_dict]] begin[:]
call[name[args]][constant[entities]] assign[=] <ast.ListComp object at 0x7da18f09ce50>
if compare[constant[alternate_intents] in name[_dict]] begin[:]
call[name[args]][constant[alternate_intents]] assign[=] call[name[_dict].get, parameter[constant[alternate_intents]]]
if compare[constant[context] in name[_dict]] begin[:]
call[name[args]][constant[context]] assign[=] call[name[Context]._from_dict, parameter[call[name[_dict].get, parameter[constant[context]]]]]
if compare[constant[output] in name[_dict]] begin[:]
call[name[args]][constant[output]] assign[=] call[name[OutputData]._from_dict, parameter[call[name[_dict].get, parameter[constant[output]]]]]
if compare[constant[actions] in name[_dict]] begin[:]
call[name[args]][constant[actions]] assign[=] <ast.ListComp object at 0x7da18f09cdc0>
return[call[name[cls], parameter[]]] | keyword[def] identifier[_from_dict] ( identifier[cls] , identifier[_dict] ):
literal[string]
identifier[args] ={}
keyword[if] literal[string] keyword[in] identifier[_dict] :
identifier[args] [ literal[string] ]= identifier[MessageInput] . identifier[_from_dict] ( identifier[_dict] . identifier[get] ( literal[string] ))
keyword[if] literal[string] keyword[in] identifier[_dict] :
identifier[args] [ literal[string] ]=[
identifier[RuntimeIntent] . identifier[_from_dict] ( identifier[x] ) keyword[for] identifier[x] keyword[in] ( identifier[_dict] . identifier[get] ( literal[string] ))
]
keyword[if] literal[string] keyword[in] identifier[_dict] :
identifier[args] [ literal[string] ]=[
identifier[RuntimeEntity] . identifier[_from_dict] ( identifier[x] ) keyword[for] identifier[x] keyword[in] ( identifier[_dict] . identifier[get] ( literal[string] ))
]
keyword[if] literal[string] keyword[in] identifier[_dict] :
identifier[args] [ literal[string] ]= identifier[_dict] . identifier[get] ( literal[string] )
keyword[if] literal[string] keyword[in] identifier[_dict] :
identifier[args] [ literal[string] ]= identifier[Context] . identifier[_from_dict] ( identifier[_dict] . identifier[get] ( literal[string] ))
keyword[if] literal[string] keyword[in] identifier[_dict] :
identifier[args] [ literal[string] ]= identifier[OutputData] . identifier[_from_dict] ( identifier[_dict] . identifier[get] ( literal[string] ))
keyword[if] literal[string] keyword[in] identifier[_dict] :
identifier[args] [ literal[string] ]=[
identifier[DialogNodeAction] . identifier[_from_dict] ( identifier[x] ) keyword[for] identifier[x] keyword[in] ( identifier[_dict] . identifier[get] ( literal[string] ))
]
keyword[return] identifier[cls] (** identifier[args] ) | def _from_dict(cls, _dict):
"""Initialize a MessageRequest object from a json dictionary."""
args = {}
if 'input' in _dict:
args['input'] = MessageInput._from_dict(_dict.get('input')) # depends on [control=['if'], data=['_dict']]
if 'intents' in _dict:
args['intents'] = [RuntimeIntent._from_dict(x) for x in _dict.get('intents')] # depends on [control=['if'], data=['_dict']]
if 'entities' in _dict:
args['entities'] = [RuntimeEntity._from_dict(x) for x in _dict.get('entities')] # depends on [control=['if'], data=['_dict']]
if 'alternate_intents' in _dict:
args['alternate_intents'] = _dict.get('alternate_intents') # depends on [control=['if'], data=['_dict']]
if 'context' in _dict:
args['context'] = Context._from_dict(_dict.get('context')) # depends on [control=['if'], data=['_dict']]
if 'output' in _dict:
args['output'] = OutputData._from_dict(_dict.get('output')) # depends on [control=['if'], data=['_dict']]
if 'actions' in _dict:
args['actions'] = [DialogNodeAction._from_dict(x) for x in _dict.get('actions')] # depends on [control=['if'], data=['_dict']]
return cls(**args) |
def update(args):
"""
cdstarcat update OID [KEY=VALUE]+
Update the metadata of an object.
"""
with _catalog(args) as cat:
cat.update_metadata(
args.args[0], dict([arg.split('=', 1) for arg in args.args[1:]])) | def function[update, parameter[args]]:
constant[
cdstarcat update OID [KEY=VALUE]+
Update the metadata of an object.
]
with call[name[_catalog], parameter[name[args]]] begin[:]
call[name[cat].update_metadata, parameter[call[name[args].args][constant[0]], call[name[dict], parameter[<ast.ListComp object at 0x7da1b13500d0>]]]] | keyword[def] identifier[update] ( identifier[args] ):
literal[string]
keyword[with] identifier[_catalog] ( identifier[args] ) keyword[as] identifier[cat] :
identifier[cat] . identifier[update_metadata] (
identifier[args] . identifier[args] [ literal[int] ], identifier[dict] ([ identifier[arg] . identifier[split] ( literal[string] , literal[int] ) keyword[for] identifier[arg] keyword[in] identifier[args] . identifier[args] [ literal[int] :]])) | def update(args):
"""
cdstarcat update OID [KEY=VALUE]+
Update the metadata of an object.
"""
with _catalog(args) as cat:
cat.update_metadata(args.args[0], dict([arg.split('=', 1) for arg in args.args[1:]])) # depends on [control=['with'], data=['cat']] |
def append_sources_from(self, other):
"""Merge the source alias lists of two CatDicts."""
# Get aliases lists from this `CatDict` and other
self_aliases = self[self._KEYS.SOURCE].split(',')
other_aliases = other[self._KEYS.SOURCE].split(',')
# Store alias to `self`
self[self._KEYS.SOURCE] = uniq_cdl(self_aliases + other_aliases)
return | def function[append_sources_from, parameter[self, other]]:
constant[Merge the source alias lists of two CatDicts.]
variable[self_aliases] assign[=] call[call[name[self]][name[self]._KEYS.SOURCE].split, parameter[constant[,]]]
variable[other_aliases] assign[=] call[call[name[other]][name[self]._KEYS.SOURCE].split, parameter[constant[,]]]
call[name[self]][name[self]._KEYS.SOURCE] assign[=] call[name[uniq_cdl], parameter[binary_operation[name[self_aliases] + name[other_aliases]]]]
return[None] | keyword[def] identifier[append_sources_from] ( identifier[self] , identifier[other] ):
literal[string]
identifier[self_aliases] = identifier[self] [ identifier[self] . identifier[_KEYS] . identifier[SOURCE] ]. identifier[split] ( literal[string] )
identifier[other_aliases] = identifier[other] [ identifier[self] . identifier[_KEYS] . identifier[SOURCE] ]. identifier[split] ( literal[string] )
identifier[self] [ identifier[self] . identifier[_KEYS] . identifier[SOURCE] ]= identifier[uniq_cdl] ( identifier[self_aliases] + identifier[other_aliases] )
keyword[return] | def append_sources_from(self, other):
"""Merge the source alias lists of two CatDicts."""
# Get aliases lists from this `CatDict` and other
self_aliases = self[self._KEYS.SOURCE].split(',')
other_aliases = other[self._KEYS.SOURCE].split(',')
# Store alias to `self`
self[self._KEYS.SOURCE] = uniq_cdl(self_aliases + other_aliases)
return |
def paths2edges(paths):
"""[8079, 8135, 3723, 3676, 1901363, 5488, 3674] """
edges_all = set()
for path in paths:
for edge in path2edges(path):
edges_all.add(edge)
return edges_all | def function[paths2edges, parameter[paths]]:
constant[[8079, 8135, 3723, 3676, 1901363, 5488, 3674] ]
variable[edges_all] assign[=] call[name[set], parameter[]]
for taget[name[path]] in starred[name[paths]] begin[:]
for taget[name[edge]] in starred[call[name[path2edges], parameter[name[path]]]] begin[:]
call[name[edges_all].add, parameter[name[edge]]]
return[name[edges_all]] | keyword[def] identifier[paths2edges] ( identifier[paths] ):
literal[string]
identifier[edges_all] = identifier[set] ()
keyword[for] identifier[path] keyword[in] identifier[paths] :
keyword[for] identifier[edge] keyword[in] identifier[path2edges] ( identifier[path] ):
identifier[edges_all] . identifier[add] ( identifier[edge] )
keyword[return] identifier[edges_all] | def paths2edges(paths):
"""[8079, 8135, 3723, 3676, 1901363, 5488, 3674] """
edges_all = set()
for path in paths:
for edge in path2edges(path):
edges_all.add(edge) # depends on [control=['for'], data=['edge']] # depends on [control=['for'], data=['path']]
return edges_all |
def parse_state_variable(self, node):
"""
Parses <StateVariable>
@param node: Node containing the <StateVariable> element
@type node: xml.etree.Element
@raise ParseError: Raised when the state variable is not
being defined in the context of a component type.
"""
if 'name' in node.lattrib:
name = node.lattrib['name']
else:
self.raise_error('<StateVariable> must specify a name')
if 'dimension' in node.lattrib:
dimension = node.lattrib['dimension']
else:
self.raise_error("State variable '{0}' must specify a dimension", name)
if 'exposure' in node.lattrib:
exposure = node.lattrib['exposure']
else:
exposure = None
self.current_regime.add_state_variable(StateVariable(name, dimension, exposure)) | def function[parse_state_variable, parameter[self, node]]:
constant[
Parses <StateVariable>
@param node: Node containing the <StateVariable> element
@type node: xml.etree.Element
@raise ParseError: Raised when the state variable is not
being defined in the context of a component type.
]
if compare[constant[name] in name[node].lattrib] begin[:]
variable[name] assign[=] call[name[node].lattrib][constant[name]]
if compare[constant[dimension] in name[node].lattrib] begin[:]
variable[dimension] assign[=] call[name[node].lattrib][constant[dimension]]
if compare[constant[exposure] in name[node].lattrib] begin[:]
variable[exposure] assign[=] call[name[node].lattrib][constant[exposure]]
call[name[self].current_regime.add_state_variable, parameter[call[name[StateVariable], parameter[name[name], name[dimension], name[exposure]]]]] | keyword[def] identifier[parse_state_variable] ( identifier[self] , identifier[node] ):
literal[string]
keyword[if] literal[string] keyword[in] identifier[node] . identifier[lattrib] :
identifier[name] = identifier[node] . identifier[lattrib] [ literal[string] ]
keyword[else] :
identifier[self] . identifier[raise_error] ( literal[string] )
keyword[if] literal[string] keyword[in] identifier[node] . identifier[lattrib] :
identifier[dimension] = identifier[node] . identifier[lattrib] [ literal[string] ]
keyword[else] :
identifier[self] . identifier[raise_error] ( literal[string] , identifier[name] )
keyword[if] literal[string] keyword[in] identifier[node] . identifier[lattrib] :
identifier[exposure] = identifier[node] . identifier[lattrib] [ literal[string] ]
keyword[else] :
identifier[exposure] = keyword[None]
identifier[self] . identifier[current_regime] . identifier[add_state_variable] ( identifier[StateVariable] ( identifier[name] , identifier[dimension] , identifier[exposure] )) | def parse_state_variable(self, node):
"""
Parses <StateVariable>
@param node: Node containing the <StateVariable> element
@type node: xml.etree.Element
@raise ParseError: Raised when the state variable is not
being defined in the context of a component type.
"""
if 'name' in node.lattrib:
name = node.lattrib['name'] # depends on [control=['if'], data=[]]
else:
self.raise_error('<StateVariable> must specify a name')
if 'dimension' in node.lattrib:
dimension = node.lattrib['dimension'] # depends on [control=['if'], data=[]]
else:
self.raise_error("State variable '{0}' must specify a dimension", name)
if 'exposure' in node.lattrib:
exposure = node.lattrib['exposure'] # depends on [control=['if'], data=[]]
else:
exposure = None
self.current_regime.add_state_variable(StateVariable(name, dimension, exposure)) |
def drop_table(model, keyspaces=None, connections=None):
"""
Drops the table indicated by the model, if it exists.
If `keyspaces` is specified, the table will be dropped for all specified keyspaces. Note that the `Model.__keyspace__` is ignored in that case.
If `connections` is specified, the table will be synched for all specified connections. Note that the `Model.__connection__` is ignored in that case.
If not specified, it will try to get the connection from the Model.
**This function should be used with caution, especially in production environments.
Take care to execute schema modifications in a single context (i.e. not concurrently with other clients).**
*There are plans to guard schema-modifying functions with an environment-driven conditional.*
"""
context = _get_context(keyspaces, connections)
for connection, keyspace in context:
with query.ContextQuery(model, keyspace=keyspace) as m:
_drop_table(m, connection=connection) | def function[drop_table, parameter[model, keyspaces, connections]]:
constant[
Drops the table indicated by the model, if it exists.
If `keyspaces` is specified, the table will be dropped for all specified keyspaces. Note that the `Model.__keyspace__` is ignored in that case.
If `connections` is specified, the table will be synched for all specified connections. Note that the `Model.__connection__` is ignored in that case.
If not specified, it will try to get the connection from the Model.
**This function should be used with caution, especially in production environments.
Take care to execute schema modifications in a single context (i.e. not concurrently with other clients).**
*There are plans to guard schema-modifying functions with an environment-driven conditional.*
]
variable[context] assign[=] call[name[_get_context], parameter[name[keyspaces], name[connections]]]
for taget[tuple[[<ast.Name object at 0x7da2054a5090>, <ast.Name object at 0x7da2054a43d0>]]] in starred[name[context]] begin[:]
with call[name[query].ContextQuery, parameter[name[model]]] begin[:]
call[name[_drop_table], parameter[name[m]]] | keyword[def] identifier[drop_table] ( identifier[model] , identifier[keyspaces] = keyword[None] , identifier[connections] = keyword[None] ):
literal[string]
identifier[context] = identifier[_get_context] ( identifier[keyspaces] , identifier[connections] )
keyword[for] identifier[connection] , identifier[keyspace] keyword[in] identifier[context] :
keyword[with] identifier[query] . identifier[ContextQuery] ( identifier[model] , identifier[keyspace] = identifier[keyspace] ) keyword[as] identifier[m] :
identifier[_drop_table] ( identifier[m] , identifier[connection] = identifier[connection] ) | def drop_table(model, keyspaces=None, connections=None):
"""
Drops the table indicated by the model, if it exists.
If `keyspaces` is specified, the table will be dropped for all specified keyspaces. Note that the `Model.__keyspace__` is ignored in that case.
If `connections` is specified, the table will be synched for all specified connections. Note that the `Model.__connection__` is ignored in that case.
If not specified, it will try to get the connection from the Model.
**This function should be used with caution, especially in production environments.
Take care to execute schema modifications in a single context (i.e. not concurrently with other clients).**
*There are plans to guard schema-modifying functions with an environment-driven conditional.*
"""
context = _get_context(keyspaces, connections)
for (connection, keyspace) in context:
with query.ContextQuery(model, keyspace=keyspace) as m:
_drop_table(m, connection=connection) # depends on [control=['with'], data=['m']] # depends on [control=['for'], data=[]] |
def on_failure(self, exc, task_id, args, kwargs, einfo):
"""
Capture the exception that caused the task to fail, if any.
"""
log.error('[{}] failed due to {}'.format(task_id, getattr(einfo, 'traceback', None)))
super(LoggedTask, self).on_failure(exc, task_id, args, kwargs, einfo) | def function[on_failure, parameter[self, exc, task_id, args, kwargs, einfo]]:
constant[
Capture the exception that caused the task to fail, if any.
]
call[name[log].error, parameter[call[constant[[{}] failed due to {}].format, parameter[name[task_id], call[name[getattr], parameter[name[einfo], constant[traceback], constant[None]]]]]]]
call[call[name[super], parameter[name[LoggedTask], name[self]]].on_failure, parameter[name[exc], name[task_id], name[args], name[kwargs], name[einfo]]] | keyword[def] identifier[on_failure] ( identifier[self] , identifier[exc] , identifier[task_id] , identifier[args] , identifier[kwargs] , identifier[einfo] ):
literal[string]
identifier[log] . identifier[error] ( literal[string] . identifier[format] ( identifier[task_id] , identifier[getattr] ( identifier[einfo] , literal[string] , keyword[None] )))
identifier[super] ( identifier[LoggedTask] , identifier[self] ). identifier[on_failure] ( identifier[exc] , identifier[task_id] , identifier[args] , identifier[kwargs] , identifier[einfo] ) | def on_failure(self, exc, task_id, args, kwargs, einfo):
"""
Capture the exception that caused the task to fail, if any.
"""
log.error('[{}] failed due to {}'.format(task_id, getattr(einfo, 'traceback', None)))
super(LoggedTask, self).on_failure(exc, task_id, args, kwargs, einfo) |
def encode_modfun(self, props, msg=None):
    """
    Encode a ``{'mod': ..., 'fun': ...}`` dict into a protobuf
    module/function pair, as used in bucket properties.

    :param props: the module/function pair
    :type props: dict
    :param msg: an existing protobuf message to fill; a fresh
        ``RpbModFun`` is created when omitted
    :type msg: riak.pb.riak_pb2.RpbModFun
    :rtype: riak.pb.riak_pb2.RpbModFun
    """
    target = riak.pb.riak_pb2.RpbModFun() if msg is None else msg
    target.module = str_to_bytes(props['mod'])
    target.function = str_to_bytes(props['fun'])
    return target
constant[
Encodes a dict with 'mod' and 'fun' keys into a protobuf
modfun pair. Used in bucket properties.
:param props: the module/function pair
:type props: dict
:param msg: the protobuf message to fill
:type msg: riak.pb.riak_pb2.RpbModFun
:rtype riak.pb.riak_pb2.RpbModFun
]
if compare[name[msg] is constant[None]] begin[:]
variable[msg] assign[=] call[name[riak].pb.riak_pb2.RpbModFun, parameter[]]
name[msg].module assign[=] call[name[str_to_bytes], parameter[call[name[props]][constant[mod]]]]
name[msg].function assign[=] call[name[str_to_bytes], parameter[call[name[props]][constant[fun]]]]
return[name[msg]] | keyword[def] identifier[encode_modfun] ( identifier[self] , identifier[props] , identifier[msg] = keyword[None] ):
literal[string]
keyword[if] identifier[msg] keyword[is] keyword[None] :
identifier[msg] = identifier[riak] . identifier[pb] . identifier[riak_pb2] . identifier[RpbModFun] ()
identifier[msg] . identifier[module] = identifier[str_to_bytes] ( identifier[props] [ literal[string] ])
identifier[msg] . identifier[function] = identifier[str_to_bytes] ( identifier[props] [ literal[string] ])
keyword[return] identifier[msg] | def encode_modfun(self, props, msg=None):
"""
Encodes a dict with 'mod' and 'fun' keys into a protobuf
modfun pair. Used in bucket properties.
:param props: the module/function pair
:type props: dict
:param msg: the protobuf message to fill
:type msg: riak.pb.riak_pb2.RpbModFun
:rtype riak.pb.riak_pb2.RpbModFun
"""
if msg is None:
msg = riak.pb.riak_pb2.RpbModFun() # depends on [control=['if'], data=['msg']]
msg.module = str_to_bytes(props['mod'])
msg.function = str_to_bytes(props['fun'])
return msg |
def imgVRange(h, va, fontSize):
    """
    return bottom,top offsets relative to baseline(0)
    """
    # Pick the bottom offset for the requested vertical alignment and
    # derive the top as bottom + image height (guard-clause style).
    if va == 'baseline':
        return 0, h
    if va in ('text-top', 'top'):
        bottom = fontSize - h
        return bottom, bottom + h
    if va == 'middle':
        # Center the image inside a 1.2*fontSize line box.
        bottom = fontSize - (1.2 * fontSize + h) * 0.5
        return bottom, bottom + h
    if va in ('text-bottom', 'bottom'):
        bottom = fontSize - 1.2 * fontSize
        return bottom, bottom + h
    if va == 'super':
        bottom = 0.5 * fontSize
        return bottom, bottom + h
    if va == 'sub':
        bottom = -0.5 * fontSize
        return bottom, bottom + h
    if hasattr(va, 'normalizedValue'):
        bottom = va.normalizedValue(fontSize)
        return bottom, bottom + h
    # Anything else is taken to be an explicit numeric bottom offset.
    return va, va + h
constant[
return bottom,top offsets relative to baseline(0)
]
if compare[name[va] equal[==] constant[baseline]] begin[:]
variable[iyo] assign[=] constant[0]
return[tuple[[<ast.Name object at 0x7da1b11f7970>, <ast.BinOp object at 0x7da1b11f7f10>]]] | keyword[def] identifier[imgVRange] ( identifier[h] , identifier[va] , identifier[fontSize] ):
literal[string]
keyword[if] identifier[va] == literal[string] :
identifier[iyo] = literal[int]
keyword[elif] identifier[va] keyword[in] ( literal[string] , literal[string] ):
identifier[iyo] = identifier[fontSize] - identifier[h]
keyword[elif] identifier[va] == literal[string] :
identifier[iyo] = identifier[fontSize] -( literal[int] * identifier[fontSize] + identifier[h] )* literal[int]
keyword[elif] identifier[va] keyword[in] ( literal[string] , literal[string] ):
identifier[iyo] = identifier[fontSize] - literal[int] * identifier[fontSize]
keyword[elif] identifier[va] == literal[string] :
identifier[iyo] = literal[int] * identifier[fontSize]
keyword[elif] identifier[va] == literal[string] :
identifier[iyo] =- literal[int] * identifier[fontSize]
keyword[elif] identifier[hasattr] ( identifier[va] , literal[string] ):
identifier[iyo] = identifier[va] . identifier[normalizedValue] ( identifier[fontSize] )
keyword[else] :
identifier[iyo] = identifier[va]
keyword[return] identifier[iyo] , identifier[iyo] + identifier[h] | def imgVRange(h, va, fontSize):
"""
return bottom,top offsets relative to baseline(0)
"""
if va == 'baseline':
iyo = 0 # depends on [control=['if'], data=[]]
elif va in ('text-top', 'top'):
iyo = fontSize - h # depends on [control=['if'], data=[]]
elif va == 'middle':
iyo = fontSize - (1.2 * fontSize + h) * 0.5 # depends on [control=['if'], data=[]]
elif va in ('text-bottom', 'bottom'):
iyo = fontSize - 1.2 * fontSize # depends on [control=['if'], data=[]]
elif va == 'super':
iyo = 0.5 * fontSize # depends on [control=['if'], data=[]]
elif va == 'sub':
iyo = -0.5 * fontSize # depends on [control=['if'], data=[]]
elif hasattr(va, 'normalizedValue'):
iyo = va.normalizedValue(fontSize) # depends on [control=['if'], data=[]]
else:
iyo = va
return (iyo, iyo + h) |
def acquire(self, blocking=True, timeout=None):
    """Acquire the lock.
    If *blocking* is true (the default), then this will block until the
    lock can be acquired. The *timeout* parameter specifies an optional
    timeout in seconds.
    The return value is a boolean indicating whether the lock was acquired.

    :param blocking: when true, suspend the current fiber until the lock
        is available (or *timeout* expires).
    :param timeout: optional timeout in seconds; only meaningful when
        *blocking* is true.
    :returns: ``True`` if the lock was acquired, ``False`` on a
        non-blocking failure or a timeout.
    """
    hub = get_hub()
    try:
        # switcher.__call__ needs to be synchronized with a lock IF it can
        # be called from different threads. This is the case here because
        # this method may be called from multiple threads and the callbacks
        # are run in the calling thread. So pass it our _lock.
        with switch_back(timeout, lock=self._lock) as switcher:
            with self._lock:
                # Fast path: the lock is free, claim it for this fiber.
                if not self._locked:
                    self._locked = 1
                    self._owner = fibers.current()
                    return True
                # Re-entrant acquisition by the owning fiber just nests.
                elif self._reentrant and self._owner is fibers.current():
                    self._locked += 1
                    return True
                elif not blocking:
                    return False
                # Register a switch-back callback so _release() can hand
                # the lock to this fiber and wake it up later.
                handle = add_callback(self, switcher)
            # It is safe to call hub.switch() outside the lock. Another
            # thread could have called acquire()+release(), thereby firing
            # the switchback. However the switchback only schedules the
            # switchback in our hub, it won't execute it yet. So the
            # switchback won't actually happen until we switch to the hub.
            hub.switch()
            # Here the lock should be ours because _release() wakes up only
            # the fiber that it passed the lock.
            assert self._locked > 0
            assert self._owner is fibers.current()
    except BaseException as e:
        # Likely a Timeout but could also be e.g. Cancelled
        # NOTE(review): if an exception fired before ``handle`` (or
        # ``switcher``) was bound -- e.g. while entering switch_back() --
        # this handler would raise NameError; confirm that cannot happen.
        with self._lock:
            # Clean up the callback. It might have been popped by
            # _release() but that is OK.
            remove_callback(self, handle)
            # This fiber was passed the lock but before that an exception
            # was already scheduled with run_callback() (likely through
            # Fiber.throw())
            if self._owner is fibers.current():
                self._release()
        if e is switcher.timeout:
            return False
        raise
    return True
constant[Acquire the lock.
If *blocking* is true (the default), then this will block until the
lock can be acquired. The *timeout* parameter specifies an optional
timeout in seconds.
The return value is a boolean indicating whether the lock was acquired.
]
variable[hub] assign[=] call[name[get_hub], parameter[]]
<ast.Try object at 0x7da1b031f550>
return[constant[True]] | keyword[def] identifier[acquire] ( identifier[self] , identifier[blocking] = keyword[True] , identifier[timeout] = keyword[None] ):
literal[string]
identifier[hub] = identifier[get_hub] ()
keyword[try] :
keyword[with] identifier[switch_back] ( identifier[timeout] , identifier[lock] = identifier[self] . identifier[_lock] ) keyword[as] identifier[switcher] :
keyword[with] identifier[self] . identifier[_lock] :
keyword[if] keyword[not] identifier[self] . identifier[_locked] :
identifier[self] . identifier[_locked] = literal[int]
identifier[self] . identifier[_owner] = identifier[fibers] . identifier[current] ()
keyword[return] keyword[True]
keyword[elif] identifier[self] . identifier[_reentrant] keyword[and] identifier[self] . identifier[_owner] keyword[is] identifier[fibers] . identifier[current] ():
identifier[self] . identifier[_locked] += literal[int]
keyword[return] keyword[True]
keyword[elif] keyword[not] identifier[blocking] :
keyword[return] keyword[False]
identifier[handle] = identifier[add_callback] ( identifier[self] , identifier[switcher] )
identifier[hub] . identifier[switch] ()
keyword[assert] identifier[self] . identifier[_locked] > literal[int]
keyword[assert] identifier[self] . identifier[_owner] keyword[is] identifier[fibers] . identifier[current] ()
keyword[except] identifier[BaseException] keyword[as] identifier[e] :
keyword[with] identifier[self] . identifier[_lock] :
identifier[remove_callback] ( identifier[self] , identifier[handle] )
keyword[if] identifier[self] . identifier[_owner] keyword[is] identifier[fibers] . identifier[current] ():
identifier[self] . identifier[_release] ()
keyword[if] identifier[e] keyword[is] identifier[switcher] . identifier[timeout] :
keyword[return] keyword[False]
keyword[raise]
keyword[return] keyword[True] | def acquire(self, blocking=True, timeout=None):
"""Acquire the lock.
If *blocking* is true (the default), then this will block until the
lock can be acquired. The *timeout* parameter specifies an optional
timeout in seconds.
The return value is a boolean indicating whether the lock was acquired.
"""
hub = get_hub()
try:
# switcher.__call__ needs to be synchronized with a lock IF it can
# be called from different threads. This is the case here because
# this method may be called from multiple threads and the callbacks
# are run in the calling thread. So pass it our _lock.
with switch_back(timeout, lock=self._lock) as switcher:
with self._lock:
if not self._locked:
self._locked = 1
self._owner = fibers.current()
return True # depends on [control=['if'], data=[]]
elif self._reentrant and self._owner is fibers.current():
self._locked += 1
return True # depends on [control=['if'], data=[]]
elif not blocking:
return False # depends on [control=['if'], data=[]]
handle = add_callback(self, switcher) # depends on [control=['with'], data=[]]
# It is safe to call hub.switch() outside the lock. Another
# thread could have called acquire()+release(), thereby firing
# the switchback. However the switchback only schedules the
# switchback in our hub, it won't execute it yet. So the
# switchback won't actually happen until we switch to the hub.
hub.switch()
# Here the lock should be ours because _release() wakes up only
# the fiber that it passed the lock.
assert self._locked > 0
assert self._owner is fibers.current() # depends on [control=['with'], data=['switcher']] # depends on [control=['try'], data=[]]
except BaseException as e:
# Likely a Timeout but could also be e.g. Cancelled
with self._lock:
# Clean up the callback. It might have been popped by
# _release() but that is OK.
remove_callback(self, handle)
# This fiber was passed the lock but before that an exception
# was already scheduled with run_callback() (likely through
# Fiber.throw())
if self._owner is fibers.current():
self._release() # depends on [control=['if'], data=[]] # depends on [control=['with'], data=[]]
if e is switcher.timeout:
return False # depends on [control=['if'], data=[]]
raise # depends on [control=['except'], data=['e']]
return True |
def parse_qs(string):
    """Intelligently parse the query string"""
    parsed = {}
    # Break the query string on unquoted ampersands ('&').
    for fragment in split_qs(string):
        try:
            # Break each fragment on unquoted equal signs.
            name, value = split_qs(fragment, delimiter='=')
        except ValueError:
            # A bare token without an equals sign maps to an empty value.
            parsed[fragment] = ''
        else:
            parsed[name] = value
    return parsed
constant[Intelligently parse the query string]
variable[result] assign[=] dictionary[[], []]
for taget[name[item]] in starred[call[name[split_qs], parameter[name[string]]]] begin[:]
<ast.Try object at 0x7da1b0baac50>
return[name[result]] | keyword[def] identifier[parse_qs] ( identifier[string] ):
literal[string]
identifier[result] ={}
keyword[for] identifier[item] keyword[in] identifier[split_qs] ( identifier[string] ):
keyword[try] :
identifier[key] , identifier[value] = identifier[split_qs] ( identifier[item] , identifier[delimiter] = literal[string] )
keyword[except] identifier[ValueError] :
identifier[result] [ identifier[item] ]= literal[string]
keyword[else] :
identifier[result] [ identifier[key] ]= identifier[value]
keyword[return] identifier[result] | def parse_qs(string):
"""Intelligently parse the query string"""
result = {}
for item in split_qs(string):
# Split the query string by unquotes ampersants ('&')
try:
# Split the item by unquotes equal signs
(key, value) = split_qs(item, delimiter='=') # depends on [control=['try'], data=[]]
except ValueError:
# Single value without equals sign
result[item] = '' # depends on [control=['except'], data=[]]
else:
result[key] = value # depends on [control=['for'], data=['item']]
return result |
def interconnect_link_topologies(self):
    """
    Gets the InterconnectLinkTopologies API client.
    Returns:
        InterconnectLinkTopologies:
    """
    # Lazily build the client on first access, then reuse the cached one.
    client = self.__interconnect_link_topologies
    if not client:
        client = InterconnectLinkTopologies(self.__connection)
        self.__interconnect_link_topologies = client
    return client
constant[
Gets the InterconnectLinkTopologies API client.
Returns:
InterconnectLinkTopologies:
]
if <ast.UnaryOp object at 0x7da20c76c730> begin[:]
name[self].__interconnect_link_topologies assign[=] call[name[InterconnectLinkTopologies], parameter[name[self].__connection]]
return[name[self].__interconnect_link_topologies] | keyword[def] identifier[interconnect_link_topologies] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[__interconnect_link_topologies] :
identifier[self] . identifier[__interconnect_link_topologies] = identifier[InterconnectLinkTopologies] ( identifier[self] . identifier[__connection] )
keyword[return] identifier[self] . identifier[__interconnect_link_topologies] | def interconnect_link_topologies(self):
"""
Gets the InterconnectLinkTopologies API client.
Returns:
InterconnectLinkTopologies:
"""
if not self.__interconnect_link_topologies:
self.__interconnect_link_topologies = InterconnectLinkTopologies(self.__connection) # depends on [control=['if'], data=[]]
return self.__interconnect_link_topologies |
def set_line_width(self, width):
    "Set line width"
    self.line_width = width
    # Only emit the PDF 'w' operator once a page has been started.
    if self.page > 0:
        scaled = width * self.k
        self._out(sprintf('%.2f w', scaled))
constant[Set line width]
name[self].line_width assign[=] name[width]
if compare[name[self].page greater[>] constant[0]] begin[:]
call[name[self]._out, parameter[call[name[sprintf], parameter[constant[%.2f w], binary_operation[name[width] * name[self].k]]]]] | keyword[def] identifier[set_line_width] ( identifier[self] , identifier[width] ):
literal[string]
identifier[self] . identifier[line_width] = identifier[width]
keyword[if] ( identifier[self] . identifier[page] > literal[int] ):
identifier[self] . identifier[_out] ( identifier[sprintf] ( literal[string] , identifier[width] * identifier[self] . identifier[k] )) | def set_line_width(self, width):
"""Set line width"""
self.line_width = width
if self.page > 0:
self._out(sprintf('%.2f w', width * self.k)) # depends on [control=['if'], data=[]] |
def hook_symbol(self, symbol_name, simproc, kwargs=None, replace=None):
    """
    Resolve a dependency in a binary. Looks up the address of the given symbol, and then hooks that
    address. If the symbol was not available in the loaded libraries, this address may be provided
    by the CLE externs object.
    Additionally, if instead of a symbol name you provide an address, some secret functionality will
    kick in and you will probably just hook that address, UNLESS you're on powerpc64 ABIv1 or some
    yet-unknown scary ABI that has its function pointers point to something other than the actual
    functions, in which case it'll do the right thing.
    :param symbol_name: The name of the dependency to resolve.
    :param simproc: The SimProcedure instance (or function) with which to hook the symbol
    :param kwargs: If you provide a SimProcedure for the hook, these are the keyword
                   arguments that will be passed to the procedure's `run` method
                   eventually.
    :param replace: Control the behavior on finding that the address is already hooked. If
                    true, silently replace the hook. If false, warn and do not replace the
                    hook. If none (default), warn and replace the hook.
    :returns: The address of the new symbol.
    :rtype: int
    """
    # A plain int is treated as an address directly; anything else is a
    # symbol name to resolve through the loader.
    if type(symbol_name) is not int:
        sym = self.loader.find_symbol(symbol_name)
        if sym is None:
            # it could be a previously unresolved weak symbol..?
            new_sym = None
            for reloc in self.loader.find_relevant_relocations(symbol_name):
                if not reloc.symbol.is_weak:
                    raise Exception("Symbol is strong but we couldn't find its resolution? Report to @rhelmot.")
                # Create the extern symbol lazily (once) and point every
                # relevant relocation at it.
                if new_sym is None:
                    new_sym = self.loader.extern_object.make_extern(symbol_name)
                reloc.resolve(new_sym)
                reloc.relocate([])
            if new_sym is None:
                # No relocation referenced this name either: nothing to hook.
                l.error("Could not find symbol %s", symbol_name)
                return None
            sym = new_sym
        basic_addr = sym.rebased_addr
    else:
        basic_addr = symbol_name
        symbol_name = None
    # Let the SimOS redirect the basic address if needed (e.g. on ABIs whose
    # function pointers reference descriptors rather than code addresses).
    hook_addr, _ = self.simos.prepare_function_symbol(symbol_name, basic_addr=basic_addr)
    self.hook(hook_addr, simproc, kwargs=kwargs, replace=replace)
    return hook_addr
constant[
Resolve a dependency in a binary. Looks up the address of the given symbol, and then hooks that
address. If the symbol was not available in the loaded libraries, this address may be provided
by the CLE externs object.
Additionally, if instead of a symbol name you provide an address, some secret functionality will
kick in and you will probably just hook that address, UNLESS you're on powerpc64 ABIv1 or some
yet-unknown scary ABI that has its function pointers point to something other than the actual
functions, in which case it'll do the right thing.
:param symbol_name: The name of the dependency to resolve.
:param simproc: The SimProcedure instance (or function) with which to hook the symbol
:param kwargs: If you provide a SimProcedure for the hook, these are the keyword
arguments that will be passed to the procedure's `run` method
eventually.
:param replace: Control the behavior on finding that the address is already hooked. If
true, silently replace the hook. If false, warn and do not replace the
hook. If none (default), warn and replace the hook.
:returns: The address of the new symbol.
:rtype: int
]
if compare[call[name[type], parameter[name[symbol_name]]] is_not name[int]] begin[:]
variable[sym] assign[=] call[name[self].loader.find_symbol, parameter[name[symbol_name]]]
if compare[name[sym] is constant[None]] begin[:]
variable[new_sym] assign[=] constant[None]
for taget[name[reloc]] in starred[call[name[self].loader.find_relevant_relocations, parameter[name[symbol_name]]]] begin[:]
if <ast.UnaryOp object at 0x7da2044c2980> begin[:]
<ast.Raise object at 0x7da2044c1ba0>
if compare[name[new_sym] is constant[None]] begin[:]
variable[new_sym] assign[=] call[name[self].loader.extern_object.make_extern, parameter[name[symbol_name]]]
call[name[reloc].resolve, parameter[name[new_sym]]]
call[name[reloc].relocate, parameter[list[[]]]]
if compare[name[new_sym] is constant[None]] begin[:]
call[name[l].error, parameter[constant[Could not find symbol %s], name[symbol_name]]]
return[constant[None]]
variable[sym] assign[=] name[new_sym]
variable[basic_addr] assign[=] name[sym].rebased_addr
<ast.Tuple object at 0x7da18ede4c10> assign[=] call[name[self].simos.prepare_function_symbol, parameter[name[symbol_name]]]
call[name[self].hook, parameter[name[hook_addr], name[simproc]]]
return[name[hook_addr]] | keyword[def] identifier[hook_symbol] ( identifier[self] , identifier[symbol_name] , identifier[simproc] , identifier[kwargs] = keyword[None] , identifier[replace] = keyword[None] ):
literal[string]
keyword[if] identifier[type] ( identifier[symbol_name] ) keyword[is] keyword[not] identifier[int] :
identifier[sym] = identifier[self] . identifier[loader] . identifier[find_symbol] ( identifier[symbol_name] )
keyword[if] identifier[sym] keyword[is] keyword[None] :
identifier[new_sym] = keyword[None]
keyword[for] identifier[reloc] keyword[in] identifier[self] . identifier[loader] . identifier[find_relevant_relocations] ( identifier[symbol_name] ):
keyword[if] keyword[not] identifier[reloc] . identifier[symbol] . identifier[is_weak] :
keyword[raise] identifier[Exception] ( literal[string] )
keyword[if] identifier[new_sym] keyword[is] keyword[None] :
identifier[new_sym] = identifier[self] . identifier[loader] . identifier[extern_object] . identifier[make_extern] ( identifier[symbol_name] )
identifier[reloc] . identifier[resolve] ( identifier[new_sym] )
identifier[reloc] . identifier[relocate] ([])
keyword[if] identifier[new_sym] keyword[is] keyword[None] :
identifier[l] . identifier[error] ( literal[string] , identifier[symbol_name] )
keyword[return] keyword[None]
identifier[sym] = identifier[new_sym]
identifier[basic_addr] = identifier[sym] . identifier[rebased_addr]
keyword[else] :
identifier[basic_addr] = identifier[symbol_name]
identifier[symbol_name] = keyword[None]
identifier[hook_addr] , identifier[_] = identifier[self] . identifier[simos] . identifier[prepare_function_symbol] ( identifier[symbol_name] , identifier[basic_addr] = identifier[basic_addr] )
identifier[self] . identifier[hook] ( identifier[hook_addr] , identifier[simproc] , identifier[kwargs] = identifier[kwargs] , identifier[replace] = identifier[replace] )
keyword[return] identifier[hook_addr] | def hook_symbol(self, symbol_name, simproc, kwargs=None, replace=None):
"""
Resolve a dependency in a binary. Looks up the address of the given symbol, and then hooks that
address. If the symbol was not available in the loaded libraries, this address may be provided
by the CLE externs object.
Additionally, if instead of a symbol name you provide an address, some secret functionality will
kick in and you will probably just hook that address, UNLESS you're on powerpc64 ABIv1 or some
yet-unknown scary ABI that has its function pointers point to something other than the actual
functions, in which case it'll do the right thing.
:param symbol_name: The name of the dependency to resolve.
:param simproc: The SimProcedure instance (or function) with which to hook the symbol
:param kwargs: If you provide a SimProcedure for the hook, these are the keyword
arguments that will be passed to the procedure's `run` method
eventually.
:param replace: Control the behavior on finding that the address is already hooked. If
true, silently replace the hook. If false, warn and do not replace the
hook. If none (default), warn and replace the hook.
:returns: The address of the new symbol.
:rtype: int
"""
if type(symbol_name) is not int:
sym = self.loader.find_symbol(symbol_name)
if sym is None:
# it could be a previously unresolved weak symbol..?
new_sym = None
for reloc in self.loader.find_relevant_relocations(symbol_name):
if not reloc.symbol.is_weak:
raise Exception("Symbol is strong but we couldn't find its resolution? Report to @rhelmot.") # depends on [control=['if'], data=[]]
if new_sym is None:
new_sym = self.loader.extern_object.make_extern(symbol_name) # depends on [control=['if'], data=['new_sym']]
reloc.resolve(new_sym)
reloc.relocate([]) # depends on [control=['for'], data=['reloc']]
if new_sym is None:
l.error('Could not find symbol %s', symbol_name)
return None # depends on [control=['if'], data=[]]
sym = new_sym # depends on [control=['if'], data=['sym']]
basic_addr = sym.rebased_addr # depends on [control=['if'], data=[]]
else:
basic_addr = symbol_name
symbol_name = None
(hook_addr, _) = self.simos.prepare_function_symbol(symbol_name, basic_addr=basic_addr)
self.hook(hook_addr, simproc, kwargs=kwargs, replace=replace)
return hook_addr |
def _compute_mean(self, C, mag, dists, rake):
    """
    Compute and return mean value without site conditions,
    that is equations 2-5, page 970.
    """
    # Evaluate each ground-motion term separately, then sum them.
    scaling = self._compute_magnitude_scaling_term(C, mag)
    decay = self._compute_geometric_decay_term(C, mag, dists)
    faulting = self._compute_faulting_style_term(C, rake)
    attenuation = self._compute_anelestic_attenuation_term(C, dists)
    return scaling + decay + faulting + attenuation
constant[
Compute and return mean value without site conditions,
that is equations 2-5, page 970.
]
variable[mean] assign[=] binary_operation[binary_operation[binary_operation[call[name[self]._compute_magnitude_scaling_term, parameter[name[C], name[mag]]] + call[name[self]._compute_geometric_decay_term, parameter[name[C], name[mag], name[dists]]]] + call[name[self]._compute_faulting_style_term, parameter[name[C], name[rake]]]] + call[name[self]._compute_anelestic_attenuation_term, parameter[name[C], name[dists]]]]
return[name[mean]] | keyword[def] identifier[_compute_mean] ( identifier[self] , identifier[C] , identifier[mag] , identifier[dists] , identifier[rake] ):
literal[string]
identifier[mean] =(
identifier[self] . identifier[_compute_magnitude_scaling_term] ( identifier[C] , identifier[mag] )+
identifier[self] . identifier[_compute_geometric_decay_term] ( identifier[C] , identifier[mag] , identifier[dists] )+
identifier[self] . identifier[_compute_faulting_style_term] ( identifier[C] , identifier[rake] )+
identifier[self] . identifier[_compute_anelestic_attenuation_term] ( identifier[C] , identifier[dists] )
)
keyword[return] identifier[mean] | def _compute_mean(self, C, mag, dists, rake):
"""
Compute and return mean value without site conditions,
that is equations 2-5, page 970.
"""
mean = self._compute_magnitude_scaling_term(C, mag) + self._compute_geometric_decay_term(C, mag, dists) + self._compute_faulting_style_term(C, rake) + self._compute_anelestic_attenuation_term(C, dists)
return mean |
def validate(self, bigchain, current_transactions=None):
    """Validate a validator-election transaction.

    For more details refer BEP-21: https://github.com/bigchaindb/BEPs/tree/master/21

    :param bigchain: the chain instance used to look up current validators
    :param current_transactions: in-flight transactions to validate against
        (defaults to an empty list)
    :returns: ``self`` when the election is valid
    :raises InvalidPowerChange: if the requested power is 1/3 or more of
        the current total voting power
    """
    # Avoid the shared mutable default argument ([]) by using None as the
    # sentinel; behavior is unchanged for existing callers.
    if current_transactions is None:
        current_transactions = []
    current_validators = self.get_validators(bigchain)
    super(ValidatorElection, self).validate(bigchain, current_transactions=current_transactions)
    # NOTE: change more than 1/3 of the current power is not allowed
    if self.asset['data']['power'] >= (1/3)*sum(current_validators.values()):
        raise InvalidPowerChange('`power` change must be less than 1/3 of total power')
    return self
constant[For more details refer BEP-21: https://github.com/bigchaindb/BEPs/tree/master/21
]
variable[current_validators] assign[=] call[name[self].get_validators, parameter[name[bigchain]]]
call[call[name[super], parameter[name[ValidatorElection], name[self]]].validate, parameter[name[bigchain]]]
if compare[call[call[name[self].asset][constant[data]]][constant[power]] greater_or_equal[>=] binary_operation[binary_operation[constant[1] / constant[3]] * call[name[sum], parameter[call[name[current_validators].values, parameter[]]]]]] begin[:]
<ast.Raise object at 0x7da1b1b61b40>
return[name[self]] | keyword[def] identifier[validate] ( identifier[self] , identifier[bigchain] , identifier[current_transactions] =[]):
literal[string]
identifier[current_validators] = identifier[self] . identifier[get_validators] ( identifier[bigchain] )
identifier[super] ( identifier[ValidatorElection] , identifier[self] ). identifier[validate] ( identifier[bigchain] , identifier[current_transactions] = identifier[current_transactions] )
keyword[if] identifier[self] . identifier[asset] [ literal[string] ][ literal[string] ]>=( literal[int] / literal[int] )* identifier[sum] ( identifier[current_validators] . identifier[values] ()):
keyword[raise] identifier[InvalidPowerChange] ( literal[string] )
keyword[return] identifier[self] | def validate(self, bigchain, current_transactions=[]):
"""For more details refer BEP-21: https://github.com/bigchaindb/BEPs/tree/master/21
"""
current_validators = self.get_validators(bigchain)
super(ValidatorElection, self).validate(bigchain, current_transactions=current_transactions)
# NOTE: change more than 1/3 of the current power is not allowed
if self.asset['data']['power'] >= 1 / 3 * sum(current_validators.values()):
raise InvalidPowerChange('`power` change must be less than 1/3 of total power') # depends on [control=['if'], data=[]]
return self |
def add_child(self, child, name=None, index=None):
    """Add object `child` to the first map and store it for the second."""
    self.m1.add_child(child, name, index)
    # Default the recorded position to the end of the second map's children.
    position = len(self.m2._children) if index is None else index
    self.children_for_m2.append((child, name, position))
constant[Add object `child` to the first map and store it for the second.]
call[name[self].m1.add_child, parameter[name[child], name[name], name[index]]]
if compare[name[index] is constant[None]] begin[:]
variable[index] assign[=] call[name[len], parameter[name[self].m2._children]]
call[name[self].children_for_m2.append, parameter[tuple[[<ast.Name object at 0x7da20e9b2f80>, <ast.Name object at 0x7da20e9b0520>, <ast.Name object at 0x7da20e9b2f20>]]]] | keyword[def] identifier[add_child] ( identifier[self] , identifier[child] , identifier[name] = keyword[None] , identifier[index] = keyword[None] ):
literal[string]
identifier[self] . identifier[m1] . identifier[add_child] ( identifier[child] , identifier[name] , identifier[index] )
keyword[if] identifier[index] keyword[is] keyword[None] :
identifier[index] = identifier[len] ( identifier[self] . identifier[m2] . identifier[_children] )
identifier[self] . identifier[children_for_m2] . identifier[append] (( identifier[child] , identifier[name] , identifier[index] )) | def add_child(self, child, name=None, index=None):
"""Add object `child` to the first map and store it for the second."""
self.m1.add_child(child, name, index)
if index is None:
index = len(self.m2._children) # depends on [control=['if'], data=['index']]
self.children_for_m2.append((child, name, index)) |
def get_rev(tag=True):
    """Get build revision.
    @param tag Use git tag instead of hash?
    """
    # Fabric may hand us the string "True" instead of a boolean.
    if tag in (True, "True"):
        rev_cmd = "git describe --always --tag"
    else:
        rev_cmd = "git rev-parse HEAD"
    return local(rev_cmd, capture=True).strip()
constant[Get build revision.
@param tag Use git tag instead of hash?
]
variable[rev_cmd] assign[=] <ast.IfExp object at 0x7da1b2344190>
return[call[call[name[local], parameter[name[rev_cmd]]].strip, parameter[]]] | keyword[def] identifier[get_rev] ( identifier[tag] = keyword[True] ):
literal[string]
identifier[rev_cmd] = literal[string] keyword[if] identifier[tag] keyword[in] ( keyword[True] , literal[string] ) keyword[else] literal[string]
keyword[return] identifier[local] ( identifier[rev_cmd] , identifier[capture] = keyword[True] ). identifier[strip] () | def get_rev(tag=True):
"""Get build revision.
@param tag Use git tag instead of hash?
"""
rev_cmd = 'git describe --always --tag' if tag in (True, 'True') else 'git rev-parse HEAD'
return local(rev_cmd, capture=True).strip() |
def open(filename):
"""Open (if necessary) filename, and read the magic."""
if isinstance(filename, six.string_types):
try:
fdesc = gzip.open(filename, "rb")
magic = fdesc.read(4)
except IOError:
fdesc = open(filename, "rb")
magic = fdesc.read(4)
else:
fdesc = filename
filename = getattr(fdesc, "name", "No name")
magic = fdesc.read(4)
return filename, fdesc, magic | def function[open, parameter[filename]]:
constant[Open (if necessary) filename, and read the magic.]
if call[name[isinstance], parameter[name[filename], name[six].string_types]] begin[:]
<ast.Try object at 0x7da1b2126800>
return[tuple[[<ast.Name object at 0x7da1b215fc70>, <ast.Name object at 0x7da1b215e0e0>, <ast.Name object at 0x7da1b215d300>]]] | keyword[def] identifier[open] ( identifier[filename] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[filename] , identifier[six] . identifier[string_types] ):
keyword[try] :
identifier[fdesc] = identifier[gzip] . identifier[open] ( identifier[filename] , literal[string] )
identifier[magic] = identifier[fdesc] . identifier[read] ( literal[int] )
keyword[except] identifier[IOError] :
identifier[fdesc] = identifier[open] ( identifier[filename] , literal[string] )
identifier[magic] = identifier[fdesc] . identifier[read] ( literal[int] )
keyword[else] :
identifier[fdesc] = identifier[filename]
identifier[filename] = identifier[getattr] ( identifier[fdesc] , literal[string] , literal[string] )
identifier[magic] = identifier[fdesc] . identifier[read] ( literal[int] )
keyword[return] identifier[filename] , identifier[fdesc] , identifier[magic] | def open(filename):
"""Open (if necessary) filename, and read the magic."""
if isinstance(filename, six.string_types):
try:
fdesc = gzip.open(filename, 'rb')
magic = fdesc.read(4) # depends on [control=['try'], data=[]]
except IOError:
fdesc = open(filename, 'rb')
magic = fdesc.read(4) # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
else:
fdesc = filename
filename = getattr(fdesc, 'name', 'No name')
magic = fdesc.read(4)
return (filename, fdesc, magic) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.