code stringlengths 75 104k | code_sememe stringlengths 47 309k | token_type stringlengths 215 214k | code_dependency stringlengths 75 155k |
|---|---|---|---|
def proximal_huber(space, gamma):
    """Proximal factory of the Huber norm.

    Parameters
    ----------
    space : `TensorSpace`
        The domain of the functional
    gamma : float
        The smoothing parameter of the Huber norm functional.

    Returns
    -------
    prox_factory : function
        Factory for the proximal operator to be initialized.

    See Also
    --------
    odl.solvers.default_functionals.Huber : the Huber norm functional

    Notes
    -----
    The proximal operator is given by the proximal operator of
    ``1/(2*gamma) * L2 norm`` in points that are ``<= gamma``, and by the
    proximal operator of the l1 norm in points that are ``> gamma``.
    """
    # Normalize once; ``gamma`` is captured by the closure of ``_call`` below.
    gamma = float(gamma)

    class ProximalHuber(Operator):
        """Proximal operator of Huber norm."""

        def __init__(self, sigma):
            """Initialize a new instance.

            Parameters
            ----------
            sigma : positive float
                Step-size-like parameter of the proximal operator.
            """
            self.sigma = float(sigma)
            super(ProximalHuber, self).__init__(domain=space, range=space,
                                                linear=False)

        def _call(self, x, out):
            """Return ``self(x, out=out)``."""
            # Pointwise magnitude of ``x``: 2-norm across components for
            # product spaces, plain absolute value otherwise.
            if isinstance(self.domain, ProductSpace):
                norm = PointwiseNorm(self.domain, 2)(x)
            else:
                norm = x.ufuncs.absolute()
            # Quadratic region (|x| <= gamma + sigma): scale towards zero.
            mask = norm.ufuncs.less_equal(gamma + self.sigma)
            out[mask] = gamma / (gamma + self.sigma) * x[mask]
            # L1 region (complement of the mask): soft-shrink by ``sigma``.
            mask.ufuncs.logical_not(out=mask)
            sign_x = x.ufuncs.sign()
            out[mask] = x[mask] - self.sigma * sign_x[mask]
            return out
return ProximalHuber | def function[proximal_huber, parameter[space, gamma]]:
constant[Proximal factory of the Huber norm.
Parameters
----------
space : `TensorSpace`
The domain of the functional
gamma : float
The smoothing parameter of the Huber norm functional.
Returns
-------
prox_factory : function
Factory for the proximal operator to be initialized.
See Also
--------
odl.solvers.default_functionals.Huber : the Huber norm functional
Notes
-----
The proximal operator is given by given by the proximal operator of
``1/(2*gamma) * L2 norm`` in points that are ``<= gamma``, and by the
proximal operator of the l1 norm in points that are ``> gamma``.
]
variable[gamma] assign[=] call[name[float], parameter[name[gamma]]]
class class[ProximalHuber, parameter[]] begin[:]
constant[Proximal operator of Huber norm.]
def function[__init__, parameter[self, sigma]]:
constant[Initialize a new instance.
Parameters
----------
sigma : positive float
]
name[self].sigma assign[=] call[name[float], parameter[name[sigma]]]
call[call[name[super], parameter[name[ProximalHuber], name[self]]].__init__, parameter[]]
def function[_call, parameter[self, x, out]]:
constant[Return ``self(x, out=out)``.]
if call[name[isinstance], parameter[name[self].domain, name[ProductSpace]]] begin[:]
variable[norm] assign[=] call[call[name[PointwiseNorm], parameter[name[self].domain, constant[2]]], parameter[name[x]]]
variable[mask] assign[=] call[name[norm].ufuncs.less_equal, parameter[binary_operation[name[gamma] + name[self].sigma]]]
call[name[out]][name[mask]] assign[=] binary_operation[binary_operation[name[gamma] / binary_operation[name[gamma] + name[self].sigma]] * call[name[x]][name[mask]]]
call[name[mask].ufuncs.logical_not, parameter[]]
variable[sign_x] assign[=] call[name[x].ufuncs.sign, parameter[]]
call[name[out]][name[mask]] assign[=] binary_operation[call[name[x]][name[mask]] - binary_operation[name[self].sigma * call[name[sign_x]][name[mask]]]]
return[name[out]]
return[name[ProximalHuber]] | keyword[def] identifier[proximal_huber] ( identifier[space] , identifier[gamma] ):
literal[string]
identifier[gamma] = identifier[float] ( identifier[gamma] )
keyword[class] identifier[ProximalHuber] ( identifier[Operator] ):
literal[string]
keyword[def] identifier[__init__] ( identifier[self] , identifier[sigma] ):
literal[string]
identifier[self] . identifier[sigma] = identifier[float] ( identifier[sigma] )
identifier[super] ( identifier[ProximalHuber] , identifier[self] ). identifier[__init__] ( identifier[domain] = identifier[space] , identifier[range] = identifier[space] ,
identifier[linear] = keyword[False] )
keyword[def] identifier[_call] ( identifier[self] , identifier[x] , identifier[out] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[self] . identifier[domain] , identifier[ProductSpace] ):
identifier[norm] = identifier[PointwiseNorm] ( identifier[self] . identifier[domain] , literal[int] )( identifier[x] )
keyword[else] :
identifier[norm] = identifier[x] . identifier[ufuncs] . identifier[absolute] ()
identifier[mask] = identifier[norm] . identifier[ufuncs] . identifier[less_equal] ( identifier[gamma] + identifier[self] . identifier[sigma] )
identifier[out] [ identifier[mask] ]= identifier[gamma] /( identifier[gamma] + identifier[self] . identifier[sigma] )* identifier[x] [ identifier[mask] ]
identifier[mask] . identifier[ufuncs] . identifier[logical_not] ( identifier[out] = identifier[mask] )
identifier[sign_x] = identifier[x] . identifier[ufuncs] . identifier[sign] ()
identifier[out] [ identifier[mask] ]= identifier[x] [ identifier[mask] ]- identifier[self] . identifier[sigma] * identifier[sign_x] [ identifier[mask] ]
keyword[return] identifier[out]
keyword[return] identifier[ProximalHuber] | def proximal_huber(space, gamma):
"""Proximal factory of the Huber norm.
Parameters
----------
space : `TensorSpace`
The domain of the functional
gamma : float
The smoothing parameter of the Huber norm functional.
Returns
-------
prox_factory : function
Factory for the proximal operator to be initialized.
See Also
--------
odl.solvers.default_functionals.Huber : the Huber norm functional
Notes
-----
The proximal operator is given by given by the proximal operator of
``1/(2*gamma) * L2 norm`` in points that are ``<= gamma``, and by the
proximal operator of the l1 norm in points that are ``> gamma``.
"""
gamma = float(gamma)
class ProximalHuber(Operator):
"""Proximal operator of Huber norm."""
def __init__(self, sigma):
"""Initialize a new instance.
Parameters
----------
sigma : positive float
"""
self.sigma = float(sigma)
super(ProximalHuber, self).__init__(domain=space, range=space, linear=False)
def _call(self, x, out):
"""Return ``self(x, out=out)``."""
if isinstance(self.domain, ProductSpace):
norm = PointwiseNorm(self.domain, 2)(x) # depends on [control=['if'], data=[]]
else:
norm = x.ufuncs.absolute()
mask = norm.ufuncs.less_equal(gamma + self.sigma)
out[mask] = gamma / (gamma + self.sigma) * x[mask]
mask.ufuncs.logical_not(out=mask)
sign_x = x.ufuncs.sign()
out[mask] = x[mask] - self.sigma * sign_x[mask]
return out
return ProximalHuber |
def _make_user_dict(self, username):
    """
    Processes a Twitter User object, exporting as a nested dictionary.

    Complex values (i.e. objects that aren't int, bool, float, str, or
    a collection of such) are converted to strings (i.e. using __str__
    or __repr__). To access user data only, use make_user_dict(username)['_json'].

    :param username: A Twitter username string.
    :return: A nested dictionary of user data.
    """
    # Fetch the full user object from the API client, then flatten it
    # into plain dictionaries via the generic object serializer.
    user = self._api.get_user(username)
return self._make_object_dict(user) | def function[_make_user_dict, parameter[self, username]]:
constant[
Processes a Twitter User object, exporting as a nested dictionary.
Complex values (i.e. objects that aren't int, bool, float, str, or
a collection of such) are converted to strings (i.e. using __str__
or __repr__). To access user data only, use make_user_dict(username)['_json'].
:param username: A Twitter username string.
:return: A nested dictionary of user data.
]
variable[user] assign[=] call[name[self]._api.get_user, parameter[name[username]]]
return[call[name[self]._make_object_dict, parameter[name[user]]]] | keyword[def] identifier[_make_user_dict] ( identifier[self] , identifier[username] ):
literal[string]
identifier[user] = identifier[self] . identifier[_api] . identifier[get_user] ( identifier[username] )
keyword[return] identifier[self] . identifier[_make_object_dict] ( identifier[user] ) | def _make_user_dict(self, username):
"""
Processes a Twitter User object, exporting as a nested dictionary.
Complex values (i.e. objects that aren't int, bool, float, str, or
a collection of such) are converted to strings (i.e. using __str__
or __repr__). To access user data only, use make_user_dict(username)['_json'].
:param username: A Twitter username string.
:return: A nested dictionary of user data.
"""
user = self._api.get_user(username)
return self._make_object_dict(user) |
def is_valid_package_module_name(name):
    """
    Test whether it's a valid package or module name.

    - a-z, 0-9, and underline
    - starts with underline or alpha letter

    valid:

    - ``a``
    - ``a.b.c``
    - ``_a``
    - ``_a._b._c``

    invalid:

    - ``A``
    - ``0``
    - ``.a``
    - ``a#b``
    """
    if "." in name:
        # Dotted path: every dot-separated part must itself be a valid
        # name.  An empty part (e.g. ``.a`` or ``a..b``) recurses into
        # the empty-string branch below and is rejected.
        for part in name.split("."):
            if not is_valid_package_module_name(part):
                return False
    elif len(name):
        # First character must be in the allowed leading-character set
        # (module-level constant; presumably letters and underscore).
        if name[0] not in _first_letter_for_valid_name:
            return False
        # Every character must come from the allowed character set;
        # any leftover after the set difference means an invalid char.
        if len(set(name).difference(_char_set_for_valid_name)):
            return False
    else:
        # The empty string is not a valid name.
        return False
return True | def function[is_valid_package_module_name, parameter[name]]:
constant[
Test whether it's a valid package or module name.
- a-z, 0-9, and underline
- starts with underline or alpha letter
valid:
- ``a``
- ``a.b.c``
- ``_a``
- ``_a._b._c``
invalid:
- ``A``
- ``0``
- ``.a``
- ``a#b``
]
if compare[constant[.] in name[name]] begin[:]
for taget[name[part]] in starred[call[name[name].split, parameter[constant[.]]]] begin[:]
if <ast.UnaryOp object at 0x7da204961b70> begin[:]
return[constant[False]]
return[constant[True]] | keyword[def] identifier[is_valid_package_module_name] ( identifier[name] ):
literal[string]
keyword[if] literal[string] keyword[in] identifier[name] :
keyword[for] identifier[part] keyword[in] identifier[name] . identifier[split] ( literal[string] ):
keyword[if] keyword[not] identifier[is_valid_package_module_name] ( identifier[part] ):
keyword[return] keyword[False]
keyword[elif] identifier[len] ( identifier[name] ):
keyword[if] identifier[name] [ literal[int] ] keyword[not] keyword[in] identifier[_first_letter_for_valid_name] :
keyword[return] keyword[False]
keyword[if] identifier[len] ( identifier[set] ( identifier[name] ). identifier[difference] ( identifier[_char_set_for_valid_name] )):
keyword[return] keyword[False]
keyword[else] :
keyword[return] keyword[False]
keyword[return] keyword[True] | def is_valid_package_module_name(name):
"""
Test whether it's a valid package or module name.
- a-z, 0-9, and underline
- starts with underline or alpha letter
valid:
- ``a``
- ``a.b.c``
- ``_a``
- ``_a._b._c``
invalid:
- ``A``
- ``0``
- ``.a``
- ``a#b``
"""
if '.' in name:
for part in name.split('.'):
if not is_valid_package_module_name(part):
return False # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['part']] # depends on [control=['if'], data=['name']]
elif len(name):
if name[0] not in _first_letter_for_valid_name:
return False # depends on [control=['if'], data=[]]
if len(set(name).difference(_char_set_for_valid_name)):
return False # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
return False
return True |
def get_email_logs(self):
    '''
    Returns a string representation of logs.

    Only displays errors and warnings in the email logs
    to avoid being verbose
    '''
    message = ""
    # Keep only ERROR/WARNING records; other log levels are too noisy
    # for an email digest.
    for log in self.record:
        if log["log_type"] in [ERROR, WARNING]:
            message += self.format_message(**log)
return message | def function[get_email_logs, parameter[self]]:
constant[
Returns a string representation of logs.
Only displays errors and warnings in the email logs
to avoid being verbose
]
variable[message] assign[=] constant[]
for taget[name[log]] in starred[name[self].record] begin[:]
if compare[call[name[log]][constant[log_type]] in list[[<ast.Name object at 0x7da1b0568be0>, <ast.Name object at 0x7da1b056a680>]]] begin[:]
<ast.AugAssign object at 0x7da1b056b8e0>
return[name[message]] | keyword[def] identifier[get_email_logs] ( identifier[self] ):
literal[string]
identifier[message] = literal[string]
keyword[for] identifier[log] keyword[in] identifier[self] . identifier[record] :
keyword[if] identifier[log] [ literal[string] ] keyword[in] [ identifier[ERROR] , identifier[WARNING] ]:
identifier[message] += identifier[self] . identifier[format_message] (** identifier[log] )
keyword[return] identifier[message] | def get_email_logs(self):
"""
Returns a string representation of logs.
Only displays errors and warnings in the email logs
to avoid being verbose
"""
message = ''
for log in self.record:
if log['log_type'] in [ERROR, WARNING]:
message += self.format_message(**log) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['log']]
return message |
def input_connections(self, name):
    """Yield ordered list of connections to one child.

    Each result is a ((component, output), (component, input)) tuple.

    :param string name: the component whose input connections are
        wanted.
    """
    for input_name in self._compound_children[name].inputs:
        # Candidate destination: (child name, input name).
        dest = name, input_name
        for src, dests in self._compound_linkages.items():
            if isinstance(dests[0], six.string_types):
                # Flat form [comp1, input1, comp2, input2, ...]: pair up
                # consecutive entries into (component, input) tuples.
                dests = zip(dests[0::2], dests[1::2])
            if dest in dests:
yield src, dest | def function[input_connections, parameter[self, name]]:
constant[Yield ordered list of connections to one child.
Each result is a ((component, output), (component, input)) tuple.
:param string name: the component whose input connections are
wanted.
]
for taget[name[input_name]] in starred[call[name[self]._compound_children][name[name]].inputs] begin[:]
variable[dest] assign[=] tuple[[<ast.Name object at 0x7da1b25e89a0>, <ast.Name object at 0x7da1b25ebb20>]]
for taget[tuple[[<ast.Name object at 0x7da1b25eaad0>, <ast.Name object at 0x7da1b25e9180>]]] in starred[call[name[self]._compound_linkages.items, parameter[]]] begin[:]
if call[name[isinstance], parameter[call[name[dests]][constant[0]], name[six].string_types]] begin[:]
variable[dests] assign[=] call[name[zip], parameter[call[name[dests]][<ast.Slice object at 0x7da1b25ebfd0>], call[name[dests]][<ast.Slice object at 0x7da1b25eb610>]]]
if compare[name[dest] in name[dests]] begin[:]
<ast.Yield object at 0x7da1b25e90f0> | keyword[def] identifier[input_connections] ( identifier[self] , identifier[name] ):
literal[string]
keyword[for] identifier[input_name] keyword[in] identifier[self] . identifier[_compound_children] [ identifier[name] ]. identifier[inputs] :
identifier[dest] = identifier[name] , identifier[input_name]
keyword[for] identifier[src] , identifier[dests] keyword[in] identifier[self] . identifier[_compound_linkages] . identifier[items] ():
keyword[if] identifier[isinstance] ( identifier[dests] [ literal[int] ], identifier[six] . identifier[string_types] ):
identifier[dests] = identifier[zip] ( identifier[dests] [ literal[int] :: literal[int] ], identifier[dests] [ literal[int] :: literal[int] ])
keyword[if] identifier[dest] keyword[in] identifier[dests] :
keyword[yield] identifier[src] , identifier[dest] | def input_connections(self, name):
"""Yield ordered list of connections to one child.
Each result is a ((component, output), (component, input)) tuple.
:param string name: the component whose input connections are
wanted.
"""
for input_name in self._compound_children[name].inputs:
dest = (name, input_name)
for (src, dests) in self._compound_linkages.items():
if isinstance(dests[0], six.string_types):
dests = zip(dests[0::2], dests[1::2]) # depends on [control=['if'], data=[]]
if dest in dests:
yield (src, dest) # depends on [control=['if'], data=['dest']] # depends on [control=['for'], data=[]] # depends on [control=['for'], data=['input_name']] |
def profile(self):
    """Measure of bandedness, also known as 'envelope size'."""
    # Index of the first nonzero entry along axis 0 of the dense boolean
    # matrix; np.argmax over booleans returns the first True position.
    leftmost_idx = np.argmax(self.matrix('dense').astype(bool), axis=0)
return (np.arange(self.num_vertices()) - leftmost_idx).sum() | def function[profile, parameter[self]]:
constant[Measure of bandedness, also known as 'envelope size'.]
variable[leftmost_idx] assign[=] call[name[np].argmax, parameter[call[call[name[self].matrix, parameter[constant[dense]]].astype, parameter[name[bool]]]]]
return[call[binary_operation[call[name[np].arange, parameter[call[name[self].num_vertices, parameter[]]]] - name[leftmost_idx]].sum, parameter[]]] | keyword[def] identifier[profile] ( identifier[self] ):
literal[string]
identifier[leftmost_idx] = identifier[np] . identifier[argmax] ( identifier[self] . identifier[matrix] ( literal[string] ). identifier[astype] ( identifier[bool] ), identifier[axis] = literal[int] )
keyword[return] ( identifier[np] . identifier[arange] ( identifier[self] . identifier[num_vertices] ())- identifier[leftmost_idx] ). identifier[sum] () | def profile(self):
"""Measure of bandedness, also known as 'envelope size'."""
leftmost_idx = np.argmax(self.matrix('dense').astype(bool), axis=0)
return (np.arange(self.num_vertices()) - leftmost_idx).sum() |
def embed_file(self,
               input_file: IO,
               output_file_path: str,
               output_format: str = "all",
               batch_size: int = DEFAULT_BATCH_SIZE,
               forget_sentences: bool = False,
               use_sentence_keys: bool = False) -> None:
    """
    Computes ELMo embeddings from an input_file where each line contains a sentence tokenized by whitespace.
    The ELMo embeddings are written out in HDF5 format, where each sentence embedding
    is saved in a dataset with the line number in the original file as the key.

    Parameters
    ----------
    input_file : ``IO``, required
        A file with one tokenized sentence per line.
    output_file_path : ``str``, required
        A path to the output hdf5 file.
    output_format : ``str``, optional, (default = "all")
        The embeddings to output. Must be one of "all", "top", or "average".
    batch_size : ``int``, optional, (default = 64)
        The number of sentences to process in ELMo at one time.
    forget_sentences : ``bool``, optional, (default = False).
        If use_sentence_keys is False, whether or not to include a string
        serialized JSON dictionary that associates sentences with their
        line number (its HDF5 key). The mapping is placed in the
        "sentence_to_index" HDF5 key. This is useful if
        you want to use the embeddings without keeping the original file
        of sentences around.
    use_sentence_keys : ``bool``, optional, (default = False).
        Whether or not to use full sentences as keys. By default,
        the line numbers of the input file are used as ids, which is more robust.
    """
    # Fail fast on unsupported output formats.
    assert output_format in ["all", "top", "average"]
    # Tokenizes the sentences.
    sentences = [line.strip() for line in input_file]
    # Empty lines would produce empty token lists; reject them up front.
    blank_lines = [i for (i, line) in enumerate(sentences) if line == ""]
    if blank_lines:
        raise ConfigurationError(f"Your input file contains empty lines at indexes "
                                 f"{blank_lines}. Please remove them.")
    split_sentences = [sentence.split() for sentence in sentences]
    # Uses the sentence index as the key.
    if use_sentence_keys:
        logger.warning("Using sentences as keys can fail if sentences "
                       "contain forward slashes or colons. Use with caution.")
        embedded_sentences = zip(sentences, self.embed_sentences(split_sentences, batch_size))
    else:
        # Lazy generator: keys are stringified line numbers.
        embedded_sentences = ((str(i), x) for i, x in
                              enumerate(self.embed_sentences(split_sentences, batch_size)))
    sentence_to_index = {}
    logger.info("Processing sentences.")
    with h5py.File(output_file_path, 'w') as fout:
        for key, embeddings in Tqdm.tqdm(embedded_sentences):
            # HDF5 keys must be unique; duplicate sentences collide when
            # sentences themselves are used as keys.
            if use_sentence_keys and key in fout.keys():
                raise ConfigurationError(f"Key already exists in {output_file_path}. "
                                         f"To encode duplicate sentences, do not pass "
                                         f"the --use-sentence-keys flag.")
            if not forget_sentences and not use_sentence_keys:
                sentence = sentences[int(key)]
                sentence_to_index[sentence] = key
            # Select which layer(s) of the embeddings to persist.
            if output_format == "all":
                output = embeddings
            elif output_format == "top":
                output = embeddings[-1]
            elif output_format == "average":
                output = numpy.average(embeddings, axis=0)
            fout.create_dataset(
                str(key),
                output.shape, dtype='float32',
                data=output
            )
        # Store the sentence -> line-number mapping as a single JSON blob
        # so the embeddings remain usable without the original file.
        if not forget_sentences and not use_sentence_keys:
            sentence_index_dataset = fout.create_dataset(
                "sentence_to_index",
                (1,),
                dtype=h5py.special_dtype(vlen=str))
            sentence_index_dataset[0] = json.dumps(sentence_to_index)
input_file.close() | def function[embed_file, parameter[self, input_file, output_file_path, output_format, batch_size, forget_sentences, use_sentence_keys]]:
constant[
Computes ELMo embeddings from an input_file where each line contains a sentence tokenized by whitespace.
The ELMo embeddings are written out in HDF5 format, where each sentence embedding
is saved in a dataset with the line number in the original file as the key.
Parameters
----------
input_file : ``IO``, required
A file with one tokenized sentence per line.
output_file_path : ``str``, required
A path to the output hdf5 file.
output_format : ``str``, optional, (default = "all")
The embeddings to output. Must be one of "all", "top", or "average".
batch_size : ``int``, optional, (default = 64)
The number of sentences to process in ELMo at one time.
forget_sentences : ``bool``, optional, (default = False).
If use_sentence_keys is False, whether or not to include a string
serialized JSON dictionary that associates sentences with their
line number (its HDF5 key). The mapping is placed in the
"sentence_to_index" HDF5 key. This is useful if
you want to use the embeddings without keeping the original file
of sentences around.
use_sentence_keys : ``bool``, optional, (default = False).
Whether or not to use full sentences as keys. By default,
the line numbers of the input file are used as ids, which is more robust.
]
assert[compare[name[output_format] in list[[<ast.Constant object at 0x7da20e955450>, <ast.Constant object at 0x7da20e954400>, <ast.Constant object at 0x7da20e954ac0>]]]]
variable[sentences] assign[=] <ast.ListComp object at 0x7da20e955990>
variable[blank_lines] assign[=] <ast.ListComp object at 0x7da20e9549d0>
if name[blank_lines] begin[:]
<ast.Raise object at 0x7da20e954a90>
variable[split_sentences] assign[=] <ast.ListComp object at 0x7da20e954910>
if name[use_sentence_keys] begin[:]
call[name[logger].warning, parameter[constant[Using sentences as keys can fail if sentences contain forward slashes or colons. Use with caution.]]]
variable[embedded_sentences] assign[=] call[name[zip], parameter[name[sentences], call[name[self].embed_sentences, parameter[name[split_sentences], name[batch_size]]]]]
variable[sentence_to_index] assign[=] dictionary[[], []]
call[name[logger].info, parameter[constant[Processing sentences.]]]
with call[name[h5py].File, parameter[name[output_file_path], constant[w]]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da2041db370>, <ast.Name object at 0x7da2041db2e0>]]] in starred[call[name[Tqdm].tqdm, parameter[name[embedded_sentences]]]] begin[:]
if <ast.BoolOp object at 0x7da2041dbfa0> begin[:]
<ast.Raise object at 0x7da2041db910>
if <ast.BoolOp object at 0x7da2041d92a0> begin[:]
variable[sentence] assign[=] call[name[sentences]][call[name[int], parameter[name[key]]]]
call[name[sentence_to_index]][name[sentence]] assign[=] name[key]
if compare[name[output_format] equal[==] constant[all]] begin[:]
variable[output] assign[=] name[embeddings]
call[name[fout].create_dataset, parameter[call[name[str], parameter[name[key]]], name[output].shape]]
if <ast.BoolOp object at 0x7da20c794af0> begin[:]
variable[sentence_index_dataset] assign[=] call[name[fout].create_dataset, parameter[constant[sentence_to_index], tuple[[<ast.Constant object at 0x7da20c7952d0>]]]]
call[name[sentence_index_dataset]][constant[0]] assign[=] call[name[json].dumps, parameter[name[sentence_to_index]]]
call[name[input_file].close, parameter[]] | keyword[def] identifier[embed_file] ( identifier[self] ,
identifier[input_file] : identifier[IO] ,
identifier[output_file_path] : identifier[str] ,
identifier[output_format] : identifier[str] = literal[string] ,
identifier[batch_size] : identifier[int] = identifier[DEFAULT_BATCH_SIZE] ,
identifier[forget_sentences] : identifier[bool] = keyword[False] ,
identifier[use_sentence_keys] : identifier[bool] = keyword[False] )-> keyword[None] :
literal[string]
keyword[assert] identifier[output_format] keyword[in] [ literal[string] , literal[string] , literal[string] ]
identifier[sentences] =[ identifier[line] . identifier[strip] () keyword[for] identifier[line] keyword[in] identifier[input_file] ]
identifier[blank_lines] =[ identifier[i] keyword[for] ( identifier[i] , identifier[line] ) keyword[in] identifier[enumerate] ( identifier[sentences] ) keyword[if] identifier[line] == literal[string] ]
keyword[if] identifier[blank_lines] :
keyword[raise] identifier[ConfigurationError] ( literal[string]
literal[string] )
identifier[split_sentences] =[ identifier[sentence] . identifier[split] () keyword[for] identifier[sentence] keyword[in] identifier[sentences] ]
keyword[if] identifier[use_sentence_keys] :
identifier[logger] . identifier[warning] ( literal[string]
literal[string] )
identifier[embedded_sentences] = identifier[zip] ( identifier[sentences] , identifier[self] . identifier[embed_sentences] ( identifier[split_sentences] , identifier[batch_size] ))
keyword[else] :
identifier[embedded_sentences] =(( identifier[str] ( identifier[i] ), identifier[x] ) keyword[for] identifier[i] , identifier[x] keyword[in]
identifier[enumerate] ( identifier[self] . identifier[embed_sentences] ( identifier[split_sentences] , identifier[batch_size] )))
identifier[sentence_to_index] ={}
identifier[logger] . identifier[info] ( literal[string] )
keyword[with] identifier[h5py] . identifier[File] ( identifier[output_file_path] , literal[string] ) keyword[as] identifier[fout] :
keyword[for] identifier[key] , identifier[embeddings] keyword[in] identifier[Tqdm] . identifier[tqdm] ( identifier[embedded_sentences] ):
keyword[if] identifier[use_sentence_keys] keyword[and] identifier[key] keyword[in] identifier[fout] . identifier[keys] ():
keyword[raise] identifier[ConfigurationError] ( literal[string]
literal[string]
literal[string] )
keyword[if] keyword[not] identifier[forget_sentences] keyword[and] keyword[not] identifier[use_sentence_keys] :
identifier[sentence] = identifier[sentences] [ identifier[int] ( identifier[key] )]
identifier[sentence_to_index] [ identifier[sentence] ]= identifier[key]
keyword[if] identifier[output_format] == literal[string] :
identifier[output] = identifier[embeddings]
keyword[elif] identifier[output_format] == literal[string] :
identifier[output] = identifier[embeddings] [- literal[int] ]
keyword[elif] identifier[output_format] == literal[string] :
identifier[output] = identifier[numpy] . identifier[average] ( identifier[embeddings] , identifier[axis] = literal[int] )
identifier[fout] . identifier[create_dataset] (
identifier[str] ( identifier[key] ),
identifier[output] . identifier[shape] , identifier[dtype] = literal[string] ,
identifier[data] = identifier[output]
)
keyword[if] keyword[not] identifier[forget_sentences] keyword[and] keyword[not] identifier[use_sentence_keys] :
identifier[sentence_index_dataset] = identifier[fout] . identifier[create_dataset] (
literal[string] ,
( literal[int] ,),
identifier[dtype] = identifier[h5py] . identifier[special_dtype] ( identifier[vlen] = identifier[str] ))
identifier[sentence_index_dataset] [ literal[int] ]= identifier[json] . identifier[dumps] ( identifier[sentence_to_index] )
identifier[input_file] . identifier[close] () | def embed_file(self, input_file: IO, output_file_path: str, output_format: str='all', batch_size: int=DEFAULT_BATCH_SIZE, forget_sentences: bool=False, use_sentence_keys: bool=False) -> None:
"""
Computes ELMo embeddings from an input_file where each line contains a sentence tokenized by whitespace.
The ELMo embeddings are written out in HDF5 format, where each sentence embedding
is saved in a dataset with the line number in the original file as the key.
Parameters
----------
input_file : ``IO``, required
A file with one tokenized sentence per line.
output_file_path : ``str``, required
A path to the output hdf5 file.
output_format : ``str``, optional, (default = "all")
The embeddings to output. Must be one of "all", "top", or "average".
batch_size : ``int``, optional, (default = 64)
The number of sentences to process in ELMo at one time.
forget_sentences : ``bool``, optional, (default = False).
If use_sentence_keys is False, whether or not to include a string
serialized JSON dictionary that associates sentences with their
line number (its HDF5 key). The mapping is placed in the
"sentence_to_index" HDF5 key. This is useful if
you want to use the embeddings without keeping the original file
of sentences around.
use_sentence_keys : ``bool``, optional, (default = False).
Whether or not to use full sentences as keys. By default,
the line numbers of the input file are used as ids, which is more robust.
"""
assert output_format in ['all', 'top', 'average']
# Tokenizes the sentences.
sentences = [line.strip() for line in input_file]
blank_lines = [i for (i, line) in enumerate(sentences) if line == '']
if blank_lines:
raise ConfigurationError(f'Your input file contains empty lines at indexes {blank_lines}. Please remove them.') # depends on [control=['if'], data=[]]
split_sentences = [sentence.split() for sentence in sentences]
# Uses the sentence index as the key.
if use_sentence_keys:
logger.warning('Using sentences as keys can fail if sentences contain forward slashes or colons. Use with caution.')
embedded_sentences = zip(sentences, self.embed_sentences(split_sentences, batch_size)) # depends on [control=['if'], data=[]]
else:
embedded_sentences = ((str(i), x) for (i, x) in enumerate(self.embed_sentences(split_sentences, batch_size)))
sentence_to_index = {}
logger.info('Processing sentences.')
with h5py.File(output_file_path, 'w') as fout:
for (key, embeddings) in Tqdm.tqdm(embedded_sentences):
if use_sentence_keys and key in fout.keys():
raise ConfigurationError(f'Key already exists in {output_file_path}. To encode duplicate sentences, do not pass the --use-sentence-keys flag.') # depends on [control=['if'], data=[]]
if not forget_sentences and (not use_sentence_keys):
sentence = sentences[int(key)]
sentence_to_index[sentence] = key # depends on [control=['if'], data=[]]
if output_format == 'all':
output = embeddings # depends on [control=['if'], data=[]]
elif output_format == 'top':
output = embeddings[-1] # depends on [control=['if'], data=[]]
elif output_format == 'average':
output = numpy.average(embeddings, axis=0) # depends on [control=['if'], data=[]]
fout.create_dataset(str(key), output.shape, dtype='float32', data=output) # depends on [control=['for'], data=[]]
if not forget_sentences and (not use_sentence_keys):
sentence_index_dataset = fout.create_dataset('sentence_to_index', (1,), dtype=h5py.special_dtype(vlen=str))
sentence_index_dataset[0] = json.dumps(sentence_to_index) # depends on [control=['if'], data=[]] # depends on [control=['with'], data=['fout']]
input_file.close() |
def version(self, value):
    """
    Save the Site's version from a string or version tuple

    @type value: tuple or str
    """
    # Accept both forms: tuples are serialized back into a version string
    # before being stored.
    if isinstance(value, tuple):
        value = unparse_version(value)
self._version = value | def function[version, parameter[self, value]]:
constant[
Save the Site's version from a string or version tuple
@type value: tuple or str
]
if call[name[isinstance], parameter[name[value], name[tuple]]] begin[:]
variable[value] assign[=] call[name[unparse_version], parameter[name[value]]]
name[self]._version assign[=] name[value] | keyword[def] identifier[version] ( identifier[self] , identifier[value] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[value] , identifier[tuple] ):
identifier[value] = identifier[unparse_version] ( identifier[value] )
identifier[self] . identifier[_version] = identifier[value] | def version(self, value):
"""
Save the Site's version from a string or version tuple
@type value: tuple or str
"""
if isinstance(value, tuple):
value = unparse_version(value) # depends on [control=['if'], data=[]]
self._version = value |
def prt_hier_rec(self, item_id, depth=1):
    """Recursively write the hierarchy for ``item_id`` down to the leaf level.

    Each item is printed through ``self.prt`` (optionally prefixed with a
    user-supplied mark and depth "dashes"), then the item's children are
    visited recursively.  ``self.items_printed`` and ``self.items_list``
    record everything that was emitted.

    Args:
        item_id: key into ``self.id2obj`` for the record to print.
        depth (int): current recursion depth; top-level callers use 1.
    """
    # Shortens hierarchy report by only printing the hierarchy
    # for the sub-set of user-specified GO terms which are connected.
    if self.include_only and item_id not in self.include_only:
        return
    obj = self.id2obj[item_id]
    # Optionally space the branches for readability: blank line before each
    # top-level item that has children.
    if self.space_branches:
        if depth == 1 and obj.children:
            self.prt.write("\n")
    # Print marks if provided (falls back to self.mark_dflt per item)
    if self.item_marks:
        self.prt.write('{MARK} '.format(
            MARK=self.item_marks.get(item_id, self.mark_dflt)))
    # In "concise" mode an already-printed item is shown again here, but its
    # sub-hierarchy is not repeated below (see the early return further down).
    no_repeat = self.concise_prt and item_id in self.items_printed
    # Print content
    dashes = self._str_dash(depth, no_repeat, obj)
    if self.do_prtfmt:
        self._prtfmt(item_id, dashes)
    else:
        self._prtstr(obj, dashes)
    self.items_printed.add(item_id)
    self.items_list.append(item_id)
    # Do not print hierarchy below this turn if it has already been printed
    if no_repeat:
        return
    depth += 1
    # Stop descending once the user-specified maximum indent level is reached.
    if self.max_indent is not None and depth > self.max_indent:
        return
    # Children are visited in stored order unless a sort key was supplied.
    children = obj.children if self.sortby is None else sorted(obj.children, key=self.sortby)
    for child in children:
        self.prt_hier_rec(child.item_id, depth)
constant[Write hierarchy for a GO Term record and all GO IDs down to the leaf level.]
if <ast.BoolOp object at 0x7da20c6a8df0> begin[:]
return[None]
variable[obj] assign[=] call[name[self].id2obj][name[item_id]]
if name[self].space_branches begin[:]
if <ast.BoolOp object at 0x7da20c6a8310> begin[:]
call[name[self].prt.write, parameter[constant[
]]]
if name[self].item_marks begin[:]
call[name[self].prt.write, parameter[call[constant[{MARK} ].format, parameter[]]]]
variable[no_repeat] assign[=] <ast.BoolOp object at 0x7da20c6a8910>
variable[dashes] assign[=] call[name[self]._str_dash, parameter[name[depth], name[no_repeat], name[obj]]]
if name[self].do_prtfmt begin[:]
call[name[self]._prtfmt, parameter[name[item_id], name[dashes]]]
call[name[self].items_printed.add, parameter[name[item_id]]]
call[name[self].items_list.append, parameter[name[item_id]]]
if name[no_repeat] begin[:]
return[None]
<ast.AugAssign object at 0x7da20c6ab280>
if <ast.BoolOp object at 0x7da20c6a9000> begin[:]
return[None]
variable[children] assign[=] <ast.IfExp object at 0x7da20c6aa230>
for taget[name[child]] in starred[name[children]] begin[:]
call[name[self].prt_hier_rec, parameter[name[child].item_id, name[depth]]] | keyword[def] identifier[prt_hier_rec] ( identifier[self] , identifier[item_id] , identifier[depth] = literal[int] ):
literal[string]
keyword[if] identifier[self] . identifier[include_only] keyword[and] identifier[item_id] keyword[not] keyword[in] identifier[self] . identifier[include_only] :
keyword[return]
identifier[obj] = identifier[self] . identifier[id2obj] [ identifier[item_id] ]
keyword[if] identifier[self] . identifier[space_branches] :
keyword[if] identifier[depth] == literal[int] keyword[and] identifier[obj] . identifier[children] :
identifier[self] . identifier[prt] . identifier[write] ( literal[string] )
keyword[if] identifier[self] . identifier[item_marks] :
identifier[self] . identifier[prt] . identifier[write] ( literal[string] . identifier[format] (
identifier[MARK] = identifier[self] . identifier[item_marks] . identifier[get] ( identifier[item_id] , identifier[self] . identifier[mark_dflt] )))
identifier[no_repeat] = identifier[self] . identifier[concise_prt] keyword[and] identifier[item_id] keyword[in] identifier[self] . identifier[items_printed]
identifier[dashes] = identifier[self] . identifier[_str_dash] ( identifier[depth] , identifier[no_repeat] , identifier[obj] )
keyword[if] identifier[self] . identifier[do_prtfmt] :
identifier[self] . identifier[_prtfmt] ( identifier[item_id] , identifier[dashes] )
keyword[else] :
identifier[self] . identifier[_prtstr] ( identifier[obj] , identifier[dashes] )
identifier[self] . identifier[items_printed] . identifier[add] ( identifier[item_id] )
identifier[self] . identifier[items_list] . identifier[append] ( identifier[item_id] )
keyword[if] identifier[no_repeat] :
keyword[return]
identifier[depth] += literal[int]
keyword[if] identifier[self] . identifier[max_indent] keyword[is] keyword[not] keyword[None] keyword[and] identifier[depth] > identifier[self] . identifier[max_indent] :
keyword[return]
identifier[children] = identifier[obj] . identifier[children] keyword[if] identifier[self] . identifier[sortby] keyword[is] keyword[None] keyword[else] identifier[sorted] ( identifier[obj] . identifier[children] , identifier[key] = identifier[self] . identifier[sortby] )
keyword[for] identifier[child] keyword[in] identifier[children] :
identifier[self] . identifier[prt_hier_rec] ( identifier[child] . identifier[item_id] , identifier[depth] ) | def prt_hier_rec(self, item_id, depth=1):
"""Write hierarchy for a GO Term record and all GO IDs down to the leaf level."""
# Shortens hierarchy report by only printing the hierarchy
# for the sub-set of user-specified GO terms which are connected.
if self.include_only and item_id not in self.include_only:
return # depends on [control=['if'], data=[]]
obj = self.id2obj[item_id]
# Optionally space the branches for readability
if self.space_branches:
if depth == 1 and obj.children:
self.prt.write('\n') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
# Print marks if provided
if self.item_marks:
self.prt.write('{MARK} '.format(MARK=self.item_marks.get(item_id, self.mark_dflt))) # depends on [control=['if'], data=[]]
no_repeat = self.concise_prt and item_id in self.items_printed
# Print content
dashes = self._str_dash(depth, no_repeat, obj)
if self.do_prtfmt:
self._prtfmt(item_id, dashes) # depends on [control=['if'], data=[]]
else:
self._prtstr(obj, dashes)
self.items_printed.add(item_id)
self.items_list.append(item_id)
# Do not print hierarchy below this turn if it has already been printed
if no_repeat:
return # depends on [control=['if'], data=[]]
depth += 1
if self.max_indent is not None and depth > self.max_indent:
return # depends on [control=['if'], data=[]]
children = obj.children if self.sortby is None else sorted(obj.children, key=self.sortby)
for child in children:
self.prt_hier_rec(child.item_id, depth) # depends on [control=['for'], data=['child']] |
def scan(self, scan_filter=None,
         attributes_to_get=None, request_limit=None, max_results=None,
         count=False, exclusive_start_key=None, item_class=Item):
    """
    Scan through this table, this is a very long
    and expensive operation, and should be avoided if
    at all possible.
    :type scan_filter: A list of tuples
    :param scan_filter: A list of tuples where each tuple consists
        of an attribute name, a comparison operator, and either
        a scalar or tuple consisting of the values to compare
        the attribute to.  Valid comparison operators are shown below
        along with the expected number of values that should be supplied.
         * EQ - equal (1)
         * NE - not equal (1)
         * LE - less than or equal (1)
         * LT - less than (1)
         * GE - greater than or equal (1)
         * GT - greater than (1)
         * NOT_NULL - attribute exists (0, use None)
         * NULL - attribute does not exist (0, use None)
         * CONTAINS - substring or value in list (1)
         * NOT_CONTAINS - absence of substring or value in list (1)
         * BEGINS_WITH - substring prefix (1)
         * IN - exact match in list (N)
         * BETWEEN - >= first value, <= second value (2)
    :type attributes_to_get: list
    :param attributes_to_get: A list of attribute names.
        If supplied, only the specified attribute names will
        be returned.  Otherwise, all attributes will be returned.
    :type request_limit: int
    :param request_limit: The maximum number of items to retrieve
        from Amazon DynamoDB on each request.  You may want to set
        a specific request_limit based on the provisioned throughput
        of your table.  The default behavior is to retrieve as many
        results as possible per request.
    :type max_results: int
    :param max_results: The maximum number of results that will
        be retrieved from Amazon DynamoDB in total.  For example,
        if you only wanted to see the first 100 results from the
        query, regardless of how many were actually available, you
        could set max_results to 100 and the generator returned
        from the query method will only yield 100 results max.
    :type count: bool
    :param count: If True, Amazon DynamoDB returns a total
        number of items for the Scan operation, even if the
        operation has no matching items for the assigned filter.
    :type exclusive_start_key: list or tuple
    :param exclusive_start_key: Primary key of the item from
        which to continue an earlier query.  This would be
        provided as the LastEvaluatedKey in that query.
    :type item_class: Class
    :param item_class: Allows you to override the class used
        to generate the items. This should be a subclass of
        :class:`boto.dynamodb.item.Item`
    :rtype: generator
    """
    # NOTE(review): ``count`` is accepted and documented above but is never
    # forwarded to ``layer2.scan`` -- confirm whether it should be passed on.
    return self.layer2.scan(self, scan_filter, attributes_to_get,
                            request_limit, max_results,
                            exclusive_start_key, item_class=item_class)
constant[
Scan through this table, this is a very long
and expensive operation, and should be avoided if
at all possible.
:type scan_filter: A list of tuples
:param scan_filter: A list of tuples where each tuple consists
of an attribute name, a comparison operator, and either
a scalar or tuple consisting of the values to compare
the attribute to. Valid comparison operators are shown below
along with the expected number of values that should be supplied.
* EQ - equal (1)
* NE - not equal (1)
* LE - less than or equal (1)
* LT - less than (1)
* GE - greater than or equal (1)
* GT - greater than (1)
* NOT_NULL - attribute exists (0, use None)
* NULL - attribute does not exist (0, use None)
* CONTAINS - substring or value in list (1)
* NOT_CONTAINS - absence of substring or value in list (1)
* BEGINS_WITH - substring prefix (1)
* IN - exact match in list (N)
* BETWEEN - >= first value, <= second value (2)
:type attributes_to_get: list
:param attributes_to_get: A list of attribute names.
If supplied, only the specified attribute names will
be returned. Otherwise, all attributes will be returned.
:type request_limit: int
:param request_limit: The maximum number of items to retrieve
from Amazon DynamoDB on each request. You may want to set
a specific request_limit based on the provisioned throughput
of your table. The default behavior is to retrieve as many
results as possible per request.
:type max_results: int
:param max_results: The maximum number of results that will
be retrieved from Amazon DynamoDB in total. For example,
if you only wanted to see the first 100 results from the
query, regardless of how many were actually available, you
could set max_results to 100 and the generator returned
from the query method will only yeild 100 results max.
:type count: bool
:param count: If True, Amazon DynamoDB returns a total
number of items for the Scan operation, even if the
operation has no matching items for the assigned filter.
:type exclusive_start_key: list or tuple
:param exclusive_start_key: Primary key of the item from
which to continue an earlier query. This would be
provided as the LastEvaluatedKey in that query.
:type item_class: Class
:param item_class: Allows you to override the class used
to generate the items. This should be a subclass of
:class:`boto.dynamodb.item.Item`
:rtype: generator
]
return[call[name[self].layer2.scan, parameter[name[self], name[scan_filter], name[attributes_to_get], name[request_limit], name[max_results], name[exclusive_start_key]]]] | keyword[def] identifier[scan] ( identifier[self] , identifier[scan_filter] = keyword[None] ,
identifier[attributes_to_get] = keyword[None] , identifier[request_limit] = keyword[None] , identifier[max_results] = keyword[None] ,
identifier[count] = keyword[False] , identifier[exclusive_start_key] = keyword[None] , identifier[item_class] = identifier[Item] ):
literal[string]
keyword[return] identifier[self] . identifier[layer2] . identifier[scan] ( identifier[self] , identifier[scan_filter] , identifier[attributes_to_get] ,
identifier[request_limit] , identifier[max_results] ,
identifier[exclusive_start_key] , identifier[item_class] = identifier[item_class] ) | def scan(self, scan_filter=None, attributes_to_get=None, request_limit=None, max_results=None, count=False, exclusive_start_key=None, item_class=Item):
"""
Scan through this table, this is a very long
and expensive operation, and should be avoided if
at all possible.
:type scan_filter: A list of tuples
:param scan_filter: A list of tuples where each tuple consists
of an attribute name, a comparison operator, and either
a scalar or tuple consisting of the values to compare
the attribute to. Valid comparison operators are shown below
along with the expected number of values that should be supplied.
* EQ - equal (1)
* NE - not equal (1)
* LE - less than or equal (1)
* LT - less than (1)
* GE - greater than or equal (1)
* GT - greater than (1)
* NOT_NULL - attribute exists (0, use None)
* NULL - attribute does not exist (0, use None)
* CONTAINS - substring or value in list (1)
* NOT_CONTAINS - absence of substring or value in list (1)
* BEGINS_WITH - substring prefix (1)
* IN - exact match in list (N)
* BETWEEN - >= first value, <= second value (2)
:type attributes_to_get: list
:param attributes_to_get: A list of attribute names.
If supplied, only the specified attribute names will
be returned. Otherwise, all attributes will be returned.
:type request_limit: int
:param request_limit: The maximum number of items to retrieve
from Amazon DynamoDB on each request. You may want to set
a specific request_limit based on the provisioned throughput
of your table. The default behavior is to retrieve as many
results as possible per request.
:type max_results: int
:param max_results: The maximum number of results that will
be retrieved from Amazon DynamoDB in total. For example,
if you only wanted to see the first 100 results from the
query, regardless of how many were actually available, you
could set max_results to 100 and the generator returned
from the query method will only yeild 100 results max.
:type count: bool
:param count: If True, Amazon DynamoDB returns a total
number of items for the Scan operation, even if the
operation has no matching items for the assigned filter.
:type exclusive_start_key: list or tuple
:param exclusive_start_key: Primary key of the item from
which to continue an earlier query. This would be
provided as the LastEvaluatedKey in that query.
:type item_class: Class
:param item_class: Allows you to override the class used
to generate the items. This should be a subclass of
:class:`boto.dynamodb.item.Item`
:rtype: generator
"""
return self.layer2.scan(self, scan_filter, attributes_to_get, request_limit, max_results, exclusive_start_key, item_class=item_class) |
def listMigrationBlocks(self, migration_request_id=""):
    """
    get eveything of block that is has status = 0 and migration_request_id as specified.
    """
    # Acquire a connection and make sure it is closed even if the query raises.
    conn = self.dbi.connection()
    try:
        result = self.mgrblklist.execute(
            conn, migration_request_id=migration_request_id)
    finally:
        if conn:
            conn.close()
    return result
constant[
get eveything of block that is has status = 0 and migration_request_id as specified.
]
variable[conn] assign[=] call[name[self].dbi.connection, parameter[]]
<ast.Try object at 0x7da1b0fda770> | keyword[def] identifier[listMigrationBlocks] ( identifier[self] , identifier[migration_request_id] = literal[string] ):
literal[string]
identifier[conn] = identifier[self] . identifier[dbi] . identifier[connection] ()
keyword[try] :
keyword[return] identifier[self] . identifier[mgrblklist] . identifier[execute] ( identifier[conn] , identifier[migration_request_id] = identifier[migration_request_id] )
keyword[finally] :
keyword[if] identifier[conn] : identifier[conn] . identifier[close] () | def listMigrationBlocks(self, migration_request_id=''):
"""
get eveything of block that is has status = 0 and migration_request_id as specified.
"""
conn = self.dbi.connection()
try:
return self.mgrblklist.execute(conn, migration_request_id=migration_request_id) # depends on [control=['try'], data=[]]
finally:
if conn:
conn.close() # depends on [control=['if'], data=[]] |
def __posix_to_local_path(path, local_path_module=os.path):
    """
    Convert a posix-style path (as sent by Galaxy) into a path for the local
    platform, be it posix or Windows.
    >>> import ntpath
    >>> __posix_to_local_path('dataset_1_files/moo/cow', local_path_module=ntpath)
    'dataset_1_files\\\\moo\\\\cow'
    >>> import posixpath
    >>> __posix_to_local_path('dataset_1_files/moo/cow', local_path_module=posixpath)
    'dataset_1_files/moo/cow'
    """
    # Peel components off the right end until nothing (or only the root)
    # remains, then rejoin them with the local platform's separator.
    components = deque()
    while path and path != '/':
        path, tail = posixpath.split(path)
        components.appendleft(tail)
    return local_path_module.join(*components)
constant[
Converts a posix path (coming from Galaxy), to a local path (be it posix or Windows).
>>> import ntpath
>>> __posix_to_local_path('dataset_1_files/moo/cow', local_path_module=ntpath)
'dataset_1_files\\moo\\cow'
>>> import posixpath
>>> __posix_to_local_path('dataset_1_files/moo/cow', local_path_module=posixpath)
'dataset_1_files/moo/cow'
]
variable[partial_path] assign[=] call[name[deque], parameter[]]
while constant[True] begin[:]
if <ast.BoolOp object at 0x7da1b05bd4b0> begin[:]
break
<ast.Tuple object at 0x7da1b05bd630> assign[=] call[name[posixpath].split, parameter[name[path]]]
call[name[partial_path].appendleft, parameter[name[base]]]
return[call[name[local_path_module].join, parameter[<ast.Starred object at 0x7da1b05bc2b0>]]] | keyword[def] identifier[__posix_to_local_path] ( identifier[path] , identifier[local_path_module] = identifier[os] . identifier[path] ):
literal[string]
identifier[partial_path] = identifier[deque] ()
keyword[while] keyword[True] :
keyword[if] keyword[not] identifier[path] keyword[or] identifier[path] == literal[string] :
keyword[break]
( identifier[path] , identifier[base] )= identifier[posixpath] . identifier[split] ( identifier[path] )
identifier[partial_path] . identifier[appendleft] ( identifier[base] )
keyword[return] identifier[local_path_module] . identifier[join] (* identifier[partial_path] ) | def __posix_to_local_path(path, local_path_module=os.path):
"""
Converts a posix path (coming from Galaxy), to a local path (be it posix or Windows).
>>> import ntpath
>>> __posix_to_local_path('dataset_1_files/moo/cow', local_path_module=ntpath)
'dataset_1_files\\\\moo\\\\cow'
>>> import posixpath
>>> __posix_to_local_path('dataset_1_files/moo/cow', local_path_module=posixpath)
'dataset_1_files/moo/cow'
"""
partial_path = deque()
while True:
if not path or path == '/':
break # depends on [control=['if'], data=[]]
(path, base) = posixpath.split(path)
partial_path.appendleft(base) # depends on [control=['while'], data=[]]
return local_path_module.join(*partial_path) |
def welch(timeseries, segmentlength, noverlap=None, **kwargs):
    """Calculate a PSD of this `TimeSeries` using Welch's method.
    """
    # Run the averaged-periodogram estimate over the raw samples.
    sample_rate = timeseries.sample_rate.decompose().value
    freqs, psd_ = scipy.signal.welch(
        timeseries.value,
        fs=sample_rate,
        nperseg=segmentlength,
        noverlap=noverlap,
        **kwargs
    )
    # Derive the output unit from the input unit and the scaling mode
    # ('density' unless overridden via kwargs), then wrap the result.
    scaling = kwargs.get('scaling', 'density')
    unit = scale_timeseries_unit(timeseries.unit, scaling)
    return FrequencySeries(
        psd_,
        unit=unit,
        frequencies=freqs,
        name=timeseries.name,
        epoch=timeseries.epoch,
        channel=timeseries.channel,
    )
constant[Calculate a PSD of this `TimeSeries` using Welch's method.
]
<ast.Tuple object at 0x7da18ede5030> assign[=] call[name[scipy].signal.welch, parameter[name[timeseries].value]]
variable[unit] assign[=] call[name[scale_timeseries_unit], parameter[name[timeseries].unit, call[name[kwargs].get, parameter[constant[scaling], constant[density]]]]]
return[call[name[FrequencySeries], parameter[name[psd_]]]] | keyword[def] identifier[welch] ( identifier[timeseries] , identifier[segmentlength] , identifier[noverlap] = keyword[None] ,** identifier[kwargs] ):
literal[string]
identifier[freqs] , identifier[psd_] = identifier[scipy] . identifier[signal] . identifier[welch] (
identifier[timeseries] . identifier[value] ,
identifier[noverlap] = identifier[noverlap] ,
identifier[fs] = identifier[timeseries] . identifier[sample_rate] . identifier[decompose] (). identifier[value] ,
identifier[nperseg] = identifier[segmentlength] ,
** identifier[kwargs]
)
identifier[unit] = identifier[scale_timeseries_unit] (
identifier[timeseries] . identifier[unit] ,
identifier[kwargs] . identifier[get] ( literal[string] , literal[string] ),
)
keyword[return] identifier[FrequencySeries] (
identifier[psd_] ,
identifier[unit] = identifier[unit] ,
identifier[frequencies] = identifier[freqs] ,
identifier[name] = identifier[timeseries] . identifier[name] ,
identifier[epoch] = identifier[timeseries] . identifier[epoch] ,
identifier[channel] = identifier[timeseries] . identifier[channel] ,
) | def welch(timeseries, segmentlength, noverlap=None, **kwargs):
"""Calculate a PSD of this `TimeSeries` using Welch's method.
"""
# calculate PSD
(freqs, psd_) = scipy.signal.welch(timeseries.value, noverlap=noverlap, fs=timeseries.sample_rate.decompose().value, nperseg=segmentlength, **kwargs)
# generate FrequencySeries and return
unit = scale_timeseries_unit(timeseries.unit, kwargs.get('scaling', 'density'))
return FrequencySeries(psd_, unit=unit, frequencies=freqs, name=timeseries.name, epoch=timeseries.epoch, channel=timeseries.channel) |
def _get_configured_repos(root=None):
    '''
    Get all the info about repositories from the configurations.
    '''
    # Re-root the repos directory when an alternative root was requested.
    if root:
        repos_dir = os.path.join(root, os.path.relpath(REPOS, os.path.sep))
    else:
        repos_dir = REPOS
    parser = configparser.ConfigParser()
    if not os.path.exists(repos_dir):
        log.warning('Repositories not found in %s', repos_dir)
        return parser
    # Only *.repo files in the directory are treated as repo configurations.
    repo_files = [repos_dir + '/' + fname
                  for fname in os.listdir(repos_dir)
                  if fname.endswith(".repo")]
    parser.read(repo_files)
    return parser
constant[
Get all the info about repositories from the configurations.
]
variable[repos] assign[=] <ast.IfExp object at 0x7da1b1ca75b0>
variable[repos_cfg] assign[=] call[name[configparser].ConfigParser, parameter[]]
if call[name[os].path.exists, parameter[name[repos]]] begin[:]
call[name[repos_cfg].read, parameter[<ast.ListComp object at 0x7da1b1ca4550>]]
return[name[repos_cfg]] | keyword[def] identifier[_get_configured_repos] ( identifier[root] = keyword[None] ):
literal[string]
identifier[repos] = identifier[os] . identifier[path] . identifier[join] ( identifier[root] , identifier[os] . identifier[path] . identifier[relpath] ( identifier[REPOS] , identifier[os] . identifier[path] . identifier[sep] )) keyword[if] identifier[root] keyword[else] identifier[REPOS]
identifier[repos_cfg] = identifier[configparser] . identifier[ConfigParser] ()
keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[repos] ):
identifier[repos_cfg] . identifier[read] ([ identifier[repos] + literal[string] + identifier[fname] keyword[for] identifier[fname] keyword[in] identifier[os] . identifier[listdir] ( identifier[repos] ) keyword[if] identifier[fname] . identifier[endswith] ( literal[string] )])
keyword[else] :
identifier[log] . identifier[warning] ( literal[string] , identifier[repos] )
keyword[return] identifier[repos_cfg] | def _get_configured_repos(root=None):
"""
Get all the info about repositories from the configurations.
"""
repos = os.path.join(root, os.path.relpath(REPOS, os.path.sep)) if root else REPOS
repos_cfg = configparser.ConfigParser()
if os.path.exists(repos):
repos_cfg.read([repos + '/' + fname for fname in os.listdir(repos) if fname.endswith('.repo')]) # depends on [control=['if'], data=[]]
else:
log.warning('Repositories not found in %s', repos)
return repos_cfg |
def update_path_labels(self):
    """Refresh the two labels that display the configuration file paths.
    """
    core_path = str(self.core_config_model.config.config_file_path)
    gui_path = str(self.gui_config_model.config.config_file_path)
    self.view['core_label'].set_text("Core Config Path: " + core_path)
    self.view['gui_label'].set_text("GUI Config Path: " + gui_path)
constant[Update labels showing config paths
]
call[call[name[self].view][constant[core_label]].set_text, parameter[binary_operation[constant[Core Config Path: ] + call[name[str], parameter[name[self].core_config_model.config.config_file_path]]]]]
call[call[name[self].view][constant[gui_label]].set_text, parameter[binary_operation[constant[GUI Config Path: ] + call[name[str], parameter[name[self].gui_config_model.config.config_file_path]]]]] | keyword[def] identifier[update_path_labels] ( identifier[self] ):
literal[string]
identifier[self] . identifier[view] [ literal[string] ]. identifier[set_text] ( literal[string] + identifier[str] ( identifier[self] . identifier[core_config_model] . identifier[config] . identifier[config_file_path] ))
identifier[self] . identifier[view] [ literal[string] ]. identifier[set_text] ( literal[string] + identifier[str] ( identifier[self] . identifier[gui_config_model] . identifier[config] . identifier[config_file_path] )) | def update_path_labels(self):
"""Update labels showing config paths
"""
self.view['core_label'].set_text('Core Config Path: ' + str(self.core_config_model.config.config_file_path))
self.view['gui_label'].set_text('GUI Config Path: ' + str(self.gui_config_model.config.config_file_path)) |
def targets_w_bins(cnv_file, access_file, target_anti_fn, work_dir, data):
    """Calculate target and anti-target files with pre-determined bins.

    Runs ``cnvkit.py target`` and ``cnvkit.py antitarget``, using the average
    bin sizes supplied by ``target_anti_fn`` (a callable returning the pair
    ``(target_bin, anti_bin)``).  Outputs are cached: each step is skipped
    when its BED file already exists in ``work_dir``.

    Returns the paths to the ``<sample>-target.bed`` and
    ``<sample>-antitarget.bed`` files.
    """
    target_file = os.path.join(work_dir, "%s-target.bed" % dd.get_sample_name(data))
    anti_file = os.path.join(work_dir, "%s-antitarget.bed" % dd.get_sample_name(data))
    if not utils.file_exists(target_file):
        # Only the target bin size is needed here; anti size is ignored.
        target_bin, _ = target_anti_fn()
        with file_transaction(data, target_file) as tx_out_file:
            cmd = [_get_cmd(), "target", cnv_file, "--split", "-o", tx_out_file,
                   "--avg-size", str(target_bin)]
            do.run(_prep_cmd(cmd, tx_out_file), "CNVkit target")
    if not os.path.exists(anti_file):
        _, anti_bin = target_anti_fn()
        with file_transaction(data, anti_file) as tx_out_file:
            # Create access file without targets to avoid overlap
            # antitarget in cnvkit is meant to do this but appears to not always happen
            # after chromosome 1
            tx_access_file = os.path.join(os.path.dirname(tx_out_file), os.path.basename(access_file))
            pybedtools.BedTool(access_file).subtract(cnv_file).saveas(tx_access_file)
            cmd = [_get_cmd(), "antitarget", "-g", tx_access_file, cnv_file, "-o", tx_out_file,
                   "--avg-size", str(anti_bin)]
            do.run(_prep_cmd(cmd, tx_out_file), "CNVkit antitarget")
    return target_file, anti_file
constant[Calculate target and anti-target files with pre-determined bins.
]
variable[target_file] assign[=] call[name[os].path.join, parameter[name[work_dir], binary_operation[constant[%s-target.bed] <ast.Mod object at 0x7da2590d6920> call[name[dd].get_sample_name, parameter[name[data]]]]]]
variable[anti_file] assign[=] call[name[os].path.join, parameter[name[work_dir], binary_operation[constant[%s-antitarget.bed] <ast.Mod object at 0x7da2590d6920> call[name[dd].get_sample_name, parameter[name[data]]]]]]
if <ast.UnaryOp object at 0x7da1b170a740> begin[:]
<ast.Tuple object at 0x7da1b170aad0> assign[=] call[name[target_anti_fn], parameter[]]
with call[name[file_transaction], parameter[name[data], name[target_file]]] begin[:]
variable[cmd] assign[=] list[[<ast.Call object at 0x7da1b1709d50>, <ast.Constant object at 0x7da1b1708dc0>, <ast.Name object at 0x7da1b1709300>, <ast.Constant object at 0x7da1b1708340>, <ast.Constant object at 0x7da1b170aec0>, <ast.Name object at 0x7da1b170ad40>, <ast.Constant object at 0x7da1b170bd60>, <ast.Call object at 0x7da1b170a950>]]
call[name[do].run, parameter[call[name[_prep_cmd], parameter[name[cmd], name[tx_out_file]]], constant[CNVkit target]]]
if <ast.UnaryOp object at 0x7da1b170a500> begin[:]
<ast.Tuple object at 0x7da1b17094b0> assign[=] call[name[target_anti_fn], parameter[]]
with call[name[file_transaction], parameter[name[data], name[anti_file]]] begin[:]
variable[tx_access_file] assign[=] call[name[os].path.join, parameter[call[name[os].path.dirname, parameter[name[tx_out_file]]], call[name[os].path.basename, parameter[name[access_file]]]]]
call[call[call[name[pybedtools].BedTool, parameter[name[access_file]]].subtract, parameter[name[cnv_file]]].saveas, parameter[name[tx_access_file]]]
variable[cmd] assign[=] list[[<ast.Call object at 0x7da1b1708520>, <ast.Constant object at 0x7da1b1708730>, <ast.Constant object at 0x7da1b1708610>, <ast.Name object at 0x7da1b17086a0>, <ast.Name object at 0x7da1b1708640>, <ast.Constant object at 0x7da1b1708670>, <ast.Name object at 0x7da1b1708970>, <ast.Constant object at 0x7da1b1708040>, <ast.Call object at 0x7da1b17092d0>]]
call[name[do].run, parameter[call[name[_prep_cmd], parameter[name[cmd], name[tx_out_file]]], constant[CNVkit antitarget]]]
return[tuple[[<ast.Name object at 0x7da1b19b88b0>, <ast.Name object at 0x7da1b19bbdc0>]]] | keyword[def] identifier[targets_w_bins] ( identifier[cnv_file] , identifier[access_file] , identifier[target_anti_fn] , identifier[work_dir] , identifier[data] ):
literal[string]
identifier[target_file] = identifier[os] . identifier[path] . identifier[join] ( identifier[work_dir] , literal[string] % identifier[dd] . identifier[get_sample_name] ( identifier[data] ))
identifier[anti_file] = identifier[os] . identifier[path] . identifier[join] ( identifier[work_dir] , literal[string] % identifier[dd] . identifier[get_sample_name] ( identifier[data] ))
keyword[if] keyword[not] identifier[utils] . identifier[file_exists] ( identifier[target_file] ):
identifier[target_bin] , identifier[_] = identifier[target_anti_fn] ()
keyword[with] identifier[file_transaction] ( identifier[data] , identifier[target_file] ) keyword[as] identifier[tx_out_file] :
identifier[cmd] =[ identifier[_get_cmd] (), literal[string] , identifier[cnv_file] , literal[string] , literal[string] , identifier[tx_out_file] ,
literal[string] , identifier[str] ( identifier[target_bin] )]
identifier[do] . identifier[run] ( identifier[_prep_cmd] ( identifier[cmd] , identifier[tx_out_file] ), literal[string] )
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[anti_file] ):
identifier[_] , identifier[anti_bin] = identifier[target_anti_fn] ()
keyword[with] identifier[file_transaction] ( identifier[data] , identifier[anti_file] ) keyword[as] identifier[tx_out_file] :
identifier[tx_access_file] = identifier[os] . identifier[path] . identifier[join] ( identifier[os] . identifier[path] . identifier[dirname] ( identifier[tx_out_file] ), identifier[os] . identifier[path] . identifier[basename] ( identifier[access_file] ))
identifier[pybedtools] . identifier[BedTool] ( identifier[access_file] ). identifier[subtract] ( identifier[cnv_file] ). identifier[saveas] ( identifier[tx_access_file] )
identifier[cmd] =[ identifier[_get_cmd] (), literal[string] , literal[string] , identifier[tx_access_file] , identifier[cnv_file] , literal[string] , identifier[tx_out_file] ,
literal[string] , identifier[str] ( identifier[anti_bin] )]
identifier[do] . identifier[run] ( identifier[_prep_cmd] ( identifier[cmd] , identifier[tx_out_file] ), literal[string] )
keyword[return] identifier[target_file] , identifier[anti_file] | def targets_w_bins(cnv_file, access_file, target_anti_fn, work_dir, data):
"""Calculate target and anti-target files with pre-determined bins.
"""
target_file = os.path.join(work_dir, '%s-target.bed' % dd.get_sample_name(data))
anti_file = os.path.join(work_dir, '%s-antitarget.bed' % dd.get_sample_name(data))
if not utils.file_exists(target_file):
(target_bin, _) = target_anti_fn()
with file_transaction(data, target_file) as tx_out_file:
cmd = [_get_cmd(), 'target', cnv_file, '--split', '-o', tx_out_file, '--avg-size', str(target_bin)]
do.run(_prep_cmd(cmd, tx_out_file), 'CNVkit target') # depends on [control=['with'], data=['tx_out_file']] # depends on [control=['if'], data=[]]
if not os.path.exists(anti_file):
(_, anti_bin) = target_anti_fn()
with file_transaction(data, anti_file) as tx_out_file:
# Create access file without targets to avoid overlap
# antitarget in cnvkit is meant to do this but appears to not always happen
# after chromosome 1
tx_access_file = os.path.join(os.path.dirname(tx_out_file), os.path.basename(access_file))
pybedtools.BedTool(access_file).subtract(cnv_file).saveas(tx_access_file)
cmd = [_get_cmd(), 'antitarget', '-g', tx_access_file, cnv_file, '-o', tx_out_file, '--avg-size', str(anti_bin)]
do.run(_prep_cmd(cmd, tx_out_file), 'CNVkit antitarget') # depends on [control=['with'], data=['tx_out_file']] # depends on [control=['if'], data=[]]
return (target_file, anti_file) |
def cached(function):
"""Method decorator caching a method's returned values."""
cache_variable = '_cached_' + function.__name__
@wraps(function)
def function_wrapper(obj, *args, **kwargs):
# values are cached in a dict stored in the object
try:
cache = getattr(obj, cache_variable)
except AttributeError:
cache = {}
setattr(obj, cache_variable, cache)
args_kwargs = args + tuple(kwargs.values())
try:
return cache[args_kwargs]
except KeyError:
cache_value = function(obj, *args, **kwargs)
cache[args_kwargs] = cache_value
return cache_value
return function_wrapper | def function[cached, parameter[function]]:
constant[Method decorator caching a method's returned values.]
variable[cache_variable] assign[=] binary_operation[constant[_cached_] + name[function].__name__]
def function[function_wrapper, parameter[obj]]:
<ast.Try object at 0x7da2041d88e0>
variable[args_kwargs] assign[=] binary_operation[name[args] + call[name[tuple], parameter[call[name[kwargs].values, parameter[]]]]]
<ast.Try object at 0x7da2043445e0>
return[name[function_wrapper]] | keyword[def] identifier[cached] ( identifier[function] ):
literal[string]
identifier[cache_variable] = literal[string] + identifier[function] . identifier[__name__]
@ identifier[wraps] ( identifier[function] )
keyword[def] identifier[function_wrapper] ( identifier[obj] ,* identifier[args] ,** identifier[kwargs] ):
keyword[try] :
identifier[cache] = identifier[getattr] ( identifier[obj] , identifier[cache_variable] )
keyword[except] identifier[AttributeError] :
identifier[cache] ={}
identifier[setattr] ( identifier[obj] , identifier[cache_variable] , identifier[cache] )
identifier[args_kwargs] = identifier[args] + identifier[tuple] ( identifier[kwargs] . identifier[values] ())
keyword[try] :
keyword[return] identifier[cache] [ identifier[args_kwargs] ]
keyword[except] identifier[KeyError] :
identifier[cache_value] = identifier[function] ( identifier[obj] ,* identifier[args] ,** identifier[kwargs] )
identifier[cache] [ identifier[args_kwargs] ]= identifier[cache_value]
keyword[return] identifier[cache_value]
keyword[return] identifier[function_wrapper] | def cached(function):
"""Method decorator caching a method's returned values."""
cache_variable = '_cached_' + function.__name__
@wraps(function)
def function_wrapper(obj, *args, **kwargs):
# values are cached in a dict stored in the object
try:
cache = getattr(obj, cache_variable) # depends on [control=['try'], data=[]]
except AttributeError:
cache = {}
setattr(obj, cache_variable, cache) # depends on [control=['except'], data=[]]
args_kwargs = args + tuple(kwargs.values())
try:
return cache[args_kwargs] # depends on [control=['try'], data=[]]
except KeyError:
cache_value = function(obj, *args, **kwargs)
cache[args_kwargs] = cache_value
return cache_value # depends on [control=['except'], data=[]]
return function_wrapper |
def irreducible_causes(self):
"""The set of irreducible causes in this |Account|."""
return tuple(link for link in self
if link.direction is Direction.CAUSE) | def function[irreducible_causes, parameter[self]]:
constant[The set of irreducible causes in this |Account|.]
return[call[name[tuple], parameter[<ast.GeneratorExp object at 0x7da18f58f0a0>]]] | keyword[def] identifier[irreducible_causes] ( identifier[self] ):
literal[string]
keyword[return] identifier[tuple] ( identifier[link] keyword[for] identifier[link] keyword[in] identifier[self]
keyword[if] identifier[link] . identifier[direction] keyword[is] identifier[Direction] . identifier[CAUSE] ) | def irreducible_causes(self):
"""The set of irreducible causes in this |Account|."""
return tuple((link for link in self if link.direction is Direction.CAUSE)) |
def _parse_options(self, options):
"""Copy needed options to self"""
attributes = ('host', 'wapi_version', 'username', 'password',
'ssl_verify', 'http_request_timeout', 'max_retries',
'http_pool_connections', 'http_pool_maxsize',
'silent_ssl_warnings', 'log_api_calls_as_info',
'max_results', 'paging')
for attr in attributes:
if isinstance(options, dict) and attr in options:
setattr(self, attr, options[attr])
elif hasattr(options, attr):
value = getattr(options, attr)
setattr(self, attr, value)
elif attr in self.DEFAULT_OPTIONS:
setattr(self, attr, self.DEFAULT_OPTIONS[attr])
else:
msg = "WAPI config error. Option %s is not defined" % attr
raise ib_ex.InfobloxConfigException(msg=msg)
for attr in ('host', 'username', 'password'):
if not getattr(self, attr):
msg = "WAPI config error. Option %s can not be blank" % attr
raise ib_ex.InfobloxConfigException(msg=msg)
self.wapi_url = "https://%s/wapi/v%s/" % (self.host,
self.wapi_version)
self.cloud_api_enabled = self.is_cloud_wapi(self.wapi_version) | def function[_parse_options, parameter[self, options]]:
constant[Copy needed options to self]
variable[attributes] assign[=] tuple[[<ast.Constant object at 0x7da20c7c97b0>, <ast.Constant object at 0x7da20c7c9ea0>, <ast.Constant object at 0x7da20c7c95d0>, <ast.Constant object at 0x7da20c7c82e0>, <ast.Constant object at 0x7da20c7c9660>, <ast.Constant object at 0x7da20c7c84f0>, <ast.Constant object at 0x7da20c7cad10>, <ast.Constant object at 0x7da20c7c9ed0>, <ast.Constant object at 0x7da20c7cb310>, <ast.Constant object at 0x7da20c7c8280>, <ast.Constant object at 0x7da20c7c9f30>, <ast.Constant object at 0x7da20c7c8fd0>, <ast.Constant object at 0x7da20c7cb280>]]
for taget[name[attr]] in starred[name[attributes]] begin[:]
if <ast.BoolOp object at 0x7da20c7c9b40> begin[:]
call[name[setattr], parameter[name[self], name[attr], call[name[options]][name[attr]]]]
for taget[name[attr]] in starred[tuple[[<ast.Constant object at 0x7da204565600>, <ast.Constant object at 0x7da204565b70>, <ast.Constant object at 0x7da204565a50>]]] begin[:]
if <ast.UnaryOp object at 0x7da204565d50> begin[:]
variable[msg] assign[=] binary_operation[constant[WAPI config error. Option %s can not be blank] <ast.Mod object at 0x7da2590d6920> name[attr]]
<ast.Raise object at 0x7da204564550>
name[self].wapi_url assign[=] binary_operation[constant[https://%s/wapi/v%s/] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da20e960220>, <ast.Attribute object at 0x7da20e961150>]]]
name[self].cloud_api_enabled assign[=] call[name[self].is_cloud_wapi, parameter[name[self].wapi_version]] | keyword[def] identifier[_parse_options] ( identifier[self] , identifier[options] ):
literal[string]
identifier[attributes] =( literal[string] , literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] ,
literal[string] , literal[string] ,
literal[string] , literal[string] )
keyword[for] identifier[attr] keyword[in] identifier[attributes] :
keyword[if] identifier[isinstance] ( identifier[options] , identifier[dict] ) keyword[and] identifier[attr] keyword[in] identifier[options] :
identifier[setattr] ( identifier[self] , identifier[attr] , identifier[options] [ identifier[attr] ])
keyword[elif] identifier[hasattr] ( identifier[options] , identifier[attr] ):
identifier[value] = identifier[getattr] ( identifier[options] , identifier[attr] )
identifier[setattr] ( identifier[self] , identifier[attr] , identifier[value] )
keyword[elif] identifier[attr] keyword[in] identifier[self] . identifier[DEFAULT_OPTIONS] :
identifier[setattr] ( identifier[self] , identifier[attr] , identifier[self] . identifier[DEFAULT_OPTIONS] [ identifier[attr] ])
keyword[else] :
identifier[msg] = literal[string] % identifier[attr]
keyword[raise] identifier[ib_ex] . identifier[InfobloxConfigException] ( identifier[msg] = identifier[msg] )
keyword[for] identifier[attr] keyword[in] ( literal[string] , literal[string] , literal[string] ):
keyword[if] keyword[not] identifier[getattr] ( identifier[self] , identifier[attr] ):
identifier[msg] = literal[string] % identifier[attr]
keyword[raise] identifier[ib_ex] . identifier[InfobloxConfigException] ( identifier[msg] = identifier[msg] )
identifier[self] . identifier[wapi_url] = literal[string] %( identifier[self] . identifier[host] ,
identifier[self] . identifier[wapi_version] )
identifier[self] . identifier[cloud_api_enabled] = identifier[self] . identifier[is_cloud_wapi] ( identifier[self] . identifier[wapi_version] ) | def _parse_options(self, options):
"""Copy needed options to self"""
attributes = ('host', 'wapi_version', 'username', 'password', 'ssl_verify', 'http_request_timeout', 'max_retries', 'http_pool_connections', 'http_pool_maxsize', 'silent_ssl_warnings', 'log_api_calls_as_info', 'max_results', 'paging')
for attr in attributes:
if isinstance(options, dict) and attr in options:
setattr(self, attr, options[attr]) # depends on [control=['if'], data=[]]
elif hasattr(options, attr):
value = getattr(options, attr)
setattr(self, attr, value) # depends on [control=['if'], data=[]]
elif attr in self.DEFAULT_OPTIONS:
setattr(self, attr, self.DEFAULT_OPTIONS[attr]) # depends on [control=['if'], data=['attr']]
else:
msg = 'WAPI config error. Option %s is not defined' % attr
raise ib_ex.InfobloxConfigException(msg=msg) # depends on [control=['for'], data=['attr']]
for attr in ('host', 'username', 'password'):
if not getattr(self, attr):
msg = 'WAPI config error. Option %s can not be blank' % attr
raise ib_ex.InfobloxConfigException(msg=msg) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['attr']]
self.wapi_url = 'https://%s/wapi/v%s/' % (self.host, self.wapi_version)
self.cloud_api_enabled = self.is_cloud_wapi(self.wapi_version) |
def get_pkg_module_names(package_path):
"""Returns module filenames from package.
Args:
package_path: Path to Python package.
Returns:
A set of module filenames.
"""
module_names = set()
for fobj, modname, _ in pkgutil.iter_modules(path=[package_path]):
filename = os.path.join(fobj.path, '%s.py' % modname)
if os.path.exists(filename):
module_names.add(os.path.abspath(filename))
return module_names | def function[get_pkg_module_names, parameter[package_path]]:
constant[Returns module filenames from package.
Args:
package_path: Path to Python package.
Returns:
A set of module filenames.
]
variable[module_names] assign[=] call[name[set], parameter[]]
for taget[tuple[[<ast.Name object at 0x7da20c6c6710>, <ast.Name object at 0x7da20c6c4a60>, <ast.Name object at 0x7da20c6c5750>]]] in starred[call[name[pkgutil].iter_modules, parameter[]]] begin[:]
variable[filename] assign[=] call[name[os].path.join, parameter[name[fobj].path, binary_operation[constant[%s.py] <ast.Mod object at 0x7da2590d6920> name[modname]]]]
if call[name[os].path.exists, parameter[name[filename]]] begin[:]
call[name[module_names].add, parameter[call[name[os].path.abspath, parameter[name[filename]]]]]
return[name[module_names]] | keyword[def] identifier[get_pkg_module_names] ( identifier[package_path] ):
literal[string]
identifier[module_names] = identifier[set] ()
keyword[for] identifier[fobj] , identifier[modname] , identifier[_] keyword[in] identifier[pkgutil] . identifier[iter_modules] ( identifier[path] =[ identifier[package_path] ]):
identifier[filename] = identifier[os] . identifier[path] . identifier[join] ( identifier[fobj] . identifier[path] , literal[string] % identifier[modname] )
keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[filename] ):
identifier[module_names] . identifier[add] ( identifier[os] . identifier[path] . identifier[abspath] ( identifier[filename] ))
keyword[return] identifier[module_names] | def get_pkg_module_names(package_path):
"""Returns module filenames from package.
Args:
package_path: Path to Python package.
Returns:
A set of module filenames.
"""
module_names = set()
for (fobj, modname, _) in pkgutil.iter_modules(path=[package_path]):
filename = os.path.join(fobj.path, '%s.py' % modname)
if os.path.exists(filename):
module_names.add(os.path.abspath(filename)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
return module_names |
def _compute_vectorized(self, *args):
"""Compare attributes (vectorized)
Parameters
----------
*args : pandas.Series
pandas.Series' as arguments.
Returns
-------
pandas.Series, pandas.DataFrame, numpy.ndarray
The result of comparing record pairs (the features). Can be
a tuple with multiple pandas.Series, pandas.DataFrame,
numpy.ndarray objects.
"""
if self._f_compare_vectorized:
return self._f_compare_vectorized(
*(args + self.args), **self.kwargs)
else:
raise NotImplementedError() | def function[_compute_vectorized, parameter[self]]:
constant[Compare attributes (vectorized)
Parameters
----------
*args : pandas.Series
pandas.Series' as arguments.
Returns
-------
pandas.Series, pandas.DataFrame, numpy.ndarray
The result of comparing record pairs (the features). Can be
a tuple with multiple pandas.Series, pandas.DataFrame,
numpy.ndarray objects.
]
if name[self]._f_compare_vectorized begin[:]
return[call[name[self]._f_compare_vectorized, parameter[<ast.Starred object at 0x7da18f7232e0>]]] | keyword[def] identifier[_compute_vectorized] ( identifier[self] ,* identifier[args] ):
literal[string]
keyword[if] identifier[self] . identifier[_f_compare_vectorized] :
keyword[return] identifier[self] . identifier[_f_compare_vectorized] (
*( identifier[args] + identifier[self] . identifier[args] ),** identifier[self] . identifier[kwargs] )
keyword[else] :
keyword[raise] identifier[NotImplementedError] () | def _compute_vectorized(self, *args):
"""Compare attributes (vectorized)
Parameters
----------
*args : pandas.Series
pandas.Series' as arguments.
Returns
-------
pandas.Series, pandas.DataFrame, numpy.ndarray
The result of comparing record pairs (the features). Can be
a tuple with multiple pandas.Series, pandas.DataFrame,
numpy.ndarray objects.
"""
if self._f_compare_vectorized:
return self._f_compare_vectorized(*args + self.args, **self.kwargs) # depends on [control=['if'], data=[]]
else:
raise NotImplementedError() |
def find_tag(match: str, strict: bool, directory: str):
"""Find tag for git repository."""
with suppress(CalledProcessError):
echo(git.find_tag(match, strict=strict, git_dir=directory)) | def function[find_tag, parameter[match, strict, directory]]:
constant[Find tag for git repository.]
with call[name[suppress], parameter[name[CalledProcessError]]] begin[:]
call[name[echo], parameter[call[name[git].find_tag, parameter[name[match]]]]] | keyword[def] identifier[find_tag] ( identifier[match] : identifier[str] , identifier[strict] : identifier[bool] , identifier[directory] : identifier[str] ):
literal[string]
keyword[with] identifier[suppress] ( identifier[CalledProcessError] ):
identifier[echo] ( identifier[git] . identifier[find_tag] ( identifier[match] , identifier[strict] = identifier[strict] , identifier[git_dir] = identifier[directory] )) | def find_tag(match: str, strict: bool, directory: str):
"""Find tag for git repository."""
with suppress(CalledProcessError):
echo(git.find_tag(match, strict=strict, git_dir=directory)) # depends on [control=['with'], data=[]] |
def tabulate(tabular_data, headers=(), tablefmt="simple",
floatfmt=_DEFAULT_FLOATFMT, numalign="decimal", stralign="left",
missingval=_DEFAULT_MISSINGVAL, showindex="default",
disable_numparse=False):
"""Format a fixed width table for pretty printing.
"""
if tabular_data is None:
tabular_data = []
list_of_lists, headers = _normalize_tabular_data(
tabular_data, headers, showindex=showindex)
# empty values in the first column of RST tables should be escaped (issue #82)
# "" should be escaped as "\\ " or ".."
if tablefmt == 'rst':
list_of_lists, headers = _rst_escape_first_column(
list_of_lists, headers)
# optimization: look for ANSI control codes once,
# enable smart width functions only if a control code is found
plain_text = '\t'.join(['\t'.join(map(_text_type, headers))] +
['\t'.join(map(_text_type, row)) for row in list_of_lists])
has_invisible = re.search(_invisible_codes, plain_text)
enable_widechars = wcwidth is not None and WIDE_CHARS_MODE
if tablefmt in multiline_formats and _is_multiline(plain_text):
tablefmt = multiline_formats.get(tablefmt, tablefmt)
is_multiline = True
else:
is_multiline = False
width_fn = _choose_width_fn(has_invisible, enable_widechars, is_multiline)
# format rows and columns, convert numeric values to strings
cols = list(izip_longest(*list_of_lists))
numparses = _expand_numparse(disable_numparse, len(cols))
coltypes = [_column_type(col, numparse=np) for col, np in
zip(cols, numparses)]
if isinstance(floatfmt, basestring): # old version
# just duplicate the string to use in each column
float_formats = len(cols) * [floatfmt]
else: # if floatfmt is list, tuple etc we have one per column
float_formats = list(floatfmt)
if len(float_formats) < len(cols):
float_formats.extend(
(len(cols) - len(float_formats)) * [_DEFAULT_FLOATFMT])
if isinstance(missingval, basestring):
missing_vals = len(cols) * [missingval]
else:
missing_vals = list(missingval)
if len(missing_vals) < len(cols):
missing_vals.extend(
(len(cols) - len(missing_vals)) * [_DEFAULT_MISSINGVAL])
cols = [[_format(v, ct, fl_fmt, miss_v, has_invisible) for v in c]
for c, ct, fl_fmt, miss_v in zip(cols, coltypes, float_formats, missing_vals)]
# align columns
aligns = [numalign if ct in [int, float] else stralign for ct in coltypes]
minwidths = [
width_fn(h) + MIN_PADDING for h in headers] if headers else [0] * len(cols)
cols = [_align_column(c, a, minw, has_invisible, enable_widechars, is_multiline)
for c, a, minw in zip(cols, aligns, minwidths)]
if headers:
# align headers and add headers
t_cols = cols or [['']] * len(headers)
t_aligns = aligns or [stralign] * len(headers)
minwidths = [max(minw, max(width_fn(cl) for cl in c))
for minw, c in zip(minwidths, t_cols)]
headers = [_align_header(h, a, minw, width_fn(h), is_multiline, width_fn)
for h, a, minw in zip(headers, t_aligns, minwidths)]
rows = list(zip(*cols))
else:
minwidths = [max(width_fn(cl) for cl in c) for c in cols]
rows = list(zip(*cols))
if not isinstance(tablefmt, TableFormat):
tablefmt = _table_formats.get(tablefmt, _table_formats["simple"])
return _format_table(tablefmt, headers, rows,
minwidths, aligns, is_multiline) | def function[tabulate, parameter[tabular_data, headers, tablefmt, floatfmt, numalign, stralign, missingval, showindex, disable_numparse]]:
constant[Format a fixed width table for pretty printing.
]
if compare[name[tabular_data] is constant[None]] begin[:]
variable[tabular_data] assign[=] list[[]]
<ast.Tuple object at 0x7da207f01ae0> assign[=] call[name[_normalize_tabular_data], parameter[name[tabular_data], name[headers]]]
if compare[name[tablefmt] equal[==] constant[rst]] begin[:]
<ast.Tuple object at 0x7da207f03bb0> assign[=] call[name[_rst_escape_first_column], parameter[name[list_of_lists], name[headers]]]
variable[plain_text] assign[=] call[constant[ ].join, parameter[binary_operation[list[[<ast.Call object at 0x7da207f02a10>]] + <ast.ListComp object at 0x7da207f003d0>]]]
variable[has_invisible] assign[=] call[name[re].search, parameter[name[_invisible_codes], name[plain_text]]]
variable[enable_widechars] assign[=] <ast.BoolOp object at 0x7da207f02020>
if <ast.BoolOp object at 0x7da207f01cc0> begin[:]
variable[tablefmt] assign[=] call[name[multiline_formats].get, parameter[name[tablefmt], name[tablefmt]]]
variable[is_multiline] assign[=] constant[True]
variable[width_fn] assign[=] call[name[_choose_width_fn], parameter[name[has_invisible], name[enable_widechars], name[is_multiline]]]
variable[cols] assign[=] call[name[list], parameter[call[name[izip_longest], parameter[<ast.Starred object at 0x7da207f00310>]]]]
variable[numparses] assign[=] call[name[_expand_numparse], parameter[name[disable_numparse], call[name[len], parameter[name[cols]]]]]
variable[coltypes] assign[=] <ast.ListComp object at 0x7da207f01030>
if call[name[isinstance], parameter[name[floatfmt], name[basestring]]] begin[:]
variable[float_formats] assign[=] binary_operation[call[name[len], parameter[name[cols]]] * list[[<ast.Name object at 0x7da207f027a0>]]]
if call[name[isinstance], parameter[name[missingval], name[basestring]]] begin[:]
variable[missing_vals] assign[=] binary_operation[call[name[len], parameter[name[cols]]] * list[[<ast.Name object at 0x7da207f03520>]]]
variable[cols] assign[=] <ast.ListComp object at 0x7da2054a7be0>
variable[aligns] assign[=] <ast.ListComp object at 0x7da2054a7370>
variable[minwidths] assign[=] <ast.IfExp object at 0x7da2054a4a30>
variable[cols] assign[=] <ast.ListComp object at 0x7da2054a63b0>
if name[headers] begin[:]
variable[t_cols] assign[=] <ast.BoolOp object at 0x7da2054a5210>
variable[t_aligns] assign[=] <ast.BoolOp object at 0x7da2054a55a0>
variable[minwidths] assign[=] <ast.ListComp object at 0x7da2054a6320>
variable[headers] assign[=] <ast.ListComp object at 0x7da2054a76a0>
variable[rows] assign[=] call[name[list], parameter[call[name[zip], parameter[<ast.Starred object at 0x7da2054a5c00>]]]]
if <ast.UnaryOp object at 0x7da2054a7d00> begin[:]
variable[tablefmt] assign[=] call[name[_table_formats].get, parameter[name[tablefmt], call[name[_table_formats]][constant[simple]]]]
return[call[name[_format_table], parameter[name[tablefmt], name[headers], name[rows], name[minwidths], name[aligns], name[is_multiline]]]] | keyword[def] identifier[tabulate] ( identifier[tabular_data] , identifier[headers] =(), identifier[tablefmt] = literal[string] ,
identifier[floatfmt] = identifier[_DEFAULT_FLOATFMT] , identifier[numalign] = literal[string] , identifier[stralign] = literal[string] ,
identifier[missingval] = identifier[_DEFAULT_MISSINGVAL] , identifier[showindex] = literal[string] ,
identifier[disable_numparse] = keyword[False] ):
literal[string]
keyword[if] identifier[tabular_data] keyword[is] keyword[None] :
identifier[tabular_data] =[]
identifier[list_of_lists] , identifier[headers] = identifier[_normalize_tabular_data] (
identifier[tabular_data] , identifier[headers] , identifier[showindex] = identifier[showindex] )
keyword[if] identifier[tablefmt] == literal[string] :
identifier[list_of_lists] , identifier[headers] = identifier[_rst_escape_first_column] (
identifier[list_of_lists] , identifier[headers] )
identifier[plain_text] = literal[string] . identifier[join] ([ literal[string] . identifier[join] ( identifier[map] ( identifier[_text_type] , identifier[headers] ))]+
[ literal[string] . identifier[join] ( identifier[map] ( identifier[_text_type] , identifier[row] )) keyword[for] identifier[row] keyword[in] identifier[list_of_lists] ])
identifier[has_invisible] = identifier[re] . identifier[search] ( identifier[_invisible_codes] , identifier[plain_text] )
identifier[enable_widechars] = identifier[wcwidth] keyword[is] keyword[not] keyword[None] keyword[and] identifier[WIDE_CHARS_MODE]
keyword[if] identifier[tablefmt] keyword[in] identifier[multiline_formats] keyword[and] identifier[_is_multiline] ( identifier[plain_text] ):
identifier[tablefmt] = identifier[multiline_formats] . identifier[get] ( identifier[tablefmt] , identifier[tablefmt] )
identifier[is_multiline] = keyword[True]
keyword[else] :
identifier[is_multiline] = keyword[False]
identifier[width_fn] = identifier[_choose_width_fn] ( identifier[has_invisible] , identifier[enable_widechars] , identifier[is_multiline] )
identifier[cols] = identifier[list] ( identifier[izip_longest] (* identifier[list_of_lists] ))
identifier[numparses] = identifier[_expand_numparse] ( identifier[disable_numparse] , identifier[len] ( identifier[cols] ))
identifier[coltypes] =[ identifier[_column_type] ( identifier[col] , identifier[numparse] = identifier[np] ) keyword[for] identifier[col] , identifier[np] keyword[in]
identifier[zip] ( identifier[cols] , identifier[numparses] )]
keyword[if] identifier[isinstance] ( identifier[floatfmt] , identifier[basestring] ):
identifier[float_formats] = identifier[len] ( identifier[cols] )*[ identifier[floatfmt] ]
keyword[else] :
identifier[float_formats] = identifier[list] ( identifier[floatfmt] )
keyword[if] identifier[len] ( identifier[float_formats] )< identifier[len] ( identifier[cols] ):
identifier[float_formats] . identifier[extend] (
( identifier[len] ( identifier[cols] )- identifier[len] ( identifier[float_formats] ))*[ identifier[_DEFAULT_FLOATFMT] ])
keyword[if] identifier[isinstance] ( identifier[missingval] , identifier[basestring] ):
identifier[missing_vals] = identifier[len] ( identifier[cols] )*[ identifier[missingval] ]
keyword[else] :
identifier[missing_vals] = identifier[list] ( identifier[missingval] )
keyword[if] identifier[len] ( identifier[missing_vals] )< identifier[len] ( identifier[cols] ):
identifier[missing_vals] . identifier[extend] (
( identifier[len] ( identifier[cols] )- identifier[len] ( identifier[missing_vals] ))*[ identifier[_DEFAULT_MISSINGVAL] ])
identifier[cols] =[[ identifier[_format] ( identifier[v] , identifier[ct] , identifier[fl_fmt] , identifier[miss_v] , identifier[has_invisible] ) keyword[for] identifier[v] keyword[in] identifier[c] ]
keyword[for] identifier[c] , identifier[ct] , identifier[fl_fmt] , identifier[miss_v] keyword[in] identifier[zip] ( identifier[cols] , identifier[coltypes] , identifier[float_formats] , identifier[missing_vals] )]
identifier[aligns] =[ identifier[numalign] keyword[if] identifier[ct] keyword[in] [ identifier[int] , identifier[float] ] keyword[else] identifier[stralign] keyword[for] identifier[ct] keyword[in] identifier[coltypes] ]
identifier[minwidths] =[
identifier[width_fn] ( identifier[h] )+ identifier[MIN_PADDING] keyword[for] identifier[h] keyword[in] identifier[headers] ] keyword[if] identifier[headers] keyword[else] [ literal[int] ]* identifier[len] ( identifier[cols] )
identifier[cols] =[ identifier[_align_column] ( identifier[c] , identifier[a] , identifier[minw] , identifier[has_invisible] , identifier[enable_widechars] , identifier[is_multiline] )
keyword[for] identifier[c] , identifier[a] , identifier[minw] keyword[in] identifier[zip] ( identifier[cols] , identifier[aligns] , identifier[minwidths] )]
keyword[if] identifier[headers] :
identifier[t_cols] = identifier[cols] keyword[or] [[ literal[string] ]]* identifier[len] ( identifier[headers] )
identifier[t_aligns] = identifier[aligns] keyword[or] [ identifier[stralign] ]* identifier[len] ( identifier[headers] )
identifier[minwidths] =[ identifier[max] ( identifier[minw] , identifier[max] ( identifier[width_fn] ( identifier[cl] ) keyword[for] identifier[cl] keyword[in] identifier[c] ))
keyword[for] identifier[minw] , identifier[c] keyword[in] identifier[zip] ( identifier[minwidths] , identifier[t_cols] )]
identifier[headers] =[ identifier[_align_header] ( identifier[h] , identifier[a] , identifier[minw] , identifier[width_fn] ( identifier[h] ), identifier[is_multiline] , identifier[width_fn] )
keyword[for] identifier[h] , identifier[a] , identifier[minw] keyword[in] identifier[zip] ( identifier[headers] , identifier[t_aligns] , identifier[minwidths] )]
identifier[rows] = identifier[list] ( identifier[zip] (* identifier[cols] ))
keyword[else] :
identifier[minwidths] =[ identifier[max] ( identifier[width_fn] ( identifier[cl] ) keyword[for] identifier[cl] keyword[in] identifier[c] ) keyword[for] identifier[c] keyword[in] identifier[cols] ]
identifier[rows] = identifier[list] ( identifier[zip] (* identifier[cols] ))
keyword[if] keyword[not] identifier[isinstance] ( identifier[tablefmt] , identifier[TableFormat] ):
identifier[tablefmt] = identifier[_table_formats] . identifier[get] ( identifier[tablefmt] , identifier[_table_formats] [ literal[string] ])
keyword[return] identifier[_format_table] ( identifier[tablefmt] , identifier[headers] , identifier[rows] ,
identifier[minwidths] , identifier[aligns] , identifier[is_multiline] ) | def tabulate(tabular_data, headers=(), tablefmt='simple', floatfmt=_DEFAULT_FLOATFMT, numalign='decimal', stralign='left', missingval=_DEFAULT_MISSINGVAL, showindex='default', disable_numparse=False):
"""Format a fixed width table for pretty printing.
"""
if tabular_data is None:
tabular_data = [] # depends on [control=['if'], data=['tabular_data']]
(list_of_lists, headers) = _normalize_tabular_data(tabular_data, headers, showindex=showindex)
# empty values in the first column of RST tables should be escaped (issue #82)
# "" should be escaped as "\\ " or ".."
if tablefmt == 'rst':
(list_of_lists, headers) = _rst_escape_first_column(list_of_lists, headers) # depends on [control=['if'], data=[]]
# optimization: look for ANSI control codes once,
# enable smart width functions only if a control code is found
plain_text = '\t'.join(['\t'.join(map(_text_type, headers))] + ['\t'.join(map(_text_type, row)) for row in list_of_lists])
has_invisible = re.search(_invisible_codes, plain_text)
enable_widechars = wcwidth is not None and WIDE_CHARS_MODE
if tablefmt in multiline_formats and _is_multiline(plain_text):
tablefmt = multiline_formats.get(tablefmt, tablefmt)
is_multiline = True # depends on [control=['if'], data=[]]
else:
is_multiline = False
width_fn = _choose_width_fn(has_invisible, enable_widechars, is_multiline)
# format rows and columns, convert numeric values to strings
cols = list(izip_longest(*list_of_lists))
numparses = _expand_numparse(disable_numparse, len(cols))
coltypes = [_column_type(col, numparse=np) for (col, np) in zip(cols, numparses)]
if isinstance(floatfmt, basestring): # old version
# just duplicate the string to use in each column
float_formats = len(cols) * [floatfmt] # depends on [control=['if'], data=[]]
else: # if floatfmt is list, tuple etc we have one per column
float_formats = list(floatfmt)
if len(float_formats) < len(cols):
float_formats.extend((len(cols) - len(float_formats)) * [_DEFAULT_FLOATFMT]) # depends on [control=['if'], data=[]]
if isinstance(missingval, basestring):
missing_vals = len(cols) * [missingval] # depends on [control=['if'], data=[]]
else:
missing_vals = list(missingval)
if len(missing_vals) < len(cols):
missing_vals.extend((len(cols) - len(missing_vals)) * [_DEFAULT_MISSINGVAL]) # depends on [control=['if'], data=[]]
cols = [[_format(v, ct, fl_fmt, miss_v, has_invisible) for v in c] for (c, ct, fl_fmt, miss_v) in zip(cols, coltypes, float_formats, missing_vals)]
# align columns
aligns = [numalign if ct in [int, float] else stralign for ct in coltypes]
minwidths = [width_fn(h) + MIN_PADDING for h in headers] if headers else [0] * len(cols)
cols = [_align_column(c, a, minw, has_invisible, enable_widechars, is_multiline) for (c, a, minw) in zip(cols, aligns, minwidths)]
if headers:
# align headers and add headers
t_cols = cols or [['']] * len(headers)
t_aligns = aligns or [stralign] * len(headers)
minwidths = [max(minw, max((width_fn(cl) for cl in c))) for (minw, c) in zip(minwidths, t_cols)]
headers = [_align_header(h, a, minw, width_fn(h), is_multiline, width_fn) for (h, a, minw) in zip(headers, t_aligns, minwidths)]
rows = list(zip(*cols)) # depends on [control=['if'], data=[]]
else:
minwidths = [max((width_fn(cl) for cl in c)) for c in cols]
rows = list(zip(*cols))
if not isinstance(tablefmt, TableFormat):
tablefmt = _table_formats.get(tablefmt, _table_formats['simple']) # depends on [control=['if'], data=[]]
return _format_table(tablefmt, headers, rows, minwidths, aligns, is_multiline) |
def get_iter_returns(
        self,
        jid,
        minions,
        timeout=None,
        tgt='*',
        tgt_type='glob',
        expect_minions=False,
        block=True,
        **kwargs):
        '''
        Watch the event system and yield job return data as it comes in.

        jid
            The job ID whose returns should be collected.
        minions
            The minion id(s) the job was published to; a string, list,
            tuple or set is accepted and normalized to a set.
        timeout
            Seconds to wait for returns before re-pinging still-running
            minions. Defaults to the configured ``timeout`` option.
        tgt, tgt_type
            Original target and target type (kept for interface
            compatibility; not used directly in this method).
        expect_minions
            If True, yield ``{minion: {'failed': True}}`` for every minion
            that never returned.
        block
            If True, sleep briefly between polls; if False, yield ``None``
            between polls so the caller can drive the loop.

        :returns: generator yielding all of the information for the JID,
            one dict per return event
        '''
        if not isinstance(minions, set):
            if isinstance(minions, six.string_types):
                minions = set([minions])
            elif isinstance(minions, (list, tuple)):
                minions = set(list(minions))

        if timeout is None:
            timeout = self.opts['timeout']
        gather_job_timeout = int(kwargs.get('gather_job_timeout', self.opts['gather_job_timeout']))
        # timeouts per minion, id_ -> timeout time
        minion_timeouts = {}

        found = set()
        missing = set()
        # Check to see if the jid is real, if not return the empty dict
        try:
            if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}:
                log.warning('jid does not exist')
                yield {}
                # Stop the iteration, since the jid is invalid. A plain
                # ``return`` is required here: ``raise StopIteration()``
                # would be swallowed by the ``except Exception`` clause
                # below (StopIteration subclasses Exception) and, under
                # PEP 479, would otherwise surface as a RuntimeError.
                return
        except Exception as exc:
            log.warning('Returner unavailable: %s', exc, exc_info_on_loglevel=logging.DEBUG)
        # iterator for this job's return
        if self.opts['order_masters']:
            # If we are a MoM, we need to gather expected minions from downstreams masters.
            ret_iter = self.get_returns_no_block('(salt/job|syndic/.*)/{0}'.format(jid), 'regex')
        else:
            ret_iter = self.get_returns_no_block('salt/job/{0}'.format(jid))
        # iterator for the info of this job
        jinfo_iter = []
        # open event jids that need to be un-subscribed from later
        open_jids = set()
        timeout_at = time.time() + timeout
        gather_syndic_wait = time.time() + self.opts['syndic_wait']
        # are there still minions running the job out there
        # start as True so that we ping at least once
        minions_running = True
        log.debug(
            'get_iter_returns for jid %s sent to %s will timeout at %s',
            jid, minions, datetime.fromtimestamp(timeout_at).time()
        )
        while True:
            # Process events until timeout is reached or all minions have returned
            for raw in ret_iter:
                # if we got None, then there were no events
                if raw is None:
                    break
                if 'minions' in raw.get('data', {}):
                    minions.update(raw['data']['minions'])
                    if 'missing' in raw.get('data', {}):
                        missing.update(raw['data']['missing'])
                    continue
                if 'return' not in raw['data']:
                    continue
                if kwargs.get('raw', False):
                    found.add(raw['data']['id'])
                    yield raw
                else:
                    found.add(raw['data']['id'])
                    ret = {raw['data']['id']: {'ret': raw['data']['return']}}
                    if 'out' in raw['data']:
                        ret[raw['data']['id']]['out'] = raw['data']['out']
                    if 'retcode' in raw['data']:
                        ret[raw['data']['id']]['retcode'] = raw['data']['retcode']
                    if 'jid' in raw['data']:
                        ret[raw['data']['id']]['jid'] = raw['data']['jid']
                    if kwargs.get('_cmd_meta', False):
                        ret[raw['data']['id']].update(raw['data'])
                    log.debug('jid %s return from %s', jid, raw['data']['id'])
                    yield ret

            # if we have all of the returns (and we aren't a syndic), no need for anything fancy
            if len(found.intersection(minions)) >= len(minions) and not self.opts['order_masters']:
                # All minions have returned, break out of the loop
                log.debug('jid %s found all minions %s', jid, found)
                break
            elif len(found.intersection(minions)) >= len(minions) and self.opts['order_masters']:
                if len(found) >= len(minions) and len(minions) > 0 and time.time() > gather_syndic_wait:
                    # There were some minions to find and we found them
                    # However, this does not imply that *all* masters have yet responded with expected minion lists.
                    # Therefore, continue to wait up to the syndic_wait period (calculated in gather_syndic_wait) to see
                    # if additional lower-level masters deliver their lists of expected
                    # minions.
                    break
            # If we get here we may not have gathered the minion list yet. Keep waiting
            # for all lower-level masters to respond with their minion lists

            # let start the timeouts for all remaining minions
            for id_ in minions - found:
                # if we have a new minion in the list, make sure it has a timeout
                if id_ not in minion_timeouts:
                    minion_timeouts[id_] = time.time() + timeout

            # if the jinfo has timed out and some minions are still running the job
            # re-do the ping
            if time.time() > timeout_at and minions_running:
                # since this is a new ping, no one has responded yet
                jinfo = self.gather_job_info(jid, list(minions - found), 'list', **kwargs)
                minions_running = False
                # if we weren't assigned any jid that means the master thinks
                # we have nothing to send
                if 'jid' not in jinfo:
                    jinfo_iter = []
                else:
                    jinfo_iter = self.get_returns_no_block('salt/job/{0}'.format(jinfo['jid']))
                timeout_at = time.time() + gather_job_timeout
                # if you are a syndic, wait a little longer
                if self.opts['order_masters']:
                    timeout_at += self.opts.get('syndic_wait', 1)

            # check for minions that are running the job still
            for raw in jinfo_iter:
                # if there are no more events, lets stop waiting for the jinfo
                if raw is None:
                    break
                try:
                    if raw['data']['retcode'] > 0:
                        log.error('saltutil returning errors on minion %s', raw['data']['id'])
                        minions.remove(raw['data']['id'])
                        break
                except KeyError as exc:
                    # This is a safe pass. We're just using the try/except to
                    # avoid having to deep-check for keys.
                    missing_key = exc.__str__().strip('\'"')
                    if missing_key == 'retcode':
                        log.debug('retcode missing from client return')
                    else:
                        log.debug(
                            'Passing on saltutil error. Key \'%s\' missing '
                            'from client return. This may be an error in '
                            'the client.', missing_key
                        )
                # Keep track of the jid events to unsubscribe from later
                open_jids.add(jinfo['jid'])

                # TODO: move to a library??
                if 'minions' in raw.get('data', {}):
                    minions.update(raw['data']['minions'])
                    continue
                if 'syndic' in raw.get('data', {}):
                    minions.update(raw['syndic'])
                    continue
                if 'return' not in raw.get('data', {}):
                    continue

                # if the job isn't running there anymore... don't count
                if raw['data']['return'] == {}:
                    continue

                # if the minion throws an exception containing the word "return"
                # the master will try to handle the string as a dict in the next
                # step. Check if we have a string, log the issue and continue.
                if isinstance(raw['data']['return'], six.string_types):
                    log.error("unexpected return from minion: %s", raw)
                    continue

                if 'return' in raw['data']['return'] and \
                        raw['data']['return']['return'] == {}:
                    continue

                # if we didn't originally target the minion, lets add it to the list
                if raw['data']['id'] not in minions:
                    minions.add(raw['data']['id'])
                # update this minion's timeout, as long as the job is still running
                minion_timeouts[raw['data']['id']] = time.time() + timeout
                # a minion returned, so we know its running somewhere
                minions_running = True

            # if we have hit gather_job_timeout (after firing the job) AND
            # if we have hit all minion timeouts, lets call it
            now = time.time()
            # if we have finished waiting, and no minions are running the job
            # then we need to see if each minion has timedout
            done = (now > timeout_at) and not minions_running
            if done:
                # if all minions have timeod out
                for id_ in minions - found:
                    if now < minion_timeouts[id_]:
                        done = False
                        break
            if done:
                break

            # don't spin
            if block:
                time.sleep(0.01)
            else:
                yield

        # If there are any remaining open events, clean them up.
        # (Use a distinct loop variable so the ``jid`` parameter is not
        # shadowed.)
        if open_jids:
            for open_jid in open_jids:
                self.event.unsubscribe(open_jid)

        if expect_minions:
            for minion in list((minions - found)):
                yield {minion: {'failed': True}}

        # Filter out any minions marked as missing for which we received
        # returns (prevents false events sent due to higher-level masters not
        # knowing about lower-level minions).
        missing -= found

        # Report on missing minions
        if missing:
            for minion in missing:
                yield {minion: {'failed': True}}
constant[
Watch the event system and return job data as it comes in
:returns: all of the information for the JID
]
if <ast.UnaryOp object at 0x7da207f98e20> begin[:]
if call[name[isinstance], parameter[name[minions], name[six].string_types]] begin[:]
variable[minions] assign[=] call[name[set], parameter[list[[<ast.Name object at 0x7da207f9bca0>]]]]
if compare[name[timeout] is constant[None]] begin[:]
variable[timeout] assign[=] call[name[self].opts][constant[timeout]]
variable[gather_job_timeout] assign[=] call[name[int], parameter[call[name[kwargs].get, parameter[constant[gather_job_timeout], call[name[self].opts][constant[gather_job_timeout]]]]]]
variable[start] assign[=] call[name[int], parameter[call[name[time].time, parameter[]]]]
variable[minion_timeouts] assign[=] dictionary[[], []]
variable[found] assign[=] call[name[set], parameter[]]
variable[missing] assign[=] call[name[set], parameter[]]
<ast.Try object at 0x7da207f9bb20>
variable[last_time] assign[=] constant[False]
if call[name[self].opts][constant[order_masters]] begin[:]
variable[ret_iter] assign[=] call[name[self].get_returns_no_block, parameter[call[constant[(salt/job|syndic/.*)/{0}].format, parameter[name[jid]]], constant[regex]]]
variable[jinfo_iter] assign[=] list[[]]
variable[open_jids] assign[=] call[name[set], parameter[]]
variable[timeout_at] assign[=] binary_operation[call[name[time].time, parameter[]] + name[timeout]]
variable[gather_syndic_wait] assign[=] binary_operation[call[name[time].time, parameter[]] + call[name[self].opts][constant[syndic_wait]]]
variable[minions_running] assign[=] constant[True]
call[name[log].debug, parameter[constant[get_iter_returns for jid %s sent to %s will timeout at %s], name[jid], name[minions], call[call[name[datetime].fromtimestamp, parameter[name[timeout_at]]].time, parameter[]]]]
while constant[True] begin[:]
for taget[name[raw]] in starred[name[ret_iter]] begin[:]
if compare[name[raw] is constant[None]] begin[:]
break
if compare[constant[minions] in call[name[raw].get, parameter[constant[data], dictionary[[], []]]]] begin[:]
call[name[minions].update, parameter[call[call[name[raw]][constant[data]]][constant[minions]]]]
if compare[constant[missing] in call[name[raw].get, parameter[constant[data], dictionary[[], []]]]] begin[:]
call[name[missing].update, parameter[call[call[name[raw]][constant[data]]][constant[missing]]]]
continue
if compare[constant[return] <ast.NotIn object at 0x7da2590d7190> call[name[raw]][constant[data]]] begin[:]
continue
if call[name[kwargs].get, parameter[constant[raw], constant[False]]] begin[:]
call[name[found].add, parameter[call[call[name[raw]][constant[data]]][constant[id]]]]
<ast.Yield object at 0x7da1b26aebf0>
if <ast.BoolOp object at 0x7da1b26ae920> begin[:]
call[name[log].debug, parameter[constant[jid %s found all minions %s], name[jid], name[found]]]
break
for taget[name[id_]] in starred[binary_operation[name[minions] - name[found]]] begin[:]
if compare[name[id_] <ast.NotIn object at 0x7da2590d7190> name[minion_timeouts]] begin[:]
call[name[minion_timeouts]][name[id_]] assign[=] binary_operation[call[name[time].time, parameter[]] + name[timeout]]
if <ast.BoolOp object at 0x7da1b26afc70> begin[:]
variable[jinfo] assign[=] call[name[self].gather_job_info, parameter[name[jid], call[name[list], parameter[binary_operation[name[minions] - name[found]]]], constant[list]]]
variable[minions_running] assign[=] constant[False]
if compare[constant[jid] <ast.NotIn object at 0x7da2590d7190> name[jinfo]] begin[:]
variable[jinfo_iter] assign[=] list[[]]
variable[timeout_at] assign[=] binary_operation[call[name[time].time, parameter[]] + name[gather_job_timeout]]
if call[name[self].opts][constant[order_masters]] begin[:]
<ast.AugAssign object at 0x7da1b1cadde0>
for taget[name[raw]] in starred[name[jinfo_iter]] begin[:]
if compare[name[raw] is constant[None]] begin[:]
break
<ast.Try object at 0x7da1b1cad5a0>
call[name[open_jids].add, parameter[call[name[jinfo]][constant[jid]]]]
if compare[constant[minions] in call[name[raw].get, parameter[constant[data], dictionary[[], []]]]] begin[:]
call[name[minions].update, parameter[call[call[name[raw]][constant[data]]][constant[minions]]]]
continue
if compare[constant[syndic] in call[name[raw].get, parameter[constant[data], dictionary[[], []]]]] begin[:]
call[name[minions].update, parameter[call[name[raw]][constant[syndic]]]]
continue
if compare[constant[return] <ast.NotIn object at 0x7da2590d7190> call[name[raw].get, parameter[constant[data], dictionary[[], []]]]] begin[:]
continue
if compare[call[call[name[raw]][constant[data]]][constant[return]] equal[==] dictionary[[], []]] begin[:]
continue
if call[name[isinstance], parameter[call[call[name[raw]][constant[data]]][constant[return]], name[six].string_types]] begin[:]
call[name[log].error, parameter[constant[unexpected return from minion: %s], name[raw]]]
continue
if <ast.BoolOp object at 0x7da1b1cad0f0> begin[:]
continue
if compare[call[call[name[raw]][constant[data]]][constant[id]] <ast.NotIn object at 0x7da2590d7190> name[minions]] begin[:]
call[name[minions].add, parameter[call[call[name[raw]][constant[data]]][constant[id]]]]
call[name[minion_timeouts]][call[call[name[raw]][constant[data]]][constant[id]]] assign[=] binary_operation[call[name[time].time, parameter[]] + name[timeout]]
variable[minions_running] assign[=] constant[True]
variable[now] assign[=] call[name[time].time, parameter[]]
variable[done] assign[=] <ast.BoolOp object at 0x7da1b21ebc10>
if name[done] begin[:]
for taget[name[id_]] in starred[binary_operation[name[minions] - name[found]]] begin[:]
if compare[name[now] less[<] call[name[minion_timeouts]][name[id_]]] begin[:]
variable[done] assign[=] constant[False]
break
if name[done] begin[:]
break
if name[block] begin[:]
call[name[time].sleep, parameter[constant[0.01]]]
if name[open_jids] begin[:]
for taget[name[jid]] in starred[name[open_jids]] begin[:]
call[name[self].event.unsubscribe, parameter[name[jid]]]
if name[expect_minions] begin[:]
for taget[name[minion]] in starred[call[name[list], parameter[binary_operation[name[minions] - name[found]]]]] begin[:]
<ast.Yield object at 0x7da1b21ea4d0>
<ast.AugAssign object at 0x7da1b21eb1c0>
if name[missing] begin[:]
for taget[name[minion]] in starred[name[missing]] begin[:]
<ast.Yield object at 0x7da1b21eb490> | keyword[def] identifier[get_iter_returns] (
identifier[self] ,
identifier[jid] ,
identifier[minions] ,
identifier[timeout] = keyword[None] ,
identifier[tgt] = literal[string] ,
identifier[tgt_type] = literal[string] ,
identifier[expect_minions] = keyword[False] ,
identifier[block] = keyword[True] ,
** identifier[kwargs] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[minions] , identifier[set] ):
keyword[if] identifier[isinstance] ( identifier[minions] , identifier[six] . identifier[string_types] ):
identifier[minions] = identifier[set] ([ identifier[minions] ])
keyword[elif] identifier[isinstance] ( identifier[minions] ,( identifier[list] , identifier[tuple] )):
identifier[minions] = identifier[set] ( identifier[list] ( identifier[minions] ))
keyword[if] identifier[timeout] keyword[is] keyword[None] :
identifier[timeout] = identifier[self] . identifier[opts] [ literal[string] ]
identifier[gather_job_timeout] = identifier[int] ( identifier[kwargs] . identifier[get] ( literal[string] , identifier[self] . identifier[opts] [ literal[string] ]))
identifier[start] = identifier[int] ( identifier[time] . identifier[time] ())
identifier[minion_timeouts] ={}
identifier[found] = identifier[set] ()
identifier[missing] = identifier[set] ()
keyword[try] :
keyword[if] identifier[self] . identifier[returners] [ literal[string] . identifier[format] ( identifier[self] . identifier[opts] [ literal[string] ])]( identifier[jid] )=={}:
identifier[log] . identifier[warning] ( literal[string] )
keyword[yield] {}
keyword[raise] identifier[StopIteration] ()
keyword[except] identifier[Exception] keyword[as] identifier[exc] :
identifier[log] . identifier[warning] ( literal[string] , identifier[exc] , identifier[exc_info_on_loglevel] = identifier[logging] . identifier[DEBUG] )
identifier[last_time] = keyword[False]
keyword[if] identifier[self] . identifier[opts] [ literal[string] ]:
identifier[ret_iter] = identifier[self] . identifier[get_returns_no_block] ( literal[string] . identifier[format] ( identifier[jid] ), literal[string] )
keyword[else] :
identifier[ret_iter] = identifier[self] . identifier[get_returns_no_block] ( literal[string] . identifier[format] ( identifier[jid] ))
identifier[jinfo_iter] =[]
identifier[open_jids] = identifier[set] ()
identifier[timeout_at] = identifier[time] . identifier[time] ()+ identifier[timeout]
identifier[gather_syndic_wait] = identifier[time] . identifier[time] ()+ identifier[self] . identifier[opts] [ literal[string] ]
identifier[minions_running] = keyword[True]
identifier[log] . identifier[debug] (
literal[string] ,
identifier[jid] , identifier[minions] , identifier[datetime] . identifier[fromtimestamp] ( identifier[timeout_at] ). identifier[time] ()
)
keyword[while] keyword[True] :
keyword[for] identifier[raw] keyword[in] identifier[ret_iter] :
keyword[if] identifier[raw] keyword[is] keyword[None] :
keyword[break]
keyword[if] literal[string] keyword[in] identifier[raw] . identifier[get] ( literal[string] ,{}):
identifier[minions] . identifier[update] ( identifier[raw] [ literal[string] ][ literal[string] ])
keyword[if] literal[string] keyword[in] identifier[raw] . identifier[get] ( literal[string] ,{}):
identifier[missing] . identifier[update] ( identifier[raw] [ literal[string] ][ literal[string] ])
keyword[continue]
keyword[if] literal[string] keyword[not] keyword[in] identifier[raw] [ literal[string] ]:
keyword[continue]
keyword[if] identifier[kwargs] . identifier[get] ( literal[string] , keyword[False] ):
identifier[found] . identifier[add] ( identifier[raw] [ literal[string] ][ literal[string] ])
keyword[yield] identifier[raw]
keyword[else] :
identifier[found] . identifier[add] ( identifier[raw] [ literal[string] ][ literal[string] ])
identifier[ret] ={ identifier[raw] [ literal[string] ][ literal[string] ]:{ literal[string] : identifier[raw] [ literal[string] ][ literal[string] ]}}
keyword[if] literal[string] keyword[in] identifier[raw] [ literal[string] ]:
identifier[ret] [ identifier[raw] [ literal[string] ][ literal[string] ]][ literal[string] ]= identifier[raw] [ literal[string] ][ literal[string] ]
keyword[if] literal[string] keyword[in] identifier[raw] [ literal[string] ]:
identifier[ret] [ identifier[raw] [ literal[string] ][ literal[string] ]][ literal[string] ]= identifier[raw] [ literal[string] ][ literal[string] ]
keyword[if] literal[string] keyword[in] identifier[raw] [ literal[string] ]:
identifier[ret] [ identifier[raw] [ literal[string] ][ literal[string] ]][ literal[string] ]= identifier[raw] [ literal[string] ][ literal[string] ]
keyword[if] identifier[kwargs] . identifier[get] ( literal[string] , keyword[False] ):
identifier[ret] [ identifier[raw] [ literal[string] ][ literal[string] ]]. identifier[update] ( identifier[raw] [ literal[string] ])
identifier[log] . identifier[debug] ( literal[string] , identifier[jid] , identifier[raw] [ literal[string] ][ literal[string] ])
keyword[yield] identifier[ret]
keyword[if] identifier[len] ( identifier[found] . identifier[intersection] ( identifier[minions] ))>= identifier[len] ( identifier[minions] ) keyword[and] keyword[not] identifier[self] . identifier[opts] [ literal[string] ]:
identifier[log] . identifier[debug] ( literal[string] , identifier[jid] , identifier[found] )
keyword[break]
keyword[elif] identifier[len] ( identifier[found] . identifier[intersection] ( identifier[minions] ))>= identifier[len] ( identifier[minions] ) keyword[and] identifier[self] . identifier[opts] [ literal[string] ]:
keyword[if] identifier[len] ( identifier[found] )>= identifier[len] ( identifier[minions] ) keyword[and] identifier[len] ( identifier[minions] )> literal[int] keyword[and] identifier[time] . identifier[time] ()> identifier[gather_syndic_wait] :
keyword[break]
keyword[for] identifier[id_] keyword[in] identifier[minions] - identifier[found] :
keyword[if] identifier[id_] keyword[not] keyword[in] identifier[minion_timeouts] :
identifier[minion_timeouts] [ identifier[id_] ]= identifier[time] . identifier[time] ()+ identifier[timeout]
keyword[if] identifier[time] . identifier[time] ()> identifier[timeout_at] keyword[and] identifier[minions_running] :
identifier[jinfo] = identifier[self] . identifier[gather_job_info] ( identifier[jid] , identifier[list] ( identifier[minions] - identifier[found] ), literal[string] ,** identifier[kwargs] )
identifier[minions_running] = keyword[False]
keyword[if] literal[string] keyword[not] keyword[in] identifier[jinfo] :
identifier[jinfo_iter] =[]
keyword[else] :
identifier[jinfo_iter] = identifier[self] . identifier[get_returns_no_block] ( literal[string] . identifier[format] ( identifier[jinfo] [ literal[string] ]))
identifier[timeout_at] = identifier[time] . identifier[time] ()+ identifier[gather_job_timeout]
keyword[if] identifier[self] . identifier[opts] [ literal[string] ]:
identifier[timeout_at] += identifier[self] . identifier[opts] . identifier[get] ( literal[string] , literal[int] )
keyword[for] identifier[raw] keyword[in] identifier[jinfo_iter] :
keyword[if] identifier[raw] keyword[is] keyword[None] :
keyword[break]
keyword[try] :
keyword[if] identifier[raw] [ literal[string] ][ literal[string] ]> literal[int] :
identifier[log] . identifier[error] ( literal[string] , identifier[raw] [ literal[string] ][ literal[string] ])
identifier[minions] . identifier[remove] ( identifier[raw] [ literal[string] ][ literal[string] ])
keyword[break]
keyword[except] identifier[KeyError] keyword[as] identifier[exc] :
identifier[missing_key] = identifier[exc] . identifier[__str__] (). identifier[strip] ( literal[string] )
keyword[if] identifier[missing_key] == literal[string] :
identifier[log] . identifier[debug] ( literal[string] )
keyword[else] :
identifier[log] . identifier[debug] (
literal[string]
literal[string]
literal[string] , identifier[missing_key]
)
identifier[open_jids] . identifier[add] ( identifier[jinfo] [ literal[string] ])
keyword[if] literal[string] keyword[in] identifier[raw] . identifier[get] ( literal[string] ,{}):
identifier[minions] . identifier[update] ( identifier[raw] [ literal[string] ][ literal[string] ])
keyword[continue]
keyword[if] literal[string] keyword[in] identifier[raw] . identifier[get] ( literal[string] ,{}):
identifier[minions] . identifier[update] ( identifier[raw] [ literal[string] ])
keyword[continue]
keyword[if] literal[string] keyword[not] keyword[in] identifier[raw] . identifier[get] ( literal[string] ,{}):
keyword[continue]
keyword[if] identifier[raw] [ literal[string] ][ literal[string] ]=={}:
keyword[continue]
keyword[if] identifier[isinstance] ( identifier[raw] [ literal[string] ][ literal[string] ], identifier[six] . identifier[string_types] ):
identifier[log] . identifier[error] ( literal[string] , identifier[raw] )
keyword[continue]
keyword[if] literal[string] keyword[in] identifier[raw] [ literal[string] ][ literal[string] ] keyword[and] identifier[raw] [ literal[string] ][ literal[string] ][ literal[string] ]=={}:
keyword[continue]
keyword[if] identifier[raw] [ literal[string] ][ literal[string] ] keyword[not] keyword[in] identifier[minions] :
identifier[minions] . identifier[add] ( identifier[raw] [ literal[string] ][ literal[string] ])
identifier[minion_timeouts] [ identifier[raw] [ literal[string] ][ literal[string] ]]= identifier[time] . identifier[time] ()+ identifier[timeout]
identifier[minions_running] = keyword[True]
identifier[now] = identifier[time] . identifier[time] ()
identifier[done] =( identifier[now] > identifier[timeout_at] ) keyword[and] keyword[not] identifier[minions_running]
keyword[if] identifier[done] :
keyword[for] identifier[id_] keyword[in] identifier[minions] - identifier[found] :
keyword[if] identifier[now] < identifier[minion_timeouts] [ identifier[id_] ]:
identifier[done] = keyword[False]
keyword[break]
keyword[if] identifier[done] :
keyword[break]
keyword[if] identifier[block] :
identifier[time] . identifier[sleep] ( literal[int] )
keyword[else] :
keyword[yield]
keyword[if] identifier[open_jids] :
keyword[for] identifier[jid] keyword[in] identifier[open_jids] :
identifier[self] . identifier[event] . identifier[unsubscribe] ( identifier[jid] )
keyword[if] identifier[expect_minions] :
keyword[for] identifier[minion] keyword[in] identifier[list] (( identifier[minions] - identifier[found] )):
keyword[yield] { identifier[minion] :{ literal[string] : keyword[True] }}
identifier[missing] -= identifier[found]
keyword[if] identifier[missing] :
keyword[for] identifier[minion] keyword[in] identifier[missing] :
keyword[yield] { identifier[minion] :{ literal[string] : keyword[True] }} | def get_iter_returns(self, jid, minions, timeout=None, tgt='*', tgt_type='glob', expect_minions=False, block=True, **kwargs):
"""
Watch the event system and return job data as it comes in
:returns: all of the information for the JID
"""
if not isinstance(minions, set):
if isinstance(minions, six.string_types):
minions = set([minions]) # depends on [control=['if'], data=[]]
elif isinstance(minions, (list, tuple)):
minions = set(list(minions)) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if timeout is None:
timeout = self.opts['timeout'] # depends on [control=['if'], data=['timeout']]
gather_job_timeout = int(kwargs.get('gather_job_timeout', self.opts['gather_job_timeout']))
start = int(time.time())
# timeouts per minion, id_ -> timeout time
minion_timeouts = {}
found = set()
missing = set()
# Check to see if the jid is real, if not return the empty dict
try:
if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}:
log.warning('jid does not exist')
yield {}
# stop the iteration, since the jid is invalid
raise StopIteration() # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except Exception as exc:
log.warning('Returner unavailable: %s', exc, exc_info_on_loglevel=logging.DEBUG) # depends on [control=['except'], data=['exc']]
# Wait for the hosts to check in
last_time = False
# iterator for this job's return
if self.opts['order_masters']:
# If we are a MoM, we need to gather expected minions from downstreams masters.
ret_iter = self.get_returns_no_block('(salt/job|syndic/.*)/{0}'.format(jid), 'regex') # depends on [control=['if'], data=[]]
else:
ret_iter = self.get_returns_no_block('salt/job/{0}'.format(jid))
# iterator for the info of this job
jinfo_iter = []
# open event jids that need to be un-subscribed from later
open_jids = set()
timeout_at = time.time() + timeout
gather_syndic_wait = time.time() + self.opts['syndic_wait']
# are there still minions running the job out there
# start as True so that we ping at least once
minions_running = True
log.debug('get_iter_returns for jid %s sent to %s will timeout at %s', jid, minions, datetime.fromtimestamp(timeout_at).time())
while True:
# Process events until timeout is reached or all minions have returned
for raw in ret_iter:
# if we got None, then there were no events
if raw is None:
break # depends on [control=['if'], data=[]]
if 'minions' in raw.get('data', {}):
minions.update(raw['data']['minions'])
if 'missing' in raw.get('data', {}):
missing.update(raw['data']['missing']) # depends on [control=['if'], data=[]]
continue # depends on [control=['if'], data=[]]
if 'return' not in raw['data']:
continue # depends on [control=['if'], data=[]]
if kwargs.get('raw', False):
found.add(raw['data']['id'])
yield raw # depends on [control=['if'], data=[]]
else:
found.add(raw['data']['id'])
ret = {raw['data']['id']: {'ret': raw['data']['return']}}
if 'out' in raw['data']:
ret[raw['data']['id']]['out'] = raw['data']['out'] # depends on [control=['if'], data=[]]
if 'retcode' in raw['data']:
ret[raw['data']['id']]['retcode'] = raw['data']['retcode'] # depends on [control=['if'], data=[]]
if 'jid' in raw['data']:
ret[raw['data']['id']]['jid'] = raw['data']['jid'] # depends on [control=['if'], data=[]]
if kwargs.get('_cmd_meta', False):
ret[raw['data']['id']].update(raw['data']) # depends on [control=['if'], data=[]]
log.debug('jid %s return from %s', jid, raw['data']['id'])
yield ret # depends on [control=['for'], data=['raw']]
# if we have all of the returns (and we aren't a syndic), no need for anything fancy
if len(found.intersection(minions)) >= len(minions) and (not self.opts['order_masters']):
# All minions have returned, break out of the loop
log.debug('jid %s found all minions %s', jid, found)
break # depends on [control=['if'], data=[]]
elif len(found.intersection(minions)) >= len(minions) and self.opts['order_masters']:
if len(found) >= len(minions) and len(minions) > 0 and (time.time() > gather_syndic_wait):
# There were some minions to find and we found them
# However, this does not imply that *all* masters have yet responded with expected minion lists.
# Therefore, continue to wait up to the syndic_wait period (calculated in gather_syndic_wait) to see
# if additional lower-level masters deliver their lists of expected
# minions.
break # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
# If we get here we may not have gathered the minion list yet. Keep waiting
# for all lower-level masters to respond with their minion lists
# let start the timeouts for all remaining minions
for id_ in minions - found:
# if we have a new minion in the list, make sure it has a timeout
if id_ not in minion_timeouts:
minion_timeouts[id_] = time.time() + timeout # depends on [control=['if'], data=['id_', 'minion_timeouts']] # depends on [control=['for'], data=['id_']]
# if the jinfo has timed out and some minions are still running the job
# re-do the ping
if time.time() > timeout_at and minions_running:
# since this is a new ping, no one has responded yet
jinfo = self.gather_job_info(jid, list(minions - found), 'list', **kwargs)
minions_running = False
# if we weren't assigned any jid that means the master thinks
# we have nothing to send
if 'jid' not in jinfo:
jinfo_iter = [] # depends on [control=['if'], data=[]]
else:
jinfo_iter = self.get_returns_no_block('salt/job/{0}'.format(jinfo['jid']))
timeout_at = time.time() + gather_job_timeout
# if you are a syndic, wait a little longer
if self.opts['order_masters']:
timeout_at += self.opts.get('syndic_wait', 1) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
# check for minions that are running the job still
for raw in jinfo_iter:
# if there are no more events, lets stop waiting for the jinfo
if raw is None:
break # depends on [control=['if'], data=[]]
try:
if raw['data']['retcode'] > 0:
log.error('saltutil returning errors on minion %s', raw['data']['id'])
minions.remove(raw['data']['id'])
break # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except KeyError as exc:
# This is a safe pass. We're just using the try/except to
# avoid having to deep-check for keys.
missing_key = exc.__str__().strip('\'"')
if missing_key == 'retcode':
log.debug('retcode missing from client return') # depends on [control=['if'], data=[]]
else:
log.debug("Passing on saltutil error. Key '%s' missing from client return. This may be an error in the client.", missing_key) # depends on [control=['except'], data=['exc']]
# Keep track of the jid events to unsubscribe from later
open_jids.add(jinfo['jid'])
# TODO: move to a library??
if 'minions' in raw.get('data', {}):
minions.update(raw['data']['minions'])
continue # depends on [control=['if'], data=[]]
if 'syndic' in raw.get('data', {}):
minions.update(raw['syndic'])
continue # depends on [control=['if'], data=[]]
if 'return' not in raw.get('data', {}):
continue # depends on [control=['if'], data=[]]
# if the job isn't running there anymore... don't count
if raw['data']['return'] == {}:
continue # depends on [control=['if'], data=[]]
# if the minion throws an exception containing the word "return"
# the master will try to handle the string as a dict in the next
# step. Check if we have a string, log the issue and continue.
if isinstance(raw['data']['return'], six.string_types):
log.error('unexpected return from minion: %s', raw)
continue # depends on [control=['if'], data=[]]
if 'return' in raw['data']['return'] and raw['data']['return']['return'] == {}:
continue # depends on [control=['if'], data=[]]
# if we didn't originally target the minion, lets add it to the list
if raw['data']['id'] not in minions:
minions.add(raw['data']['id']) # depends on [control=['if'], data=['minions']]
# update this minion's timeout, as long as the job is still running
minion_timeouts[raw['data']['id']] = time.time() + timeout
# a minion returned, so we know its running somewhere
minions_running = True # depends on [control=['for'], data=['raw']]
# if we have hit gather_job_timeout (after firing the job) AND
# if we have hit all minion timeouts, lets call it
now = time.time()
# if we have finished waiting, and no minions are running the job
# then we need to see if each minion has timedout
done = now > timeout_at and (not minions_running)
if done:
# if all minions have timeod out
for id_ in minions - found:
if now < minion_timeouts[id_]:
done = False
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['id_']] # depends on [control=['if'], data=[]]
if done:
break # depends on [control=['if'], data=[]]
# don't spin
if block:
time.sleep(0.01) # depends on [control=['if'], data=[]]
else:
yield # depends on [control=['while'], data=[]]
# If there are any remaining open events, clean them up.
if open_jids:
for jid in open_jids:
self.event.unsubscribe(jid) # depends on [control=['for'], data=['jid']] # depends on [control=['if'], data=[]]
if expect_minions:
for minion in list(minions - found):
yield {minion: {'failed': True}} # depends on [control=['for'], data=['minion']] # depends on [control=['if'], data=[]]
# Filter out any minions marked as missing for which we received
# returns (prevents false events sent due to higher-level masters not
# knowing about lower-level minions).
missing -= found
# Report on missing minions
if missing:
for minion in missing:
yield {minion: {'failed': True}} # depends on [control=['for'], data=['minion']] # depends on [control=['if'], data=[]] |
def parse_genelists(self):
    """Parse the input gene list into a newline-joined query string.

    Accepts ``self.gene_list`` as a plain list, a pandas DataFrame or
    Series, or a path to a text file with one entry per line.

    Side effects: sets ``self._isezid`` (True when every parsed entry is
    recognised by ``self._is_entrez_id``) and ``self._gls`` (a set of ints
    when all entries are Entrez IDs, otherwise the raw parsed list).

    Returns
    -------
    str
        The parsed gene entries joined by newlines.
    """
    if isinstance(self.gene_list, list):
        genes = self.gene_list
    elif isinstance(self.gene_list, pd.DataFrame):
        # BED-like input: join the first three columns with tabs
        if self.gene_list.shape[1] >= 3:
            genes = self.gene_list.iloc[:, :3].apply(
                lambda x: "\t".join([str(i) for i in x]), axis=1).tolist()
        # gene-plus-weight input: join the two columns with a comma
        elif self.gene_list.shape[1] == 2:
            genes = self.gene_list.apply(
                lambda x: ",".join([str(i) for i in x]), axis=1).tolist()
        else:
            genes = self.gene_list.squeeze().tolist()
    elif isinstance(self.gene_list, pd.Series):
        genes = self.gene_list.squeeze().tolist()
    else:
        # treat the input as a path: gene list, bed file, or weighted list
        genes = []
        with open(self.gene_list) as f:
            for gene in f:
                genes.append(gene.strip())
    self._isezid = all(map(self._is_entrez_id, genes))
    if self._isezid:
        # BUG FIX: previously ``set(map(int, self._gls))`` which read
        # self._gls before it was ever assigned; convert the freshly
        # parsed ``genes`` instead.
        self._gls = set(map(int, genes))
    else:
        self._gls = genes
    return '\n'.join(genes)
constant[parse gene list]
if call[name[isinstance], parameter[name[self].gene_list, name[list]]] begin[:]
variable[genes] assign[=] name[self].gene_list
name[self]._isezid assign[=] call[name[all], parameter[call[name[map], parameter[name[self]._is_entrez_id, name[genes]]]]]
if name[self]._isezid begin[:]
name[self]._gls assign[=] call[name[set], parameter[call[name[map], parameter[name[int], name[self]._gls]]]]
return[call[constant[
].join, parameter[name[genes]]]] | keyword[def] identifier[parse_genelists] ( identifier[self] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[self] . identifier[gene_list] , identifier[list] ):
identifier[genes] = identifier[self] . identifier[gene_list]
keyword[elif] identifier[isinstance] ( identifier[self] . identifier[gene_list] , identifier[pd] . identifier[DataFrame] ):
keyword[if] identifier[self] . identifier[gene_list] . identifier[shape] [ literal[int] ]>= literal[int] :
identifier[genes] = identifier[self] . identifier[gene_list] . identifier[iloc] [:,: literal[int] ]. identifier[apply] ( keyword[lambda] identifier[x] : literal[string] . identifier[join] ([ identifier[str] ( identifier[i] ) keyword[for] identifier[i] keyword[in] identifier[x] ]), identifier[axis] = literal[int] ). identifier[tolist] ()
keyword[elif] identifier[self] . identifier[gene_list] . identifier[shape] [ literal[int] ]== literal[int] :
identifier[genes] = identifier[self] . identifier[gene_list] . identifier[apply] ( keyword[lambda] identifier[x] : literal[string] . identifier[join] ([ identifier[str] ( identifier[i] ) keyword[for] identifier[i] keyword[in] identifier[x] ]), identifier[axis] = literal[int] ). identifier[tolist] ()
keyword[else] :
identifier[genes] = identifier[self] . identifier[gene_list] . identifier[squeeze] (). identifier[tolist] ()
keyword[elif] identifier[isinstance] ( identifier[self] . identifier[gene_list] , identifier[pd] . identifier[Series] ):
identifier[genes] = identifier[self] . identifier[gene_list] . identifier[squeeze] (). identifier[tolist] ()
keyword[else] :
identifier[genes] =[]
keyword[with] identifier[open] ( identifier[self] . identifier[gene_list] ) keyword[as] identifier[f] :
keyword[for] identifier[gene] keyword[in] identifier[f] :
identifier[genes] . identifier[append] ( identifier[gene] . identifier[strip] ())
identifier[self] . identifier[_isezid] = identifier[all] ( identifier[map] ( identifier[self] . identifier[_is_entrez_id] , identifier[genes] ))
keyword[if] identifier[self] . identifier[_isezid] :
identifier[self] . identifier[_gls] = identifier[set] ( identifier[map] ( identifier[int] , identifier[self] . identifier[_gls] ))
keyword[else] :
identifier[self] . identifier[_gls] = identifier[genes]
keyword[return] literal[string] . identifier[join] ( identifier[genes] ) | def parse_genelists(self):
"""parse gene list"""
if isinstance(self.gene_list, list):
genes = self.gene_list # depends on [control=['if'], data=[]]
elif isinstance(self.gene_list, pd.DataFrame):
# input type is bed file
if self.gene_list.shape[1] >= 3:
genes = self.gene_list.iloc[:, :3].apply(lambda x: '\t'.join([str(i) for i in x]), axis=1).tolist() # depends on [control=['if'], data=[]]
# input type with weight values
elif self.gene_list.shape[1] == 2:
genes = self.gene_list.apply(lambda x: ','.join([str(i) for i in x]), axis=1).tolist() # depends on [control=['if'], data=[]]
else:
genes = self.gene_list.squeeze().tolist() # depends on [control=['if'], data=[]]
elif isinstance(self.gene_list, pd.Series):
genes = self.gene_list.squeeze().tolist() # depends on [control=['if'], data=[]]
else:
# get gene lists or bed file, or gene list with weighted values.
genes = []
with open(self.gene_list) as f:
for gene in f:
genes.append(gene.strip()) # depends on [control=['for'], data=['gene']] # depends on [control=['with'], data=['f']]
self._isezid = all(map(self._is_entrez_id, genes))
if self._isezid:
self._gls = set(map(int, self._gls)) # depends on [control=['if'], data=[]]
else:
self._gls = genes
return '\n'.join(genes) |
def agg(self, *exprs):
    """Compute aggregates and returns the result as a :class:`DataFrame`.

    The available aggregate functions can be:

    1. built-in aggregation functions, such as `avg`, `max`, `min`, `sum`, `count`
    2. group aggregate pandas UDFs, created with :func:`pyspark.sql.functions.pandas_udf`

       .. note:: There is no partial aggregation with group aggregate UDFs, i.e.,
           a full shuffle is required. Also, all the data of a group will be loaded
           into memory, so the user should be aware of the potential OOM risk if
           data is skewed and certain groups are too large to fit in memory.

       .. seealso:: :func:`pyspark.sql.functions.pandas_udf`

    If ``exprs`` is a single :class:`dict` mapping from string to string, then the
    key is the column to perform aggregation on, and the value is the aggregate
    function. Alternatively, ``exprs`` can also be a list of aggregate
    :class:`Column` expressions.

    .. note:: Built-in aggregation functions and group aggregate pandas UDFs
        cannot be mixed in a single call to this function.

    :param exprs: a dict mapping from column name (string) to aggregate functions
        (string), or a list of :class:`Column`.

    >>> gdf = df.groupBy(df.name)
    >>> sorted(gdf.agg({"*": "count"}).collect())
    [Row(name=u'Alice', count(1)=1), Row(name=u'Bob', count(1)=1)]

    >>> from pyspark.sql import functions as F
    >>> sorted(gdf.agg(F.min(df.age)).collect())
    [Row(name=u'Alice', min(age)=2), Row(name=u'Bob', min(age)=5)]
    """
    assert exprs, "exprs should not be empty"

    # Single dict argument: {column name -> aggregate function name}.
    if len(exprs) == 1 and isinstance(exprs[0], dict):
        return DataFrame(self._jgd.agg(exprs[0]), self.sql_ctx)

    # Otherwise every argument must be a Column expression.
    assert all(isinstance(c, Column) for c in exprs), "all exprs should be Column"
    first, rest = exprs[0], exprs[1:]
    jcols = _to_seq(self.sql_ctx._sc, [c._jc for c in rest])
    return DataFrame(self._jgd.agg(first._jc, jcols), self.sql_ctx)
constant[Compute aggregates and returns the result as a :class:`DataFrame`.
The available aggregate functions can be:
1. built-in aggregation functions, such as `avg`, `max`, `min`, `sum`, `count`
2. group aggregate pandas UDFs, created with :func:`pyspark.sql.functions.pandas_udf`
.. note:: There is no partial aggregation with group aggregate UDFs, i.e.,
a full shuffle is required. Also, all the data of a group will be loaded into
memory, so the user should be aware of the potential OOM risk if data is skewed
and certain groups are too large to fit in memory.
.. seealso:: :func:`pyspark.sql.functions.pandas_udf`
If ``exprs`` is a single :class:`dict` mapping from string to string, then the key
is the column to perform aggregation on, and the value is the aggregate function.
Alternatively, ``exprs`` can also be a list of aggregate :class:`Column` expressions.
.. note:: Built-in aggregation functions and group aggregate pandas UDFs cannot be mixed
in a single call to this function.
:param exprs: a dict mapping from column name (string) to aggregate functions (string),
or a list of :class:`Column`.
>>> gdf = df.groupBy(df.name)
>>> sorted(gdf.agg({"*": "count"}).collect())
[Row(name=u'Alice', count(1)=1), Row(name=u'Bob', count(1)=1)]
>>> from pyspark.sql import functions as F
>>> sorted(gdf.agg(F.min(df.age)).collect())
[Row(name=u'Alice', min(age)=2), Row(name=u'Bob', min(age)=5)]
>>> from pyspark.sql.functions import pandas_udf, PandasUDFType
>>> @pandas_udf('int', PandasUDFType.GROUPED_AGG) # doctest: +SKIP
... def min_udf(v):
... return v.min()
>>> sorted(gdf.agg(min_udf(df.age)).collect()) # doctest: +SKIP
[Row(name=u'Alice', min_udf(age)=2), Row(name=u'Bob', min_udf(age)=5)]
]
assert[name[exprs]]
if <ast.BoolOp object at 0x7da20e954d60> begin[:]
variable[jdf] assign[=] call[name[self]._jgd.agg, parameter[call[name[exprs]][constant[0]]]]
return[call[name[DataFrame], parameter[name[jdf], name[self].sql_ctx]]] | keyword[def] identifier[agg] ( identifier[self] ,* identifier[exprs] ):
literal[string]
keyword[assert] identifier[exprs] , literal[string]
keyword[if] identifier[len] ( identifier[exprs] )== literal[int] keyword[and] identifier[isinstance] ( identifier[exprs] [ literal[int] ], identifier[dict] ):
identifier[jdf] = identifier[self] . identifier[_jgd] . identifier[agg] ( identifier[exprs] [ literal[int] ])
keyword[else] :
keyword[assert] identifier[all] ( identifier[isinstance] ( identifier[c] , identifier[Column] ) keyword[for] identifier[c] keyword[in] identifier[exprs] ), literal[string]
identifier[jdf] = identifier[self] . identifier[_jgd] . identifier[agg] ( identifier[exprs] [ literal[int] ]. identifier[_jc] ,
identifier[_to_seq] ( identifier[self] . identifier[sql_ctx] . identifier[_sc] ,[ identifier[c] . identifier[_jc] keyword[for] identifier[c] keyword[in] identifier[exprs] [ literal[int] :]]))
keyword[return] identifier[DataFrame] ( identifier[jdf] , identifier[self] . identifier[sql_ctx] ) | def agg(self, *exprs):
"""Compute aggregates and returns the result as a :class:`DataFrame`.
The available aggregate functions can be:
1. built-in aggregation functions, such as `avg`, `max`, `min`, `sum`, `count`
2. group aggregate pandas UDFs, created with :func:`pyspark.sql.functions.pandas_udf`
.. note:: There is no partial aggregation with group aggregate UDFs, i.e.,
a full shuffle is required. Also, all the data of a group will be loaded into
memory, so the user should be aware of the potential OOM risk if data is skewed
and certain groups are too large to fit in memory.
.. seealso:: :func:`pyspark.sql.functions.pandas_udf`
If ``exprs`` is a single :class:`dict` mapping from string to string, then the key
is the column to perform aggregation on, and the value is the aggregate function.
Alternatively, ``exprs`` can also be a list of aggregate :class:`Column` expressions.
.. note:: Built-in aggregation functions and group aggregate pandas UDFs cannot be mixed
in a single call to this function.
:param exprs: a dict mapping from column name (string) to aggregate functions (string),
or a list of :class:`Column`.
>>> gdf = df.groupBy(df.name)
>>> sorted(gdf.agg({"*": "count"}).collect())
[Row(name=u'Alice', count(1)=1), Row(name=u'Bob', count(1)=1)]
>>> from pyspark.sql import functions as F
>>> sorted(gdf.agg(F.min(df.age)).collect())
[Row(name=u'Alice', min(age)=2), Row(name=u'Bob', min(age)=5)]
>>> from pyspark.sql.functions import pandas_udf, PandasUDFType
>>> @pandas_udf('int', PandasUDFType.GROUPED_AGG) # doctest: +SKIP
... def min_udf(v):
... return v.min()
>>> sorted(gdf.agg(min_udf(df.age)).collect()) # doctest: +SKIP
[Row(name=u'Alice', min_udf(age)=2), Row(name=u'Bob', min_udf(age)=5)]
"""
assert exprs, 'exprs should not be empty'
if len(exprs) == 1 and isinstance(exprs[0], dict):
jdf = self._jgd.agg(exprs[0]) # depends on [control=['if'], data=[]]
else:
# Columns
assert all((isinstance(c, Column) for c in exprs)), 'all exprs should be Column'
jdf = self._jgd.agg(exprs[0]._jc, _to_seq(self.sql_ctx._sc, [c._jc for c in exprs[1:]]))
return DataFrame(jdf, self.sql_ctx) |
def float_to_fp(signed, n_bits, n_frac):
"""Return a function to convert a floating point value to a fixed point
value.
For example, a function to convert a float to a signed fractional
representation with 8 bits overall and 4 fractional bits (S3.4) can be
constructed and used with::
>>> s34 = float_to_fp(signed=True, n_bits=8, n_frac=4)
>>> hex(int(s34(0.5)))
'0x8'
The fixed point conversion is saturating::
>>> q34 = float_to_fp(False, 8, 4) # Unsigned 4.4
>>> hex(int(q34(-0.5)))
'0x0'
>>> hex(int(q34(15.0)))
'0xf0'
>>> hex(int(q34(16.0)))
'0xff'
Parameters
----------
signed : bool
Whether the values that are to be converted should be signed, or
clipped at zero.
>>> hex(int(float_to_fp(True, 8, 4)(-0.5))) # Signed
'-0x8'
>>> hex(int(float_to_fp(False, 8, 4)(-0.5))) # Unsigned
'0x0'
n_bits : int
Total number of bits in the fixed-point representation (including sign
bit and fractional bits).
n_frac : int
Number of fractional bits in the fixed-point representation.
"""
# Calculate the maximum and minimum values
if signed:
max_v = (1 << (n_bits - 1)) - 1
min_v = -max_v - 1
else:
min_v = 0
max_v = (1 << n_bits) - 1
# Compute the scale
scale = 2.0**n_frac
def bitsk(value):
"""Convert a floating point value to a fixed point value.
Parameters
----------
value : float
The value to convert.
"""
int_val = int(scale * value)
return max((min(max_v, int_val), min_v))
return bitsk | def function[float_to_fp, parameter[signed, n_bits, n_frac]]:
constant[Return a function to convert a floating point value to a fixed point
value.
For example, a function to convert a float to a signed fractional
representation with 8 bits overall and 4 fractional bits (S3.4) can be
constructed and used with::
>>> s34 = float_to_fp(signed=True, n_bits=8, n_frac=4)
>>> hex(int(s34(0.5)))
'0x8'
The fixed point conversion is saturating::
>>> q34 = float_to_fp(False, 8, 4) # Unsigned 4.4
>>> hex(int(q34(-0.5)))
'0x0'
>>> hex(int(q34(15.0)))
'0xf0'
>>> hex(int(q34(16.0)))
'0xff'
Parameters
----------
signed : bool
Whether the values that are to be converted should be signed, or
clipped at zero.
>>> hex(int(float_to_fp(True, 8, 4)(-0.5))) # Signed
'-0x8'
>>> hex(int(float_to_fp(False, 8, 4)(-0.5))) # Unsigned
'0x0'
n_bits : int
Total number of bits in the fixed-point representation (including sign
bit and fractional bits).
n_frac : int
Number of fractional bits in the fixed-point representation.
]
if name[signed] begin[:]
variable[max_v] assign[=] binary_operation[binary_operation[constant[1] <ast.LShift object at 0x7da2590d69e0> binary_operation[name[n_bits] - constant[1]]] - constant[1]]
variable[min_v] assign[=] binary_operation[<ast.UnaryOp object at 0x7da1b1816ef0> - constant[1]]
variable[scale] assign[=] binary_operation[constant[2.0] ** name[n_frac]]
def function[bitsk, parameter[value]]:
constant[Convert a floating point value to a fixed point value.
Parameters
----------
value : float
The value to convert.
]
variable[int_val] assign[=] call[name[int], parameter[binary_operation[name[scale] * name[value]]]]
return[call[name[max], parameter[tuple[[<ast.Call object at 0x7da1b18150c0>, <ast.Name object at 0x7da1b1815840>]]]]]
return[name[bitsk]] | keyword[def] identifier[float_to_fp] ( identifier[signed] , identifier[n_bits] , identifier[n_frac] ):
literal[string]
keyword[if] identifier[signed] :
identifier[max_v] =( literal[int] <<( identifier[n_bits] - literal[int] ))- literal[int]
identifier[min_v] =- identifier[max_v] - literal[int]
keyword[else] :
identifier[min_v] = literal[int]
identifier[max_v] =( literal[int] << identifier[n_bits] )- literal[int]
identifier[scale] = literal[int] ** identifier[n_frac]
keyword[def] identifier[bitsk] ( identifier[value] ):
literal[string]
identifier[int_val] = identifier[int] ( identifier[scale] * identifier[value] )
keyword[return] identifier[max] (( identifier[min] ( identifier[max_v] , identifier[int_val] ), identifier[min_v] ))
keyword[return] identifier[bitsk] | def float_to_fp(signed, n_bits, n_frac):
"""Return a function to convert a floating point value to a fixed point
value.
For example, a function to convert a float to a signed fractional
representation with 8 bits overall and 4 fractional bits (S3.4) can be
constructed and used with::
>>> s34 = float_to_fp(signed=True, n_bits=8, n_frac=4)
>>> hex(int(s34(0.5)))
'0x8'
The fixed point conversion is saturating::
>>> q34 = float_to_fp(False, 8, 4) # Unsigned 4.4
>>> hex(int(q34(-0.5)))
'0x0'
>>> hex(int(q34(15.0)))
'0xf0'
>>> hex(int(q34(16.0)))
'0xff'
Parameters
----------
signed : bool
Whether the values that are to be converted should be signed, or
clipped at zero.
>>> hex(int(float_to_fp(True, 8, 4)(-0.5))) # Signed
'-0x8'
>>> hex(int(float_to_fp(False, 8, 4)(-0.5))) # Unsigned
'0x0'
n_bits : int
Total number of bits in the fixed-point representation (including sign
bit and fractional bits).
n_frac : int
Number of fractional bits in the fixed-point representation.
"""
# Calculate the maximum and minimum values
if signed:
max_v = (1 << n_bits - 1) - 1
min_v = -max_v - 1 # depends on [control=['if'], data=[]]
else:
min_v = 0
max_v = (1 << n_bits) - 1
# Compute the scale
scale = 2.0 ** n_frac
def bitsk(value):
"""Convert a floating point value to a fixed point value.
Parameters
----------
value : float
The value to convert.
"""
int_val = int(scale * value)
return max((min(max_v, int_val), min_v))
return bitsk |
def clean_params(self):
    """Retrieves the parameter OrderedDict without the context or self parameters.

    Useful for inspecting signature.
    """
    params = self.params.copy()
    if self.cog is not None:
        # bound to a cog: the leading parameter is ``self``
        params.popitem(last=False)
    try:
        # the next (or first) parameter is the invocation context
        params.popitem(last=False)
    except Exception:
        raise ValueError('Missing context parameter') from None
    return params
constant[Retrieves the parameter OrderedDict without the context or self parameters.
Useful for inspecting signature.
]
variable[result] assign[=] call[name[self].params.copy, parameter[]]
if compare[name[self].cog is_not constant[None]] begin[:]
call[name[result].popitem, parameter[]]
<ast.Try object at 0x7da1b1ff3af0>
return[name[result]] | keyword[def] identifier[clean_params] ( identifier[self] ):
literal[string]
identifier[result] = identifier[self] . identifier[params] . identifier[copy] ()
keyword[if] identifier[self] . identifier[cog] keyword[is] keyword[not] keyword[None] :
identifier[result] . identifier[popitem] ( identifier[last] = keyword[False] )
keyword[try] :
identifier[result] . identifier[popitem] ( identifier[last] = keyword[False] )
keyword[except] identifier[Exception] :
keyword[raise] identifier[ValueError] ( literal[string] ) keyword[from] keyword[None]
keyword[return] identifier[result] | def clean_params(self):
"""Retrieves the parameter OrderedDict without the context or self parameters.
Useful for inspecting signature.
"""
result = self.params.copy()
if self.cog is not None:
# first parameter is self
result.popitem(last=False) # depends on [control=['if'], data=[]]
try:
# first/second parameter is context
result.popitem(last=False) # depends on [control=['try'], data=[]]
except Exception:
raise ValueError('Missing context parameter') from None # depends on [control=['except'], data=[]]
return result |
def chain_absent(name, table='filter', family='ipv4'):
    '''
    .. versionadded:: 2014.7.0

    Verify the chain is absent.

    family
        Networking family, either ipv4 or ipv6
    '''
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}

    # Nothing to do when the chain is already gone.
    if not __salt__['nftables.check_chain'](table, name, family):
        ret['result'] = True
        ret['comment'] = ('nftables {0} chain is already absent in {1} table for {2}'
                          .format(name, table, family))
        return ret

    # Flush the chain's rules before it can be deleted.
    flush_chain = __salt__['nftables.flush'](table, name, family)
    if not flush_chain:
        ret['result'] = False
        ret['comment'] = 'Failed to flush {0} chain in {1} table: {2} for {3}'.format(
            name,
            table,
            flush_chain.strip(),
            family
        )
        return ret

    command = __salt__['nftables.delete_chain'](table, name, family)
    if command is True:
        ret['changes'] = {'locale': name}
        ret['result'] = True
        ret['comment'] = ('nftables {0} chain in {1} table delete success for {2}'
                          .format(name, table, family))
    else:
        ret['result'] = False
        ret['comment'] = ('Failed to delete {0} chain in {1} table: {2} for {3}'
                          .format(name, table, command.strip(), family))
    return ret
return ret | def function[chain_absent, parameter[name, table, family]]:
constant[
.. versionadded:: 2014.7.0
Verify the chain is absent.
family
Networking family, either ipv4 or ipv6
]
variable[ret] assign[=] dictionary[[<ast.Constant object at 0x7da1b21a3a30>, <ast.Constant object at 0x7da1b21a3100>, <ast.Constant object at 0x7da1b21a31c0>, <ast.Constant object at 0x7da1b21a06a0>], [<ast.Name object at 0x7da1b21a2d10>, <ast.Dict object at 0x7da1b21a3190>, <ast.Constant object at 0x7da1b21a32e0>, <ast.Constant object at 0x7da1b21a0190>]]
variable[chain_check] assign[=] call[call[name[__salt__]][constant[nftables.check_chain]], parameter[name[table], name[name], name[family]]]
if <ast.UnaryOp object at 0x7da1b21a3850> begin[:]
call[name[ret]][constant[result]] assign[=] constant[True]
call[name[ret]][constant[comment]] assign[=] call[constant[nftables {0} chain is already absent in {1} table for {2}].format, parameter[name[name], name[table], name[family]]]
return[name[ret]]
variable[flush_chain] assign[=] call[call[name[__salt__]][constant[nftables.flush]], parameter[name[table], name[name], name[family]]]
if name[flush_chain] begin[:]
variable[command] assign[=] call[call[name[__salt__]][constant[nftables.delete_chain]], parameter[name[table], name[name], name[family]]]
if compare[name[command] is constant[True]] begin[:]
call[name[ret]][constant[changes]] assign[=] dictionary[[<ast.Constant object at 0x7da1b21a3e20>], [<ast.Name object at 0x7da1b21a0f70>]]
call[name[ret]][constant[result]] assign[=] constant[True]
call[name[ret]][constant[comment]] assign[=] call[constant[nftables {0} chain in {1} table delete success for {2}].format, parameter[name[name], name[table], name[family]]]
return[name[ret]] | keyword[def] identifier[chain_absent] ( identifier[name] , identifier[table] = literal[string] , identifier[family] = literal[string] ):
literal[string]
identifier[ret] ={ literal[string] : identifier[name] ,
literal[string] :{},
literal[string] : keyword[None] ,
literal[string] : literal[string] }
identifier[chain_check] = identifier[__salt__] [ literal[string] ]( identifier[table] , identifier[name] , identifier[family] )
keyword[if] keyword[not] identifier[chain_check] :
identifier[ret] [ literal[string] ]= keyword[True]
identifier[ret] [ literal[string] ]=( literal[string]
. identifier[format] ( identifier[name] , identifier[table] , identifier[family] ))
keyword[return] identifier[ret]
identifier[flush_chain] = identifier[__salt__] [ literal[string] ]( identifier[table] , identifier[name] , identifier[family] )
keyword[if] identifier[flush_chain] :
identifier[command] = identifier[__salt__] [ literal[string] ]( identifier[table] , identifier[name] , identifier[family] )
keyword[if] identifier[command] keyword[is] keyword[True] :
identifier[ret] [ literal[string] ]={ literal[string] : identifier[name] }
identifier[ret] [ literal[string] ]= keyword[True]
identifier[ret] [ literal[string] ]=( literal[string]
. identifier[format] ( identifier[name] , identifier[table] , identifier[family] ))
keyword[else] :
identifier[ret] [ literal[string] ]= keyword[False]
identifier[ret] [ literal[string] ]=( literal[string]
. identifier[format] ( identifier[name] , identifier[table] , identifier[command] . identifier[strip] (), identifier[family] ))
keyword[else] :
identifier[ret] [ literal[string] ]= keyword[False]
identifier[ret] [ literal[string] ]= literal[string] . identifier[format] (
identifier[name] ,
identifier[table] ,
identifier[flush_chain] . identifier[strip] (),
identifier[family]
)
keyword[return] identifier[ret] | def chain_absent(name, table='filter', family='ipv4'):
"""
.. versionadded:: 2014.7.0
Verify the chain is absent.
family
Networking family, either ipv4 or ipv6
"""
ret = {'name': name, 'changes': {}, 'result': None, 'comment': ''}
chain_check = __salt__['nftables.check_chain'](table, name, family)
if not chain_check:
ret['result'] = True
ret['comment'] = 'nftables {0} chain is already absent in {1} table for {2}'.format(name, table, family)
return ret # depends on [control=['if'], data=[]]
flush_chain = __salt__['nftables.flush'](table, name, family)
if flush_chain:
command = __salt__['nftables.delete_chain'](table, name, family)
if command is True:
ret['changes'] = {'locale': name}
ret['result'] = True
ret['comment'] = 'nftables {0} chain in {1} table delete success for {2}'.format(name, table, family) # depends on [control=['if'], data=[]]
else:
ret['result'] = False
ret['comment'] = 'Failed to delete {0} chain in {1} table: {2} for {3}'.format(name, table, command.strip(), family) # depends on [control=['if'], data=[]]
else:
ret['result'] = False
ret['comment'] = 'Failed to flush {0} chain in {1} table: {2} for {3}'.format(name, table, flush_chain.strip(), family)
return ret |
def get_repos(self, since=github.GithubObject.NotSet):
    """
    :calls: `GET /repositories <http://developer.github.com/v3/repos/#list-all-public-repositories>`_
    :param since: integer
    :rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Repository.Repository`
    """
    assert since is github.GithubObject.NotSet or isinstance(since, (int, long)), since
    # Only forward "since" when the caller actually supplied it.
    if since is not github.GithubObject.NotSet:
        params = {"since": since}
    else:
        params = {}
    return github.PaginatedList.PaginatedList(
        github.Repository.Repository,
        self.__requester,
        "/repositories",
        params
    )
) | def function[get_repos, parameter[self, since]]:
constant[
:calls: `GET /repositories <http://developer.github.com/v3/repos/#list-all-public-repositories>`_
:param since: integer
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Repository.Repository`
]
assert[<ast.BoolOp object at 0x7da1b217c370>]
variable[url_parameters] assign[=] call[name[dict], parameter[]]
if compare[name[since] is_not name[github].GithubObject.NotSet] begin[:]
call[name[url_parameters]][constant[since]] assign[=] name[since]
return[call[name[github].PaginatedList.PaginatedList, parameter[name[github].Repository.Repository, name[self].__requester, constant[/repositories], name[url_parameters]]]] | keyword[def] identifier[get_repos] ( identifier[self] , identifier[since] = identifier[github] . identifier[GithubObject] . identifier[NotSet] ):
literal[string]
keyword[assert] identifier[since] keyword[is] identifier[github] . identifier[GithubObject] . identifier[NotSet] keyword[or] identifier[isinstance] ( identifier[since] ,( identifier[int] , identifier[long] )), identifier[since]
identifier[url_parameters] = identifier[dict] ()
keyword[if] identifier[since] keyword[is] keyword[not] identifier[github] . identifier[GithubObject] . identifier[NotSet] :
identifier[url_parameters] [ literal[string] ]= identifier[since]
keyword[return] identifier[github] . identifier[PaginatedList] . identifier[PaginatedList] (
identifier[github] . identifier[Repository] . identifier[Repository] ,
identifier[self] . identifier[__requester] ,
literal[string] ,
identifier[url_parameters]
) | def get_repos(self, since=github.GithubObject.NotSet):
"""
:calls: `GET /repositories <http://developer.github.com/v3/repos/#list-all-public-repositories>`_
:param since: integer
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Repository.Repository`
"""
assert since is github.GithubObject.NotSet or isinstance(since, (int, long)), since
url_parameters = dict()
if since is not github.GithubObject.NotSet:
url_parameters['since'] = since # depends on [control=['if'], data=['since']]
return github.PaginatedList.PaginatedList(github.Repository.Repository, self.__requester, '/repositories', url_parameters) |
def copy(input, **params):
    """
    Copies input or input's selected fields
    :param input:
    :param params:
    :return: input
    """
    PARAM_FIELDS = 'fields'

    def _project(record, wanted):
        # keep only the requested keys of a single mapping
        return {key: val for key, val in record.items() if key in wanted}

    # Without a field selection the input passes through unchanged.
    if PARAM_FIELDS not in params:
        return input

    fields = params.get(PARAM_FIELDS)
    if isinstance(input, list):
        return [_project(row, fields) for row in input]
    if isinstance(input, dict):
        return _project(input, fields)
    raise NotImplementedError('{} is not supported'.format(type(input)))
constant[
Copies input or input's selected fields
:param input:
:param params:
:return: input
]
variable[PARAM_FIELDS] assign[=] constant[fields]
def function[filter_fields, parameter[obj, fields]]:
return[<ast.DictComp object at 0x7da20c6aba00>]
if compare[name[PARAM_FIELDS] in name[params]] begin[:]
variable[fields] assign[=] call[name[params].get, parameter[name[PARAM_FIELDS]]]
if call[name[isinstance], parameter[name[input], name[list]]] begin[:]
variable[res] assign[=] list[[]]
for taget[name[row]] in starred[name[input]] begin[:]
call[name[res].append, parameter[call[name[filter_fields], parameter[name[row], name[fields]]]]]
return[name[res]] | keyword[def] identifier[copy] ( identifier[input] ,** identifier[params] ):
literal[string]
identifier[PARAM_FIELDS] = literal[string]
keyword[def] identifier[filter_fields] ( identifier[obj] , identifier[fields] ):
keyword[return] { identifier[k] : identifier[v] keyword[for] identifier[k] , identifier[v] keyword[in] identifier[obj] . identifier[items] () keyword[if] identifier[k] keyword[in] identifier[fields] }
keyword[if] identifier[PARAM_FIELDS] keyword[in] identifier[params] :
identifier[fields] = identifier[params] . identifier[get] ( identifier[PARAM_FIELDS] )
keyword[if] identifier[isinstance] ( identifier[input] , identifier[list] ):
identifier[res] =[]
keyword[for] identifier[row] keyword[in] identifier[input] :
identifier[res] . identifier[append] ( identifier[filter_fields] ( identifier[row] , identifier[fields] ))
keyword[return] identifier[res]
keyword[elif] identifier[isinstance] ( identifier[input] , identifier[dict] ):
keyword[return] identifier[filter_fields] ( identifier[input] , identifier[fields] )
keyword[else] :
keyword[raise] identifier[NotImplementedError] ( literal[string] . identifier[format] ( identifier[type] ( identifier[input] )))
keyword[else] :
keyword[return] identifier[input] | def copy(input, **params):
"""
Copies input or input's selected fields
:param input:
:param params:
:return: input
"""
PARAM_FIELDS = 'fields'
def filter_fields(obj, fields):
return {k: v for (k, v) in obj.items() if k in fields}
if PARAM_FIELDS in params:
fields = params.get(PARAM_FIELDS)
if isinstance(input, list):
res = []
for row in input:
res.append(filter_fields(row, fields)) # depends on [control=['for'], data=['row']]
return res # depends on [control=['if'], data=[]]
elif isinstance(input, dict):
return filter_fields(input, fields) # depends on [control=['if'], data=[]]
else:
raise NotImplementedError('{} is not supported'.format(type(input))) # depends on [control=['if'], data=['PARAM_FIELDS', 'params']]
else:
return input |
def _build_option_string(self, options):
"""
:param options: dictionary containing the options
:returns: option_string formatted for an API endpoint
"""
option_string = ""
if options is not None:
for key in options:
option_string += "/%s-%s" % (key, options[key])
return self._quote_url(option_string) | def function[_build_option_string, parameter[self, options]]:
constant[
:param options: dictionary containing the options
:returns: option_string formatted for an API endpoint
]
variable[option_string] assign[=] constant[]
if compare[name[options] is_not constant[None]] begin[:]
for taget[name[key]] in starred[name[options]] begin[:]
<ast.AugAssign object at 0x7da1b09d9150>
return[call[name[self]._quote_url, parameter[name[option_string]]]] | keyword[def] identifier[_build_option_string] ( identifier[self] , identifier[options] ):
literal[string]
identifier[option_string] = literal[string]
keyword[if] identifier[options] keyword[is] keyword[not] keyword[None] :
keyword[for] identifier[key] keyword[in] identifier[options] :
identifier[option_string] += literal[string] %( identifier[key] , identifier[options] [ identifier[key] ])
keyword[return] identifier[self] . identifier[_quote_url] ( identifier[option_string] ) | def _build_option_string(self, options):
"""
:param options: dictionary containing the options
:returns: option_string formatted for an API endpoint
"""
option_string = ''
if options is not None:
for key in options:
option_string += '/%s-%s' % (key, options[key]) # depends on [control=['for'], data=['key']] # depends on [control=['if'], data=['options']]
return self._quote_url(option_string) |
def getEngineChangelist(self):
    """
    Returns the compatible Perforce changelist identifier for the latest installed version of UE4
    """
    details = self._getEngineVersionDetails()
    # Newer engine versions expose "CompatibleChangelist"; older ones only "Changelist"
    key = 'CompatibleChangelist' if 'CompatibleChangelist' in details else 'Changelist'
    return int(details[key])
constant[
Returns the compatible Perforce changelist identifier for the latest installed version of UE4
]
variable[version] assign[=] call[name[self]._getEngineVersionDetails, parameter[]]
if compare[constant[CompatibleChangelist] in name[version]] begin[:]
return[call[name[int], parameter[call[name[version]][constant[CompatibleChangelist]]]]] | keyword[def] identifier[getEngineChangelist] ( identifier[self] ):
literal[string]
identifier[version] = identifier[self] . identifier[_getEngineVersionDetails] ()
keyword[if] literal[string] keyword[in] identifier[version] :
keyword[return] identifier[int] ( identifier[version] [ literal[string] ])
keyword[else] :
keyword[return] identifier[int] ( identifier[version] [ literal[string] ]) | def getEngineChangelist(self):
"""
Returns the compatible Perforce changelist identifier for the latest installed version of UE4
""" # Newer versions of the engine use the key "CompatibleChangelist", older ones use "Changelist"
version = self._getEngineVersionDetails()
if 'CompatibleChangelist' in version:
return int(version['CompatibleChangelist']) # depends on [control=['if'], data=['version']]
else:
return int(version['Changelist']) |
def ListTimeZones(self):
    """Lists the timezones.

    Writes a two-column table (timezone name, UTC offset) covering every
    timezone known to pytz to the configured output writer.
    """
    # NOTE(review): the original computed the maximum timezone-name length
    # here but never used it; that dead loop has been removed.
    utc_date_time = datetime.datetime.utcnow()

    table_view = views.ViewsFactory.GetTableView(
        self._views_format_type, column_names=['Timezone', 'UTC Offset'],
        title='Zones')
    for timezone_name in pytz.all_timezones:
        try:
            local_timezone = pytz.timezone(timezone_name)
        except AssertionError as exception:
            # Skip timezones pytz cannot resolve, but record why.
            logger.error((
                'Unable to determine information about timezone: {0:s} with '
                'error: {1!s}').format(timezone_name, exception))
            continue

        # Localizing the current UTC time yields a string ending in the
        # offset, e.g. "... +02:00" or "... -05:00"; rpartition on the sign
        # extracts that trailing offset.
        local_date_string = '{0!s}'.format(
            local_timezone.localize(utc_date_time))
        if '+' in local_date_string:
            _, _, diff = local_date_string.rpartition('+')
            diff_string = '+{0:s}'.format(diff)
        else:
            _, _, diff = local_date_string.rpartition('-')
            diff_string = '-{0:s}'.format(diff)

        table_view.AddRow([timezone_name, diff_string])

    table_view.Write(self._output_writer)
constant[Lists the timezones.]
variable[max_length] assign[=] constant[0]
for taget[name[timezone_name]] in starred[name[pytz].all_timezones] begin[:]
if compare[call[name[len], parameter[name[timezone_name]]] greater[>] name[max_length]] begin[:]
variable[max_length] assign[=] call[name[len], parameter[name[timezone_name]]]
variable[utc_date_time] assign[=] call[name[datetime].datetime.utcnow, parameter[]]
variable[table_view] assign[=] call[name[views].ViewsFactory.GetTableView, parameter[name[self]._views_format_type]]
for taget[name[timezone_name]] in starred[name[pytz].all_timezones] begin[:]
<ast.Try object at 0x7da18bc72d40>
variable[local_date_string] assign[=] call[constant[{0!s}].format, parameter[call[name[local_timezone].localize, parameter[name[utc_date_time]]]]]
if compare[constant[+] in name[local_date_string]] begin[:]
<ast.Tuple object at 0x7da18bc71930> assign[=] call[name[local_date_string].rpartition, parameter[constant[+]]]
variable[diff_string] assign[=] call[constant[+{0:s}].format, parameter[name[diff]]]
call[name[table_view].AddRow, parameter[list[[<ast.Name object at 0x7da18bc73f70>, <ast.Name object at 0x7da18bc72d10>]]]]
call[name[table_view].Write, parameter[name[self]._output_writer]] | keyword[def] identifier[ListTimeZones] ( identifier[self] ):
literal[string]
identifier[max_length] = literal[int]
keyword[for] identifier[timezone_name] keyword[in] identifier[pytz] . identifier[all_timezones] :
keyword[if] identifier[len] ( identifier[timezone_name] )> identifier[max_length] :
identifier[max_length] = identifier[len] ( identifier[timezone_name] )
identifier[utc_date_time] = identifier[datetime] . identifier[datetime] . identifier[utcnow] ()
identifier[table_view] = identifier[views] . identifier[ViewsFactory] . identifier[GetTableView] (
identifier[self] . identifier[_views_format_type] , identifier[column_names] =[ literal[string] , literal[string] ],
identifier[title] = literal[string] )
keyword[for] identifier[timezone_name] keyword[in] identifier[pytz] . identifier[all_timezones] :
keyword[try] :
identifier[local_timezone] = identifier[pytz] . identifier[timezone] ( identifier[timezone_name] )
keyword[except] identifier[AssertionError] keyword[as] identifier[exception] :
identifier[logger] . identifier[error] ((
literal[string]
literal[string] ). identifier[format] ( identifier[timezone_name] , identifier[exception] ))
keyword[continue]
identifier[local_date_string] = literal[string] . identifier[format] (
identifier[local_timezone] . identifier[localize] ( identifier[utc_date_time] ))
keyword[if] literal[string] keyword[in] identifier[local_date_string] :
identifier[_] , identifier[_] , identifier[diff] = identifier[local_date_string] . identifier[rpartition] ( literal[string] )
identifier[diff_string] = literal[string] . identifier[format] ( identifier[diff] )
keyword[else] :
identifier[_] , identifier[_] , identifier[diff] = identifier[local_date_string] . identifier[rpartition] ( literal[string] )
identifier[diff_string] = literal[string] . identifier[format] ( identifier[diff] )
identifier[table_view] . identifier[AddRow] ([ identifier[timezone_name] , identifier[diff_string] ])
identifier[table_view] . identifier[Write] ( identifier[self] . identifier[_output_writer] ) | def ListTimeZones(self):
"""Lists the timezones."""
max_length = 0
for timezone_name in pytz.all_timezones:
if len(timezone_name) > max_length:
max_length = len(timezone_name) # depends on [control=['if'], data=['max_length']] # depends on [control=['for'], data=['timezone_name']]
utc_date_time = datetime.datetime.utcnow()
table_view = views.ViewsFactory.GetTableView(self._views_format_type, column_names=['Timezone', 'UTC Offset'], title='Zones')
for timezone_name in pytz.all_timezones:
try:
local_timezone = pytz.timezone(timezone_name) # depends on [control=['try'], data=[]]
except AssertionError as exception:
logger.error('Unable to determine information about timezone: {0:s} with error: {1!s}'.format(timezone_name, exception))
continue # depends on [control=['except'], data=['exception']]
local_date_string = '{0!s}'.format(local_timezone.localize(utc_date_time))
if '+' in local_date_string:
(_, _, diff) = local_date_string.rpartition('+')
diff_string = '+{0:s}'.format(diff) # depends on [control=['if'], data=['local_date_string']]
else:
(_, _, diff) = local_date_string.rpartition('-')
diff_string = '-{0:s}'.format(diff)
table_view.AddRow([timezone_name, diff_string]) # depends on [control=['for'], data=['timezone_name']]
table_view.Write(self._output_writer) |
def from_file(cls, config_path=None, *args, **kwargs):
    """
    Create a Cihai instance from a JSON or YAML config.

    Parameters
    ----------
    config_path : str, optional
        path to custom config file

    Returns
    -------
    :class:`Cihai` :
        application object

    Raises
    ------
    CihaiException
        If *config_path* does not exist or does not carry a supported
        extension (json, yml, yaml, ini).
    """
    # NOTE(review): *args and **kwargs are accepted but never used here;
    # kept for interface compatibility with callers.
    config_reader = kaptan.Kaptan()

    config = {}
    if config_path:
        if not os.path.exists(config_path):
            raise exc.CihaiException(
                '{0} does not exist.'.format(os.path.abspath(config_path))
            )
        # str.endswith accepts a tuple of suffixes; one call covers all
        # supported extensions.
        if not config_path.endswith(('json', 'yml', 'yaml', 'ini')):
            raise exc.CihaiException(
                '{0} does not have a yaml, yml, json or ini extension.'.format(
                    os.path.abspath(config_path)
                )
            )

        custom_config = config_reader.import_config(config_path).get()
        config = merge_dict(config, custom_config)

    return cls(config)
constant[
Create a Cihai instance from a JSON or YAML config.
Parameters
----------
config_path : str, optional
path to custom config file
Returns
-------
:class:`Cihai` :
application object
]
variable[config_reader] assign[=] call[name[kaptan].Kaptan, parameter[]]
variable[config] assign[=] dictionary[[], []]
if name[config_path] begin[:]
if <ast.UnaryOp object at 0x7da1b1971f30> begin[:]
<ast.Raise object at 0x7da1b1972bc0>
if <ast.UnaryOp object at 0x7da1b1972230> begin[:]
<ast.Raise object at 0x7da1b1972500>
return[call[name[cls], parameter[name[config]]]] | keyword[def] identifier[from_file] ( identifier[cls] , identifier[config_path] = keyword[None] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
identifier[config_reader] = identifier[kaptan] . identifier[Kaptan] ()
identifier[config] ={}
keyword[if] identifier[config_path] :
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[config_path] ):
keyword[raise] identifier[exc] . identifier[CihaiException] (
literal[string] . identifier[format] ( identifier[os] . identifier[path] . identifier[abspath] ( identifier[config_path] ))
)
keyword[if] keyword[not] identifier[any] (
identifier[config_path] . identifier[endswith] ( identifier[ext] ) keyword[for] identifier[ext] keyword[in] ( literal[string] , literal[string] , literal[string] , literal[string] )
):
keyword[raise] identifier[exc] . identifier[CihaiException] (
literal[string] . identifier[format] (
identifier[os] . identifier[path] . identifier[abspath] ( identifier[config_path] )
)
)
keyword[else] :
identifier[custom_config] = identifier[config_reader] . identifier[import_config] ( identifier[config_path] ). identifier[get] ()
identifier[config] = identifier[merge_dict] ( identifier[config] , identifier[custom_config] )
keyword[return] identifier[cls] ( identifier[config] ) | def from_file(cls, config_path=None, *args, **kwargs):
"""
Create a Cihai instance from a JSON or YAML config.
Parameters
----------
config_path : str, optional
path to custom config file
Returns
-------
:class:`Cihai` :
application object
"""
config_reader = kaptan.Kaptan()
config = {}
if config_path:
if not os.path.exists(config_path):
raise exc.CihaiException('{0} does not exist.'.format(os.path.abspath(config_path))) # depends on [control=['if'], data=[]]
if not any((config_path.endswith(ext) for ext in ('json', 'yml', 'yaml', 'ini'))):
raise exc.CihaiException('{0} does not have a yaml,yml,json,ini extend.'.format(os.path.abspath(config_path))) # depends on [control=['if'], data=[]]
else:
custom_config = config_reader.import_config(config_path).get()
config = merge_dict(config, custom_config) # depends on [control=['if'], data=[]]
return cls(config) |
def get_interpreter_path(version=None):
    """Return the executable of a specified or current version."""
    current_major = str(sys.version_info[0])
    if not version or version == current_major:
        # No version requested, or the running interpreter already matches.
        return sys.executable
    return settings.PYTHON_INTERPRETER + version
constant[Return the executable of a specified or current version.]
if <ast.BoolOp object at 0x7da1b1bb0430> begin[:]
return[binary_operation[name[settings].PYTHON_INTERPRETER + name[version]]] | keyword[def] identifier[get_interpreter_path] ( identifier[version] = keyword[None] ):
literal[string]
keyword[if] identifier[version] keyword[and] identifier[version] != identifier[str] ( identifier[sys] . identifier[version_info] [ literal[int] ]):
keyword[return] identifier[settings] . identifier[PYTHON_INTERPRETER] + identifier[version]
keyword[else] :
keyword[return] identifier[sys] . identifier[executable] | def get_interpreter_path(version=None):
"""Return the executable of a specified or current version."""
if version and version != str(sys.version_info[0]):
return settings.PYTHON_INTERPRETER + version # depends on [control=['if'], data=[]]
else:
return sys.executable |
def bind(self, event_name, callback, first = False):
    """Register a callback for an event.

    Params:
      event_name (string):
        Name of the event to bind to
      callback (callable):
        Callback that will be called when the event is triggered
      first (boolean):
        If True, this callback is placed before all the other events already
        registered for this event, otherwise it is placed at the end.
    """
    callbacks = self.handlers.setdefault(event_name, [])
    position = 0 if first else len(callbacks)
    callbacks.insert(position, callback)
constant[Bind a callback to an event
Params:
event_name (string):
Name of the event to bind to
callback (callable):
Callback that will be called when the event is triggered
first (boolean):
If True, this callback is placed before all the other events already
registered for this event, otherwise it is placed at the end.
]
if compare[name[event_name] <ast.NotIn object at 0x7da2590d7190> name[self].handlers] begin[:]
call[name[self].handlers][name[event_name]] assign[=] list[[]]
if name[first] begin[:]
call[call[name[self].handlers][name[event_name]].insert, parameter[constant[0], name[callback]]] | keyword[def] identifier[bind] ( identifier[self] , identifier[event_name] , identifier[callback] , identifier[first] = keyword[False] ):
literal[string]
keyword[if] identifier[event_name] keyword[not] keyword[in] identifier[self] . identifier[handlers] :
identifier[self] . identifier[handlers] [ identifier[event_name] ]=[]
keyword[if] identifier[first] :
identifier[self] . identifier[handlers] [ identifier[event_name] ]. identifier[insert] ( literal[int] , identifier[callback] )
keyword[else] :
identifier[self] . identifier[handlers] [ identifier[event_name] ]. identifier[append] ( identifier[callback] ) | def bind(self, event_name, callback, first=False):
"""Bind a callback to an event
Params:
event_name (string):
Name of the event to bind to
callback (callable):
Callback that will be called when the event is triggered
first (boolean):
If True, this callback is placed before all the other events already
registered for this event, otherwise it is placed at the end.
"""
if event_name not in self.handlers:
self.handlers[event_name] = [] # depends on [control=['if'], data=['event_name']]
if first:
self.handlers[event_name].insert(0, callback) # depends on [control=['if'], data=[]]
else:
self.handlers[event_name].append(callback) |
def batch_scan_layers(self, cells: np.ndarray = None, genes: np.ndarray = None, axis: int = 0, batch_size: int = 1000, layers: Iterable = None) -> Iterable[Tuple[int, np.ndarray, Dict]]:
    """
    **DEPRECATED** - Use `scan` instead

    Iterate over the selected layers in chunks along ``axis`` (0 scans
    rows/genes, 1 scans columns/cells), reading at most ``batch_size``
    rows or columns from the file per chunk.

    Yields:
        Tuples ``(ix, indices, vals)`` where ``ix`` is the chunk start
        offset, ``indices`` (``ix + selection``) holds the global
        row/column indices selected within the chunk, and ``vals`` maps
        each layer name to the extracted 2-D submatrix.
    """
    deprecated("'batch_scan_layers' is deprecated. Use 'scan' instead")
    # Default to scanning every column, every row and every layer.
    if cells is None:
        cells = np.fromiter(range(self.shape[1]), dtype='int')
    if genes is None:
        genes = np.fromiter(range(self.shape[0]), dtype='int')
    if layers is None:
        layers = self.layers.keys()
    if axis == 1:
        # Scan across columns (cells), batch_size columns at a time.
        cols_per_chunk = batch_size
        ix = 0
        while ix < self.shape[1]:
            # Last chunk may be narrower than batch_size.
            cols_per_chunk = min(self.shape[1] - ix, cols_per_chunk)
            # selection holds chunk-local offsets of the requested cells.
            selection = cells - ix
            # Pick out the cells that are in this batch
            selection = selection[np.where(np.logical_and(selection >= 0, selection < cols_per_chunk))[0]]
            if selection.shape[0] == 0:
                # No requested cells fall inside this chunk; skip it.
                ix += cols_per_chunk
                continue
            # Load the whole chunk from the file, then extract genes and cells using fancy indexing
            vals = dict()
            for key in layers:
                vals[key] = self.layers[key][:, ix:ix + cols_per_chunk]
                vals[key] = vals[key][genes, :]
                vals[key] = vals[key][:, selection]
            # ix + selection converts chunk-local offsets to global column indices.
            yield (ix, ix + selection, vals)
            ix += cols_per_chunk
    if axis == 0:
        # Scan across rows (genes), batch_size rows at a time.
        rows_per_chunk = batch_size
        ix = 0
        while ix < self.shape[0]:
            # Last chunk may be shorter than batch_size.
            rows_per_chunk = min(self.shape[0] - ix, rows_per_chunk)
            # selection holds chunk-local offsets of the requested genes.
            selection = genes - ix
            # Pick out the genes that are in this batch
            selection = selection[np.where(np.logical_and(selection >= 0, selection < rows_per_chunk))[0]]
            if selection.shape[0] == 0:
                # No requested genes fall inside this chunk; skip it.
                ix += rows_per_chunk
                continue
            # Load the whole chunk from the file, then extract genes and cells using fancy indexing
            vals = dict()
            for key in layers:
                vals[key] = self.layers[key][ix:ix + rows_per_chunk, :]
                vals[key] = vals[key][selection, :]
                vals[key] = vals[key][:, cells]
            # ix + selection converts chunk-local offsets to global row indices.
            yield (ix, ix + selection, vals)
            ix += rows_per_chunk
constant[
**DEPRECATED** - Use `scan` instead
]
call[name[deprecated], parameter[constant['batch_scan_layers' is deprecated. Use 'scan' instead]]]
if compare[name[cells] is constant[None]] begin[:]
variable[cells] assign[=] call[name[np].fromiter, parameter[call[name[range], parameter[call[name[self].shape][constant[1]]]]]]
if compare[name[genes] is constant[None]] begin[:]
variable[genes] assign[=] call[name[np].fromiter, parameter[call[name[range], parameter[call[name[self].shape][constant[0]]]]]]
if compare[name[layers] is constant[None]] begin[:]
variable[layers] assign[=] call[name[self].layers.keys, parameter[]]
if compare[name[axis] equal[==] constant[1]] begin[:]
variable[cols_per_chunk] assign[=] name[batch_size]
variable[ix] assign[=] constant[0]
while compare[name[ix] less[<] call[name[self].shape][constant[1]]] begin[:]
variable[cols_per_chunk] assign[=] call[name[min], parameter[binary_operation[call[name[self].shape][constant[1]] - name[ix]], name[cols_per_chunk]]]
variable[selection] assign[=] binary_operation[name[cells] - name[ix]]
variable[selection] assign[=] call[name[selection]][call[call[name[np].where, parameter[call[name[np].logical_and, parameter[compare[name[selection] greater_or_equal[>=] constant[0]], compare[name[selection] less[<] name[cols_per_chunk]]]]]]][constant[0]]]
if compare[call[name[selection].shape][constant[0]] equal[==] constant[0]] begin[:]
<ast.AugAssign object at 0x7da18dc9a020>
continue
variable[vals] assign[=] call[name[dict], parameter[]]
for taget[name[key]] in starred[name[layers]] begin[:]
call[name[vals]][name[key]] assign[=] call[call[name[self].layers][name[key]]][tuple[[<ast.Slice object at 0x7da18f00cf10>, <ast.Slice object at 0x7da18f00cd60>]]]
call[name[vals]][name[key]] assign[=] call[call[name[vals]][name[key]]][tuple[[<ast.Name object at 0x7da18f00dde0>, <ast.Slice object at 0x7da18f00c970>]]]
call[name[vals]][name[key]] assign[=] call[call[name[vals]][name[key]]][tuple[[<ast.Slice object at 0x7da18f00e080>, <ast.Name object at 0x7da18f00ec80>]]]
<ast.Yield object at 0x7da18f00dc60>
<ast.AugAssign object at 0x7da18f00c5b0>
if compare[name[axis] equal[==] constant[0]] begin[:]
variable[rows_per_chunk] assign[=] name[batch_size]
variable[ix] assign[=] constant[0]
while compare[name[ix] less[<] call[name[self].shape][constant[0]]] begin[:]
variable[rows_per_chunk] assign[=] call[name[min], parameter[binary_operation[call[name[self].shape][constant[0]] - name[ix]], name[rows_per_chunk]]]
variable[selection] assign[=] binary_operation[name[genes] - name[ix]]
variable[selection] assign[=] call[name[selection]][call[call[name[np].where, parameter[call[name[np].logical_and, parameter[compare[name[selection] greater_or_equal[>=] constant[0]], compare[name[selection] less[<] name[rows_per_chunk]]]]]]][constant[0]]]
if compare[call[name[selection].shape][constant[0]] equal[==] constant[0]] begin[:]
<ast.AugAssign object at 0x7da18f00f8b0>
continue
variable[vals] assign[=] call[name[dict], parameter[]]
for taget[name[key]] in starred[name[layers]] begin[:]
call[name[vals]][name[key]] assign[=] call[call[name[self].layers][name[key]]][tuple[[<ast.Slice object at 0x7da18f00f3a0>, <ast.Slice object at 0x7da18f00e620>]]]
call[name[vals]][name[key]] assign[=] call[call[name[vals]][name[key]]][tuple[[<ast.Name object at 0x7da18f00d480>, <ast.Slice object at 0x7da18f00f0a0>]]]
call[name[vals]][name[key]] assign[=] call[call[name[vals]][name[key]]][tuple[[<ast.Slice object at 0x7da20c9926e0>, <ast.Name object at 0x7da20c993490>]]]
<ast.Yield object at 0x7da20c9909a0>
<ast.AugAssign object at 0x7da20c9904f0> | keyword[def] identifier[batch_scan_layers] ( identifier[self] , identifier[cells] : identifier[np] . identifier[ndarray] = keyword[None] , identifier[genes] : identifier[np] . identifier[ndarray] = keyword[None] , identifier[axis] : identifier[int] = literal[int] , identifier[batch_size] : identifier[int] = literal[int] , identifier[layers] : identifier[Iterable] = keyword[None] )-> identifier[Iterable] [ identifier[Tuple] [ identifier[int] , identifier[np] . identifier[ndarray] , identifier[Dict] ]]:
literal[string]
identifier[deprecated] ( literal[string] )
keyword[if] identifier[cells] keyword[is] keyword[None] :
identifier[cells] = identifier[np] . identifier[fromiter] ( identifier[range] ( identifier[self] . identifier[shape] [ literal[int] ]), identifier[dtype] = literal[string] )
keyword[if] identifier[genes] keyword[is] keyword[None] :
identifier[genes] = identifier[np] . identifier[fromiter] ( identifier[range] ( identifier[self] . identifier[shape] [ literal[int] ]), identifier[dtype] = literal[string] )
keyword[if] identifier[layers] keyword[is] keyword[None] :
identifier[layers] = identifier[self] . identifier[layers] . identifier[keys] ()
keyword[if] identifier[axis] == literal[int] :
identifier[cols_per_chunk] = identifier[batch_size]
identifier[ix] = literal[int]
keyword[while] identifier[ix] < identifier[self] . identifier[shape] [ literal[int] ]:
identifier[cols_per_chunk] = identifier[min] ( identifier[self] . identifier[shape] [ literal[int] ]- identifier[ix] , identifier[cols_per_chunk] )
identifier[selection] = identifier[cells] - identifier[ix]
identifier[selection] = identifier[selection] [ identifier[np] . identifier[where] ( identifier[np] . identifier[logical_and] ( identifier[selection] >= literal[int] , identifier[selection] < identifier[cols_per_chunk] ))[ literal[int] ]]
keyword[if] identifier[selection] . identifier[shape] [ literal[int] ]== literal[int] :
identifier[ix] += identifier[cols_per_chunk]
keyword[continue]
identifier[vals] = identifier[dict] ()
keyword[for] identifier[key] keyword[in] identifier[layers] :
identifier[vals] [ identifier[key] ]= identifier[self] . identifier[layers] [ identifier[key] ][:, identifier[ix] : identifier[ix] + identifier[cols_per_chunk] ]
identifier[vals] [ identifier[key] ]= identifier[vals] [ identifier[key] ][ identifier[genes] ,:]
identifier[vals] [ identifier[key] ]= identifier[vals] [ identifier[key] ][:, identifier[selection] ]
keyword[yield] ( identifier[ix] , identifier[ix] + identifier[selection] , identifier[vals] )
identifier[ix] += identifier[cols_per_chunk]
keyword[if] identifier[axis] == literal[int] :
identifier[rows_per_chunk] = identifier[batch_size]
identifier[ix] = literal[int]
keyword[while] identifier[ix] < identifier[self] . identifier[shape] [ literal[int] ]:
identifier[rows_per_chunk] = identifier[min] ( identifier[self] . identifier[shape] [ literal[int] ]- identifier[ix] , identifier[rows_per_chunk] )
identifier[selection] = identifier[genes] - identifier[ix]
identifier[selection] = identifier[selection] [ identifier[np] . identifier[where] ( identifier[np] . identifier[logical_and] ( identifier[selection] >= literal[int] , identifier[selection] < identifier[rows_per_chunk] ))[ literal[int] ]]
keyword[if] identifier[selection] . identifier[shape] [ literal[int] ]== literal[int] :
identifier[ix] += identifier[rows_per_chunk]
keyword[continue]
identifier[vals] = identifier[dict] ()
keyword[for] identifier[key] keyword[in] identifier[layers] :
identifier[vals] [ identifier[key] ]= identifier[self] . identifier[layers] [ identifier[key] ][ identifier[ix] : identifier[ix] + identifier[rows_per_chunk] ,:]
identifier[vals] [ identifier[key] ]= identifier[vals] [ identifier[key] ][ identifier[selection] ,:]
identifier[vals] [ identifier[key] ]= identifier[vals] [ identifier[key] ][:, identifier[cells] ]
keyword[yield] ( identifier[ix] , identifier[ix] + identifier[selection] , identifier[vals] )
identifier[ix] += identifier[rows_per_chunk] | def batch_scan_layers(self, cells: np.ndarray=None, genes: np.ndarray=None, axis: int=0, batch_size: int=1000, layers: Iterable=None) -> Iterable[Tuple[int, np.ndarray, Dict]]:
"""
**DEPRECATED** - Use `scan` instead
"""
deprecated("'batch_scan_layers' is deprecated. Use 'scan' instead")
if cells is None:
cells = np.fromiter(range(self.shape[1]), dtype='int') # depends on [control=['if'], data=['cells']]
if genes is None:
genes = np.fromiter(range(self.shape[0]), dtype='int') # depends on [control=['if'], data=['genes']]
if layers is None:
layers = self.layers.keys() # depends on [control=['if'], data=['layers']]
if axis == 1:
cols_per_chunk = batch_size
ix = 0
while ix < self.shape[1]:
cols_per_chunk = min(self.shape[1] - ix, cols_per_chunk)
selection = cells - ix # Pick out the cells that are in this batch
selection = selection[np.where(np.logical_and(selection >= 0, selection < cols_per_chunk))[0]]
if selection.shape[0] == 0:
ix += cols_per_chunk
continue # depends on [control=['if'], data=[]] # Load the whole chunk from the file, then extract genes and cells using fancy indexing
vals = dict()
for key in layers:
vals[key] = self.layers[key][:, ix:ix + cols_per_chunk]
vals[key] = vals[key][genes, :]
vals[key] = vals[key][:, selection] # depends on [control=['for'], data=['key']]
yield (ix, ix + selection, vals)
ix += cols_per_chunk # depends on [control=['while'], data=['ix']] # depends on [control=['if'], data=[]]
if axis == 0:
rows_per_chunk = batch_size
ix = 0
while ix < self.shape[0]:
rows_per_chunk = min(self.shape[0] - ix, rows_per_chunk)
selection = genes - ix # Pick out the genes that are in this batch
selection = selection[np.where(np.logical_and(selection >= 0, selection < rows_per_chunk))[0]]
if selection.shape[0] == 0:
ix += rows_per_chunk
continue # depends on [control=['if'], data=[]] # Load the whole chunk from the file, then extract genes and cells using fancy indexing
vals = dict()
for key in layers:
vals[key] = self.layers[key][ix:ix + rows_per_chunk, :]
vals[key] = vals[key][selection, :]
vals[key] = vals[key][:, cells] # depends on [control=['for'], data=['key']]
yield (ix, ix + selection, vals)
ix += rows_per_chunk # depends on [control=['while'], data=['ix']] # depends on [control=['if'], data=[]] |
def expired(self):
    """Return boolean indicating token expiration."""
    cutoff = timezone.now() - token_settings.EXPIRING_TOKEN_LIFESPAN
    # Tokens created before the cutoff have outlived their lifespan.
    return self.created < cutoff
return False | def function[expired, parameter[self]]:
constant[Return boolean indicating token expiration.]
variable[now] assign[=] call[name[timezone].now, parameter[]]
if compare[name[self].created less[<] binary_operation[name[now] - name[token_settings].EXPIRING_TOKEN_LIFESPAN]] begin[:]
return[constant[True]]
return[constant[False]] | keyword[def] identifier[expired] ( identifier[self] ):
literal[string]
identifier[now] = identifier[timezone] . identifier[now] ()
keyword[if] identifier[self] . identifier[created] < identifier[now] - identifier[token_settings] . identifier[EXPIRING_TOKEN_LIFESPAN] :
keyword[return] keyword[True]
keyword[return] keyword[False] | def expired(self):
"""Return boolean indicating token expiration."""
now = timezone.now()
if self.created < now - token_settings.EXPIRING_TOKEN_LIFESPAN:
return True # depends on [control=['if'], data=[]]
return False |
def dt_to_qdatetime(dt):
    """Convert a python datetime.datetime object to QDateTime

    :param dt: the datetime object
    :type dt: :class:`datetime.datetime`
    :returns: the QDateTime conversion
    :rtype: :class:`QtCore.QDateTime`
    :raises: None
    """
    # NOTE(review): sub-second precision (microseconds) is not carried over.
    date_part = QtCore.QDate(dt.year, dt.month, dt.day)
    time_part = QtCore.QTime(dt.hour, dt.minute, dt.second)
    return QtCore.QDateTime(date_part, time_part)
constant[Convert a python datetime.datetime object to QDateTime
:param dt: the datetime object
:type dt: :class:`datetime.datetime`
:returns: the QDateTime conversion
:rtype: :class:`QtCore.QDateTime`
:raises: None
]
return[call[name[QtCore].QDateTime, parameter[call[name[QtCore].QDate, parameter[name[dt].year, name[dt].month, name[dt].day]], call[name[QtCore].QTime, parameter[name[dt].hour, name[dt].minute, name[dt].second]]]]] | keyword[def] identifier[dt_to_qdatetime] ( identifier[dt] ):
literal[string]
keyword[return] identifier[QtCore] . identifier[QDateTime] ( identifier[QtCore] . identifier[QDate] ( identifier[dt] . identifier[year] , identifier[dt] . identifier[month] , identifier[dt] . identifier[day] ),
identifier[QtCore] . identifier[QTime] ( identifier[dt] . identifier[hour] , identifier[dt] . identifier[minute] , identifier[dt] . identifier[second] )) | def dt_to_qdatetime(dt):
"""Convert a python datetime.datetime object to QDateTime
:param dt: the datetime object
:type dt: :class:`datetime.datetime`
:returns: the QDateTime conversion
:rtype: :class:`QtCore.QDateTime`
:raises: None
"""
return QtCore.QDateTime(QtCore.QDate(dt.year, dt.month, dt.day), QtCore.QTime(dt.hour, dt.minute, dt.second)) |
def _insert(self, name, value, timestamp, intervals, **kwargs):
'''
Insert the new value.
'''
# TODO: confirm that this is in fact using the indices correctly.
for interval,config in self._intervals.items():
timestamps = self._normalize_timestamps(timestamp, intervals, config)
for tstamp in timestamps:
self._insert_data(name, value, tstamp, interval, config, **kwargs) | def function[_insert, parameter[self, name, value, timestamp, intervals]]:
constant[
Insert the new value.
]
for taget[tuple[[<ast.Name object at 0x7da20c6a9db0>, <ast.Name object at 0x7da20c6a92a0>]]] in starred[call[name[self]._intervals.items, parameter[]]] begin[:]
variable[timestamps] assign[=] call[name[self]._normalize_timestamps, parameter[name[timestamp], name[intervals], name[config]]]
for taget[name[tstamp]] in starred[name[timestamps]] begin[:]
call[name[self]._insert_data, parameter[name[name], name[value], name[tstamp], name[interval], name[config]]] | keyword[def] identifier[_insert] ( identifier[self] , identifier[name] , identifier[value] , identifier[timestamp] , identifier[intervals] ,** identifier[kwargs] ):
literal[string]
keyword[for] identifier[interval] , identifier[config] keyword[in] identifier[self] . identifier[_intervals] . identifier[items] ():
identifier[timestamps] = identifier[self] . identifier[_normalize_timestamps] ( identifier[timestamp] , identifier[intervals] , identifier[config] )
keyword[for] identifier[tstamp] keyword[in] identifier[timestamps] :
identifier[self] . identifier[_insert_data] ( identifier[name] , identifier[value] , identifier[tstamp] , identifier[interval] , identifier[config] ,** identifier[kwargs] ) | def _insert(self, name, value, timestamp, intervals, **kwargs):
"""
Insert the new value.
"""
# TODO: confirm that this is in fact using the indices correctly.
for (interval, config) in self._intervals.items():
timestamps = self._normalize_timestamps(timestamp, intervals, config)
for tstamp in timestamps:
self._insert_data(name, value, tstamp, interval, config, **kwargs) # depends on [control=['for'], data=['tstamp']] # depends on [control=['for'], data=[]] |
def unpack_post(environ, content_length):
"""
Unpacks a post request query string.
:param environ: whiskey application environment.
:return: A dictionary with parameters.
"""
post_body = environ['wsgi.input'].read(content_length).decode("utf-8")
data = None
if "application/x-www-form-urlencoded" in environ["CONTENT_TYPE"]:
data = dict(parse_qsl(post_body))
elif "application/json" in environ["CONTENT_TYPE"]:
data = json.loads(post_body)
logger.debug("unpack_post:: %s", data)
return data | def function[unpack_post, parameter[environ, content_length]]:
constant[
Unpacks a post request query string.
:param environ: whiskey application environment.
:return: A dictionary with parameters.
]
variable[post_body] assign[=] call[call[call[name[environ]][constant[wsgi.input]].read, parameter[name[content_length]]].decode, parameter[constant[utf-8]]]
variable[data] assign[=] constant[None]
if compare[constant[application/x-www-form-urlencoded] in call[name[environ]][constant[CONTENT_TYPE]]] begin[:]
variable[data] assign[=] call[name[dict], parameter[call[name[parse_qsl], parameter[name[post_body]]]]]
call[name[logger].debug, parameter[constant[unpack_post:: %s], name[data]]]
return[name[data]] | keyword[def] identifier[unpack_post] ( identifier[environ] , identifier[content_length] ):
literal[string]
identifier[post_body] = identifier[environ] [ literal[string] ]. identifier[read] ( identifier[content_length] ). identifier[decode] ( literal[string] )
identifier[data] = keyword[None]
keyword[if] literal[string] keyword[in] identifier[environ] [ literal[string] ]:
identifier[data] = identifier[dict] ( identifier[parse_qsl] ( identifier[post_body] ))
keyword[elif] literal[string] keyword[in] identifier[environ] [ literal[string] ]:
identifier[data] = identifier[json] . identifier[loads] ( identifier[post_body] )
identifier[logger] . identifier[debug] ( literal[string] , identifier[data] )
keyword[return] identifier[data] | def unpack_post(environ, content_length):
"""
Unpacks a post request query string.
:param environ: whiskey application environment.
:return: A dictionary with parameters.
"""
post_body = environ['wsgi.input'].read(content_length).decode('utf-8')
data = None
if 'application/x-www-form-urlencoded' in environ['CONTENT_TYPE']:
data = dict(parse_qsl(post_body)) # depends on [control=['if'], data=[]]
elif 'application/json' in environ['CONTENT_TYPE']:
data = json.loads(post_body) # depends on [control=['if'], data=[]]
logger.debug('unpack_post:: %s', data)
return data |
def _list_hosts():
'''
Return the hosts found in the hosts file in as an OrderedDict
'''
try:
return __context__['hosts._list_hosts']
except KeyError:
count = 0
hfn = __get_hosts_filename()
ret = odict.OrderedDict()
try:
with salt.utils.files.fopen(hfn) as ifile:
for line in ifile:
line = salt.utils.stringutils.to_unicode(line).strip()
if not line:
continue
if line.startswith('#'):
ret.setdefault('comment-{0}'.format(count), []).append(line)
count += 1
continue
if '#' in line:
line = line[:line.index('#')].strip()
comps = line.split()
ip = comps.pop(0)
ret.setdefault(ip, []).extend(comps)
except (IOError, OSError) as exc:
salt.utils.files.process_read_exception(exc, hfn, ignore=errno.ENOENT)
# Don't set __context__ since we weren't able to read from the
# hosts file.
return ret
__context__['hosts._list_hosts'] = ret
return ret | def function[_list_hosts, parameter[]]:
constant[
Return the hosts found in the hosts file in as an OrderedDict
]
<ast.Try object at 0x7da18bcca0b0> | keyword[def] identifier[_list_hosts] ():
literal[string]
keyword[try] :
keyword[return] identifier[__context__] [ literal[string] ]
keyword[except] identifier[KeyError] :
identifier[count] = literal[int]
identifier[hfn] = identifier[__get_hosts_filename] ()
identifier[ret] = identifier[odict] . identifier[OrderedDict] ()
keyword[try] :
keyword[with] identifier[salt] . identifier[utils] . identifier[files] . identifier[fopen] ( identifier[hfn] ) keyword[as] identifier[ifile] :
keyword[for] identifier[line] keyword[in] identifier[ifile] :
identifier[line] = identifier[salt] . identifier[utils] . identifier[stringutils] . identifier[to_unicode] ( identifier[line] ). identifier[strip] ()
keyword[if] keyword[not] identifier[line] :
keyword[continue]
keyword[if] identifier[line] . identifier[startswith] ( literal[string] ):
identifier[ret] . identifier[setdefault] ( literal[string] . identifier[format] ( identifier[count] ),[]). identifier[append] ( identifier[line] )
identifier[count] += literal[int]
keyword[continue]
keyword[if] literal[string] keyword[in] identifier[line] :
identifier[line] = identifier[line] [: identifier[line] . identifier[index] ( literal[string] )]. identifier[strip] ()
identifier[comps] = identifier[line] . identifier[split] ()
identifier[ip] = identifier[comps] . identifier[pop] ( literal[int] )
identifier[ret] . identifier[setdefault] ( identifier[ip] ,[]). identifier[extend] ( identifier[comps] )
keyword[except] ( identifier[IOError] , identifier[OSError] ) keyword[as] identifier[exc] :
identifier[salt] . identifier[utils] . identifier[files] . identifier[process_read_exception] ( identifier[exc] , identifier[hfn] , identifier[ignore] = identifier[errno] . identifier[ENOENT] )
keyword[return] identifier[ret]
identifier[__context__] [ literal[string] ]= identifier[ret]
keyword[return] identifier[ret] | def _list_hosts():
"""
Return the hosts found in the hosts file in as an OrderedDict
"""
try:
return __context__['hosts._list_hosts'] # depends on [control=['try'], data=[]]
except KeyError:
count = 0
hfn = __get_hosts_filename()
ret = odict.OrderedDict()
try:
with salt.utils.files.fopen(hfn) as ifile:
for line in ifile:
line = salt.utils.stringutils.to_unicode(line).strip()
if not line:
continue # depends on [control=['if'], data=[]]
if line.startswith('#'):
ret.setdefault('comment-{0}'.format(count), []).append(line)
count += 1
continue # depends on [control=['if'], data=[]]
if '#' in line:
line = line[:line.index('#')].strip() # depends on [control=['if'], data=['line']]
comps = line.split()
ip = comps.pop(0)
ret.setdefault(ip, []).extend(comps) # depends on [control=['for'], data=['line']] # depends on [control=['with'], data=['ifile']] # depends on [control=['try'], data=[]]
except (IOError, OSError) as exc:
salt.utils.files.process_read_exception(exc, hfn, ignore=errno.ENOENT)
# Don't set __context__ since we weren't able to read from the
# hosts file.
return ret # depends on [control=['except'], data=['exc']]
__context__['hosts._list_hosts'] = ret
return ret # depends on [control=['except'], data=[]] |
def mtFeatureExtractionToFile(fileName, midTermSize, midTermStep, shortTermSize, shortTermStep, outPutFile,
storeStFeatures=False, storeToCSV=False, PLOT=False):
"""
This function is used as a wrapper to:
a) read the content of a WAV file
b) perform mid-term feature extraction on that signal
c) write the mid-term feature sequences to a numpy file
"""
[fs, x] = audioBasicIO.readAudioFile(fileName)
x = audioBasicIO.stereo2mono(x)
if storeStFeatures:
[mtF, stF, _] = mtFeatureExtraction(x, fs,
round(fs * midTermSize),
round(fs * midTermStep),
round(fs * shortTermSize),
round(fs * shortTermStep))
else:
[mtF, _, _] = mtFeatureExtraction(x, fs, round(fs*midTermSize),
round(fs * midTermStep),
round(fs * shortTermSize),
round(fs * shortTermStep))
# save mt features to numpy file
numpy.save(outPutFile, mtF)
if PLOT:
print("Mid-term numpy file: " + outPutFile + ".npy saved")
if storeToCSV:
numpy.savetxt(outPutFile+".csv", mtF.T, delimiter=",")
if PLOT:
print("Mid-term CSV file: " + outPutFile + ".csv saved")
if storeStFeatures:
# save st features to numpy file
numpy.save(outPutFile+"_st", stF)
if PLOT:
print("Short-term numpy file: " + outPutFile + "_st.npy saved")
if storeToCSV:
# store st features to CSV file
numpy.savetxt(outPutFile+"_st.csv", stF.T, delimiter=",")
if PLOT:
print("Short-term CSV file: " + outPutFile + "_st.csv saved") | def function[mtFeatureExtractionToFile, parameter[fileName, midTermSize, midTermStep, shortTermSize, shortTermStep, outPutFile, storeStFeatures, storeToCSV, PLOT]]:
constant[
This function is used as a wrapper to:
a) read the content of a WAV file
b) perform mid-term feature extraction on that signal
c) write the mid-term feature sequences to a numpy file
]
<ast.List object at 0x7da1b211dae0> assign[=] call[name[audioBasicIO].readAudioFile, parameter[name[fileName]]]
variable[x] assign[=] call[name[audioBasicIO].stereo2mono, parameter[name[x]]]
if name[storeStFeatures] begin[:]
<ast.List object at 0x7da1b211de10> assign[=] call[name[mtFeatureExtraction], parameter[name[x], name[fs], call[name[round], parameter[binary_operation[name[fs] * name[midTermSize]]]], call[name[round], parameter[binary_operation[name[fs] * name[midTermStep]]]], call[name[round], parameter[binary_operation[name[fs] * name[shortTermSize]]]], call[name[round], parameter[binary_operation[name[fs] * name[shortTermStep]]]]]]
call[name[numpy].save, parameter[name[outPutFile], name[mtF]]]
if name[PLOT] begin[:]
call[name[print], parameter[binary_operation[binary_operation[constant[Mid-term numpy file: ] + name[outPutFile]] + constant[.npy saved]]]]
if name[storeToCSV] begin[:]
call[name[numpy].savetxt, parameter[binary_operation[name[outPutFile] + constant[.csv]], name[mtF].T]]
if name[PLOT] begin[:]
call[name[print], parameter[binary_operation[binary_operation[constant[Mid-term CSV file: ] + name[outPutFile]] + constant[.csv saved]]]]
if name[storeStFeatures] begin[:]
call[name[numpy].save, parameter[binary_operation[name[outPutFile] + constant[_st]], name[stF]]]
if name[PLOT] begin[:]
call[name[print], parameter[binary_operation[binary_operation[constant[Short-term numpy file: ] + name[outPutFile]] + constant[_st.npy saved]]]]
if name[storeToCSV] begin[:]
call[name[numpy].savetxt, parameter[binary_operation[name[outPutFile] + constant[_st.csv]], name[stF].T]]
if name[PLOT] begin[:]
call[name[print], parameter[binary_operation[binary_operation[constant[Short-term CSV file: ] + name[outPutFile]] + constant[_st.csv saved]]]] | keyword[def] identifier[mtFeatureExtractionToFile] ( identifier[fileName] , identifier[midTermSize] , identifier[midTermStep] , identifier[shortTermSize] , identifier[shortTermStep] , identifier[outPutFile] ,
identifier[storeStFeatures] = keyword[False] , identifier[storeToCSV] = keyword[False] , identifier[PLOT] = keyword[False] ):
literal[string]
[ identifier[fs] , identifier[x] ]= identifier[audioBasicIO] . identifier[readAudioFile] ( identifier[fileName] )
identifier[x] = identifier[audioBasicIO] . identifier[stereo2mono] ( identifier[x] )
keyword[if] identifier[storeStFeatures] :
[ identifier[mtF] , identifier[stF] , identifier[_] ]= identifier[mtFeatureExtraction] ( identifier[x] , identifier[fs] ,
identifier[round] ( identifier[fs] * identifier[midTermSize] ),
identifier[round] ( identifier[fs] * identifier[midTermStep] ),
identifier[round] ( identifier[fs] * identifier[shortTermSize] ),
identifier[round] ( identifier[fs] * identifier[shortTermStep] ))
keyword[else] :
[ identifier[mtF] , identifier[_] , identifier[_] ]= identifier[mtFeatureExtraction] ( identifier[x] , identifier[fs] , identifier[round] ( identifier[fs] * identifier[midTermSize] ),
identifier[round] ( identifier[fs] * identifier[midTermStep] ),
identifier[round] ( identifier[fs] * identifier[shortTermSize] ),
identifier[round] ( identifier[fs] * identifier[shortTermStep] ))
identifier[numpy] . identifier[save] ( identifier[outPutFile] , identifier[mtF] )
keyword[if] identifier[PLOT] :
identifier[print] ( literal[string] + identifier[outPutFile] + literal[string] )
keyword[if] identifier[storeToCSV] :
identifier[numpy] . identifier[savetxt] ( identifier[outPutFile] + literal[string] , identifier[mtF] . identifier[T] , identifier[delimiter] = literal[string] )
keyword[if] identifier[PLOT] :
identifier[print] ( literal[string] + identifier[outPutFile] + literal[string] )
keyword[if] identifier[storeStFeatures] :
identifier[numpy] . identifier[save] ( identifier[outPutFile] + literal[string] , identifier[stF] )
keyword[if] identifier[PLOT] :
identifier[print] ( literal[string] + identifier[outPutFile] + literal[string] )
keyword[if] identifier[storeToCSV] :
identifier[numpy] . identifier[savetxt] ( identifier[outPutFile] + literal[string] , identifier[stF] . identifier[T] , identifier[delimiter] = literal[string] )
keyword[if] identifier[PLOT] :
identifier[print] ( literal[string] + identifier[outPutFile] + literal[string] ) | def mtFeatureExtractionToFile(fileName, midTermSize, midTermStep, shortTermSize, shortTermStep, outPutFile, storeStFeatures=False, storeToCSV=False, PLOT=False):
"""
This function is used as a wrapper to:
a) read the content of a WAV file
b) perform mid-term feature extraction on that signal
c) write the mid-term feature sequences to a numpy file
"""
[fs, x] = audioBasicIO.readAudioFile(fileName)
x = audioBasicIO.stereo2mono(x)
if storeStFeatures:
[mtF, stF, _] = mtFeatureExtraction(x, fs, round(fs * midTermSize), round(fs * midTermStep), round(fs * shortTermSize), round(fs * shortTermStep)) # depends on [control=['if'], data=[]]
else:
[mtF, _, _] = mtFeatureExtraction(x, fs, round(fs * midTermSize), round(fs * midTermStep), round(fs * shortTermSize), round(fs * shortTermStep))
# save mt features to numpy file
numpy.save(outPutFile, mtF)
if PLOT:
print('Mid-term numpy file: ' + outPutFile + '.npy saved') # depends on [control=['if'], data=[]]
if storeToCSV:
numpy.savetxt(outPutFile + '.csv', mtF.T, delimiter=',')
if PLOT:
print('Mid-term CSV file: ' + outPutFile + '.csv saved') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if storeStFeatures:
# save st features to numpy file
numpy.save(outPutFile + '_st', stF)
if PLOT:
print('Short-term numpy file: ' + outPutFile + '_st.npy saved') # depends on [control=['if'], data=[]]
if storeToCSV:
# store st features to CSV file
numpy.savetxt(outPutFile + '_st.csv', stF.T, delimiter=',')
if PLOT:
print('Short-term CSV file: ' + outPutFile + '_st.csv saved') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] |
def set_entries(self, entries, user_scope):
"""SetEntries.
[Preview API] Set the specified setting entry values for the given user/all-users scope
:param {object} entries: The entries to set
:param str user_scope: User-Scope at which to set the values. Should be "me" for the current user or "host" for all users.
"""
route_values = {}
if user_scope is not None:
route_values['userScope'] = self._serialize.url('user_scope', user_scope, 'str')
content = self._serialize.body(entries, '{object}')
self._send(http_method='PATCH',
location_id='cd006711-163d-4cd4-a597-b05bad2556ff',
version='5.0-preview.1',
route_values=route_values,
content=content) | def function[set_entries, parameter[self, entries, user_scope]]:
constant[SetEntries.
[Preview API] Set the specified setting entry values for the given user/all-users scope
:param {object} entries: The entries to set
:param str user_scope: User-Scope at which to set the values. Should be "me" for the current user or "host" for all users.
]
variable[route_values] assign[=] dictionary[[], []]
if compare[name[user_scope] is_not constant[None]] begin[:]
call[name[route_values]][constant[userScope]] assign[=] call[name[self]._serialize.url, parameter[constant[user_scope], name[user_scope], constant[str]]]
variable[content] assign[=] call[name[self]._serialize.body, parameter[name[entries], constant[{object}]]]
call[name[self]._send, parameter[]] | keyword[def] identifier[set_entries] ( identifier[self] , identifier[entries] , identifier[user_scope] ):
literal[string]
identifier[route_values] ={}
keyword[if] identifier[user_scope] keyword[is] keyword[not] keyword[None] :
identifier[route_values] [ literal[string] ]= identifier[self] . identifier[_serialize] . identifier[url] ( literal[string] , identifier[user_scope] , literal[string] )
identifier[content] = identifier[self] . identifier[_serialize] . identifier[body] ( identifier[entries] , literal[string] )
identifier[self] . identifier[_send] ( identifier[http_method] = literal[string] ,
identifier[location_id] = literal[string] ,
identifier[version] = literal[string] ,
identifier[route_values] = identifier[route_values] ,
identifier[content] = identifier[content] ) | def set_entries(self, entries, user_scope):
"""SetEntries.
[Preview API] Set the specified setting entry values for the given user/all-users scope
:param {object} entries: The entries to set
:param str user_scope: User-Scope at which to set the values. Should be "me" for the current user or "host" for all users.
"""
route_values = {}
if user_scope is not None:
route_values['userScope'] = self._serialize.url('user_scope', user_scope, 'str') # depends on [control=['if'], data=['user_scope']]
content = self._serialize.body(entries, '{object}')
self._send(http_method='PATCH', location_id='cd006711-163d-4cd4-a597-b05bad2556ff', version='5.0-preview.1', route_values=route_values, content=content) |
def _first_step_to_match(match_step):
"""Transform the very first MATCH step into a MATCH query string."""
parts = []
if match_step.root_block is not None:
if not isinstance(match_step.root_block, QueryRoot):
raise AssertionError(u'Expected None or QueryRoot root block, received: '
u'{} {}'.format(match_step.root_block, match_step))
match_step.root_block.validate()
start_class = get_only_element_from_collection(match_step.root_block.start_class)
parts.append(u'class: %s' % (start_class,))
# MATCH steps with a QueryRoot root block shouldn't have a 'coerce_type_block'.
if match_step.coerce_type_block is not None:
raise AssertionError(u'Invalid MATCH step: {}'.format(match_step))
if match_step.where_block:
match_step.where_block.validate()
parts.append(u'where: (%s)' % (match_step.where_block.predicate.to_match(),))
if match_step.as_block is None:
raise AssertionError(u'Found a MATCH step without a corresponding Location. '
u'This should never happen: {}'.format(match_step))
else:
match_step.as_block.validate()
parts.append(u'as: %s' % (_get_vertex_location_name(match_step.as_block.location),))
return u'{{ %s }}' % (u', '.join(parts),) | def function[_first_step_to_match, parameter[match_step]]:
constant[Transform the very first MATCH step into a MATCH query string.]
variable[parts] assign[=] list[[]]
if compare[name[match_step].root_block is_not constant[None]] begin[:]
if <ast.UnaryOp object at 0x7da1b16466b0> begin[:]
<ast.Raise object at 0x7da1b1645120>
call[name[match_step].root_block.validate, parameter[]]
variable[start_class] assign[=] call[name[get_only_element_from_collection], parameter[name[match_step].root_block.start_class]]
call[name[parts].append, parameter[binary_operation[constant[class: %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b1644580>]]]]]
if compare[name[match_step].coerce_type_block is_not constant[None]] begin[:]
<ast.Raise object at 0x7da1b1644df0>
if name[match_step].where_block begin[:]
call[name[match_step].where_block.validate, parameter[]]
call[name[parts].append, parameter[binary_operation[constant[where: (%s)] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Call object at 0x7da1b16469b0>]]]]]
if compare[name[match_step].as_block is constant[None]] begin[:]
<ast.Raise object at 0x7da1b1644400>
return[binary_operation[constant[{{ %s }}] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Call object at 0x7da1b1646da0>]]]] | keyword[def] identifier[_first_step_to_match] ( identifier[match_step] ):
literal[string]
identifier[parts] =[]
keyword[if] identifier[match_step] . identifier[root_block] keyword[is] keyword[not] keyword[None] :
keyword[if] keyword[not] identifier[isinstance] ( identifier[match_step] . identifier[root_block] , identifier[QueryRoot] ):
keyword[raise] identifier[AssertionError] ( literal[string]
literal[string] . identifier[format] ( identifier[match_step] . identifier[root_block] , identifier[match_step] ))
identifier[match_step] . identifier[root_block] . identifier[validate] ()
identifier[start_class] = identifier[get_only_element_from_collection] ( identifier[match_step] . identifier[root_block] . identifier[start_class] )
identifier[parts] . identifier[append] ( literal[string] %( identifier[start_class] ,))
keyword[if] identifier[match_step] . identifier[coerce_type_block] keyword[is] keyword[not] keyword[None] :
keyword[raise] identifier[AssertionError] ( literal[string] . identifier[format] ( identifier[match_step] ))
keyword[if] identifier[match_step] . identifier[where_block] :
identifier[match_step] . identifier[where_block] . identifier[validate] ()
identifier[parts] . identifier[append] ( literal[string] %( identifier[match_step] . identifier[where_block] . identifier[predicate] . identifier[to_match] (),))
keyword[if] identifier[match_step] . identifier[as_block] keyword[is] keyword[None] :
keyword[raise] identifier[AssertionError] ( literal[string]
literal[string] . identifier[format] ( identifier[match_step] ))
keyword[else] :
identifier[match_step] . identifier[as_block] . identifier[validate] ()
identifier[parts] . identifier[append] ( literal[string] %( identifier[_get_vertex_location_name] ( identifier[match_step] . identifier[as_block] . identifier[location] ),))
keyword[return] literal[string] %( literal[string] . identifier[join] ( identifier[parts] ),) | def _first_step_to_match(match_step):
"""Transform the very first MATCH step into a MATCH query string."""
parts = []
if match_step.root_block is not None:
if not isinstance(match_step.root_block, QueryRoot):
raise AssertionError(u'Expected None or QueryRoot root block, received: {} {}'.format(match_step.root_block, match_step)) # depends on [control=['if'], data=[]]
match_step.root_block.validate()
start_class = get_only_element_from_collection(match_step.root_block.start_class)
parts.append(u'class: %s' % (start_class,)) # depends on [control=['if'], data=[]]
# MATCH steps with a QueryRoot root block shouldn't have a 'coerce_type_block'.
if match_step.coerce_type_block is not None:
raise AssertionError(u'Invalid MATCH step: {}'.format(match_step)) # depends on [control=['if'], data=[]]
if match_step.where_block:
match_step.where_block.validate()
parts.append(u'where: (%s)' % (match_step.where_block.predicate.to_match(),)) # depends on [control=['if'], data=[]]
if match_step.as_block is None:
raise AssertionError(u'Found a MATCH step without a corresponding Location. This should never happen: {}'.format(match_step)) # depends on [control=['if'], data=[]]
else:
match_step.as_block.validate()
parts.append(u'as: %s' % (_get_vertex_location_name(match_step.as_block.location),))
return u'{{ %s }}' % (u', '.join(parts),) |
def epubcheck_help():
"""Return epubcheck.jar commandline help text.
:return unicode: helptext from epubcheck.jar
"""
# tc = locale.getdefaultlocale()[1]
with open(os.devnull, "w") as devnull:
p = subprocess.Popen(
[c.JAVA, '-Duser.language=en', '-jar', c.EPUBCHECK, '-h'],
stdout=subprocess.PIPE,
stderr=devnull,
)
result = p.communicate()[0]
return result.decode() | def function[epubcheck_help, parameter[]]:
constant[Return epubcheck.jar commandline help text.
:return unicode: helptext from epubcheck.jar
]
with call[name[open], parameter[name[os].devnull, constant[w]]] begin[:]
variable[p] assign[=] call[name[subprocess].Popen, parameter[list[[<ast.Attribute object at 0x7da1b0ae3370>, <ast.Constant object at 0x7da1b0ae11e0>, <ast.Constant object at 0x7da1b0ae0e80>, <ast.Attribute object at 0x7da1b0ae3220>, <ast.Constant object at 0x7da1b0ae0fd0>]]]]
variable[result] assign[=] call[call[name[p].communicate, parameter[]]][constant[0]]
return[call[name[result].decode, parameter[]]] | keyword[def] identifier[epubcheck_help] ():
literal[string]
keyword[with] identifier[open] ( identifier[os] . identifier[devnull] , literal[string] ) keyword[as] identifier[devnull] :
identifier[p] = identifier[subprocess] . identifier[Popen] (
[ identifier[c] . identifier[JAVA] , literal[string] , literal[string] , identifier[c] . identifier[EPUBCHECK] , literal[string] ],
identifier[stdout] = identifier[subprocess] . identifier[PIPE] ,
identifier[stderr] = identifier[devnull] ,
)
identifier[result] = identifier[p] . identifier[communicate] ()[ literal[int] ]
keyword[return] identifier[result] . identifier[decode] () | def epubcheck_help():
"""Return epubcheck.jar commandline help text.
:return unicode: helptext from epubcheck.jar
"""
# tc = locale.getdefaultlocale()[1]
with open(os.devnull, 'w') as devnull:
p = subprocess.Popen([c.JAVA, '-Duser.language=en', '-jar', c.EPUBCHECK, '-h'], stdout=subprocess.PIPE, stderr=devnull)
result = p.communicate()[0] # depends on [control=['with'], data=['devnull']]
return result.decode() |
def load(file, use_yaml=None):
"""
Loads not only JSON files but also YAML files ending in .yml.
:param file: a filename or file handle to read from
:returns: the data loaded from the JSON or YAML file
:rtype: dict
"""
if isinstance(file, str):
fp = open(file)
filename = file
else:
fp = file
filename = getattr(fp, 'name', '')
try:
return loads(fp.read(), use_yaml, filename)
except Exception as e:
e.args = ('There was a error in the data file', filename) + e.args
raise | def function[load, parameter[file, use_yaml]]:
constant[
Loads not only JSON files but also YAML files ending in .yml.
:param file: a filename or file handle to read from
:returns: the data loaded from the JSON or YAML file
:rtype: dict
]
if call[name[isinstance], parameter[name[file], name[str]]] begin[:]
variable[fp] assign[=] call[name[open], parameter[name[file]]]
variable[filename] assign[=] name[file]
<ast.Try object at 0x7da1b00627d0> | keyword[def] identifier[load] ( identifier[file] , identifier[use_yaml] = keyword[None] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[file] , identifier[str] ):
identifier[fp] = identifier[open] ( identifier[file] )
identifier[filename] = identifier[file]
keyword[else] :
identifier[fp] = identifier[file]
identifier[filename] = identifier[getattr] ( identifier[fp] , literal[string] , literal[string] )
keyword[try] :
keyword[return] identifier[loads] ( identifier[fp] . identifier[read] (), identifier[use_yaml] , identifier[filename] )
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[e] . identifier[args] =( literal[string] , identifier[filename] )+ identifier[e] . identifier[args]
keyword[raise] | def load(file, use_yaml=None):
"""
Loads not only JSON files but also YAML files ending in .yml.
:param file: a filename or file handle to read from
:returns: the data loaded from the JSON or YAML file
:rtype: dict
"""
if isinstance(file, str):
fp = open(file)
filename = file # depends on [control=['if'], data=[]]
else:
fp = file
filename = getattr(fp, 'name', '')
try:
return loads(fp.read(), use_yaml, filename) # depends on [control=['try'], data=[]]
except Exception as e:
e.args = ('There was a error in the data file', filename) + e.args
raise # depends on [control=['except'], data=['e']] |
def environ(request: httputil.HTTPServerRequest) -> Dict[Text, Any]:
"""Converts a `tornado.httputil.HTTPServerRequest` to a WSGI environment.
"""
hostport = request.host.split(":")
if len(hostport) == 2:
host = hostport[0]
port = int(hostport[1])
else:
host = request.host
port = 443 if request.protocol == "https" else 80
environ = {
"REQUEST_METHOD": request.method,
"SCRIPT_NAME": "",
"PATH_INFO": to_wsgi_str(
escape.url_unescape(request.path, encoding=None, plus=False)
),
"QUERY_STRING": request.query,
"REMOTE_ADDR": request.remote_ip,
"SERVER_NAME": host,
"SERVER_PORT": str(port),
"SERVER_PROTOCOL": request.version,
"wsgi.version": (1, 0),
"wsgi.url_scheme": request.protocol,
"wsgi.input": BytesIO(escape.utf8(request.body)),
"wsgi.errors": sys.stderr,
"wsgi.multithread": False,
"wsgi.multiprocess": True,
"wsgi.run_once": False,
}
if "Content-Type" in request.headers:
environ["CONTENT_TYPE"] = request.headers.pop("Content-Type")
if "Content-Length" in request.headers:
environ["CONTENT_LENGTH"] = request.headers.pop("Content-Length")
for key, value in request.headers.items():
environ["HTTP_" + key.replace("-", "_").upper()] = value
return environ | def function[environ, parameter[request]]:
constant[Converts a `tornado.httputil.HTTPServerRequest` to a WSGI environment.
]
variable[hostport] assign[=] call[name[request].host.split, parameter[constant[:]]]
if compare[call[name[len], parameter[name[hostport]]] equal[==] constant[2]] begin[:]
variable[host] assign[=] call[name[hostport]][constant[0]]
variable[port] assign[=] call[name[int], parameter[call[name[hostport]][constant[1]]]]
variable[environ] assign[=] dictionary[[<ast.Constant object at 0x7da1b1f2c9a0>, <ast.Constant object at 0x7da1b1f2c9d0>, <ast.Constant object at 0x7da1b1f2ca00>, <ast.Constant object at 0x7da1b1f2ca30>, <ast.Constant object at 0x7da1b1f2ca60>, <ast.Constant object at 0x7da1b1f2ca90>, <ast.Constant object at 0x7da1b1f2cac0>, <ast.Constant object at 0x7da1b1f2caf0>, <ast.Constant object at 0x7da1b1f2cb20>, <ast.Constant object at 0x7da1b1f2cb50>, <ast.Constant object at 0x7da1b1f2cb80>, <ast.Constant object at 0x7da1b1f2cbb0>, <ast.Constant object at 0x7da1b1f2cbe0>, <ast.Constant object at 0x7da1b1f2cc10>, <ast.Constant object at 0x7da1b1f2cc40>], [<ast.Attribute object at 0x7da1b1f2cc70>, <ast.Constant object at 0x7da1b1f2ccd0>, <ast.Call object at 0x7da1b1f2cd00>, <ast.Attribute object at 0x7da1b1f2cf10>, <ast.Attribute object at 0x7da1b1f2cf70>, <ast.Name object at 0x7da1b1f2cfd0>, <ast.Call object at 0x7da1b1f2d000>, <ast.Attribute object at 0x7da1b1f2d090>, <ast.Tuple object at 0x7da1b1f2d0f0>, <ast.Attribute object at 0x7da1b1f2d180>, <ast.Call object at 0x7da1b1f2d1e0>, <ast.Attribute object at 0x7da1b1f2d330>, <ast.Constant object at 0x7da1b1f2d390>, <ast.Constant object at 0x7da1b1f2d3c0>, <ast.Constant object at 0x7da1b1f2d3f0>]]
if compare[constant[Content-Type] in name[request].headers] begin[:]
call[name[environ]][constant[CONTENT_TYPE]] assign[=] call[name[request].headers.pop, parameter[constant[Content-Type]]]
if compare[constant[Content-Length] in name[request].headers] begin[:]
call[name[environ]][constant[CONTENT_LENGTH]] assign[=] call[name[request].headers.pop, parameter[constant[Content-Length]]]
for taget[tuple[[<ast.Name object at 0x7da1b1f2d9c0>, <ast.Name object at 0x7da1b1f2d9f0>]]] in starred[call[name[request].headers.items, parameter[]]] begin[:]
call[name[environ]][binary_operation[constant[HTTP_] + call[call[name[key].replace, parameter[constant[-], constant[_]]].upper, parameter[]]]] assign[=] name[value]
return[name[environ]] | keyword[def] identifier[environ] ( identifier[request] : identifier[httputil] . identifier[HTTPServerRequest] )-> identifier[Dict] [ identifier[Text] , identifier[Any] ]:
literal[string]
identifier[hostport] = identifier[request] . identifier[host] . identifier[split] ( literal[string] )
keyword[if] identifier[len] ( identifier[hostport] )== literal[int] :
identifier[host] = identifier[hostport] [ literal[int] ]
identifier[port] = identifier[int] ( identifier[hostport] [ literal[int] ])
keyword[else] :
identifier[host] = identifier[request] . identifier[host]
identifier[port] = literal[int] keyword[if] identifier[request] . identifier[protocol] == literal[string] keyword[else] literal[int]
identifier[environ] ={
literal[string] : identifier[request] . identifier[method] ,
literal[string] : literal[string] ,
literal[string] : identifier[to_wsgi_str] (
identifier[escape] . identifier[url_unescape] ( identifier[request] . identifier[path] , identifier[encoding] = keyword[None] , identifier[plus] = keyword[False] )
),
literal[string] : identifier[request] . identifier[query] ,
literal[string] : identifier[request] . identifier[remote_ip] ,
literal[string] : identifier[host] ,
literal[string] : identifier[str] ( identifier[port] ),
literal[string] : identifier[request] . identifier[version] ,
literal[string] :( literal[int] , literal[int] ),
literal[string] : identifier[request] . identifier[protocol] ,
literal[string] : identifier[BytesIO] ( identifier[escape] . identifier[utf8] ( identifier[request] . identifier[body] )),
literal[string] : identifier[sys] . identifier[stderr] ,
literal[string] : keyword[False] ,
literal[string] : keyword[True] ,
literal[string] : keyword[False] ,
}
keyword[if] literal[string] keyword[in] identifier[request] . identifier[headers] :
identifier[environ] [ literal[string] ]= identifier[request] . identifier[headers] . identifier[pop] ( literal[string] )
keyword[if] literal[string] keyword[in] identifier[request] . identifier[headers] :
identifier[environ] [ literal[string] ]= identifier[request] . identifier[headers] . identifier[pop] ( literal[string] )
keyword[for] identifier[key] , identifier[value] keyword[in] identifier[request] . identifier[headers] . identifier[items] ():
identifier[environ] [ literal[string] + identifier[key] . identifier[replace] ( literal[string] , literal[string] ). identifier[upper] ()]= identifier[value]
keyword[return] identifier[environ] | def environ(request: httputil.HTTPServerRequest) -> Dict[Text, Any]:
"""Converts a `tornado.httputil.HTTPServerRequest` to a WSGI environment.
"""
hostport = request.host.split(':')
if len(hostport) == 2:
host = hostport[0]
port = int(hostport[1]) # depends on [control=['if'], data=[]]
else:
host = request.host
port = 443 if request.protocol == 'https' else 80
environ = {'REQUEST_METHOD': request.method, 'SCRIPT_NAME': '', 'PATH_INFO': to_wsgi_str(escape.url_unescape(request.path, encoding=None, plus=False)), 'QUERY_STRING': request.query, 'REMOTE_ADDR': request.remote_ip, 'SERVER_NAME': host, 'SERVER_PORT': str(port), 'SERVER_PROTOCOL': request.version, 'wsgi.version': (1, 0), 'wsgi.url_scheme': request.protocol, 'wsgi.input': BytesIO(escape.utf8(request.body)), 'wsgi.errors': sys.stderr, 'wsgi.multithread': False, 'wsgi.multiprocess': True, 'wsgi.run_once': False}
if 'Content-Type' in request.headers:
environ['CONTENT_TYPE'] = request.headers.pop('Content-Type') # depends on [control=['if'], data=[]]
if 'Content-Length' in request.headers:
environ['CONTENT_LENGTH'] = request.headers.pop('Content-Length') # depends on [control=['if'], data=[]]
for (key, value) in request.headers.items():
environ['HTTP_' + key.replace('-', '_').upper()] = value # depends on [control=['for'], data=[]]
return environ |
def failed(self, binding, error):
"""
Request failed, process reply based on reason
@param binding: The binding to be used to process the reply.
@type binding: L{suds.bindings.binding.Binding}
@param error: The http error message
@type error: L{transport.TransportError}
"""
status, reason = (error.httpcode, tostr(error))
reply = error.fp.read()
log.debug('http failed:\n%s', reply)
if status == 500:
if len(reply) > 0:
r, p = binding.get_fault(reply)
self.last_received(r)
return (status, p)
else:
return (status, None)
if self.options.faults:
raise HttpWebFault(status, reason)
else:
return (status, None) | def function[failed, parameter[self, binding, error]]:
constant[
Request failed, process reply based on reason
@param binding: The binding to be used to process the reply.
@type binding: L{suds.bindings.binding.Binding}
@param error: The http error message
@type error: L{transport.TransportError}
]
<ast.Tuple object at 0x7da2044c2e00> assign[=] tuple[[<ast.Attribute object at 0x7da2044c1900>, <ast.Call object at 0x7da2044c3970>]]
variable[reply] assign[=] call[name[error].fp.read, parameter[]]
call[name[log].debug, parameter[constant[http failed:
%s], name[reply]]]
if compare[name[status] equal[==] constant[500]] begin[:]
if compare[call[name[len], parameter[name[reply]]] greater[>] constant[0]] begin[:]
<ast.Tuple object at 0x7da2044c3b50> assign[=] call[name[binding].get_fault, parameter[name[reply]]]
call[name[self].last_received, parameter[name[r]]]
return[tuple[[<ast.Name object at 0x7da2044c2620>, <ast.Name object at 0x7da2044c0070>]]]
if name[self].options.faults begin[:]
<ast.Raise object at 0x7da20ed9bd30> | keyword[def] identifier[failed] ( identifier[self] , identifier[binding] , identifier[error] ):
literal[string]
identifier[status] , identifier[reason] =( identifier[error] . identifier[httpcode] , identifier[tostr] ( identifier[error] ))
identifier[reply] = identifier[error] . identifier[fp] . identifier[read] ()
identifier[log] . identifier[debug] ( literal[string] , identifier[reply] )
keyword[if] identifier[status] == literal[int] :
keyword[if] identifier[len] ( identifier[reply] )> literal[int] :
identifier[r] , identifier[p] = identifier[binding] . identifier[get_fault] ( identifier[reply] )
identifier[self] . identifier[last_received] ( identifier[r] )
keyword[return] ( identifier[status] , identifier[p] )
keyword[else] :
keyword[return] ( identifier[status] , keyword[None] )
keyword[if] identifier[self] . identifier[options] . identifier[faults] :
keyword[raise] identifier[HttpWebFault] ( identifier[status] , identifier[reason] )
keyword[else] :
keyword[return] ( identifier[status] , keyword[None] ) | def failed(self, binding, error):
"""
Request failed, process reply based on reason
@param binding: The binding to be used to process the reply.
@type binding: L{suds.bindings.binding.Binding}
@param error: The http error message
@type error: L{transport.TransportError}
"""
(status, reason) = (error.httpcode, tostr(error))
reply = error.fp.read()
log.debug('http failed:\n%s', reply)
if status == 500:
if len(reply) > 0:
(r, p) = binding.get_fault(reply)
self.last_received(r)
return (status, p) # depends on [control=['if'], data=[]]
else:
return (status, None) # depends on [control=['if'], data=['status']]
if self.options.faults:
raise HttpWebFault(status, reason) # depends on [control=['if'], data=[]]
else:
return (status, None) |
def auth(self, token):
"""
Take an existing Skype token and refresh it, to extend the expiry time without other credentials.
Args:
token (str): existing Skype token
Returns:
(str, datetime.datetime) tuple: Skype token, and associated expiry if known
Raises:
.SkypeAuthException: if the login request is rejected
.SkypeApiException: if the login form can't be processed
"""
t = self.sendToken(token)
return self.getToken(t) | def function[auth, parameter[self, token]]:
constant[
Take an existing Skype token and refresh it, to extend the expiry time without other credentials.
Args:
token (str): existing Skype token
Returns:
(str, datetime.datetime) tuple: Skype token, and associated expiry if known
Raises:
.SkypeAuthException: if the login request is rejected
.SkypeApiException: if the login form can't be processed
]
variable[t] assign[=] call[name[self].sendToken, parameter[name[token]]]
return[call[name[self].getToken, parameter[name[t]]]] | keyword[def] identifier[auth] ( identifier[self] , identifier[token] ):
literal[string]
identifier[t] = identifier[self] . identifier[sendToken] ( identifier[token] )
keyword[return] identifier[self] . identifier[getToken] ( identifier[t] ) | def auth(self, token):
"""
Take an existing Skype token and refresh it, to extend the expiry time without other credentials.
Args:
token (str): existing Skype token
Returns:
(str, datetime.datetime) tuple: Skype token, and associated expiry if known
Raises:
.SkypeAuthException: if the login request is rejected
.SkypeApiException: if the login form can't be processed
"""
t = self.sendToken(token)
return self.getToken(t) |
def asdict(model, exclude=None, exclude_underscore=None, exclude_pk=None,
follow=None, include=None, only=None, method='asdict', **kwargs):
"""Get a dict from a model
Using the `method` parameter makes it possible to have multiple methods
that formats the result.
Additional keyword arguments will be passed to all relationships that are
followed. This can be used to pass on things like request or context.
:param follow: List or dict of relationships that should be followed.
If the parameter is a dict the value should be a dict of \
keyword arguments. Currently it follows InstrumentedList, \
MappedCollection and regular 1:1, 1:m, m:m relationships. Follow \
takes an extra argument, 'method', which is the method that \
should be used on the relation. It also takes the extra argument \
'parent' which determines where the relationships data should be \
added in the response dict. If 'parent' is set the relationship \
will be added with it's own key as a child to `parent`.
:param exclude: List of properties that should be excluded, will be \
merged with `model.dictalchemy_exclude`
:param exclude_pk: If True any column that refers to the primary key will \
be excluded.
:param exclude_underscore: Overides `model.dictalchemy_exclude_underscore`\
if set
:param include: List of properties that should be included. Use this to \
allow python properties to be called. This list will be merged \
with `model.dictalchemy_asdict_include` or \
`model.dictalchemy_include`.
:param only: List of properties that should be included. This will \
override everything else except `follow`.
:param method: Name of the method that is currently called. This will be \
the default method used in 'follow' unless another method is\
set.
:raises: :class:`dictalchemy.errors.MissingRelationError` \
if `follow` contains a non-existent relationship.
:raises: :class:`dictalchemy.errors.UnsupportedRelationError` If `follow` \
contains an existing relationship that currently isn't supported.
:returns: dict
"""
follow = arg_to_dict(follow)
info = inspect(model)
columns = [c.key for c in info.mapper.column_attrs]
synonyms = [c.key for c in info.mapper.synonyms]
if only:
attrs = only
else:
exclude = exclude or []
exclude += getattr(model, 'dictalchemy_exclude',
constants.default_exclude) or []
if exclude_underscore is None:
exclude_underscore = getattr(model,
'dictalchemy_exclude_underscore',
constants.default_exclude_underscore)
if exclude_underscore:
# Exclude all properties starting with underscore
exclude += [k.key for k in info.mapper.attrs if k.key[0] == '_']
if exclude_pk is True:
exclude += [c.key for c in info.mapper.primary_key]
include = (include or []) + (getattr(model,
'dictalchemy_asdict_include',
getattr(model,
'dictalchemy_include',
None)) or [])
attrs = [k for k in columns + synonyms + include if k not in exclude]
data = dict([(k, getattr(model, k)) for k in attrs])
for (rel_key, orig_args) in follow.iteritems():
try:
rel = getattr(model, rel_key)
except AttributeError:
raise errors.MissingRelationError(rel_key)
args = copy.deepcopy(orig_args)
method = args.pop('method', method)
args['method'] = method
args.update(copy.copy(kwargs))
if hasattr(rel, method):
rel_data = getattr(rel, method)(**args)
elif isinstance(rel, (list, _AssociationList)):
rel_data = []
for child in rel:
if hasattr(child, method):
rel_data.append(getattr(child, method)(**args))
else:
try:
rel_data.append(dict(child))
# TypeError is for non-dictable children
except TypeError:
rel_data.append(copy.copy(child))
elif isinstance(rel, dict):
rel_data = {}
for (child_key, child) in rel.iteritems():
if hasattr(child, method):
rel_data[child_key] = getattr(child, method)(**args)
else:
try:
rel_data[child_key] = dict(child)
except ValueError:
rel_data[child_key] = copy.copy(child)
elif isinstance(rel, (AppenderMixin, Query)):
rel_data = []
for child in rel.all():
if hasattr(child, method):
rel_data.append(getattr(child, method)(**args))
else:
rel_data.append(dict(child))
elif rel is None:
rel_data = None
else:
raise errors.UnsupportedRelationError(rel_key)
ins_key = args.pop('parent', None)
if ins_key is None:
data[rel_key] = rel_data
else:
if ins_key not in data:
data[ins_key] = {}
data[ins_key][rel_key] = rel_data
return data | def function[asdict, parameter[model, exclude, exclude_underscore, exclude_pk, follow, include, only, method]]:
constant[Get a dict from a model
Using the `method` parameter makes it possible to have multiple methods
that formats the result.
Additional keyword arguments will be passed to all relationships that are
followed. This can be used to pass on things like request or context.
:param follow: List or dict of relationships that should be followed.
If the parameter is a dict the value should be a dict of keyword arguments. Currently it follows InstrumentedList, MappedCollection and regular 1:1, 1:m, m:m relationships. Follow takes an extra argument, 'method', which is the method that should be used on the relation. It also takes the extra argument 'parent' which determines where the relationships data should be added in the response dict. If 'parent' is set the relationship will be added with it's own key as a child to `parent`.
:param exclude: List of properties that should be excluded, will be merged with `model.dictalchemy_exclude`
:param exclude_pk: If True any column that refers to the primary key will be excluded.
:param exclude_underscore: Overides `model.dictalchemy_exclude_underscore` if set
:param include: List of properties that should be included. Use this to allow python properties to be called. This list will be merged with `model.dictalchemy_asdict_include` or `model.dictalchemy_include`.
:param only: List of properties that should be included. This will override everything else except `follow`.
:param method: Name of the method that is currently called. This will be the default method used in 'follow' unless another method is set.
:raises: :class:`dictalchemy.errors.MissingRelationError` if `follow` contains a non-existent relationship.
:raises: :class:`dictalchemy.errors.UnsupportedRelationError` If `follow` contains an existing relationship that currently isn't supported.
:returns: dict
]
variable[follow] assign[=] call[name[arg_to_dict], parameter[name[follow]]]
variable[info] assign[=] call[name[inspect], parameter[name[model]]]
variable[columns] assign[=] <ast.ListComp object at 0x7da18ede6410>
variable[synonyms] assign[=] <ast.ListComp object at 0x7da18ede6da0>
if name[only] begin[:]
variable[attrs] assign[=] name[only]
variable[data] assign[=] call[name[dict], parameter[<ast.ListComp object at 0x7da1b12b98a0>]]
for taget[tuple[[<ast.Name object at 0x7da1b12b8a30>, <ast.Name object at 0x7da1b12bb310>]]] in starred[call[name[follow].iteritems, parameter[]]] begin[:]
<ast.Try object at 0x7da1b12bae00>
variable[args] assign[=] call[name[copy].deepcopy, parameter[name[orig_args]]]
variable[method] assign[=] call[name[args].pop, parameter[constant[method], name[method]]]
call[name[args]][constant[method]] assign[=] name[method]
call[name[args].update, parameter[call[name[copy].copy, parameter[name[kwargs]]]]]
if call[name[hasattr], parameter[name[rel], name[method]]] begin[:]
variable[rel_data] assign[=] call[call[name[getattr], parameter[name[rel], name[method]]], parameter[]]
variable[ins_key] assign[=] call[name[args].pop, parameter[constant[parent], constant[None]]]
if compare[name[ins_key] is constant[None]] begin[:]
call[name[data]][name[rel_key]] assign[=] name[rel_data]
return[name[data]] | keyword[def] identifier[asdict] ( identifier[model] , identifier[exclude] = keyword[None] , identifier[exclude_underscore] = keyword[None] , identifier[exclude_pk] = keyword[None] ,
identifier[follow] = keyword[None] , identifier[include] = keyword[None] , identifier[only] = keyword[None] , identifier[method] = literal[string] ,** identifier[kwargs] ):
literal[string]
identifier[follow] = identifier[arg_to_dict] ( identifier[follow] )
identifier[info] = identifier[inspect] ( identifier[model] )
identifier[columns] =[ identifier[c] . identifier[key] keyword[for] identifier[c] keyword[in] identifier[info] . identifier[mapper] . identifier[column_attrs] ]
identifier[synonyms] =[ identifier[c] . identifier[key] keyword[for] identifier[c] keyword[in] identifier[info] . identifier[mapper] . identifier[synonyms] ]
keyword[if] identifier[only] :
identifier[attrs] = identifier[only]
keyword[else] :
identifier[exclude] = identifier[exclude] keyword[or] []
identifier[exclude] += identifier[getattr] ( identifier[model] , literal[string] ,
identifier[constants] . identifier[default_exclude] ) keyword[or] []
keyword[if] identifier[exclude_underscore] keyword[is] keyword[None] :
identifier[exclude_underscore] = identifier[getattr] ( identifier[model] ,
literal[string] ,
identifier[constants] . identifier[default_exclude_underscore] )
keyword[if] identifier[exclude_underscore] :
identifier[exclude] +=[ identifier[k] . identifier[key] keyword[for] identifier[k] keyword[in] identifier[info] . identifier[mapper] . identifier[attrs] keyword[if] identifier[k] . identifier[key] [ literal[int] ]== literal[string] ]
keyword[if] identifier[exclude_pk] keyword[is] keyword[True] :
identifier[exclude] +=[ identifier[c] . identifier[key] keyword[for] identifier[c] keyword[in] identifier[info] . identifier[mapper] . identifier[primary_key] ]
identifier[include] =( identifier[include] keyword[or] [])+( identifier[getattr] ( identifier[model] ,
literal[string] ,
identifier[getattr] ( identifier[model] ,
literal[string] ,
keyword[None] )) keyword[or] [])
identifier[attrs] =[ identifier[k] keyword[for] identifier[k] keyword[in] identifier[columns] + identifier[synonyms] + identifier[include] keyword[if] identifier[k] keyword[not] keyword[in] identifier[exclude] ]
identifier[data] = identifier[dict] ([( identifier[k] , identifier[getattr] ( identifier[model] , identifier[k] )) keyword[for] identifier[k] keyword[in] identifier[attrs] ])
keyword[for] ( identifier[rel_key] , identifier[orig_args] ) keyword[in] identifier[follow] . identifier[iteritems] ():
keyword[try] :
identifier[rel] = identifier[getattr] ( identifier[model] , identifier[rel_key] )
keyword[except] identifier[AttributeError] :
keyword[raise] identifier[errors] . identifier[MissingRelationError] ( identifier[rel_key] )
identifier[args] = identifier[copy] . identifier[deepcopy] ( identifier[orig_args] )
identifier[method] = identifier[args] . identifier[pop] ( literal[string] , identifier[method] )
identifier[args] [ literal[string] ]= identifier[method]
identifier[args] . identifier[update] ( identifier[copy] . identifier[copy] ( identifier[kwargs] ))
keyword[if] identifier[hasattr] ( identifier[rel] , identifier[method] ):
identifier[rel_data] = identifier[getattr] ( identifier[rel] , identifier[method] )(** identifier[args] )
keyword[elif] identifier[isinstance] ( identifier[rel] ,( identifier[list] , identifier[_AssociationList] )):
identifier[rel_data] =[]
keyword[for] identifier[child] keyword[in] identifier[rel] :
keyword[if] identifier[hasattr] ( identifier[child] , identifier[method] ):
identifier[rel_data] . identifier[append] ( identifier[getattr] ( identifier[child] , identifier[method] )(** identifier[args] ))
keyword[else] :
keyword[try] :
identifier[rel_data] . identifier[append] ( identifier[dict] ( identifier[child] ))
keyword[except] identifier[TypeError] :
identifier[rel_data] . identifier[append] ( identifier[copy] . identifier[copy] ( identifier[child] ))
keyword[elif] identifier[isinstance] ( identifier[rel] , identifier[dict] ):
identifier[rel_data] ={}
keyword[for] ( identifier[child_key] , identifier[child] ) keyword[in] identifier[rel] . identifier[iteritems] ():
keyword[if] identifier[hasattr] ( identifier[child] , identifier[method] ):
identifier[rel_data] [ identifier[child_key] ]= identifier[getattr] ( identifier[child] , identifier[method] )(** identifier[args] )
keyword[else] :
keyword[try] :
identifier[rel_data] [ identifier[child_key] ]= identifier[dict] ( identifier[child] )
keyword[except] identifier[ValueError] :
identifier[rel_data] [ identifier[child_key] ]= identifier[copy] . identifier[copy] ( identifier[child] )
keyword[elif] identifier[isinstance] ( identifier[rel] ,( identifier[AppenderMixin] , identifier[Query] )):
identifier[rel_data] =[]
keyword[for] identifier[child] keyword[in] identifier[rel] . identifier[all] ():
keyword[if] identifier[hasattr] ( identifier[child] , identifier[method] ):
identifier[rel_data] . identifier[append] ( identifier[getattr] ( identifier[child] , identifier[method] )(** identifier[args] ))
keyword[else] :
identifier[rel_data] . identifier[append] ( identifier[dict] ( identifier[child] ))
keyword[elif] identifier[rel] keyword[is] keyword[None] :
identifier[rel_data] = keyword[None]
keyword[else] :
keyword[raise] identifier[errors] . identifier[UnsupportedRelationError] ( identifier[rel_key] )
identifier[ins_key] = identifier[args] . identifier[pop] ( literal[string] , keyword[None] )
keyword[if] identifier[ins_key] keyword[is] keyword[None] :
identifier[data] [ identifier[rel_key] ]= identifier[rel_data]
keyword[else] :
keyword[if] identifier[ins_key] keyword[not] keyword[in] identifier[data] :
identifier[data] [ identifier[ins_key] ]={}
identifier[data] [ identifier[ins_key] ][ identifier[rel_key] ]= identifier[rel_data]
keyword[return] identifier[data] | def asdict(model, exclude=None, exclude_underscore=None, exclude_pk=None, follow=None, include=None, only=None, method='asdict', **kwargs):
"""Get a dict from a model
Using the `method` parameter makes it possible to have multiple methods
that formats the result.
Additional keyword arguments will be passed to all relationships that are
followed. This can be used to pass on things like request or context.
:param follow: List or dict of relationships that should be followed.
If the parameter is a dict the value should be a dict of keyword arguments. Currently it follows InstrumentedList, MappedCollection and regular 1:1, 1:m, m:m relationships. Follow takes an extra argument, 'method', which is the method that should be used on the relation. It also takes the extra argument 'parent' which determines where the relationships data should be added in the response dict. If 'parent' is set the relationship will be added with it's own key as a child to `parent`.
:param exclude: List of properties that should be excluded, will be merged with `model.dictalchemy_exclude`
:param exclude_pk: If True any column that refers to the primary key will be excluded.
:param exclude_underscore: Overides `model.dictalchemy_exclude_underscore` if set
:param include: List of properties that should be included. Use this to allow python properties to be called. This list will be merged with `model.dictalchemy_asdict_include` or `model.dictalchemy_include`.
:param only: List of properties that should be included. This will override everything else except `follow`.
:param method: Name of the method that is currently called. This will be the default method used in 'follow' unless another method is set.
:raises: :class:`dictalchemy.errors.MissingRelationError` if `follow` contains a non-existent relationship.
:raises: :class:`dictalchemy.errors.UnsupportedRelationError` If `follow` contains an existing relationship that currently isn't supported.
:returns: dict
"""
follow = arg_to_dict(follow)
info = inspect(model)
columns = [c.key for c in info.mapper.column_attrs]
synonyms = [c.key for c in info.mapper.synonyms]
if only:
attrs = only # depends on [control=['if'], data=[]]
else:
exclude = exclude or []
exclude += getattr(model, 'dictalchemy_exclude', constants.default_exclude) or []
if exclude_underscore is None:
exclude_underscore = getattr(model, 'dictalchemy_exclude_underscore', constants.default_exclude_underscore) # depends on [control=['if'], data=['exclude_underscore']]
if exclude_underscore:
# Exclude all properties starting with underscore
exclude += [k.key for k in info.mapper.attrs if k.key[0] == '_'] # depends on [control=['if'], data=[]]
if exclude_pk is True:
exclude += [c.key for c in info.mapper.primary_key] # depends on [control=['if'], data=[]]
include = (include or []) + (getattr(model, 'dictalchemy_asdict_include', getattr(model, 'dictalchemy_include', None)) or [])
attrs = [k for k in columns + synonyms + include if k not in exclude]
data = dict([(k, getattr(model, k)) for k in attrs])
for (rel_key, orig_args) in follow.iteritems():
try:
rel = getattr(model, rel_key) # depends on [control=['try'], data=[]]
except AttributeError:
raise errors.MissingRelationError(rel_key) # depends on [control=['except'], data=[]]
args = copy.deepcopy(orig_args)
method = args.pop('method', method)
args['method'] = method
args.update(copy.copy(kwargs))
if hasattr(rel, method):
rel_data = getattr(rel, method)(**args) # depends on [control=['if'], data=[]]
elif isinstance(rel, (list, _AssociationList)):
rel_data = []
for child in rel:
if hasattr(child, method):
rel_data.append(getattr(child, method)(**args)) # depends on [control=['if'], data=[]]
else:
try:
rel_data.append(dict(child)) # depends on [control=['try'], data=[]]
# TypeError is for non-dictable children
except TypeError:
rel_data.append(copy.copy(child)) # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['child']] # depends on [control=['if'], data=[]]
elif isinstance(rel, dict):
rel_data = {}
for (child_key, child) in rel.iteritems():
if hasattr(child, method):
rel_data[child_key] = getattr(child, method)(**args) # depends on [control=['if'], data=[]]
else:
try:
rel_data[child_key] = dict(child) # depends on [control=['try'], data=[]]
except ValueError:
rel_data[child_key] = copy.copy(child) # depends on [control=['except'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
elif isinstance(rel, (AppenderMixin, Query)):
rel_data = []
for child in rel.all():
if hasattr(child, method):
rel_data.append(getattr(child, method)(**args)) # depends on [control=['if'], data=[]]
else:
rel_data.append(dict(child)) # depends on [control=['for'], data=['child']] # depends on [control=['if'], data=[]]
elif rel is None:
rel_data = None # depends on [control=['if'], data=[]]
else:
raise errors.UnsupportedRelationError(rel_key)
ins_key = args.pop('parent', None)
if ins_key is None:
data[rel_key] = rel_data # depends on [control=['if'], data=[]]
else:
if ins_key not in data:
data[ins_key] = {} # depends on [control=['if'], data=['ins_key', 'data']]
data[ins_key][rel_key] = rel_data # depends on [control=['for'], data=[]]
return data |
def entities(self, entity_ids):
'''Get the default data for entities.
@param entity_ids A list of entity ids either as strings or references.
'''
url = '%s/meta/any?include=id&' % self.url
for entity_id in entity_ids:
url += 'id=%s&' % _get_path(entity_id)
# Remove the trailing '&' from the URL.
url = url[:-1]
data = self._get(url)
return data.json() | def function[entities, parameter[self, entity_ids]]:
constant[Get the default data for entities.
@param entity_ids A list of entity ids either as strings or references.
]
variable[url] assign[=] binary_operation[constant[%s/meta/any?include=id&] <ast.Mod object at 0x7da2590d6920> name[self].url]
for taget[name[entity_id]] in starred[name[entity_ids]] begin[:]
<ast.AugAssign object at 0x7da2047ea440>
variable[url] assign[=] call[name[url]][<ast.Slice object at 0x7da1b24b3d30>]
variable[data] assign[=] call[name[self]._get, parameter[name[url]]]
return[call[name[data].json, parameter[]]] | keyword[def] identifier[entities] ( identifier[self] , identifier[entity_ids] ):
literal[string]
identifier[url] = literal[string] % identifier[self] . identifier[url]
keyword[for] identifier[entity_id] keyword[in] identifier[entity_ids] :
identifier[url] += literal[string] % identifier[_get_path] ( identifier[entity_id] )
identifier[url] = identifier[url] [:- literal[int] ]
identifier[data] = identifier[self] . identifier[_get] ( identifier[url] )
keyword[return] identifier[data] . identifier[json] () | def entities(self, entity_ids):
"""Get the default data for entities.
@param entity_ids A list of entity ids either as strings or references.
"""
url = '%s/meta/any?include=id&' % self.url
for entity_id in entity_ids:
url += 'id=%s&' % _get_path(entity_id) # depends on [control=['for'], data=['entity_id']]
# Remove the trailing '&' from the URL.
url = url[:-1]
data = self._get(url)
return data.json() |
def _proxy_to_logger(self, method_name, event, *event_args,
**event_kw):
"""
Propagate a method call to the wrapped logger.
This is the same as the superclass implementation, except that
it also preserves positional arguments in the `event_dict` so
that the stdblib's support for format strings can be used.
"""
if isinstance(event, bytes):
event = event.decode('utf-8')
if event_args:
event_kw['positional_args'] = event_args
return super(BoundLevelLogger, self)._proxy_to_logger(method_name,
event=event,
**event_kw) | def function[_proxy_to_logger, parameter[self, method_name, event]]:
constant[
Propagate a method call to the wrapped logger.
This is the same as the superclass implementation, except that
it also preserves positional arguments in the `event_dict` so
that the stdblib's support for format strings can be used.
]
if call[name[isinstance], parameter[name[event], name[bytes]]] begin[:]
variable[event] assign[=] call[name[event].decode, parameter[constant[utf-8]]]
if name[event_args] begin[:]
call[name[event_kw]][constant[positional_args]] assign[=] name[event_args]
return[call[call[name[super], parameter[name[BoundLevelLogger], name[self]]]._proxy_to_logger, parameter[name[method_name]]]] | keyword[def] identifier[_proxy_to_logger] ( identifier[self] , identifier[method_name] , identifier[event] ,* identifier[event_args] ,
** identifier[event_kw] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[event] , identifier[bytes] ):
identifier[event] = identifier[event] . identifier[decode] ( literal[string] )
keyword[if] identifier[event_args] :
identifier[event_kw] [ literal[string] ]= identifier[event_args]
keyword[return] identifier[super] ( identifier[BoundLevelLogger] , identifier[self] ). identifier[_proxy_to_logger] ( identifier[method_name] ,
identifier[event] = identifier[event] ,
** identifier[event_kw] ) | def _proxy_to_logger(self, method_name, event, *event_args, **event_kw):
"""
Propagate a method call to the wrapped logger.
This is the same as the superclass implementation, except that
it also preserves positional arguments in the `event_dict` so
that the stdblib's support for format strings can be used.
"""
if isinstance(event, bytes):
event = event.decode('utf-8') # depends on [control=['if'], data=[]]
if event_args:
event_kw['positional_args'] = event_args # depends on [control=['if'], data=[]]
return super(BoundLevelLogger, self)._proxy_to_logger(method_name, event=event, **event_kw) |
def wrap_log_handler(handler):
"""
Helper function which takes a Python logging handler and wraps it.
Returns a callable taking a single argument, which will be wrapped
in a log record and passed to the handler's emit() method.
:param handler: A logging.Handler instance.
:returns: A callable of one argument, which will emit that
argument to the appropriate logging destination.
"""
# Set the formatter on the handler to be the SimpleFormatter
handler.setFormatter(SimpleFormatter())
# Get file name, line number, and function name for
# wrap_log_handler()
obj = wrap_log_handler
filename = inspect.getsourcefile(obj)
lineno = inspect.getsourcelines(obj)[1]
funcname = 'wrap_log_handler'
@functools.wraps(handler.emit)
def wrapper(msg):
# First, generate a LogRecord
record = logging.LogRecord('bark', logging.INFO, filename, lineno,
msg, (), None, funcname)
# Now, pass it to the handler's emit method
handler.emit(record)
return wrapper | def function[wrap_log_handler, parameter[handler]]:
constant[
Helper function which takes a Python logging handler and wraps it.
Returns a callable taking a single argument, which will be wrapped
in a log record and passed to the handler's emit() method.
:param handler: A logging.Handler instance.
:returns: A callable of one argument, which will emit that
argument to the appropriate logging destination.
]
call[name[handler].setFormatter, parameter[call[name[SimpleFormatter], parameter[]]]]
variable[obj] assign[=] name[wrap_log_handler]
variable[filename] assign[=] call[name[inspect].getsourcefile, parameter[name[obj]]]
variable[lineno] assign[=] call[call[name[inspect].getsourcelines, parameter[name[obj]]]][constant[1]]
variable[funcname] assign[=] constant[wrap_log_handler]
def function[wrapper, parameter[msg]]:
variable[record] assign[=] call[name[logging].LogRecord, parameter[constant[bark], name[logging].INFO, name[filename], name[lineno], name[msg], tuple[[]], constant[None], name[funcname]]]
call[name[handler].emit, parameter[name[record]]]
return[name[wrapper]] | keyword[def] identifier[wrap_log_handler] ( identifier[handler] ):
literal[string]
identifier[handler] . identifier[setFormatter] ( identifier[SimpleFormatter] ())
identifier[obj] = identifier[wrap_log_handler]
identifier[filename] = identifier[inspect] . identifier[getsourcefile] ( identifier[obj] )
identifier[lineno] = identifier[inspect] . identifier[getsourcelines] ( identifier[obj] )[ literal[int] ]
identifier[funcname] = literal[string]
@ identifier[functools] . identifier[wraps] ( identifier[handler] . identifier[emit] )
keyword[def] identifier[wrapper] ( identifier[msg] ):
identifier[record] = identifier[logging] . identifier[LogRecord] ( literal[string] , identifier[logging] . identifier[INFO] , identifier[filename] , identifier[lineno] ,
identifier[msg] ,(), keyword[None] , identifier[funcname] )
identifier[handler] . identifier[emit] ( identifier[record] )
keyword[return] identifier[wrapper] | def wrap_log_handler(handler):
"""
Helper function which takes a Python logging handler and wraps it.
Returns a callable taking a single argument, which will be wrapped
in a log record and passed to the handler's emit() method.
:param handler: A logging.Handler instance.
:returns: A callable of one argument, which will emit that
argument to the appropriate logging destination.
"""
# Set the formatter on the handler to be the SimpleFormatter
handler.setFormatter(SimpleFormatter())
# Get file name, line number, and function name for
# wrap_log_handler()
obj = wrap_log_handler
filename = inspect.getsourcefile(obj)
lineno = inspect.getsourcelines(obj)[1]
funcname = 'wrap_log_handler'
@functools.wraps(handler.emit)
def wrapper(msg):
# First, generate a LogRecord
record = logging.LogRecord('bark', logging.INFO, filename, lineno, msg, (), None, funcname)
# Now, pass it to the handler's emit method
handler.emit(record)
return wrapper |
def get_default_config_help(self):
    """
    Return the help text describing each configuration option of this
    handler, extending the options inherited from the base handler.
    """
    help_text = super(SignalfxHandler, self).get_default_config_help()
    # Handler-specific options on top of the inherited ones.
    extra = {
        'url': 'Where to send metrics',
        'batch': 'How many to store before sending',
        'filter_metrics_regex': 'Comma separated collector:regex filters',
        'auth_token': 'Org API token to use when sending metrics',
    }
    help_text.update(extra)
    return help_text
constant[
Returns the help text for the configuration options for this handler
]
variable[config] assign[=] call[call[name[super], parameter[name[SignalfxHandler], name[self]]].get_default_config_help, parameter[]]
call[name[config].update, parameter[dictionary[[<ast.Constant object at 0x7da18dc07d60>, <ast.Constant object at 0x7da18dc045e0>, <ast.Constant object at 0x7da18dc07bb0>, <ast.Constant object at 0x7da18dc06080>], [<ast.Constant object at 0x7da18dc066e0>, <ast.Constant object at 0x7da18dc06800>, <ast.Constant object at 0x7da18dc056c0>, <ast.Constant object at 0x7da18dc07010>]]]]
return[name[config]] | keyword[def] identifier[get_default_config_help] ( identifier[self] ):
literal[string]
identifier[config] = identifier[super] ( identifier[SignalfxHandler] , identifier[self] ). identifier[get_default_config_help] ()
identifier[config] . identifier[update] ({
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
})
keyword[return] identifier[config] | def get_default_config_help(self):
"""
Returns the help text for the configuration options for this handler
"""
config = super(SignalfxHandler, self).get_default_config_help()
config.update({'url': 'Where to send metrics', 'batch': 'How many to store before sending', 'filter_metrics_regex': 'Comma separated collector:regex filters', 'auth_token': 'Org API token to use when sending metrics'})
return config |
def create_shot(self):
    """Create a new shot from the dialog inputs and store it in ``self.shot``.

    Reads the name from the name line edit and the description from the
    description text edit. If no name was entered, a placeholder prompt is
    shown instead of creating anything. On success the dialog is accepted;
    on failure the error is logged with its traceback.

    :returns: None
    :rtype: None
    :raises: None
    """
    name = self.name_le.text()
    if not name:
        # Prompt the user instead of silently doing nothing.
        self.name_le.setPlaceholderText("Please enter a name!")
        return
    desc = self.desc_pte.toPlainText()
    try:
        shot = djadapter.models.Shot(sequence=self.sequence,
                                     project=self.sequence.project,
                                     name=name,
                                     description=desc)
        shot.save()
        self.shot = shot
        self.accept()
    except Exception:
        # A bare ``except:`` would also swallow SystemExit and
        # KeyboardInterrupt; catch only regular errors and log them.
        log.exception("Could not create new shot")
constant[Create a shot and store it in the self.shot
:returns: None
:rtype: None
:raises: None
]
variable[name] assign[=] call[name[self].name_le.text, parameter[]]
if <ast.UnaryOp object at 0x7da1b164b580> begin[:]
call[name[self].name_le.setPlaceholderText, parameter[constant[Please enter a name!]]]
return[None]
variable[desc] assign[=] call[name[self].desc_pte.toPlainText, parameter[]]
<ast.Try object at 0x7da1b1605f60> | keyword[def] identifier[create_shot] ( identifier[self] ,):
literal[string]
identifier[name] = identifier[self] . identifier[name_le] . identifier[text] ()
keyword[if] keyword[not] identifier[name] :
identifier[self] . identifier[name_le] . identifier[setPlaceholderText] ( literal[string] )
keyword[return]
identifier[desc] = identifier[self] . identifier[desc_pte] . identifier[toPlainText] ()
keyword[try] :
identifier[shot] = identifier[djadapter] . identifier[models] . identifier[Shot] ( identifier[sequence] = identifier[self] . identifier[sequence] , identifier[project] = identifier[self] . identifier[sequence] . identifier[project] , identifier[name] = identifier[name] , identifier[description] = identifier[desc] )
identifier[shot] . identifier[save] ()
identifier[self] . identifier[shot] = identifier[shot]
identifier[self] . identifier[accept] ()
keyword[except] :
identifier[log] . identifier[exception] ( literal[string] ) | def create_shot(self):
"""Create a shot and store it in the self.shot
:returns: None
:rtype: None
:raises: None
"""
name = self.name_le.text()
if not name:
self.name_le.setPlaceholderText('Please enter a name!')
return # depends on [control=['if'], data=[]]
desc = self.desc_pte.toPlainText()
try:
shot = djadapter.models.Shot(sequence=self.sequence, project=self.sequence.project, name=name, description=desc)
shot.save()
self.shot = shot
self.accept() # depends on [control=['try'], data=[]]
except:
log.exception('Could not create new shot') # depends on [control=['except'], data=[]] |
def speed_of_gait(self, x, wavelet_type='db3', wavelet_level=6):
    """
    Assess the speed of gait following :cite:`g-MartinSB11`.

    The gait speed is extracted from the energies of the wavelet detail
    coefficients of the given time series. Preferably you should use the
    magnitude of x, y and z (mag_acc_sum) here, as the time series.

    :param x: The time series to assess speed of gait on. This could be x, y, z or mag_sum_acc.
    :type x: pandas.Series
    :param wavelet_type: The type of wavelet to use. See https://pywavelets.readthedocs.io/en/latest/ref/wavelets.html for a full list ('db3' default).
    :type wavelet_type: str
    :param wavelet_level: The number of cycles the used wavelet should have. See https://pywavelets.readthedocs.io/en/latest/ref/wavelets.html for a fill list (6 default).
    :type wavelet_level: int
    :return: The speed of gait [measured in meters/second].
    :rtype: float
    """
    coefficients = wavedec(x.values, wavelet=wavelet_type, level=wavelet_level)
    # Mean energy of each detail-coefficient band (coefficients[0] is the
    # approximation band and is not used here).
    energies = []
    for idx in range(wavelet_level):
        detail = coefficients[wavelet_level - idx]
        energies.append(sum(detail ** 2) / len(detail))
    root2 = np.sqrt(2)
    # Per-band normalisation factors of the reference formula.
    # NOTE(review): these are hard-coded for six decomposition levels.
    divisors = (5 * root2, 4 * root2, 3 * root2, 2 * root2, root2, root2)
    weighted = [energies[j] / divisors[j] for j in range(6)]
    return 0.5 * np.sqrt(sum(w / (j + 1) for j, w in enumerate(weighted)))
constant[
This method assess the speed of gait following :cite:`g-MartinSB11`.
It extracts the gait speed from the energies of the approximation coefficients of wavelet functions.
Prefferably you should use the magnitude of x, y and z (mag_acc_sum) here, as the time series.
:param x: The time series to assess freeze of gait on. This could be x, y, z or mag_sum_acc.
:type x: pandas.Series
:param wavelet_type: The type of wavelet to use. See https://pywavelets.readthedocs.io/en/latest/ref/wavelets.html for a full list ('db3' default).
:type wavelet_type: str
:param wavelet_level: The number of cycles the used wavelet should have. See https://pywavelets.readthedocs.io/en/latest/ref/wavelets.html for a fill list (6 default).
:type wavelet_level: int
:return: The speed of gait [measured in meters/second].
:rtype: float
]
variable[coeffs] assign[=] call[name[wavedec], parameter[name[x].values]]
variable[energy] assign[=] <ast.ListComp object at 0x7da18ede5840>
variable[WEd1] assign[=] binary_operation[call[name[energy]][constant[0]] / binary_operation[constant[5] * call[name[np].sqrt, parameter[constant[2]]]]]
variable[WEd2] assign[=] binary_operation[call[name[energy]][constant[1]] / binary_operation[constant[4] * call[name[np].sqrt, parameter[constant[2]]]]]
variable[WEd3] assign[=] binary_operation[call[name[energy]][constant[2]] / binary_operation[constant[3] * call[name[np].sqrt, parameter[constant[2]]]]]
variable[WEd4] assign[=] binary_operation[call[name[energy]][constant[3]] / binary_operation[constant[2] * call[name[np].sqrt, parameter[constant[2]]]]]
variable[WEd5] assign[=] binary_operation[call[name[energy]][constant[4]] / call[name[np].sqrt, parameter[constant[2]]]]
variable[WEd6] assign[=] binary_operation[call[name[energy]][constant[5]] / call[name[np].sqrt, parameter[constant[2]]]]
variable[gait_speed] assign[=] binary_operation[constant[0.5] * call[name[np].sqrt, parameter[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[name[WEd1] + binary_operation[name[WEd2] / constant[2]]] + binary_operation[name[WEd3] / constant[3]]] + binary_operation[name[WEd4] / constant[4]]] + binary_operation[name[WEd5] / constant[5]]] + binary_operation[name[WEd6] / constant[6]]]]]]
return[name[gait_speed]] | keyword[def] identifier[speed_of_gait] ( identifier[self] , identifier[x] , identifier[wavelet_type] = literal[string] , identifier[wavelet_level] = literal[int] ):
literal[string]
identifier[coeffs] = identifier[wavedec] ( identifier[x] . identifier[values] , identifier[wavelet] = identifier[wavelet_type] , identifier[level] = identifier[wavelet_level] )
identifier[energy] =[ identifier[sum] ( identifier[coeffs] [ identifier[wavelet_level] - identifier[i] ]** literal[int] )/ identifier[len] ( identifier[coeffs] [ identifier[wavelet_level] - identifier[i] ]) keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[wavelet_level] )]
identifier[WEd1] = identifier[energy] [ literal[int] ]/( literal[int] * identifier[np] . identifier[sqrt] ( literal[int] ))
identifier[WEd2] = identifier[energy] [ literal[int] ]/( literal[int] * identifier[np] . identifier[sqrt] ( literal[int] ))
identifier[WEd3] = identifier[energy] [ literal[int] ]/( literal[int] * identifier[np] . identifier[sqrt] ( literal[int] ))
identifier[WEd4] = identifier[energy] [ literal[int] ]/( literal[int] * identifier[np] . identifier[sqrt] ( literal[int] ))
identifier[WEd5] = identifier[energy] [ literal[int] ]/ identifier[np] . identifier[sqrt] ( literal[int] )
identifier[WEd6] = identifier[energy] [ literal[int] ]/ identifier[np] . identifier[sqrt] ( literal[int] )
identifier[gait_speed] = literal[int] * identifier[np] . identifier[sqrt] ( identifier[WEd1] +( identifier[WEd2] / literal[int] )+( identifier[WEd3] / literal[int] )+( identifier[WEd4] / literal[int] )+( identifier[WEd5] / literal[int] )+( identifier[WEd6] / literal[int] ))
keyword[return] identifier[gait_speed] | def speed_of_gait(self, x, wavelet_type='db3', wavelet_level=6):
"""
This method assess the speed of gait following :cite:`g-MartinSB11`.
It extracts the gait speed from the energies of the approximation coefficients of wavelet functions.
Prefferably you should use the magnitude of x, y and z (mag_acc_sum) here, as the time series.
:param x: The time series to assess freeze of gait on. This could be x, y, z or mag_sum_acc.
:type x: pandas.Series
:param wavelet_type: The type of wavelet to use. See https://pywavelets.readthedocs.io/en/latest/ref/wavelets.html for a full list ('db3' default).
:type wavelet_type: str
:param wavelet_level: The number of cycles the used wavelet should have. See https://pywavelets.readthedocs.io/en/latest/ref/wavelets.html for a fill list (6 default).
:type wavelet_level: int
:return: The speed of gait [measured in meters/second].
:rtype: float
"""
coeffs = wavedec(x.values, wavelet=wavelet_type, level=wavelet_level)
energy = [sum(coeffs[wavelet_level - i] ** 2) / len(coeffs[wavelet_level - i]) for i in range(wavelet_level)]
WEd1 = energy[0] / (5 * np.sqrt(2))
WEd2 = energy[1] / (4 * np.sqrt(2))
WEd3 = energy[2] / (3 * np.sqrt(2))
WEd4 = energy[3] / (2 * np.sqrt(2))
WEd5 = energy[4] / np.sqrt(2)
WEd6 = energy[5] / np.sqrt(2)
gait_speed = 0.5 * np.sqrt(WEd1 + WEd2 / 2 + WEd3 / 3 + WEd4 / 4 + WEd5 / 5 + WEd6 / 6)
return gait_speed |
def unioniter(self, *others):
    """ Iterator variant of :meth:union - yields the members of the
        union instead of returning a #set
        @others: one or several :class:RedisSet objects or #str redis set
            keynames
        -> yields members of the resulting set
    """
    keynames = self._typesafe_others(others)
    members = self._client.sunion(self.key_prefix, *keynames)
    for raw in members:
        yield self._loads(raw)
constant[ The same as :meth:union, but returns iterator instead of #set
@others: one or several :class:RedisSet objects or #str redis set
keynames
-> yields members of the resulting set
]
variable[others] assign[=] call[name[self]._typesafe_others, parameter[name[others]]]
for taget[name[other]] in starred[call[name[self]._client.sunion, parameter[name[self].key_prefix, <ast.Starred object at 0x7da1b28f6320>]]] begin[:]
<ast.Yield object at 0x7da1b28f4d30> | keyword[def] identifier[unioniter] ( identifier[self] ,* identifier[others] ):
literal[string]
identifier[others] = identifier[self] . identifier[_typesafe_others] ( identifier[others] )
keyword[for] identifier[other] keyword[in] identifier[self] . identifier[_client] . identifier[sunion] ( identifier[self] . identifier[key_prefix] ,* identifier[others] ):
keyword[yield] identifier[self] . identifier[_loads] ( identifier[other] ) | def unioniter(self, *others):
""" The same as :meth:union, but returns iterator instead of #set
@others: one or several :class:RedisSet objects or #str redis set
keynames
-> yields members of the resulting set
"""
others = self._typesafe_others(others)
for other in self._client.sunion(self.key_prefix, *others):
yield self._loads(other) # depends on [control=['for'], data=['other']] |
def immediate(self, name, value):
    """
    Immediately bind *value* as attribute *name* on this object and
    record the name as loaded.
    """
    # The two operations are independent; record first, then attach.
    self._all.add(name)
    setattr(self, name, value)
constant[
Load something immediately
]
call[name[setattr], parameter[name[self], name[name], name[value]]]
call[name[self]._all.add, parameter[name[name]]] | keyword[def] identifier[immediate] ( identifier[self] , identifier[name] , identifier[value] ):
literal[string]
identifier[setattr] ( identifier[self] , identifier[name] , identifier[value] )
identifier[self] . identifier[_all] . identifier[add] ( identifier[name] ) | def immediate(self, name, value):
"""
Load something immediately
"""
setattr(self, name, value)
self._all.add(name) |
def disconnectSignals(self, node):
    """
    Disconnects from signals of the inputed node, provided the node is
    a valid XNode instance; otherwise nothing is disconnected.

    :param node <XNode> || None
    :return <bool> success
    """
    from projexui.widgets.xnodewidget.xnode import XNode

    # Only genuine XNode instances carry the dispatch signals below.
    if isinstance(node, XNode):
        node.dispatch.geometryChanged.disconnect(self.setDirty)
        node.dispatch.removed.disconnect(self.forceRemove)
        return True
    return False
constant[
Disconnects from signals of the inputed node, if the node is a valid XNode type.
:param node <XNode> || None
:return <bool> success
]
from relative_module[projexui.widgets.xnodewidget.xnode] import module[XNode]
if <ast.UnaryOp object at 0x7da1b242b040> begin[:]
return[constant[False]]
call[name[node].dispatch.geometryChanged.disconnect, parameter[name[self].setDirty]]
call[name[node].dispatch.removed.disconnect, parameter[name[self].forceRemove]]
return[constant[True]] | keyword[def] identifier[disconnectSignals] ( identifier[self] , identifier[node] ):
literal[string]
keyword[from] identifier[projexui] . identifier[widgets] . identifier[xnodewidget] . identifier[xnode] keyword[import] identifier[XNode]
keyword[if] keyword[not] identifier[isinstance] ( identifier[node] , identifier[XNode] ):
keyword[return] keyword[False]
identifier[node] . identifier[dispatch] . identifier[geometryChanged] . identifier[disconnect] ( identifier[self] . identifier[setDirty] )
identifier[node] . identifier[dispatch] . identifier[removed] . identifier[disconnect] ( identifier[self] . identifier[forceRemove] )
keyword[return] keyword[True] | def disconnectSignals(self, node):
"""
Disconnects from signals of the inputed node, if the node is a valid XNode type.
:param node <XNode> || None
:return <bool> success
"""
from projexui.widgets.xnodewidget.xnode import XNode
# make sure we're disconnecting from a valid node
if not isinstance(node, XNode):
return False # depends on [control=['if'], data=[]]
node.dispatch.geometryChanged.disconnect(self.setDirty)
node.dispatch.removed.disconnect(self.forceRemove)
return True |
def on_button_press(self, event):
    """Handle button press events.

    When the left mouse button goes down on top of an end handle of a
    connection (item.Handle), that handle is grabbed so it can be
    dragged around.
    """
    if event.get_button()[1] != 1:  # react to the left mouse button only
        return False
    view = self.view
    position = (event.x, event.y)
    item, handle = HandleFinder(view.hovered_item, view).get_handle_at_point(position)
    # Only the end handles of a connection may be grabbed.
    if not handle or not isinstance(item, ConnectionView) or handle not in item.end_handles():
        return False
    grabbed_from = handle is item.from_handle()
    self._start_port_v = item.from_port if grabbed_from else item.to_port
    self._parent_state_v = item.parent
    self._end_handle = handle
    if isinstance(item, TransitionView):
        self._is_transition = True
    self._connection_v = item
    return True
constant[Handle button press events.
If the (mouse) button is pressed on top of a Handle (item.Handle), that handle is grabbed and can be
dragged around.
]
if <ast.UnaryOp object at 0x7da18eb56770> begin[:]
return[constant[False]]
variable[view] assign[=] name[self].view
<ast.Tuple object at 0x7da18eb547f0> assign[=] call[call[name[HandleFinder], parameter[name[view].hovered_item, name[view]]].get_handle_at_point, parameter[tuple[[<ast.Attribute object at 0x7da18eb544f0>, <ast.Attribute object at 0x7da18eb57f40>]]]]
if <ast.BoolOp object at 0x7da18eb55090> begin[:]
return[constant[False]]
if compare[name[handle] is call[name[item].from_handle, parameter[]]] begin[:]
name[self]._start_port_v assign[=] name[item].from_port
name[self]._parent_state_v assign[=] name[item].parent
name[self]._end_handle assign[=] name[handle]
if call[name[isinstance], parameter[name[item], name[TransitionView]]] begin[:]
name[self]._is_transition assign[=] constant[True]
name[self]._connection_v assign[=] name[item]
return[constant[True]] | keyword[def] identifier[on_button_press] ( identifier[self] , identifier[event] ):
literal[string]
keyword[if] keyword[not] identifier[event] . identifier[get_button] ()[ literal[int] ]== literal[int] :
keyword[return] keyword[False]
identifier[view] = identifier[self] . identifier[view]
identifier[item] , identifier[handle] = identifier[HandleFinder] ( identifier[view] . identifier[hovered_item] , identifier[view] ). identifier[get_handle_at_point] (( identifier[event] . identifier[x] , identifier[event] . identifier[y] ))
keyword[if] keyword[not] identifier[handle] keyword[or] keyword[not] identifier[isinstance] ( identifier[item] , identifier[ConnectionView] ) keyword[or] identifier[handle] keyword[not] keyword[in] identifier[item] . identifier[end_handles] ():
keyword[return] keyword[False]
keyword[if] identifier[handle] keyword[is] identifier[item] . identifier[from_handle] ():
identifier[self] . identifier[_start_port_v] = identifier[item] . identifier[from_port]
keyword[else] :
identifier[self] . identifier[_start_port_v] = identifier[item] . identifier[to_port]
identifier[self] . identifier[_parent_state_v] = identifier[item] . identifier[parent]
identifier[self] . identifier[_end_handle] = identifier[handle]
keyword[if] identifier[isinstance] ( identifier[item] , identifier[TransitionView] ):
identifier[self] . identifier[_is_transition] = keyword[True]
identifier[self] . identifier[_connection_v] = identifier[item]
keyword[return] keyword[True] | def on_button_press(self, event):
"""Handle button press events.
If the (mouse) button is pressed on top of a Handle (item.Handle), that handle is grabbed and can be
dragged around.
"""
if not event.get_button()[1] == 1: # left mouse button
return False # depends on [control=['if'], data=[]]
view = self.view
(item, handle) = HandleFinder(view.hovered_item, view).get_handle_at_point((event.x, event.y))
# Handle must be the end handle of a connection
if not handle or not isinstance(item, ConnectionView) or handle not in item.end_handles():
return False # depends on [control=['if'], data=[]]
if handle is item.from_handle():
self._start_port_v = item.from_port # depends on [control=['if'], data=[]]
else:
self._start_port_v = item.to_port
self._parent_state_v = item.parent
self._end_handle = handle
if isinstance(item, TransitionView):
self._is_transition = True # depends on [control=['if'], data=[]]
self._connection_v = item
return True |
def ema_growth(eqdata, **kwargs):
    """
    Day-over-day growth of the exponential moving average.

    Parameters
    ----------
    eqdata : DataFrame
    span : int, optional
        Span for exponential moving average. Defaults to 20.
    outputcol : str, optional.
        Column to use for output. Defaults to 'EMA Growth'.
    selection : str, optional
        Column of eqdata on which to calculate ema growth. If
        `eqdata` has only 1 column, `selection` is ignored,
        and ema growth is calculated on that column. Defaults
        to 'Adj Close'.

    Returns
    ---------
    out : DataFrame
        Growth of exponential moving average from one day to next
    """
    growth_col = kwargs.get('outputcol', 'EMA Growth')
    ema_col = 'EMA'
    # The intermediate EMA frame uses its own output column name.
    ema_kwargs = dict(kwargs, outputcol=ema_col)
    emadf = ema(eqdata, **ema_kwargs)
    return simple.growth(emadf, selection=ema_col, outputcol=growth_col)
constant[
Growth of exponential moving average.
Parameters
----------
eqdata : DataFrame
span : int, optional
Span for exponential moving average. Defaults to 20.
outputcol : str, optional.
Column to use for output. Defaults to 'EMA Growth'.
selection : str, optional
Column of eqdata on which to calculate ema growth. If
`eqdata` has only 1 column, `selection` is ignored,
and ema growth is calculated on that column. Defaults
to 'Adj Close'.
Returns
---------
out : DataFrame
Growth of exponential moving average from one day to next
]
variable[_growth_outputcol] assign[=] call[name[kwargs].get, parameter[constant[outputcol], constant[EMA Growth]]]
variable[_ema_outputcol] assign[=] constant[EMA]
call[name[kwargs]][constant[outputcol]] assign[=] name[_ema_outputcol]
variable[_emadf] assign[=] call[name[ema], parameter[name[eqdata]]]
return[call[name[simple].growth, parameter[name[_emadf]]]] | keyword[def] identifier[ema_growth] ( identifier[eqdata] ,** identifier[kwargs] ):
literal[string]
identifier[_growth_outputcol] = identifier[kwargs] . identifier[get] ( literal[string] , literal[string] )
identifier[_ema_outputcol] = literal[string]
identifier[kwargs] [ literal[string] ]= identifier[_ema_outputcol]
identifier[_emadf] = identifier[ema] ( identifier[eqdata] ,** identifier[kwargs] )
keyword[return] identifier[simple] . identifier[growth] ( identifier[_emadf] , identifier[selection] = identifier[_ema_outputcol] , identifier[outputcol] = identifier[_growth_outputcol] ) | def ema_growth(eqdata, **kwargs):
"""
Growth of exponential moving average.
Parameters
----------
eqdata : DataFrame
span : int, optional
Span for exponential moving average. Defaults to 20.
outputcol : str, optional.
Column to use for output. Defaults to 'EMA Growth'.
selection : str, optional
Column of eqdata on which to calculate ema growth. If
`eqdata` has only 1 column, `selection` is ignored,
and ema growth is calculated on that column. Defaults
to 'Adj Close'.
Returns
---------
out : DataFrame
Growth of exponential moving average from one day to next
"""
_growth_outputcol = kwargs.get('outputcol', 'EMA Growth')
_ema_outputcol = 'EMA'
kwargs['outputcol'] = _ema_outputcol
_emadf = ema(eqdata, **kwargs)
return simple.growth(_emadf, selection=_ema_outputcol, outputcol=_growth_outputcol) |
def _get_importer(input_file):
    """Selects importer based on input file type."""
    __, ext = os.path.splitext(input_file)
    ext = ext.lower()
    # Ostriz results are recognised by filename, everything else by
    # extension; importers are imported lazily to keep startup cheap.
    if "ostriz" in input_file:
        from dump2polarion.results import ostriztools
        return ostriztools.import_ostriz
    if ext == ".xml":
        # expect junit-report from pytest
        from dump2polarion.results import junittools
        return junittools.import_junit
    if ext == ".csv":
        from dump2polarion.results import csvtools
        return csvtools.import_csv
    if ext in dbtools.SQLITE_EXT:
        return dbtools.import_sqlite
    if ext == ".json":
        from dump2polarion.results import jsontools
        return jsontools.import_json
    raise Dump2PolarionException("Cannot recognize type of input data, add file extension.")
constant[Selects importer based on input file type.]
<ast.Tuple object at 0x7da1b229a5f0> assign[=] call[name[os].path.splitext, parameter[name[input_file]]]
variable[ext] assign[=] call[name[ext].lower, parameter[]]
if compare[constant[ostriz] in name[input_file]] begin[:]
from relative_module[dump2polarion.results] import module[ostriztools]
variable[importer] assign[=] name[ostriztools].import_ostriz
return[name[importer]] | keyword[def] identifier[_get_importer] ( identifier[input_file] ):
literal[string]
identifier[__] , identifier[ext] = identifier[os] . identifier[path] . identifier[splitext] ( identifier[input_file] )
identifier[ext] = identifier[ext] . identifier[lower] ()
keyword[if] literal[string] keyword[in] identifier[input_file] :
keyword[from] identifier[dump2polarion] . identifier[results] keyword[import] identifier[ostriztools]
identifier[importer] = identifier[ostriztools] . identifier[import_ostriz]
keyword[elif] identifier[ext] == literal[string] :
keyword[from] identifier[dump2polarion] . identifier[results] keyword[import] identifier[junittools]
identifier[importer] = identifier[junittools] . identifier[import_junit]
keyword[elif] identifier[ext] == literal[string] :
keyword[from] identifier[dump2polarion] . identifier[results] keyword[import] identifier[csvtools]
identifier[importer] = identifier[csvtools] . identifier[import_csv]
keyword[elif] identifier[ext] keyword[in] identifier[dbtools] . identifier[SQLITE_EXT] :
identifier[importer] = identifier[dbtools] . identifier[import_sqlite]
keyword[elif] identifier[ext] == literal[string] :
keyword[from] identifier[dump2polarion] . identifier[results] keyword[import] identifier[jsontools]
identifier[importer] = identifier[jsontools] . identifier[import_json]
keyword[else] :
keyword[raise] identifier[Dump2PolarionException] ( literal[string] )
keyword[return] identifier[importer] | def _get_importer(input_file):
"""Selects importer based on input file type."""
(__, ext) = os.path.splitext(input_file)
ext = ext.lower()
if 'ostriz' in input_file:
from dump2polarion.results import ostriztools
importer = ostriztools.import_ostriz # depends on [control=['if'], data=[]]
elif ext == '.xml':
# expect junit-report from pytest
from dump2polarion.results import junittools
importer = junittools.import_junit # depends on [control=['if'], data=[]]
elif ext == '.csv':
from dump2polarion.results import csvtools
importer = csvtools.import_csv # depends on [control=['if'], data=[]]
elif ext in dbtools.SQLITE_EXT:
importer = dbtools.import_sqlite # depends on [control=['if'], data=[]]
elif ext == '.json':
from dump2polarion.results import jsontools
importer = jsontools.import_json # depends on [control=['if'], data=[]]
else:
raise Dump2PolarionException('Cannot recognize type of input data, add file extension.')
return importer |
def getUniqueFilename(dir=None, base=None):
    """
    DESCRP: Generate a filename in the directory <dir> which is
            unique (i.e. not in use at the moment)
    PARAMS: dir -- the directory to look in. If None, use CWD
            base -- use this as the base name for the filename;
                    if None, the name is purely random
    RETURN: string -- the filename generated (not a full path)
    """
    # The original implementation ignored both parameters; honor them
    # while keeping the default behavior (random name, checked in CWD).
    directory = os.getcwd() if dir is None else dir
    prefix = base or ""
    while True:
        # Random candidate; retry until it does not clash with an
        # existing entry in the target directory.
        fn = "%s%s.tmp" % (prefix, random.randint(0, 100000))
        if not os.path.exists(os.path.join(directory, fn)):
            break
    return fn
constant[
DESCRP: Generate a filename in the directory <dir> which is
unique (i.e. not in use at the moment)
PARAMS: dir -- the directory to look in. If None, use CWD
base -- use this as the base name for the filename
RETURN: string -- the filename generated
]
while constant[True] begin[:]
variable[fn] assign[=] binary_operation[call[name[str], parameter[call[name[random].randint, parameter[constant[0], constant[100000]]]]] + constant[.tmp]]
if <ast.UnaryOp object at 0x7da1b13401c0> begin[:]
break
return[name[fn]] | keyword[def] identifier[getUniqueFilename] ( identifier[dir] = keyword[None] , identifier[base] = keyword[None] ):
literal[string]
keyword[while] keyword[True] :
identifier[fn] = identifier[str] ( identifier[random] . identifier[randint] ( literal[int] , literal[int] ))+ literal[string]
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[fn] ):
keyword[break]
keyword[return] identifier[fn] | def getUniqueFilename(dir=None, base=None):
"""
DESCRP: Generate a filename in the directory <dir> which is
unique (i.e. not in use at the moment)
PARAMS: dir -- the directory to look in. If None, use CWD
base -- use this as the base name for the filename
RETURN: string -- the filename generated
"""
while True:
fn = str(random.randint(0, 100000)) + '.tmp'
if not os.path.exists(fn):
break # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]]
return fn |
def process_request(
    self, path: str, request_headers: Headers
) -> Union[Optional[HTTPResponse], Awaitable[Optional[HTTPResponse]]]:
    """
    Intercept the HTTP request and return an HTTP response if needed.

    ``request_headers`` is a :class:`~websockets.http.Headers` instance.

    Returning ``None`` lets the WebSocket handshake continue. Returning
    a status code, headers and a response body sends that HTTP response
    and closes the connection.

    The HTTP status must be a :class:`~http.HTTPStatus`; headers must be
    a :class:`~websockets.http.Headers` instance, a
    :class:`~collections.abc.Mapping`, or an iterable of ``(name,
    value)`` pairs; the body must be :class:`bytes` (possibly empty).

    Override this method to inspect the request headers and answer with
    a different status, e.g. ``HTTPStatus.UNAUTHORIZED`` or
    ``HTTPStatus.FORBIDDEN`` for authentication failures. It may be a
    plain function or a coroutine, since such checks often need network
    requests. It can also be supplied via the ``process_request``
    argument of the :class:`WebSocketServerProtocol` constructor or the
    :func:`serve` function.
    """
    # Delegate to the user-supplied hook when one was configured.
    if self._process_request is None:
        return None
    return self._process_request(path, request_headers)
constant[
Intercept the HTTP request and return an HTTP response if needed.
``request_headers`` is a :class:`~websockets.http.Headers` instance.
If this method returns ``None``, the WebSocket handshake continues.
If it returns a status code, headers and a response body, that HTTP
response is sent and the connection is closed.
The HTTP status must be a :class:`~http.HTTPStatus`.
HTTP headers must be a :class:`~websockets.http.Headers` instance, a
:class:`~collections.abc.Mapping`, or an iterable of ``(name, value)``
pairs.
The HTTP response body must be :class:`bytes`. It may be empty.
This method may be overridden to check the request headers and set a
different status, for example to authenticate the request and return
``HTTPStatus.UNAUTHORIZED`` or ``HTTPStatus.FORBIDDEN``.
It can be declared as a function or as a coroutine because such
authentication checks are likely to require network requests.
It may also be overridden by passing a ``process_request`` argument to
the :class:`WebSocketServerProtocol` constructor or the :func:`serve`
function.
]
if compare[name[self]._process_request is_not constant[None]] begin[:]
return[call[name[self]._process_request, parameter[name[path], name[request_headers]]]]
return[constant[None]] | keyword[def] identifier[process_request] (
identifier[self] , identifier[path] : identifier[str] , identifier[request_headers] : identifier[Headers]
)-> identifier[Union] [ identifier[Optional] [ identifier[HTTPResponse] ], identifier[Awaitable] [ identifier[Optional] [ identifier[HTTPResponse] ]]]:
literal[string]
keyword[if] identifier[self] . identifier[_process_request] keyword[is] keyword[not] keyword[None] :
keyword[return] identifier[self] . identifier[_process_request] ( identifier[path] , identifier[request_headers] )
keyword[return] keyword[None] | def process_request(self, path: str, request_headers: Headers) -> Union[Optional[HTTPResponse], Awaitable[Optional[HTTPResponse]]]:
"""
Intercept the HTTP request and return an HTTP response if needed.
``request_headers`` is a :class:`~websockets.http.Headers` instance.
If this method returns ``None``, the WebSocket handshake continues.
If it returns a status code, headers and a response body, that HTTP
response is sent and the connection is closed.
The HTTP status must be a :class:`~http.HTTPStatus`.
HTTP headers must be a :class:`~websockets.http.Headers` instance, a
:class:`~collections.abc.Mapping`, or an iterable of ``(name, value)``
pairs.
The HTTP response body must be :class:`bytes`. It may be empty.
This method may be overridden to check the request headers and set a
different status, for example to authenticate the request and return
``HTTPStatus.UNAUTHORIZED`` or ``HTTPStatus.FORBIDDEN``.
It can be declared as a function or as a coroutine because such
authentication checks are likely to require network requests.
It may also be overridden by passing a ``process_request`` argument to
the :class:`WebSocketServerProtocol` constructor or the :func:`serve`
function.
"""
if self._process_request is not None:
return self._process_request(path, request_headers) # depends on [control=['if'], data=[]]
return None |
def create(
self, resource_group_name, account_name, certificate_name, parameters, if_match=None, if_none_match=None, custom_headers=None, raw=False, **operation_config):
"""Creates a new certificate inside the specified account.
:param resource_group_name: The name of the resource group that
contains the Batch account.
:type resource_group_name: str
:param account_name: The name of the Batch account.
:type account_name: str
:param certificate_name: The identifier for the certificate. This must
be made up of algorithm and thumbprint separated by a dash, and must
match the certificate data in the request. For example SHA1-a3d1c5.
:type certificate_name: str
:param parameters: Additional parameters for certificate creation.
:type parameters:
~azure.mgmt.batch.models.CertificateCreateOrUpdateParameters
:param if_match: The entity state (ETag) version of the certificate to
update. A value of "*" can be used to apply the operation only if the
certificate already exists. If omitted, this operation will always be
applied.
:type if_match: str
:param if_none_match: Set to '*' to allow a new certificate to be
created, but to prevent updating an existing certificate. Other values
will be ignored.
:type if_none_match: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:return: An instance of AzureOperationPoller that returns Certificate
or ClientRawResponse if raw=true
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.batch.models.Certificate]
or ~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._create_initial(
resource_group_name=resource_group_name,
account_name=account_name,
certificate_name=certificate_name,
parameters=parameters,
if_match=if_match,
if_none_match=if_none_match,
custom_headers=custom_headers,
raw=True,
**operation_config
)
if raw:
return raw_result
# Construct and send request
def long_running_send():
return raw_result.response
def get_long_running_status(status_link, headers=None):
request = self._client.get(status_link)
if headers:
request.headers.update(headers)
header_parameters = {}
header_parameters['x-ms-client-request-id'] = raw_result.response.request.headers['x-ms-client-request-id']
return self._client.send(
request, header_parameters, stream=False, **operation_config)
def get_long_running_output(response):
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
header_dict = {
'ETag': 'str',
}
deserialized = self._deserialize('Certificate', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
client_raw_response.add_headers(header_dict)
return client_raw_response
return deserialized
long_running_operation_timeout = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
return AzureOperationPoller(
long_running_send, get_long_running_output,
get_long_running_status, long_running_operation_timeout) | def function[create, parameter[self, resource_group_name, account_name, certificate_name, parameters, if_match, if_none_match, custom_headers, raw]]:
constant[Creates a new certificate inside the specified account.
:param resource_group_name: The name of the resource group that
contains the Batch account.
:type resource_group_name: str
:param account_name: The name of the Batch account.
:type account_name: str
:param certificate_name: The identifier for the certificate. This must
be made up of algorithm and thumbprint separated by a dash, and must
match the certificate data in the request. For example SHA1-a3d1c5.
:type certificate_name: str
:param parameters: Additional parameters for certificate creation.
:type parameters:
~azure.mgmt.batch.models.CertificateCreateOrUpdateParameters
:param if_match: The entity state (ETag) version of the certificate to
update. A value of "*" can be used to apply the operation only if the
certificate already exists. If omitted, this operation will always be
applied.
:type if_match: str
:param if_none_match: Set to '*' to allow a new certificate to be
created, but to prevent updating an existing certificate. Other values
will be ignored.
:type if_none_match: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:return: An instance of AzureOperationPoller that returns Certificate
or ClientRawResponse if raw=true
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.batch.models.Certificate]
or ~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
]
variable[raw_result] assign[=] call[name[self]._create_initial, parameter[]]
if name[raw] begin[:]
return[name[raw_result]]
def function[long_running_send, parameter[]]:
return[name[raw_result].response]
def function[get_long_running_status, parameter[status_link, headers]]:
variable[request] assign[=] call[name[self]._client.get, parameter[name[status_link]]]
if name[headers] begin[:]
call[name[request].headers.update, parameter[name[headers]]]
variable[header_parameters] assign[=] dictionary[[], []]
call[name[header_parameters]][constant[x-ms-client-request-id]] assign[=] call[name[raw_result].response.request.headers][constant[x-ms-client-request-id]]
return[call[name[self]._client.send, parameter[name[request], name[header_parameters]]]]
def function[get_long_running_output, parameter[response]]:
if compare[name[response].status_code <ast.NotIn object at 0x7da2590d7190> list[[<ast.Constant object at 0x7da18f58cb20>]]] begin[:]
variable[exp] assign[=] call[name[CloudError], parameter[name[response]]]
name[exp].request_id assign[=] call[name[response].headers.get, parameter[constant[x-ms-request-id]]]
<ast.Raise object at 0x7da18f58e830>
variable[header_dict] assign[=] dictionary[[<ast.Constant object at 0x7da18f58d0c0>], [<ast.Constant object at 0x7da18f58d5a0>]]
variable[deserialized] assign[=] call[name[self]._deserialize, parameter[constant[Certificate], name[response]]]
if name[raw] begin[:]
variable[client_raw_response] assign[=] call[name[ClientRawResponse], parameter[name[deserialized], name[response]]]
call[name[client_raw_response].add_headers, parameter[name[header_dict]]]
return[name[client_raw_response]]
return[name[deserialized]]
variable[long_running_operation_timeout] assign[=] call[name[operation_config].get, parameter[constant[long_running_operation_timeout], name[self].config.long_running_operation_timeout]]
return[call[name[AzureOperationPoller], parameter[name[long_running_send], name[get_long_running_output], name[get_long_running_status], name[long_running_operation_timeout]]]] | keyword[def] identifier[create] (
identifier[self] , identifier[resource_group_name] , identifier[account_name] , identifier[certificate_name] , identifier[parameters] , identifier[if_match] = keyword[None] , identifier[if_none_match] = keyword[None] , identifier[custom_headers] = keyword[None] , identifier[raw] = keyword[False] ,** identifier[operation_config] ):
literal[string]
identifier[raw_result] = identifier[self] . identifier[_create_initial] (
identifier[resource_group_name] = identifier[resource_group_name] ,
identifier[account_name] = identifier[account_name] ,
identifier[certificate_name] = identifier[certificate_name] ,
identifier[parameters] = identifier[parameters] ,
identifier[if_match] = identifier[if_match] ,
identifier[if_none_match] = identifier[if_none_match] ,
identifier[custom_headers] = identifier[custom_headers] ,
identifier[raw] = keyword[True] ,
** identifier[operation_config]
)
keyword[if] identifier[raw] :
keyword[return] identifier[raw_result]
keyword[def] identifier[long_running_send] ():
keyword[return] identifier[raw_result] . identifier[response]
keyword[def] identifier[get_long_running_status] ( identifier[status_link] , identifier[headers] = keyword[None] ):
identifier[request] = identifier[self] . identifier[_client] . identifier[get] ( identifier[status_link] )
keyword[if] identifier[headers] :
identifier[request] . identifier[headers] . identifier[update] ( identifier[headers] )
identifier[header_parameters] ={}
identifier[header_parameters] [ literal[string] ]= identifier[raw_result] . identifier[response] . identifier[request] . identifier[headers] [ literal[string] ]
keyword[return] identifier[self] . identifier[_client] . identifier[send] (
identifier[request] , identifier[header_parameters] , identifier[stream] = keyword[False] ,** identifier[operation_config] )
keyword[def] identifier[get_long_running_output] ( identifier[response] ):
keyword[if] identifier[response] . identifier[status_code] keyword[not] keyword[in] [ literal[int] ]:
identifier[exp] = identifier[CloudError] ( identifier[response] )
identifier[exp] . identifier[request_id] = identifier[response] . identifier[headers] . identifier[get] ( literal[string] )
keyword[raise] identifier[exp]
identifier[header_dict] ={
literal[string] : literal[string] ,
}
identifier[deserialized] = identifier[self] . identifier[_deserialize] ( literal[string] , identifier[response] )
keyword[if] identifier[raw] :
identifier[client_raw_response] = identifier[ClientRawResponse] ( identifier[deserialized] , identifier[response] )
identifier[client_raw_response] . identifier[add_headers] ( identifier[header_dict] )
keyword[return] identifier[client_raw_response]
keyword[return] identifier[deserialized]
identifier[long_running_operation_timeout] = identifier[operation_config] . identifier[get] (
literal[string] ,
identifier[self] . identifier[config] . identifier[long_running_operation_timeout] )
keyword[return] identifier[AzureOperationPoller] (
identifier[long_running_send] , identifier[get_long_running_output] ,
identifier[get_long_running_status] , identifier[long_running_operation_timeout] ) | def create(self, resource_group_name, account_name, certificate_name, parameters, if_match=None, if_none_match=None, custom_headers=None, raw=False, **operation_config):
"""Creates a new certificate inside the specified account.
:param resource_group_name: The name of the resource group that
contains the Batch account.
:type resource_group_name: str
:param account_name: The name of the Batch account.
:type account_name: str
:param certificate_name: The identifier for the certificate. This must
be made up of algorithm and thumbprint separated by a dash, and must
match the certificate data in the request. For example SHA1-a3d1c5.
:type certificate_name: str
:param parameters: Additional parameters for certificate creation.
:type parameters:
~azure.mgmt.batch.models.CertificateCreateOrUpdateParameters
:param if_match: The entity state (ETag) version of the certificate to
update. A value of "*" can be used to apply the operation only if the
certificate already exists. If omitted, this operation will always be
applied.
:type if_match: str
:param if_none_match: Set to '*' to allow a new certificate to be
created, but to prevent updating an existing certificate. Other values
will be ignored.
:type if_none_match: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:return: An instance of AzureOperationPoller that returns Certificate
or ClientRawResponse if raw=true
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.batch.models.Certificate]
or ~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._create_initial(resource_group_name=resource_group_name, account_name=account_name, certificate_name=certificate_name, parameters=parameters, if_match=if_match, if_none_match=if_none_match, custom_headers=custom_headers, raw=True, **operation_config)
if raw:
return raw_result # depends on [control=['if'], data=[]]
# Construct and send request
def long_running_send():
return raw_result.response
def get_long_running_status(status_link, headers=None):
request = self._client.get(status_link)
if headers:
request.headers.update(headers) # depends on [control=['if'], data=[]]
header_parameters = {}
header_parameters['x-ms-client-request-id'] = raw_result.response.request.headers['x-ms-client-request-id']
return self._client.send(request, header_parameters, stream=False, **operation_config)
def get_long_running_output(response):
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp # depends on [control=['if'], data=[]]
header_dict = {'ETag': 'str'}
deserialized = self._deserialize('Certificate', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
client_raw_response.add_headers(header_dict)
return client_raw_response # depends on [control=['if'], data=[]]
return deserialized
long_running_operation_timeout = operation_config.get('long_running_operation_timeout', self.config.long_running_operation_timeout)
return AzureOperationPoller(long_running_send, get_long_running_output, get_long_running_status, long_running_operation_timeout) |
def _fuzzy_time_parse(self, value):
""" Parses a fuzzy time value into a meaningful interpretation.
`value`
String value to parse.
"""
value = value.lower().strip()
today = datetime.date.today()
if value in ('today', 't'):
return today
else:
kwargs = {}
if value in ('y', 'yesterday'):
kwargs['days'] = -1
elif value in ('w', 'wk', 'week', 'last week'):
kwargs['days'] = -7
else:
# match days
match = re.match(r'(\d+)\s*(d|day|days)\s*(ago)?$', value)
if match:
kwargs['days'] = -int(match.groups(1)[0])
else:
# match weeks
match = re.match(r'(\d+)\s*(w|wk|week|weeks)\s*(ago)?$',
value)
if match:
kwargs['weeks'] = -int(match.groups(1)[0])
if kwargs:
return today + datetime.timedelta(**kwargs)
return None | def function[_fuzzy_time_parse, parameter[self, value]]:
constant[ Parses a fuzzy time value into a meaningful interpretation.
`value`
String value to parse.
]
variable[value] assign[=] call[call[name[value].lower, parameter[]].strip, parameter[]]
variable[today] assign[=] call[name[datetime].date.today, parameter[]]
if compare[name[value] in tuple[[<ast.Constant object at 0x7da1b13d6fe0>, <ast.Constant object at 0x7da1b13d6d10>]]] begin[:]
return[name[today]] | keyword[def] identifier[_fuzzy_time_parse] ( identifier[self] , identifier[value] ):
literal[string]
identifier[value] = identifier[value] . identifier[lower] (). identifier[strip] ()
identifier[today] = identifier[datetime] . identifier[date] . identifier[today] ()
keyword[if] identifier[value] keyword[in] ( literal[string] , literal[string] ):
keyword[return] identifier[today]
keyword[else] :
identifier[kwargs] ={}
keyword[if] identifier[value] keyword[in] ( literal[string] , literal[string] ):
identifier[kwargs] [ literal[string] ]=- literal[int]
keyword[elif] identifier[value] keyword[in] ( literal[string] , literal[string] , literal[string] , literal[string] ):
identifier[kwargs] [ literal[string] ]=- literal[int]
keyword[else] :
identifier[match] = identifier[re] . identifier[match] ( literal[string] , identifier[value] )
keyword[if] identifier[match] :
identifier[kwargs] [ literal[string] ]=- identifier[int] ( identifier[match] . identifier[groups] ( literal[int] )[ literal[int] ])
keyword[else] :
identifier[match] = identifier[re] . identifier[match] ( literal[string] ,
identifier[value] )
keyword[if] identifier[match] :
identifier[kwargs] [ literal[string] ]=- identifier[int] ( identifier[match] . identifier[groups] ( literal[int] )[ literal[int] ])
keyword[if] identifier[kwargs] :
keyword[return] identifier[today] + identifier[datetime] . identifier[timedelta] (** identifier[kwargs] )
keyword[return] keyword[None] | def _fuzzy_time_parse(self, value):
""" Parses a fuzzy time value into a meaningful interpretation.
`value`
String value to parse.
"""
value = value.lower().strip()
today = datetime.date.today()
if value in ('today', 't'):
return today # depends on [control=['if'], data=[]]
else:
kwargs = {}
if value in ('y', 'yesterday'):
kwargs['days'] = -1 # depends on [control=['if'], data=[]]
elif value in ('w', 'wk', 'week', 'last week'):
kwargs['days'] = -7 # depends on [control=['if'], data=[]]
else:
# match days
match = re.match('(\\d+)\\s*(d|day|days)\\s*(ago)?$', value)
if match:
kwargs['days'] = -int(match.groups(1)[0]) # depends on [control=['if'], data=[]]
else:
# match weeks
match = re.match('(\\d+)\\s*(w|wk|week|weeks)\\s*(ago)?$', value)
if match:
kwargs['weeks'] = -int(match.groups(1)[0]) # depends on [control=['if'], data=[]]
if kwargs:
return today + datetime.timedelta(**kwargs) # depends on [control=['if'], data=[]]
return None |
def get_fields(self, model):
"""
By default, returns all field names of a given model.
Override this method to limit field options. You can either return a
plain list of field names from it, like ['id', 'name'], or call
.super() and exclude unwanted fields from its result.
"""
return sorted(
[f.name for f in model._meta.get_fields() if f.name != 'password']
) | def function[get_fields, parameter[self, model]]:
constant[
By default, returns all field names of a given model.
Override this method to limit field options. You can either return a
plain list of field names from it, like ['id', 'name'], or call
.super() and exclude unwanted fields from its result.
]
return[call[name[sorted], parameter[<ast.ListComp object at 0x7da1b18322c0>]]] | keyword[def] identifier[get_fields] ( identifier[self] , identifier[model] ):
literal[string]
keyword[return] identifier[sorted] (
[ identifier[f] . identifier[name] keyword[for] identifier[f] keyword[in] identifier[model] . identifier[_meta] . identifier[get_fields] () keyword[if] identifier[f] . identifier[name] != literal[string] ]
) | def get_fields(self, model):
"""
By default, returns all field names of a given model.
Override this method to limit field options. You can either return a
plain list of field names from it, like ['id', 'name'], or call
.super() and exclude unwanted fields from its result.
"""
return sorted([f.name for f in model._meta.get_fields() if f.name != 'password']) |
def xf2rav(xform):
"""
This routine determines the rotation matrix and angular velocity
of the rotation from a state transformation matrix.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/xf2rav_c.html
:param xform: state transformation matrix
:type xform: list[6][6]
:return:
rotation associated with xform,
angular velocity associated with xform.
:rtype: tuple
"""
xform = stypes.toDoubleMatrix(xform)
rot = stypes.emptyDoubleMatrix()
av = stypes.emptyDoubleVector(3)
libspice.xf2rav_c(xform, rot, av)
return stypes.cMatrixToNumpy(rot), stypes.cVectorToPython(av) | def function[xf2rav, parameter[xform]]:
constant[
This routine determines the rotation matrix and angular velocity
of the rotation from a state transformation matrix.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/xf2rav_c.html
:param xform: state transformation matrix
:type xform: list[6][6]
:return:
rotation associated with xform,
angular velocity associated with xform.
:rtype: tuple
]
variable[xform] assign[=] call[name[stypes].toDoubleMatrix, parameter[name[xform]]]
variable[rot] assign[=] call[name[stypes].emptyDoubleMatrix, parameter[]]
variable[av] assign[=] call[name[stypes].emptyDoubleVector, parameter[constant[3]]]
call[name[libspice].xf2rav_c, parameter[name[xform], name[rot], name[av]]]
return[tuple[[<ast.Call object at 0x7da2054a73a0>, <ast.Call object at 0x7da18f09efe0>]]] | keyword[def] identifier[xf2rav] ( identifier[xform] ):
literal[string]
identifier[xform] = identifier[stypes] . identifier[toDoubleMatrix] ( identifier[xform] )
identifier[rot] = identifier[stypes] . identifier[emptyDoubleMatrix] ()
identifier[av] = identifier[stypes] . identifier[emptyDoubleVector] ( literal[int] )
identifier[libspice] . identifier[xf2rav_c] ( identifier[xform] , identifier[rot] , identifier[av] )
keyword[return] identifier[stypes] . identifier[cMatrixToNumpy] ( identifier[rot] ), identifier[stypes] . identifier[cVectorToPython] ( identifier[av] ) | def xf2rav(xform):
"""
This routine determines the rotation matrix and angular velocity
of the rotation from a state transformation matrix.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/xf2rav_c.html
:param xform: state transformation matrix
:type xform: list[6][6]
:return:
rotation associated with xform,
angular velocity associated with xform.
:rtype: tuple
"""
xform = stypes.toDoubleMatrix(xform)
rot = stypes.emptyDoubleMatrix()
av = stypes.emptyDoubleVector(3)
libspice.xf2rav_c(xform, rot, av)
return (stypes.cMatrixToNumpy(rot), stypes.cVectorToPython(av)) |
def default_formatter(handler, item, value):
"""Default formatter. Convert value to string."""
if hasattr(value, '__unicode__'):
value = value.__unicode__()
return escape(str(value)) | def function[default_formatter, parameter[handler, item, value]]:
constant[Default formatter. Convert value to string.]
if call[name[hasattr], parameter[name[value], constant[__unicode__]]] begin[:]
variable[value] assign[=] call[name[value].__unicode__, parameter[]]
return[call[name[escape], parameter[call[name[str], parameter[name[value]]]]]] | keyword[def] identifier[default_formatter] ( identifier[handler] , identifier[item] , identifier[value] ):
literal[string]
keyword[if] identifier[hasattr] ( identifier[value] , literal[string] ):
identifier[value] = identifier[value] . identifier[__unicode__] ()
keyword[return] identifier[escape] ( identifier[str] ( identifier[value] )) | def default_formatter(handler, item, value):
"""Default formatter. Convert value to string."""
if hasattr(value, '__unicode__'):
value = value.__unicode__() # depends on [control=['if'], data=[]]
return escape(str(value)) |
def parse_plays_stream(self):
"""Generate and yield a stream of parsed plays. Useful for per play processing."""
lx_doc = self.html_doc()
if lx_doc is not None:
parser = PlayParser(self.game_key.season, self.game_key.game_type)
plays = lx_doc.xpath('//tr[@class = "evenColor"]')
for p in plays:
p_obj = parser.build_play(p)
self.plays.append(p_obj)
yield p_obj | def function[parse_plays_stream, parameter[self]]:
constant[Generate and yield a stream of parsed plays. Useful for per play processing.]
variable[lx_doc] assign[=] call[name[self].html_doc, parameter[]]
if compare[name[lx_doc] is_not constant[None]] begin[:]
variable[parser] assign[=] call[name[PlayParser], parameter[name[self].game_key.season, name[self].game_key.game_type]]
variable[plays] assign[=] call[name[lx_doc].xpath, parameter[constant[//tr[@class = "evenColor"]]]]
for taget[name[p]] in starred[name[plays]] begin[:]
variable[p_obj] assign[=] call[name[parser].build_play, parameter[name[p]]]
call[name[self].plays.append, parameter[name[p_obj]]]
<ast.Yield object at 0x7da1b0f0c700> | keyword[def] identifier[parse_plays_stream] ( identifier[self] ):
literal[string]
identifier[lx_doc] = identifier[self] . identifier[html_doc] ()
keyword[if] identifier[lx_doc] keyword[is] keyword[not] keyword[None] :
identifier[parser] = identifier[PlayParser] ( identifier[self] . identifier[game_key] . identifier[season] , identifier[self] . identifier[game_key] . identifier[game_type] )
identifier[plays] = identifier[lx_doc] . identifier[xpath] ( literal[string] )
keyword[for] identifier[p] keyword[in] identifier[plays] :
identifier[p_obj] = identifier[parser] . identifier[build_play] ( identifier[p] )
identifier[self] . identifier[plays] . identifier[append] ( identifier[p_obj] )
keyword[yield] identifier[p_obj] | def parse_plays_stream(self):
"""Generate and yield a stream of parsed plays. Useful for per play processing."""
lx_doc = self.html_doc()
if lx_doc is not None:
parser = PlayParser(self.game_key.season, self.game_key.game_type)
plays = lx_doc.xpath('//tr[@class = "evenColor"]')
for p in plays:
p_obj = parser.build_play(p)
self.plays.append(p_obj)
yield p_obj # depends on [control=['for'], data=['p']] # depends on [control=['if'], data=['lx_doc']] |
def _checkIfClusterExists(self):
"""
Try deleting the resource group. This will fail if it exists and raise an exception.
"""
ansibleArgs = {
'resgrp': self.clusterName,
'region': self._zone
}
try:
self.callPlaybook(self.playbook['check-cluster'], ansibleArgs, wait=True)
except RuntimeError:
logger.info("The cluster could not be created. Try deleting the cluster if it already exits.")
raise | def function[_checkIfClusterExists, parameter[self]]:
constant[
Try deleting the resource group. This will fail if it exists and raise an exception.
]
variable[ansibleArgs] assign[=] dictionary[[<ast.Constant object at 0x7da20c993850>, <ast.Constant object at 0x7da20c990190>], [<ast.Attribute object at 0x7da20c993bb0>, <ast.Attribute object at 0x7da20c990640>]]
<ast.Try object at 0x7da20c9913f0> | keyword[def] identifier[_checkIfClusterExists] ( identifier[self] ):
literal[string]
identifier[ansibleArgs] ={
literal[string] : identifier[self] . identifier[clusterName] ,
literal[string] : identifier[self] . identifier[_zone]
}
keyword[try] :
identifier[self] . identifier[callPlaybook] ( identifier[self] . identifier[playbook] [ literal[string] ], identifier[ansibleArgs] , identifier[wait] = keyword[True] )
keyword[except] identifier[RuntimeError] :
identifier[logger] . identifier[info] ( literal[string] )
keyword[raise] | def _checkIfClusterExists(self):
"""
Try deleting the resource group. This will fail if it exists and raise an exception.
"""
ansibleArgs = {'resgrp': self.clusterName, 'region': self._zone}
try:
self.callPlaybook(self.playbook['check-cluster'], ansibleArgs, wait=True) # depends on [control=['try'], data=[]]
except RuntimeError:
logger.info('The cluster could not be created. Try deleting the cluster if it already exits.')
raise # depends on [control=['except'], data=[]] |
def get_clique_tree(nodes, edges):
"""Given a set of int nodes i and edges (i,j), returns an nx.Graph object G
which is a clique tree, where:
- G.node[i]['members'] contains the set of original nodes in the ith
maximal clique
- G[i][j]['members'] contains the set of original nodes in the seperator
set between maximal cliques i and j
Note: This method is currently only implemented for chordal graphs; TODO:
add a step to triangulate non-chordal graphs.
"""
# Form the original graph G1
G1 = nx.Graph()
G1.add_nodes_from(nodes)
G1.add_edges_from(edges)
# Check if graph is chordal
# TODO: Add step to triangulate graph if not
if not nx.is_chordal(G1):
raise NotImplementedError("Graph triangulation not implemented.")
# Create maximal clique graph G2
# Each node is a maximal clique C_i
# Let w = |C_i \cap C_j|; C_i, C_j have an edge with weight w if w > 0
G2 = nx.Graph()
for i, c in enumerate(nx.chordal_graph_cliques(G1)):
G2.add_node(i, members=c)
for i in G2.nodes:
for j in G2.nodes:
S = G2.node[i]["members"].intersection(G2.node[j]["members"])
w = len(S)
if w > 0:
G2.add_edge(i, j, weight=w, members=S)
# Return a minimum spanning tree of G2
return nx.minimum_spanning_tree(G2) | def function[get_clique_tree, parameter[nodes, edges]]:
constant[Given a set of int nodes i and edges (i,j), returns an nx.Graph object G
which is a clique tree, where:
- G.node[i]['members'] contains the set of original nodes in the ith
maximal clique
- G[i][j]['members'] contains the set of original nodes in the seperator
set between maximal cliques i and j
Note: This method is currently only implemented for chordal graphs; TODO:
add a step to triangulate non-chordal graphs.
]
variable[G1] assign[=] call[name[nx].Graph, parameter[]]
call[name[G1].add_nodes_from, parameter[name[nodes]]]
call[name[G1].add_edges_from, parameter[name[edges]]]
if <ast.UnaryOp object at 0x7da1b216cf40> begin[:]
<ast.Raise object at 0x7da1b216d7b0>
variable[G2] assign[=] call[name[nx].Graph, parameter[]]
for taget[tuple[[<ast.Name object at 0x7da1b216c430>, <ast.Name object at 0x7da1b216cca0>]]] in starred[call[name[enumerate], parameter[call[name[nx].chordal_graph_cliques, parameter[name[G1]]]]]] begin[:]
call[name[G2].add_node, parameter[name[i]]]
for taget[name[i]] in starred[name[G2].nodes] begin[:]
for taget[name[j]] in starred[name[G2].nodes] begin[:]
variable[S] assign[=] call[call[call[name[G2].node][name[i]]][constant[members]].intersection, parameter[call[call[name[G2].node][name[j]]][constant[members]]]]
variable[w] assign[=] call[name[len], parameter[name[S]]]
if compare[name[w] greater[>] constant[0]] begin[:]
call[name[G2].add_edge, parameter[name[i], name[j]]]
return[call[name[nx].minimum_spanning_tree, parameter[name[G2]]]] | keyword[def] identifier[get_clique_tree] ( identifier[nodes] , identifier[edges] ):
literal[string]
identifier[G1] = identifier[nx] . identifier[Graph] ()
identifier[G1] . identifier[add_nodes_from] ( identifier[nodes] )
identifier[G1] . identifier[add_edges_from] ( identifier[edges] )
keyword[if] keyword[not] identifier[nx] . identifier[is_chordal] ( identifier[G1] ):
keyword[raise] identifier[NotImplementedError] ( literal[string] )
identifier[G2] = identifier[nx] . identifier[Graph] ()
keyword[for] identifier[i] , identifier[c] keyword[in] identifier[enumerate] ( identifier[nx] . identifier[chordal_graph_cliques] ( identifier[G1] )):
identifier[G2] . identifier[add_node] ( identifier[i] , identifier[members] = identifier[c] )
keyword[for] identifier[i] keyword[in] identifier[G2] . identifier[nodes] :
keyword[for] identifier[j] keyword[in] identifier[G2] . identifier[nodes] :
identifier[S] = identifier[G2] . identifier[node] [ identifier[i] ][ literal[string] ]. identifier[intersection] ( identifier[G2] . identifier[node] [ identifier[j] ][ literal[string] ])
identifier[w] = identifier[len] ( identifier[S] )
keyword[if] identifier[w] > literal[int] :
identifier[G2] . identifier[add_edge] ( identifier[i] , identifier[j] , identifier[weight] = identifier[w] , identifier[members] = identifier[S] )
keyword[return] identifier[nx] . identifier[minimum_spanning_tree] ( identifier[G2] ) | def get_clique_tree(nodes, edges):
"""Given a set of int nodes i and edges (i,j), returns an nx.Graph object G
which is a clique tree, where:
- G.node[i]['members'] contains the set of original nodes in the ith
maximal clique
- G[i][j]['members'] contains the set of original nodes in the seperator
set between maximal cliques i and j
Note: This method is currently only implemented for chordal graphs; TODO:
add a step to triangulate non-chordal graphs.
"""
# Form the original graph G1
G1 = nx.Graph()
G1.add_nodes_from(nodes)
G1.add_edges_from(edges)
# Check if graph is chordal
# TODO: Add step to triangulate graph if not
if not nx.is_chordal(G1):
raise NotImplementedError('Graph triangulation not implemented.') # depends on [control=['if'], data=[]]
# Create maximal clique graph G2
# Each node is a maximal clique C_i
# Let w = |C_i \cap C_j|; C_i, C_j have an edge with weight w if w > 0
G2 = nx.Graph()
for (i, c) in enumerate(nx.chordal_graph_cliques(G1)):
G2.add_node(i, members=c) # depends on [control=['for'], data=[]]
for i in G2.nodes:
for j in G2.nodes:
S = G2.node[i]['members'].intersection(G2.node[j]['members'])
w = len(S)
if w > 0:
G2.add_edge(i, j, weight=w, members=S) # depends on [control=['if'], data=['w']] # depends on [control=['for'], data=['j']] # depends on [control=['for'], data=['i']]
# Return a minimum spanning tree of G2
return nx.minimum_spanning_tree(G2) |
def bind(value, name):
    """Bind *value* for a prepared statement, emitting a ``%s`` placeholder.

    Applied automatically to every ``{{variable}}`` during the lexing
    stage, so developers cannot forget to bind.  ``Markup`` values pass
    through untouched; lists/tuples must be routed through the
    ``|inclause`` filter instead.
    """
    # Already-safe markup is returned as-is.
    if isinstance(value, Markup):
        return value
    # Collections need IN-clause expansion, which this filter cannot do.
    if requires_in_clause(value):
        raise MissingInClauseException("""Got a list or tuple.
    Did you forget to apply '|inclause' to your query?""")
    return _bind_param(_thread_local.bind_params, name, value)
constant[A filter that prints %s, and stores the value
in an array, so that it can be bound using a prepared statement
This filter is automatically applied to every {{variable}}
during the lexing stage, so developers can't forget to bind
]
if call[name[isinstance], parameter[name[value], name[Markup]]] begin[:]
return[name[value]] | keyword[def] identifier[bind] ( identifier[value] , identifier[name] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[value] , identifier[Markup] ):
keyword[return] identifier[value]
keyword[elif] identifier[requires_in_clause] ( identifier[value] ):
keyword[raise] identifier[MissingInClauseException] ( literal[string] )
keyword[else] :
keyword[return] identifier[_bind_param] ( identifier[_thread_local] . identifier[bind_params] , identifier[name] , identifier[value] ) | def bind(value, name):
"""A filter that prints %s, and stores the value
in an array, so that it can be bound using a prepared statement
This filter is automatically applied to every {{variable}}
during the lexing stage, so developers can't forget to bind
"""
if isinstance(value, Markup):
return value # depends on [control=['if'], data=[]]
elif requires_in_clause(value):
raise MissingInClauseException("Got a list or tuple. \n Did you forget to apply '|inclause' to your query?") # depends on [control=['if'], data=[]]
else:
return _bind_param(_thread_local.bind_params, name, value) |
def get_b_star(x_star, y_err, y_mean, y_segment):
    """Return the corrected slope ``b_star`` for the delta_pal statistic.

    Parameters
    ----------
    x_star : numpy array
        Corrected x values (Xcorr / NRM).
    y_err : numpy array
        y residuals of the segment about its mean.
    y_mean : unused
        Kept for backward compatibility with existing callers.
    y_segment : numpy array
        y values of the segment.

    Returns
    -------
    b_star : float
        Slope whose sign follows the x/y residual covariance and whose
        magnitude is std(y_segment) / std(x_star), using the sample
        standard deviation (ddof=1).
    """
    # Center x about its mean; the residuals determine the sign of the slope.
    x_err = x_star - numpy.mean(x_star)
    # NOTE: an earlier version first computed an "averaged slope" via
    # -sqrt(sum(y_err**2)/sum(x_err**2)) and then immediately overwrote it;
    # that dead computation has been removed — only this value was returned.
    b_star = numpy.sign(sum(x_err * y_err)) * numpy.std(y_segment, ddof=1) / numpy.std(x_star, ddof=1)
    return b_star
constant[
input: x_star, y_err, y_mean, y_segment
output: b_star (corrected slope for delta_pal statistic)
]
variable[x_star_mean] assign[=] call[name[numpy].mean, parameter[name[x_star]]]
variable[x_err] assign[=] binary_operation[name[x_star] - name[x_star_mean]]
variable[b_star] assign[=] binary_operation[<ast.UnaryOp object at 0x7da2041db310> * call[name[numpy].sqrt, parameter[call[name[old_div], parameter[call[name[sum], parameter[binary_operation[call[name[numpy].array, parameter[name[y_err]]] ** constant[2]]]], call[name[sum], parameter[binary_operation[call[name[numpy].array, parameter[name[x_err]]] ** constant[2]]]]]]]]]
variable[b_star] assign[=] binary_operation[binary_operation[call[name[numpy].sign, parameter[call[name[sum], parameter[binary_operation[name[x_err] * name[y_err]]]]]] * call[name[numpy].std, parameter[name[y_segment]]]] / call[name[numpy].std, parameter[name[x_star]]]]
return[name[b_star]] | keyword[def] identifier[get_b_star] ( identifier[x_star] , identifier[y_err] , identifier[y_mean] , identifier[y_segment] ):
literal[string]
identifier[x_star_mean] = identifier[numpy] . identifier[mean] ( identifier[x_star] )
identifier[x_err] = identifier[x_star] - identifier[x_star_mean]
identifier[b_star] =- literal[int] * identifier[numpy] . identifier[sqrt] ( identifier[old_div] ( identifier[sum] ( identifier[numpy] . identifier[array] ( identifier[y_err] )** literal[int] ), identifier[sum] ( identifier[numpy] . identifier[array] ( identifier[x_err] )** literal[int] )))
identifier[b_star] = identifier[numpy] . identifier[sign] ( identifier[sum] ( identifier[x_err] * identifier[y_err] ))* identifier[numpy] . identifier[std] ( identifier[y_segment] , identifier[ddof] = literal[int] )/ identifier[numpy] . identifier[std] ( identifier[x_star] , identifier[ddof] = literal[int] )
keyword[return] identifier[b_star] | def get_b_star(x_star, y_err, y_mean, y_segment):
"""
input: x_star, y_err, y_mean, y_segment
output: b_star (corrected slope for delta_pal statistic)
"""
#print "x_star, should be same as Xcorr / NRM"
#print x_star
x_star_mean = numpy.mean(x_star)
x_err = x_star - x_star_mean
b_star = -1 * numpy.sqrt(old_div(sum(numpy.array(y_err) ** 2), sum(numpy.array(x_err) ** 2))) # averaged slope
#print "y_segment", y_segment
b_star = numpy.sign(sum(x_err * y_err)) * numpy.std(y_segment, ddof=1) / numpy.std(x_star, ddof=1)
#print "b_star (should be same as corr_slope)"
#print b_star
return b_star |
def _to_image_array(file_path):
    """
    Converts the file in file_path to a numpy array (matrix) representing an RGB image.

    The image dimensions are calculated by _determine_dimensions. Between 1
    and 3 padding bytes are appended so the byte count is a multiple of 3;
    the pad count is recorded in the blue channel of the last pixel so a
    decoder can strip the padding again.
    """
    _log.debug("File '%s' to image", file_path)
    data = numpy.fromfile(file_path, numpy.uint8)
    orig_len = len(data)
    # 3 - (orig_len % 3) is always in {1, 2, 3}, so at least one pad byte
    # exists and the pad-count marker below never overwrites real data.
    # (The old "+= 3 if pad_req == 0" branch was unreachable and is gone.)
    pad_req = 3 - (orig_len % 3)
    final_len = orig_len + pad_req
    num_of_pixels = final_len // 3
    w, h = _determine_dimensions(num_of_pixels)
    reshaped = numpy.zeros((w, h, 3), dtype=numpy.uint8)
    # Fill in C (row-major) order: byte i lands at
    # (i // 3 // h, i // 3 % h, i % 3), which is exactly flat index i, so a
    # flat view assignment reproduces the old per-byte loop (the old loop
    # also relied on Python-2 xrange, which breaks on Python 3). Pad bytes
    # and any surplus pixels stay zero, as before.
    reshaped.reshape(-1)[:orig_len] = data
    reshaped[-1, -1, 2] = pad_req
    return reshaped
constant[
Converts the file in file_path to a numpy array (matrix) representing an RGB image
The dimensions of the image are calculated using __determine_dimensions.
Padding is added provide enough bytes to generate the image (between 1 and 3 bytes can be added).
]
call[name[_log].debug, parameter[constant[File '%s' to image], name[file_path]]]
variable[data] assign[=] call[name[numpy].fromfile, parameter[name[file_path], name[numpy].uint8]]
variable[orig_len] assign[=] call[name[len], parameter[name[data]]]
variable[pad_req] assign[=] binary_operation[constant[3] - binary_operation[name[orig_len] <ast.Mod object at 0x7da2590d6920> constant[3]]]
<ast.AugAssign object at 0x7da2044c2d10>
variable[final_len] assign[=] binary_operation[name[orig_len] + name[pad_req]]
variable[num_of_pixels] assign[=] binary_operation[name[final_len] <ast.FloorDiv object at 0x7da2590d6bc0> constant[3]]
<ast.Tuple object at 0x7da2044c12d0> assign[=] call[name[_determine_dimensions], parameter[name[num_of_pixels]]]
variable[reshaped] assign[=] call[name[numpy].zeros, parameter[tuple[[<ast.Name object at 0x7da2044c0970>, <ast.Name object at 0x7da2044c10f0>, <ast.Constant object at 0x7da2044c2ce0>]]]]
for taget[name[i]] in starred[call[name[xrange], parameter[name[final_len]]]] begin[:]
variable[sidx] assign[=] binary_operation[name[i] <ast.FloorDiv object at 0x7da2590d6bc0> constant[3]]
variable[y] assign[=] binary_operation[name[sidx] <ast.Mod object at 0x7da2590d6920> name[h]]
variable[x] assign[=] binary_operation[name[sidx] <ast.FloorDiv object at 0x7da2590d6bc0> name[h]]
variable[s] assign[=] binary_operation[name[i] <ast.Mod object at 0x7da2590d6920> constant[3]]
call[name[reshaped]][tuple[[<ast.Name object at 0x7da2044c28f0>, <ast.Name object at 0x7da2044c2710>, <ast.Name object at 0x7da2044c0ca0>]]] assign[=] <ast.IfExp object at 0x7da2044c0310>
call[name[reshaped]][tuple[[<ast.UnaryOp object at 0x7da2054a6980>, <ast.UnaryOp object at 0x7da2054a7a00>, <ast.Constant object at 0x7da2054a6770>]]] assign[=] name[pad_req]
return[name[reshaped]] | keyword[def] identifier[_to_image_array] ( identifier[file_path] ):
literal[string]
identifier[_log] . identifier[debug] ( literal[string] , identifier[file_path] )
identifier[data] = identifier[numpy] . identifier[fromfile] ( identifier[file_path] , identifier[numpy] . identifier[uint8] )
identifier[orig_len] = identifier[len] ( identifier[data] )
identifier[pad_req] =( literal[int] -( identifier[orig_len] % literal[int] ))
identifier[pad_req] += literal[int] keyword[if] identifier[pad_req] == literal[int] keyword[else] literal[int]
identifier[final_len] = identifier[orig_len] + identifier[pad_req]
identifier[num_of_pixels] = identifier[final_len] // literal[int]
identifier[w] , identifier[h] = identifier[_determine_dimensions] ( identifier[num_of_pixels] )
identifier[reshaped] = identifier[numpy] . identifier[zeros] (( identifier[w] , identifier[h] , literal[int] ), identifier[dtype] = identifier[numpy] . identifier[uint8] )
keyword[for] identifier[i] keyword[in] identifier[xrange] ( identifier[final_len] ):
identifier[sidx] = identifier[i] // literal[int]
identifier[y] = identifier[sidx] % identifier[h]
identifier[x] = identifier[sidx] // identifier[h]
identifier[s] = identifier[i] % literal[int]
identifier[reshaped] [ identifier[x] , identifier[y] , identifier[s] ]= identifier[data] [ identifier[i] ] keyword[if] identifier[i] < identifier[orig_len] keyword[else] literal[int]
identifier[reshaped] [- literal[int] ,- literal[int] , literal[int] ]= identifier[pad_req]
keyword[return] identifier[reshaped] | def _to_image_array(file_path):
"""
Converts the file in file_path to a numpy array (matrix) representing an RGB image
The dimensions of the image are calculated using __determine_dimensions.
Padding is added provide enough bytes to generate the image (between 1 and 3 bytes can be added).
"""
_log.debug("File '%s' to image", file_path)
data = numpy.fromfile(file_path, numpy.uint8)
orig_len = len(data)
pad_req = 3 - orig_len % 3
pad_req += 3 if pad_req == 0 else 0
final_len = orig_len + pad_req
num_of_pixels = final_len // 3
(w, h) = _determine_dimensions(num_of_pixels)
reshaped = numpy.zeros((w, h, 3), dtype=numpy.uint8)
for i in xrange(final_len):
sidx = i // 3
y = sidx % h
x = sidx // h
s = i % 3
reshaped[x, y, s] = data[i] if i < orig_len else 0 # depends on [control=['for'], data=['i']]
reshaped[-1, -1, 2] = pad_req
return reshaped |
def put_rpc(self, address, rpc_id, arg_payload, response):
    """Queue an RPC for asynchronous dispatch.

    The background dispatch task consumes the queue. This method must be
    called from the event loop; it never blocks.

    Args:
        address (int): The address of the tile with the RPC
        rpc_id (int): The id of the rpc you want to call
        arg_payload (bytes): The RPC payload
        response (GenericResponse): The object used to signal the result.
    """
    item = (address, rpc_id, arg_payload, response)
    self._rpc_queue.put_nowait(item)
constant[Place an RPC onto the RPC queue.
The rpc will be dispatched asynchronously by the background dispatch
task. This method must be called from the event loop. This method
does not block.
Args:
address (int): The address of the tile with the RPC
rpc_id (int): The id of the rpc you want to call
arg_payload (bytes): The RPC payload
respones (GenericResponse): The object to use to signal the result.
]
call[name[self]._rpc_queue.put_nowait, parameter[tuple[[<ast.Name object at 0x7da20e9b25f0>, <ast.Name object at 0x7da20e9b29b0>, <ast.Name object at 0x7da20e9b2fb0>, <ast.Name object at 0x7da20e9b00d0>]]]] | keyword[def] identifier[put_rpc] ( identifier[self] , identifier[address] , identifier[rpc_id] , identifier[arg_payload] , identifier[response] ):
literal[string]
identifier[self] . identifier[_rpc_queue] . identifier[put_nowait] (( identifier[address] , identifier[rpc_id] , identifier[arg_payload] , identifier[response] )) | def put_rpc(self, address, rpc_id, arg_payload, response):
"""Place an RPC onto the RPC queue.
The rpc will be dispatched asynchronously by the background dispatch
task. This method must be called from the event loop. This method
does not block.
Args:
address (int): The address of the tile with the RPC
rpc_id (int): The id of the rpc you want to call
arg_payload (bytes): The RPC payload
respones (GenericResponse): The object to use to signal the result.
"""
self._rpc_queue.put_nowait((address, rpc_id, arg_payload, response)) |
def ltsa(geom, n_components, eigen_solver='auto',
         random_state=None, solver_kwds=None):
    """
    Perform a Local Tangent Space Alignment analysis on the data.
    Parameters
    ----------
    geom : a Geometry object from megaman.geometry.geometry
    n_components : integer
        number of coordinates for the manifold.
    eigen_solver : {'auto', 'dense', 'arpack', 'lobpcg', or 'amg'}
        'auto' :
            algorithm will attempt to choose the best method for input data
        'dense' :
            use standard dense matrix operations for the eigenvalue decomposition.
            For this method, M must be an array or matrix type. This method should be avoided for large problems.
        'arpack' :
            use arnoldi iteration in shift-invert mode. For this method,
            M may be a dense matrix, sparse matrix, or general linear operator.
            Warning: ARPACK can be unstable for some problems. It is best to
            try several random seeds in order to check results.
        'lobpcg' :
            Locally Optimal Block Preconditioned Conjugate Gradient Method.
            A preconditioned eigensolver for large symmetric positive definite
            (SPD) generalized eigenproblems.
        'amg' :
            AMG requires pyamg to be installed. It can be faster on very large,
            sparse problems, but may also lead to instabilities.
    random_state : numpy.RandomState or int, optional
        The generator or seed used to determine the starting vector for arpack
        iterations. Defaults to numpy.random.
    solver_kwds : any additional keyword arguments to pass to the selected eigen_solver
    Returns
    -------
    embedding : array-like, shape [n_samples, n_components]
        Embedding vectors.
    squared_error : float
        Reconstruction error for the embedding vectors. Equivalent to
        ``norm(Y - W Y, 'fro')**2``, where W are the reconstruction weights.
    References
    ----------
    * Zhang, Z. & Zha, H. Principal manifolds and nonlinear
      dimensionality reduction via tangent space alignment.
      Journal of Shanghai Univ. 8:406 (2004)
    """
    if geom.X is None:
        raise ValueError("Must pass data matrix X to Geometry")
    (N, d_in) = geom.X.shape
    if n_components > d_in:
        raise ValueError("output dimension must be less than or equal "
                         "to input dimension")
    # get the distance matrix and neighbors list
    if geom.adjacency_matrix is None:
        geom.compute_adjacency_matrix()
    # Row/column indices of non-zero adjacency entries define each point's
    # neighborhood below.
    (rows, cols) = geom.adjacency_matrix.nonzero()
    eigen_solver, solver_kwds = check_eigen_solver(eigen_solver, solver_kwds,
                                                   size=geom.adjacency_matrix.shape[0],
                                                   nvec=n_components + 1)
    # M accumulates the LTSA alignment matrix; keep it sparse unless the
    # dense eigensolver was explicitly chosen.
    if eigen_solver != 'dense':
        M = sparse.csr_matrix((N, N))
    else:
        M = np.zeros((N, N))
    for i in range(N):
        # Neighborhood of point i, taken from the adjacency sparsity pattern.
        neighbors_i = cols[rows == i]
        n_neighbors_i = len(neighbors_i)
        use_svd = (n_neighbors_i > d_in)
        Xi = geom.X[neighbors_i]
        # Center the neighborhood so the local tangent basis is computed
        # about its mean.
        Xi -= Xi.mean(0)
        # compute n_components largest eigenvalues of Xi * Xi^T
        # (columns of v are ordered by decreasing singular/eigen-value; the
        # eigh branch reverses columns to achieve that).
        if use_svd:
            v = svd(Xi, full_matrices=True)[0]
        else:
            Ci = np.dot(Xi, Xi.T)
            v = eigh(Ci)[1][:, ::-1]
        # Gi = [constant column 1/sqrt(k) | top n_components local
        # directions]; GiGiT = Gi Gi^T is the alignment projector used in
        # Zhang & Zha's LTSA formulation.
        Gi = np.zeros((n_neighbors_i, n_components + 1))
        Gi[:, 1:] = v[:, :n_components]
        Gi[:, 0] = 1. / np.sqrt(n_neighbors_i)
        GiGiT = np.dot(Gi, Gi.T)
        nbrs_x, nbrs_y = np.meshgrid(neighbors_i, neighbors_i)
        with warnings.catch_warnings():
            # sparse will complain this is better with lil_matrix but it doesn't work
            warnings.simplefilter("ignore")
            # Accumulate (I - Gi Gi^T) over the neighborhood block of M.
            M[nbrs_x, nbrs_y] -= GiGiT
            M[neighbors_i, neighbors_i] += 1
    # The bottom eigenvectors of M give the embedding; k_skip=1 drops the
    # trivial constant null vector.
    return null_space(M, n_components, k_skip=1, eigen_solver=eigen_solver,
                      random_state=random_state,solver_kwds=solver_kwds)
constant[
Perform a Local Tangent Space Alignment analysis on the data.
Parameters
----------
geom : a Geometry object from megaman.geometry.geometry
n_components : integer
number of coordinates for the manifold.
eigen_solver : {'auto', 'dense', 'arpack', 'lobpcg', or 'amg'}
'auto' :
algorithm will attempt to choose the best method for input data
'dense' :
use standard dense matrix operations for the eigenvalue decomposition.
For this method, M must be an array or matrix type. This method should be avoided for large problems.
'arpack' :
use arnoldi iteration in shift-invert mode. For this method,
M may be a dense matrix, sparse matrix, or general linear operator.
Warning: ARPACK can be unstable for some problems. It is best to
try several random seeds in order to check results.
'lobpcg' :
Locally Optimal Block Preconditioned Conjugate Gradient Method.
A preconditioned eigensolver for large symmetric positive definite
(SPD) generalized eigenproblems.
'amg' :
AMG requires pyamg to be installed. It can be faster on very large,
sparse problems, but may also lead to instabilities.
random_state : numpy.RandomState or int, optional
The generator or seed used to determine the starting vector for arpack
iterations. Defaults to numpy.random.
solver_kwds : any additional keyword arguments to pass to the selected eigen_solver
Returns
-------
embedding : array-like, shape [n_samples, n_components]
Embedding vectors.
squared_error : float
Reconstruction error for the embedding vectors. Equivalent to
``norm(Y - W Y, 'fro')**2``, where W are the reconstruction weights.
References
----------
* Zhang, Z. & Zha, H. Principal manifolds and nonlinear
dimensionality reduction via tangent space alignment.
Journal of Shanghai Univ. 8:406 (2004)
]
if compare[name[geom].X is constant[None]] begin[:]
<ast.Raise object at 0x7da1b26afdf0>
<ast.Tuple object at 0x7da1b26af070> assign[=] name[geom].X.shape
if compare[name[n_components] greater[>] name[d_in]] begin[:]
<ast.Raise object at 0x7da1b26aedd0>
if compare[name[geom].adjacency_matrix is constant[None]] begin[:]
call[name[geom].compute_adjacency_matrix, parameter[]]
<ast.Tuple object at 0x7da1b26ae920> assign[=] call[name[geom].adjacency_matrix.nonzero, parameter[]]
<ast.Tuple object at 0x7da1b26ad510> assign[=] call[name[check_eigen_solver], parameter[name[eigen_solver], name[solver_kwds]]]
if compare[name[eigen_solver] not_equal[!=] constant[dense]] begin[:]
variable[M] assign[=] call[name[sparse].csr_matrix, parameter[tuple[[<ast.Name object at 0x7da1b26ae650>, <ast.Name object at 0x7da1b26af910>]]]]
for taget[name[i]] in starred[call[name[range], parameter[name[N]]]] begin[:]
variable[neighbors_i] assign[=] call[name[cols]][compare[name[rows] equal[==] name[i]]]
variable[n_neighbors_i] assign[=] call[name[len], parameter[name[neighbors_i]]]
variable[use_svd] assign[=] compare[name[n_neighbors_i] greater[>] name[d_in]]
variable[Xi] assign[=] call[name[geom].X][name[neighbors_i]]
<ast.AugAssign object at 0x7da1b135bfa0>
if name[use_svd] begin[:]
variable[v] assign[=] call[call[name[svd], parameter[name[Xi]]]][constant[0]]
variable[Gi] assign[=] call[name[np].zeros, parameter[tuple[[<ast.Name object at 0x7da2054a4eb0>, <ast.BinOp object at 0x7da2054a6500>]]]]
call[name[Gi]][tuple[[<ast.Slice object at 0x7da2054a6fb0>, <ast.Slice object at 0x7da2054a7dc0>]]] assign[=] call[name[v]][tuple[[<ast.Slice object at 0x7da2054a7fd0>, <ast.Slice object at 0x7da2054a7280>]]]
call[name[Gi]][tuple[[<ast.Slice object at 0x7da2054a70d0>, <ast.Constant object at 0x7da2054a57b0>]]] assign[=] binary_operation[constant[1.0] / call[name[np].sqrt, parameter[name[n_neighbors_i]]]]
variable[GiGiT] assign[=] call[name[np].dot, parameter[name[Gi], name[Gi].T]]
<ast.Tuple object at 0x7da2054a5900> assign[=] call[name[np].meshgrid, parameter[name[neighbors_i], name[neighbors_i]]]
with call[name[warnings].catch_warnings, parameter[]] begin[:]
call[name[warnings].simplefilter, parameter[constant[ignore]]]
<ast.AugAssign object at 0x7da2054a44c0>
<ast.AugAssign object at 0x7da2054a6410>
return[call[name[null_space], parameter[name[M], name[n_components]]]] | keyword[def] identifier[ltsa] ( identifier[geom] , identifier[n_components] , identifier[eigen_solver] = literal[string] ,
identifier[random_state] = keyword[None] , identifier[solver_kwds] = keyword[None] ):
literal[string]
keyword[if] identifier[geom] . identifier[X] keyword[is] keyword[None] :
keyword[raise] identifier[ValueError] ( literal[string] )
( identifier[N] , identifier[d_in] )= identifier[geom] . identifier[X] . identifier[shape]
keyword[if] identifier[n_components] > identifier[d_in] :
keyword[raise] identifier[ValueError] ( literal[string]
literal[string] )
keyword[if] identifier[geom] . identifier[adjacency_matrix] keyword[is] keyword[None] :
identifier[geom] . identifier[compute_adjacency_matrix] ()
( identifier[rows] , identifier[cols] )= identifier[geom] . identifier[adjacency_matrix] . identifier[nonzero] ()
identifier[eigen_solver] , identifier[solver_kwds] = identifier[check_eigen_solver] ( identifier[eigen_solver] , identifier[solver_kwds] ,
identifier[size] = identifier[geom] . identifier[adjacency_matrix] . identifier[shape] [ literal[int] ],
identifier[nvec] = identifier[n_components] + literal[int] )
keyword[if] identifier[eigen_solver] != literal[string] :
identifier[M] = identifier[sparse] . identifier[csr_matrix] (( identifier[N] , identifier[N] ))
keyword[else] :
identifier[M] = identifier[np] . identifier[zeros] (( identifier[N] , identifier[N] ))
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[N] ):
identifier[neighbors_i] = identifier[cols] [ identifier[rows] == identifier[i] ]
identifier[n_neighbors_i] = identifier[len] ( identifier[neighbors_i] )
identifier[use_svd] =( identifier[n_neighbors_i] > identifier[d_in] )
identifier[Xi] = identifier[geom] . identifier[X] [ identifier[neighbors_i] ]
identifier[Xi] -= identifier[Xi] . identifier[mean] ( literal[int] )
keyword[if] identifier[use_svd] :
identifier[v] = identifier[svd] ( identifier[Xi] , identifier[full_matrices] = keyword[True] )[ literal[int] ]
keyword[else] :
identifier[Ci] = identifier[np] . identifier[dot] ( identifier[Xi] , identifier[Xi] . identifier[T] )
identifier[v] = identifier[eigh] ( identifier[Ci] )[ literal[int] ][:,::- literal[int] ]
identifier[Gi] = identifier[np] . identifier[zeros] (( identifier[n_neighbors_i] , identifier[n_components] + literal[int] ))
identifier[Gi] [:, literal[int] :]= identifier[v] [:,: identifier[n_components] ]
identifier[Gi] [:, literal[int] ]= literal[int] / identifier[np] . identifier[sqrt] ( identifier[n_neighbors_i] )
identifier[GiGiT] = identifier[np] . identifier[dot] ( identifier[Gi] , identifier[Gi] . identifier[T] )
identifier[nbrs_x] , identifier[nbrs_y] = identifier[np] . identifier[meshgrid] ( identifier[neighbors_i] , identifier[neighbors_i] )
keyword[with] identifier[warnings] . identifier[catch_warnings] ():
identifier[warnings] . identifier[simplefilter] ( literal[string] )
identifier[M] [ identifier[nbrs_x] , identifier[nbrs_y] ]-= identifier[GiGiT]
identifier[M] [ identifier[neighbors_i] , identifier[neighbors_i] ]+= literal[int]
keyword[return] identifier[null_space] ( identifier[M] , identifier[n_components] , identifier[k_skip] = literal[int] , identifier[eigen_solver] = identifier[eigen_solver] ,
identifier[random_state] = identifier[random_state] , identifier[solver_kwds] = identifier[solver_kwds] ) | def ltsa(geom, n_components, eigen_solver='auto', random_state=None, solver_kwds=None):
"""
Perform a Local Tangent Space Alignment analysis on the data.
Parameters
----------
geom : a Geometry object from megaman.geometry.geometry
n_components : integer
number of coordinates for the manifold.
eigen_solver : {'auto', 'dense', 'arpack', 'lobpcg', or 'amg'}
'auto' :
algorithm will attempt to choose the best method for input data
'dense' :
use standard dense matrix operations for the eigenvalue decomposition.
For this method, M must be an array or matrix type. This method should be avoided for large problems.
'arpack' :
use arnoldi iteration in shift-invert mode. For this method,
M may be a dense matrix, sparse matrix, or general linear operator.
Warning: ARPACK can be unstable for some problems. It is best to
try several random seeds in order to check results.
'lobpcg' :
Locally Optimal Block Preconditioned Conjugate Gradient Method.
A preconditioned eigensolver for large symmetric positive definite
(SPD) generalized eigenproblems.
'amg' :
AMG requires pyamg to be installed. It can be faster on very large,
sparse problems, but may also lead to instabilities.
random_state : numpy.RandomState or int, optional
The generator or seed used to determine the starting vector for arpack
iterations. Defaults to numpy.random.
solver_kwds : any additional keyword arguments to pass to the selected eigen_solver
Returns
-------
embedding : array-like, shape [n_samples, n_components]
Embedding vectors.
squared_error : float
Reconstruction error for the embedding vectors. Equivalent to
``norm(Y - W Y, 'fro')**2``, where W are the reconstruction weights.
References
----------
* Zhang, Z. & Zha, H. Principal manifolds and nonlinear
dimensionality reduction via tangent space alignment.
Journal of Shanghai Univ. 8:406 (2004)
"""
if geom.X is None:
raise ValueError('Must pass data matrix X to Geometry') # depends on [control=['if'], data=[]]
(N, d_in) = geom.X.shape
if n_components > d_in:
raise ValueError('output dimension must be less than or equal to input dimension') # depends on [control=['if'], data=[]]
# get the distance matrix and neighbors list
if geom.adjacency_matrix is None:
geom.compute_adjacency_matrix() # depends on [control=['if'], data=[]]
(rows, cols) = geom.adjacency_matrix.nonzero()
(eigen_solver, solver_kwds) = check_eigen_solver(eigen_solver, solver_kwds, size=geom.adjacency_matrix.shape[0], nvec=n_components + 1)
if eigen_solver != 'dense':
M = sparse.csr_matrix((N, N)) # depends on [control=['if'], data=[]]
else:
M = np.zeros((N, N))
for i in range(N):
neighbors_i = cols[rows == i]
n_neighbors_i = len(neighbors_i)
use_svd = n_neighbors_i > d_in
Xi = geom.X[neighbors_i]
Xi -= Xi.mean(0)
# compute n_components largest eigenvalues of Xi * Xi^T
if use_svd:
v = svd(Xi, full_matrices=True)[0] # depends on [control=['if'], data=[]]
else:
Ci = np.dot(Xi, Xi.T)
v = eigh(Ci)[1][:, ::-1]
Gi = np.zeros((n_neighbors_i, n_components + 1))
Gi[:, 1:] = v[:, :n_components]
Gi[:, 0] = 1.0 / np.sqrt(n_neighbors_i)
GiGiT = np.dot(Gi, Gi.T)
(nbrs_x, nbrs_y) = np.meshgrid(neighbors_i, neighbors_i)
with warnings.catch_warnings():
# sparse will complain this is better with lil_matrix but it doesn't work
warnings.simplefilter('ignore')
M[nbrs_x, nbrs_y] -= GiGiT
M[neighbors_i, neighbors_i] += 1 # depends on [control=['with'], data=[]] # depends on [control=['for'], data=['i']]
return null_space(M, n_components, k_skip=1, eigen_solver=eigen_solver, random_state=random_state, solver_kwds=solver_kwds) |
def read(self, input_buffer, kmip_version=enums.KMIPVersion.KMIP_1_3):
        """
        Read the data encoding the ValidationInformation structure and decode
        it into its constituent parts.
        Args:
            input_buffer (stream): A data stream containing encoded object
                data, supporting a read method; usually a BytearrayStream
                object.
            kmip_version (KMIPVersion): An enumeration defining the KMIP
                version with which the object will be decoded. Optional,
                defaults to KMIP 1.3, the first version that defines this
                structure.
        Raises:
            InvalidKmipEncoding: Raised if the validation authority type,
                validation version major, validation type, and/or validation
                level are missing from the encoding.
            VersionNotSupported: Raised when a KMIP version is provided that
                does not support the ValidationInformation structure.
        """
        # The ValidationInformation structure only exists in KMIP 1.3+.
        if kmip_version < enums.KMIPVersion.KMIP_1_3:
            raise exceptions.VersionNotSupported(
                "KMIP {} does not support the ValidationInformation "
                "object.".format(
                    kmip_version.value
                )
            )
        super(ValidationInformation, self).read(
            input_buffer,
            kmip_version=kmip_version
        )
        # Slice out exactly self.length bytes so field parsing cannot read
        # past the end of this structure's encoding.
        local_buffer = utils.BytearrayStream(input_buffer.read(self.length))
        # Required field: validation authority type (enumeration).
        if self.is_tag_next(
                enums.Tags.VALIDATION_AUTHORITY_TYPE,
                local_buffer
        ):
            validation_authority_type = primitives.Enumeration(
                enums.ValidationAuthorityType,
                tag=enums.Tags.VALIDATION_AUTHORITY_TYPE
            )
            validation_authority_type.read(
                local_buffer,
                kmip_version=kmip_version
            )
            self._validation_authority_type = validation_authority_type
        else:
            raise exceptions.InvalidKmipEncoding(
                "The ValidationInformation encoding is missing the "
                "validation authority type."
            )
        # Optional field: validation authority country (text).
        if self.is_tag_next(
                enums.Tags.VALIDATION_AUTHORITY_COUNTRY,
                local_buffer
        ):
            validation_authority_country = primitives.TextString(
                tag=enums.Tags.VALIDATION_AUTHORITY_COUNTRY
            )
            validation_authority_country.read(
                local_buffer,
                kmip_version=kmip_version
            )
            self._validation_authority_country = validation_authority_country
        # Optional field: validation authority URI (text).
        if self.is_tag_next(enums.Tags.VALIDATION_AUTHORITY_URI, local_buffer):
            validation_authority_uri = primitives.TextString(
                tag=enums.Tags.VALIDATION_AUTHORITY_URI
            )
            validation_authority_uri.read(
                local_buffer,
                kmip_version=kmip_version
            )
            self._validation_authority_uri = validation_authority_uri
        # Required field: validation version major (integer).
        if self.is_tag_next(
                enums.Tags.VALIDATION_VERSION_MAJOR,
                local_buffer
        ):
            validation_version_major = primitives.Integer(
                tag=enums.Tags.VALIDATION_VERSION_MAJOR
            )
            validation_version_major.read(
                local_buffer,
                kmip_version=kmip_version
            )
            self._validation_version_major = validation_version_major
        else:
            raise exceptions.InvalidKmipEncoding(
                "The ValidationInformation encoding is missing the "
                "validation version major."
            )
        # Optional field: validation version minor (integer).
        if self.is_tag_next(
                enums.Tags.VALIDATION_VERSION_MINOR,
                local_buffer
        ):
            validation_version_minor = primitives.Integer(
                tag=enums.Tags.VALIDATION_VERSION_MINOR
            )
            validation_version_minor.read(
                local_buffer,
                kmip_version=kmip_version
            )
            self._validation_version_minor = validation_version_minor
        # Required field: validation type (enumeration).
        if self.is_tag_next(enums.Tags.VALIDATION_TYPE, local_buffer):
            validation_type = primitives.Enumeration(
                enums.ValidationType,
                tag=enums.Tags.VALIDATION_TYPE
            )
            validation_type.read(
                local_buffer,
                kmip_version=kmip_version
            )
            self._validation_type = validation_type
        else:
            raise exceptions.InvalidKmipEncoding(
                "The ValidationInformation encoding is missing the "
                "validation type."
            )
        # Required field: validation level (integer).
        if self.is_tag_next(enums.Tags.VALIDATION_LEVEL, local_buffer):
            validation_level = primitives.Integer(
                tag=enums.Tags.VALIDATION_LEVEL
            )
            validation_level.read(local_buffer, kmip_version=kmip_version)
            self._validation_level = validation_level
        else:
            raise exceptions.InvalidKmipEncoding(
                "The ValidationInformation encoding is missing the "
                "validation level."
            )
        # Optional field: validation certificate identifier (text).
        if self.is_tag_next(
                enums.Tags.VALIDATION_CERTIFICATE_IDENTIFIER,
                local_buffer
        ):
            validation_certificate_identifier = primitives.TextString(
                tag=enums.Tags.VALIDATION_CERTIFICATE_IDENTIFIER
            )
            validation_certificate_identifier.read(
                local_buffer,
                kmip_version=kmip_version
            )
            self._validation_certificate_identifier = \
                validation_certificate_identifier
        # Optional field: validation certificate URI (text).
        if self.is_tag_next(
                enums.Tags.VALIDATION_CERTIFICATE_URI,
                local_buffer
        ):
            validation_certificate_uri = primitives.TextString(
                tag=enums.Tags.VALIDATION_CERTIFICATE_URI
            )
            validation_certificate_uri.read(
                local_buffer,
                kmip_version=kmip_version
            )
            self._validation_certificate_uri = validation_certificate_uri
        # Optional field: validation vendor URI (text).
        if self.is_tag_next(enums.Tags.VALIDATION_VENDOR_URI, local_buffer):
            validation_vendor_uri = primitives.TextString(
                tag=enums.Tags.VALIDATION_VENDOR_URI
            )
            validation_vendor_uri.read(local_buffer, kmip_version=kmip_version)
            self._validation_vendor_uri = validation_vendor_uri
        # Optional, repeating field: zero or more validation profiles (text).
        validation_profiles = []
        while self.is_tag_next(enums.Tags.VALIDATION_PROFILE, local_buffer):
            validation_profile = primitives.TextString(
                tag=enums.Tags.VALIDATION_PROFILE
            )
            validation_profile.read(local_buffer, kmip_version=kmip_version)
            validation_profiles.append(validation_profile)
        self._validation_profiles = validation_profiles
        # Flag any trailing bytes that the declared length did not account for.
        self.is_oversized(local_buffer)
constant[
Read the data encoding the ValidationInformation structure and decode
it into its constituent parts.
Args:
input_buffer (stream): A data stream containing encoded object
data, supporting a read method; usually a BytearrayStream
object.
kmip_version (KMIPVersion): An enumeration defining the KMIP
version with which the object will be decoded. Optional,
defaults to KMIP 2.0.
Raises:
InvalidKmipEncoding: Raised if the validation authority type,
validation version major, validation type, and/or validation
level are missing from the encoding.
VersionNotSupported: Raised when a KMIP version is provided that
does not support the ValidationInformation structure.
]
if compare[name[kmip_version] less[<] name[enums].KMIPVersion.KMIP_1_3] begin[:]
<ast.Raise object at 0x7da18fe93fa0>
call[call[name[super], parameter[name[ValidationInformation], name[self]]].read, parameter[name[input_buffer]]]
variable[local_buffer] assign[=] call[name[utils].BytearrayStream, parameter[call[name[input_buffer].read, parameter[name[self].length]]]]
if call[name[self].is_tag_next, parameter[name[enums].Tags.VALIDATION_AUTHORITY_TYPE, name[local_buffer]]] begin[:]
variable[validation_authority_type] assign[=] call[name[primitives].Enumeration, parameter[name[enums].ValidationAuthorityType]]
call[name[validation_authority_type].read, parameter[name[local_buffer]]]
name[self]._validation_authority_type assign[=] name[validation_authority_type]
if call[name[self].is_tag_next, parameter[name[enums].Tags.VALIDATION_AUTHORITY_COUNTRY, name[local_buffer]]] begin[:]
variable[validation_authority_country] assign[=] call[name[primitives].TextString, parameter[]]
call[name[validation_authority_country].read, parameter[name[local_buffer]]]
name[self]._validation_authority_country assign[=] name[validation_authority_country]
if call[name[self].is_tag_next, parameter[name[enums].Tags.VALIDATION_AUTHORITY_URI, name[local_buffer]]] begin[:]
variable[validation_authority_uri] assign[=] call[name[primitives].TextString, parameter[]]
call[name[validation_authority_uri].read, parameter[name[local_buffer]]]
name[self]._validation_authority_uri assign[=] name[validation_authority_uri]
if call[name[self].is_tag_next, parameter[name[enums].Tags.VALIDATION_VERSION_MAJOR, name[local_buffer]]] begin[:]
variable[validation_version_major] assign[=] call[name[primitives].Integer, parameter[]]
call[name[validation_version_major].read, parameter[name[local_buffer]]]
name[self]._validation_version_major assign[=] name[validation_version_major]
if call[name[self].is_tag_next, parameter[name[enums].Tags.VALIDATION_VERSION_MINOR, name[local_buffer]]] begin[:]
variable[validation_version_minor] assign[=] call[name[primitives].Integer, parameter[]]
call[name[validation_version_minor].read, parameter[name[local_buffer]]]
name[self]._validation_version_minor assign[=] name[validation_version_minor]
if call[name[self].is_tag_next, parameter[name[enums].Tags.VALIDATION_TYPE, name[local_buffer]]] begin[:]
variable[validation_type] assign[=] call[name[primitives].Enumeration, parameter[name[enums].ValidationType]]
call[name[validation_type].read, parameter[name[local_buffer]]]
name[self]._validation_type assign[=] name[validation_type]
if call[name[self].is_tag_next, parameter[name[enums].Tags.VALIDATION_LEVEL, name[local_buffer]]] begin[:]
variable[validation_level] assign[=] call[name[primitives].Integer, parameter[]]
call[name[validation_level].read, parameter[name[local_buffer]]]
name[self]._validation_level assign[=] name[validation_level]
if call[name[self].is_tag_next, parameter[name[enums].Tags.VALIDATION_CERTIFICATE_IDENTIFIER, name[local_buffer]]] begin[:]
variable[validation_certificate_identifier] assign[=] call[name[primitives].TextString, parameter[]]
call[name[validation_certificate_identifier].read, parameter[name[local_buffer]]]
name[self]._validation_certificate_identifier assign[=] name[validation_certificate_identifier]
if call[name[self].is_tag_next, parameter[name[enums].Tags.VALIDATION_CERTIFICATE_URI, name[local_buffer]]] begin[:]
variable[validation_certificate_uri] assign[=] call[name[primitives].TextString, parameter[]]
call[name[validation_certificate_uri].read, parameter[name[local_buffer]]]
name[self]._validation_certificate_uri assign[=] name[validation_certificate_uri]
if call[name[self].is_tag_next, parameter[name[enums].Tags.VALIDATION_VENDOR_URI, name[local_buffer]]] begin[:]
variable[validation_vendor_uri] assign[=] call[name[primitives].TextString, parameter[]]
call[name[validation_vendor_uri].read, parameter[name[local_buffer]]]
name[self]._validation_vendor_uri assign[=] name[validation_vendor_uri]
variable[validation_profiles] assign[=] list[[]]
while call[name[self].is_tag_next, parameter[name[enums].Tags.VALIDATION_PROFILE, name[local_buffer]]] begin[:]
variable[validation_profile] assign[=] call[name[primitives].TextString, parameter[]]
call[name[validation_profile].read, parameter[name[local_buffer]]]
call[name[validation_profiles].append, parameter[name[validation_profile]]]
name[self]._validation_profiles assign[=] name[validation_profiles]
call[name[self].is_oversized, parameter[name[local_buffer]]] | keyword[def] identifier[read] ( identifier[self] , identifier[input_buffer] , identifier[kmip_version] = identifier[enums] . identifier[KMIPVersion] . identifier[KMIP_1_3] ):
literal[string]
keyword[if] identifier[kmip_version] < identifier[enums] . identifier[KMIPVersion] . identifier[KMIP_1_3] :
keyword[raise] identifier[exceptions] . identifier[VersionNotSupported] (
literal[string]
literal[string] . identifier[format] (
identifier[kmip_version] . identifier[value]
)
)
identifier[super] ( identifier[ValidationInformation] , identifier[self] ). identifier[read] (
identifier[input_buffer] ,
identifier[kmip_version] = identifier[kmip_version]
)
identifier[local_buffer] = identifier[utils] . identifier[BytearrayStream] ( identifier[input_buffer] . identifier[read] ( identifier[self] . identifier[length] ))
keyword[if] identifier[self] . identifier[is_tag_next] (
identifier[enums] . identifier[Tags] . identifier[VALIDATION_AUTHORITY_TYPE] ,
identifier[local_buffer]
):
identifier[validation_authority_type] = identifier[primitives] . identifier[Enumeration] (
identifier[enums] . identifier[ValidationAuthorityType] ,
identifier[tag] = identifier[enums] . identifier[Tags] . identifier[VALIDATION_AUTHORITY_TYPE]
)
identifier[validation_authority_type] . identifier[read] (
identifier[local_buffer] ,
identifier[kmip_version] = identifier[kmip_version]
)
identifier[self] . identifier[_validation_authority_type] = identifier[validation_authority_type]
keyword[else] :
keyword[raise] identifier[exceptions] . identifier[InvalidKmipEncoding] (
literal[string]
literal[string]
)
keyword[if] identifier[self] . identifier[is_tag_next] (
identifier[enums] . identifier[Tags] . identifier[VALIDATION_AUTHORITY_COUNTRY] ,
identifier[local_buffer]
):
identifier[validation_authority_country] = identifier[primitives] . identifier[TextString] (
identifier[tag] = identifier[enums] . identifier[Tags] . identifier[VALIDATION_AUTHORITY_COUNTRY]
)
identifier[validation_authority_country] . identifier[read] (
identifier[local_buffer] ,
identifier[kmip_version] = identifier[kmip_version]
)
identifier[self] . identifier[_validation_authority_country] = identifier[validation_authority_country]
keyword[if] identifier[self] . identifier[is_tag_next] ( identifier[enums] . identifier[Tags] . identifier[VALIDATION_AUTHORITY_URI] , identifier[local_buffer] ):
identifier[validation_authority_uri] = identifier[primitives] . identifier[TextString] (
identifier[tag] = identifier[enums] . identifier[Tags] . identifier[VALIDATION_AUTHORITY_URI]
)
identifier[validation_authority_uri] . identifier[read] (
identifier[local_buffer] ,
identifier[kmip_version] = identifier[kmip_version]
)
identifier[self] . identifier[_validation_authority_uri] = identifier[validation_authority_uri]
keyword[if] identifier[self] . identifier[is_tag_next] (
identifier[enums] . identifier[Tags] . identifier[VALIDATION_VERSION_MAJOR] ,
identifier[local_buffer]
):
identifier[validation_version_major] = identifier[primitives] . identifier[Integer] (
identifier[tag] = identifier[enums] . identifier[Tags] . identifier[VALIDATION_VERSION_MAJOR]
)
identifier[validation_version_major] . identifier[read] (
identifier[local_buffer] ,
identifier[kmip_version] = identifier[kmip_version]
)
identifier[self] . identifier[_validation_version_major] = identifier[validation_version_major]
keyword[else] :
keyword[raise] identifier[exceptions] . identifier[InvalidKmipEncoding] (
literal[string]
literal[string]
)
keyword[if] identifier[self] . identifier[is_tag_next] (
identifier[enums] . identifier[Tags] . identifier[VALIDATION_VERSION_MINOR] ,
identifier[local_buffer]
):
identifier[validation_version_minor] = identifier[primitives] . identifier[Integer] (
identifier[tag] = identifier[enums] . identifier[Tags] . identifier[VALIDATION_VERSION_MINOR]
)
identifier[validation_version_minor] . identifier[read] (
identifier[local_buffer] ,
identifier[kmip_version] = identifier[kmip_version]
)
identifier[self] . identifier[_validation_version_minor] = identifier[validation_version_minor]
keyword[if] identifier[self] . identifier[is_tag_next] ( identifier[enums] . identifier[Tags] . identifier[VALIDATION_TYPE] , identifier[local_buffer] ):
identifier[validation_type] = identifier[primitives] . identifier[Enumeration] (
identifier[enums] . identifier[ValidationType] ,
identifier[tag] = identifier[enums] . identifier[Tags] . identifier[VALIDATION_TYPE]
)
identifier[validation_type] . identifier[read] (
identifier[local_buffer] ,
identifier[kmip_version] = identifier[kmip_version]
)
identifier[self] . identifier[_validation_type] = identifier[validation_type]
keyword[else] :
keyword[raise] identifier[exceptions] . identifier[InvalidKmipEncoding] (
literal[string]
literal[string]
)
keyword[if] identifier[self] . identifier[is_tag_next] ( identifier[enums] . identifier[Tags] . identifier[VALIDATION_LEVEL] , identifier[local_buffer] ):
identifier[validation_level] = identifier[primitives] . identifier[Integer] (
identifier[tag] = identifier[enums] . identifier[Tags] . identifier[VALIDATION_LEVEL]
)
identifier[validation_level] . identifier[read] ( identifier[local_buffer] , identifier[kmip_version] = identifier[kmip_version] )
identifier[self] . identifier[_validation_level] = identifier[validation_level]
keyword[else] :
keyword[raise] identifier[exceptions] . identifier[InvalidKmipEncoding] (
literal[string]
literal[string]
)
keyword[if] identifier[self] . identifier[is_tag_next] (
identifier[enums] . identifier[Tags] . identifier[VALIDATION_CERTIFICATE_IDENTIFIER] ,
identifier[local_buffer]
):
identifier[validation_certificate_identifier] = identifier[primitives] . identifier[TextString] (
identifier[tag] = identifier[enums] . identifier[Tags] . identifier[VALIDATION_CERTIFICATE_IDENTIFIER]
)
identifier[validation_certificate_identifier] . identifier[read] (
identifier[local_buffer] ,
identifier[kmip_version] = identifier[kmip_version]
)
identifier[self] . identifier[_validation_certificate_identifier] = identifier[validation_certificate_identifier]
keyword[if] identifier[self] . identifier[is_tag_next] (
identifier[enums] . identifier[Tags] . identifier[VALIDATION_CERTIFICATE_URI] ,
identifier[local_buffer]
):
identifier[validation_certificate_uri] = identifier[primitives] . identifier[TextString] (
identifier[tag] = identifier[enums] . identifier[Tags] . identifier[VALIDATION_CERTIFICATE_URI]
)
identifier[validation_certificate_uri] . identifier[read] (
identifier[local_buffer] ,
identifier[kmip_version] = identifier[kmip_version]
)
identifier[self] . identifier[_validation_certificate_uri] = identifier[validation_certificate_uri]
keyword[if] identifier[self] . identifier[is_tag_next] ( identifier[enums] . identifier[Tags] . identifier[VALIDATION_VENDOR_URI] , identifier[local_buffer] ):
identifier[validation_vendor_uri] = identifier[primitives] . identifier[TextString] (
identifier[tag] = identifier[enums] . identifier[Tags] . identifier[VALIDATION_VENDOR_URI]
)
identifier[validation_vendor_uri] . identifier[read] ( identifier[local_buffer] , identifier[kmip_version] = identifier[kmip_version] )
identifier[self] . identifier[_validation_vendor_uri] = identifier[validation_vendor_uri]
identifier[validation_profiles] =[]
keyword[while] identifier[self] . identifier[is_tag_next] ( identifier[enums] . identifier[Tags] . identifier[VALIDATION_PROFILE] , identifier[local_buffer] ):
identifier[validation_profile] = identifier[primitives] . identifier[TextString] (
identifier[tag] = identifier[enums] . identifier[Tags] . identifier[VALIDATION_PROFILE]
)
identifier[validation_profile] . identifier[read] ( identifier[local_buffer] , identifier[kmip_version] = identifier[kmip_version] )
identifier[validation_profiles] . identifier[append] ( identifier[validation_profile] )
identifier[self] . identifier[_validation_profiles] = identifier[validation_profiles]
identifier[self] . identifier[is_oversized] ( identifier[local_buffer] ) | def read(self, input_buffer, kmip_version=enums.KMIPVersion.KMIP_1_3):
"""
Read the data encoding the ValidationInformation structure and decode
it into its constituent parts.
Args:
input_buffer (stream): A data stream containing encoded object
data, supporting a read method; usually a BytearrayStream
object.
kmip_version (KMIPVersion): An enumeration defining the KMIP
version with which the object will be decoded. Optional,
defaults to KMIP 2.0.
Raises:
InvalidKmipEncoding: Raised if the validation authority type,
validation version major, validation type, and/or validation
level are missing from the encoding.
VersionNotSupported: Raised when a KMIP version is provided that
does not support the ValidationInformation structure.
"""
if kmip_version < enums.KMIPVersion.KMIP_1_3:
raise exceptions.VersionNotSupported('KMIP {} does not support the ValidationInformation object.'.format(kmip_version.value)) # depends on [control=['if'], data=['kmip_version']]
super(ValidationInformation, self).read(input_buffer, kmip_version=kmip_version)
local_buffer = utils.BytearrayStream(input_buffer.read(self.length))
if self.is_tag_next(enums.Tags.VALIDATION_AUTHORITY_TYPE, local_buffer):
validation_authority_type = primitives.Enumeration(enums.ValidationAuthorityType, tag=enums.Tags.VALIDATION_AUTHORITY_TYPE)
validation_authority_type.read(local_buffer, kmip_version=kmip_version)
self._validation_authority_type = validation_authority_type # depends on [control=['if'], data=[]]
else:
raise exceptions.InvalidKmipEncoding('The ValidationInformation encoding is missing the validation authority type.')
if self.is_tag_next(enums.Tags.VALIDATION_AUTHORITY_COUNTRY, local_buffer):
validation_authority_country = primitives.TextString(tag=enums.Tags.VALIDATION_AUTHORITY_COUNTRY)
validation_authority_country.read(local_buffer, kmip_version=kmip_version)
self._validation_authority_country = validation_authority_country # depends on [control=['if'], data=[]]
if self.is_tag_next(enums.Tags.VALIDATION_AUTHORITY_URI, local_buffer):
validation_authority_uri = primitives.TextString(tag=enums.Tags.VALIDATION_AUTHORITY_URI)
validation_authority_uri.read(local_buffer, kmip_version=kmip_version)
self._validation_authority_uri = validation_authority_uri # depends on [control=['if'], data=[]]
if self.is_tag_next(enums.Tags.VALIDATION_VERSION_MAJOR, local_buffer):
validation_version_major = primitives.Integer(tag=enums.Tags.VALIDATION_VERSION_MAJOR)
validation_version_major.read(local_buffer, kmip_version=kmip_version)
self._validation_version_major = validation_version_major # depends on [control=['if'], data=[]]
else:
raise exceptions.InvalidKmipEncoding('The ValidationInformation encoding is missing the validation version major.')
if self.is_tag_next(enums.Tags.VALIDATION_VERSION_MINOR, local_buffer):
validation_version_minor = primitives.Integer(tag=enums.Tags.VALIDATION_VERSION_MINOR)
validation_version_minor.read(local_buffer, kmip_version=kmip_version)
self._validation_version_minor = validation_version_minor # depends on [control=['if'], data=[]]
if self.is_tag_next(enums.Tags.VALIDATION_TYPE, local_buffer):
validation_type = primitives.Enumeration(enums.ValidationType, tag=enums.Tags.VALIDATION_TYPE)
validation_type.read(local_buffer, kmip_version=kmip_version)
self._validation_type = validation_type # depends on [control=['if'], data=[]]
else:
raise exceptions.InvalidKmipEncoding('The ValidationInformation encoding is missing the validation type.')
if self.is_tag_next(enums.Tags.VALIDATION_LEVEL, local_buffer):
validation_level = primitives.Integer(tag=enums.Tags.VALIDATION_LEVEL)
validation_level.read(local_buffer, kmip_version=kmip_version)
self._validation_level = validation_level # depends on [control=['if'], data=[]]
else:
raise exceptions.InvalidKmipEncoding('The ValidationInformation encoding is missing the validation level.')
if self.is_tag_next(enums.Tags.VALIDATION_CERTIFICATE_IDENTIFIER, local_buffer):
validation_certificate_identifier = primitives.TextString(tag=enums.Tags.VALIDATION_CERTIFICATE_IDENTIFIER)
validation_certificate_identifier.read(local_buffer, kmip_version=kmip_version)
self._validation_certificate_identifier = validation_certificate_identifier # depends on [control=['if'], data=[]]
if self.is_tag_next(enums.Tags.VALIDATION_CERTIFICATE_URI, local_buffer):
validation_certificate_uri = primitives.TextString(tag=enums.Tags.VALIDATION_CERTIFICATE_URI)
validation_certificate_uri.read(local_buffer, kmip_version=kmip_version)
self._validation_certificate_uri = validation_certificate_uri # depends on [control=['if'], data=[]]
if self.is_tag_next(enums.Tags.VALIDATION_VENDOR_URI, local_buffer):
validation_vendor_uri = primitives.TextString(tag=enums.Tags.VALIDATION_VENDOR_URI)
validation_vendor_uri.read(local_buffer, kmip_version=kmip_version)
self._validation_vendor_uri = validation_vendor_uri # depends on [control=['if'], data=[]]
validation_profiles = []
while self.is_tag_next(enums.Tags.VALIDATION_PROFILE, local_buffer):
validation_profile = primitives.TextString(tag=enums.Tags.VALIDATION_PROFILE)
validation_profile.read(local_buffer, kmip_version=kmip_version)
validation_profiles.append(validation_profile) # depends on [control=['while'], data=[]]
self._validation_profiles = validation_profiles
self.is_oversized(local_buffer) |
def update_gradients_full(self, dL_dK, X, X2=None):
    """Derivative of the covariance with respect to the kernel parameters.

    Computes the parameter gradients via ``self._comp_grads`` and stores
    each one on the ``gradient`` slot of the corresponding parameter
    (variance, weight variance, bias variance).

    Parameters are forwarded unchanged to ``self._comp_grads``; only the
    first three entries of its return value are consumed here.
    """
    # _comp_grads may return additional terms beyond the three parameter
    # gradients; anything past the first three is deliberately ignored.
    d_var, d_weight, d_bias, *_ = self._comp_grads(dL_dK, X, X2)
    self.variance.gradient = d_var
    self.weight_variance.gradient = d_weight
    self.bias_variance.gradient = d_bias
constant[Derivative of the covariance with respect to the parameters.]
<ast.Tuple object at 0x7da1b1bbd420> assign[=] call[call[name[self]._comp_grads, parameter[name[dL_dK], name[X], name[X2]]]][<ast.Slice object at 0x7da1b1bbe620>]
name[self].variance.gradient assign[=] name[dvar]
name[self].weight_variance.gradient assign[=] name[dw]
name[self].bias_variance.gradient assign[=] name[db] | keyword[def] identifier[update_gradients_full] ( identifier[self] , identifier[dL_dK] , identifier[X] , identifier[X2] = keyword[None] ):
literal[string]
identifier[dvar] , identifier[dw] , identifier[db] = identifier[self] . identifier[_comp_grads] ( identifier[dL_dK] , identifier[X] , identifier[X2] )[: literal[int] ]
identifier[self] . identifier[variance] . identifier[gradient] = identifier[dvar]
identifier[self] . identifier[weight_variance] . identifier[gradient] = identifier[dw]
identifier[self] . identifier[bias_variance] . identifier[gradient] = identifier[db] | def update_gradients_full(self, dL_dK, X, X2=None):
"""Derivative of the covariance with respect to the parameters."""
(dvar, dw, db) = self._comp_grads(dL_dK, X, X2)[:3]
self.variance.gradient = dvar
self.weight_variance.gradient = dw
self.bias_variance.gradient = db |
def validate_json_field(dist, attr, value):
    """Validate that a setup keyword's value is JSON-compatible.

    Intended as a setuptools keyword-validation hook.

    Parameters
    ----------
    dist
        The distribution under construction (unused; required by the
        setuptools validator signature).
    attr : str
        Name of the setup keyword being validated.
    value
        The value supplied for that keyword.

    Returns
    -------
    bool
        Always ``True`` when *value* passes validation.

    Raises
    ------
    DistutilsSetupError
        If ``is_json_compat`` rejects *value*.
    """
    try:
        is_json_compat(value)
    except ValueError as exc:
        # Surface the failure as the error type setuptools expects
        # from keyword validators.
        raise DistutilsSetupError("%r %s" % (attr, exc))
    return True
constant[
Check for json validity.
]
<ast.Try object at 0x7da1b195f1c0>
return[constant[True]] | keyword[def] identifier[validate_json_field] ( identifier[dist] , identifier[attr] , identifier[value] ):
literal[string]
keyword[try] :
identifier[is_json_compat] ( identifier[value] )
keyword[except] identifier[ValueError] keyword[as] identifier[e] :
keyword[raise] identifier[DistutilsSetupError] ( literal[string] %( identifier[attr] , identifier[e] ))
keyword[return] keyword[True] | def validate_json_field(dist, attr, value):
"""
Check for json validity.
"""
try:
is_json_compat(value) # depends on [control=['try'], data=[]]
except ValueError as e:
raise DistutilsSetupError('%r %s' % (attr, e)) # depends on [control=['except'], data=['e']]
return True |
def _infer_all_types(self):
'''
Infer all variables' shapes in the computational graph.
'''
self._initialize_graph_status_for_traversing()
# Deliver user-specified types to root variables
for raw_name, initial_type in self.initial_types:
# Check all variables declared using raw_name in the whole graph
for scope in self.scopes:
# Skip scopes without having the considered variable name
if raw_name not in scope.variable_name_mapping:
continue
# Assign initial_type to all variables declared using raw_name
for onnx_name in scope.variable_name_mapping[raw_name]:
variable = scope.variables[onnx_name]
if variable.is_root:
# Assign type to the root; existing type produced by parser may be overwritten
variable.type = initial_type
# Traverse the graph from roots to leaves
for operator in self.topological_operator_iterator():
if operator.type in self.custom_shape_calculators:
self.custom_shape_calculators[operator.type](operator)
elif operator.type in self.custom_conversion_functions:
pass # in Keras converter, the shape calculator can be optional.
else:
operator.infer_types() | def function[_infer_all_types, parameter[self]]:
constant[
Infer all variables' shapes in the computational graph.
]
call[name[self]._initialize_graph_status_for_traversing, parameter[]]
for taget[tuple[[<ast.Name object at 0x7da20c6a98d0>, <ast.Name object at 0x7da20c6aa050>]]] in starred[name[self].initial_types] begin[:]
for taget[name[scope]] in starred[name[self].scopes] begin[:]
if compare[name[raw_name] <ast.NotIn object at 0x7da2590d7190> name[scope].variable_name_mapping] begin[:]
continue
for taget[name[onnx_name]] in starred[call[name[scope].variable_name_mapping][name[raw_name]]] begin[:]
variable[variable] assign[=] call[name[scope].variables][name[onnx_name]]
if name[variable].is_root begin[:]
name[variable].type assign[=] name[initial_type]
for taget[name[operator]] in starred[call[name[self].topological_operator_iterator, parameter[]]] begin[:]
if compare[name[operator].type in name[self].custom_shape_calculators] begin[:]
call[call[name[self].custom_shape_calculators][name[operator].type], parameter[name[operator]]] | keyword[def] identifier[_infer_all_types] ( identifier[self] ):
literal[string]
identifier[self] . identifier[_initialize_graph_status_for_traversing] ()
keyword[for] identifier[raw_name] , identifier[initial_type] keyword[in] identifier[self] . identifier[initial_types] :
keyword[for] identifier[scope] keyword[in] identifier[self] . identifier[scopes] :
keyword[if] identifier[raw_name] keyword[not] keyword[in] identifier[scope] . identifier[variable_name_mapping] :
keyword[continue]
keyword[for] identifier[onnx_name] keyword[in] identifier[scope] . identifier[variable_name_mapping] [ identifier[raw_name] ]:
identifier[variable] = identifier[scope] . identifier[variables] [ identifier[onnx_name] ]
keyword[if] identifier[variable] . identifier[is_root] :
identifier[variable] . identifier[type] = identifier[initial_type]
keyword[for] identifier[operator] keyword[in] identifier[self] . identifier[topological_operator_iterator] ():
keyword[if] identifier[operator] . identifier[type] keyword[in] identifier[self] . identifier[custom_shape_calculators] :
identifier[self] . identifier[custom_shape_calculators] [ identifier[operator] . identifier[type] ]( identifier[operator] )
keyword[elif] identifier[operator] . identifier[type] keyword[in] identifier[self] . identifier[custom_conversion_functions] :
keyword[pass]
keyword[else] :
identifier[operator] . identifier[infer_types] () | def _infer_all_types(self):
"""
Infer all variables' shapes in the computational graph.
"""
self._initialize_graph_status_for_traversing()
# Deliver user-specified types to root variables
for (raw_name, initial_type) in self.initial_types:
# Check all variables declared using raw_name in the whole graph
for scope in self.scopes:
# Skip scopes without having the considered variable name
if raw_name not in scope.variable_name_mapping:
continue # depends on [control=['if'], data=[]]
# Assign initial_type to all variables declared using raw_name
for onnx_name in scope.variable_name_mapping[raw_name]:
variable = scope.variables[onnx_name]
if variable.is_root:
# Assign type to the root; existing type produced by parser may be overwritten
variable.type = initial_type # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['onnx_name']] # depends on [control=['for'], data=['scope']] # depends on [control=['for'], data=[]]
# Traverse the graph from roots to leaves
for operator in self.topological_operator_iterator():
if operator.type in self.custom_shape_calculators:
self.custom_shape_calculators[operator.type](operator) # depends on [control=['if'], data=[]]
elif operator.type in self.custom_conversion_functions:
pass # in Keras converter, the shape calculator can be optional. # depends on [control=['if'], data=[]]
else:
operator.infer_types() # depends on [control=['for'], data=['operator']] |
def from_json_file(cls, json_file):
    """Constructs a `GPT2Config` from a json file of parameters.

    Parameters
    ----------
    json_file : str
        Path to a UTF-8 encoded JSON file of configuration parameters.

    Returns
    -------
    The instance built by ``cls.from_dict`` from the parsed parameters.
    """
    with open(json_file, "r", encoding="utf-8") as handle:
        # Parse directly from the file handle; the file is closed by
        # the context manager before the instance is built.
        parameters = json.load(handle)
    return cls.from_dict(parameters)
constant[Constructs a `GPT2Config` from a json file of parameters.]
with call[name[open], parameter[name[json_file], constant[r]]] begin[:]
variable[text] assign[=] call[name[reader].read, parameter[]]
return[call[name[cls].from_dict, parameter[call[name[json].loads, parameter[name[text]]]]]] | keyword[def] identifier[from_json_file] ( identifier[cls] , identifier[json_file] ):
literal[string]
keyword[with] identifier[open] ( identifier[json_file] , literal[string] , identifier[encoding] = literal[string] ) keyword[as] identifier[reader] :
identifier[text] = identifier[reader] . identifier[read] ()
keyword[return] identifier[cls] . identifier[from_dict] ( identifier[json] . identifier[loads] ( identifier[text] )) | def from_json_file(cls, json_file):
"""Constructs a `GPT2Config` from a json file of parameters."""
with open(json_file, 'r', encoding='utf-8') as reader:
text = reader.read() # depends on [control=['with'], data=['reader']]
return cls.from_dict(json.loads(text)) |
def _prepare_audio(self, basename, replace_already_indexed=False):
"""
Prepares and stages the audio file to be indexed.
Parameters
----------
basename : str, None
A basename of `/home/random-guy/some-audio-file.wav` is
`some-audio-file.wav`
If basename is `None`, it'll prepare all the audio files.
"""
if basename is not None:
if basename in self.get_timestamps():
if self.get_verbosity():
print("File specified was already indexed. Reindexing...")
del self.__timestamps[basename]
self._filtering_step(basename)
self._staging_step(basename)
else:
for audio_basename in self._list_audio_files():
if audio_basename in self.__timestamps:
if replace_already_indexed:
if self.get_verbosity():
print("Already indexed {}. Reindexing...".format(
audio_basename))
del self.__timestamps[audio_basename]
else:
if self.get_verbosity():
print("Already indexed {}. Skipping...".format(
audio_basename))
continue
self._filtering_step(audio_basename)
self._staging_step(audio_basename) | def function[_prepare_audio, parameter[self, basename, replace_already_indexed]]:
constant[
Prepares and stages the audio file to be indexed.
Parameters
----------
basename : str, None
A basename of `/home/random-guy/some-audio-file.wav` is
`some-audio-file.wav`
If basename is `None`, it'll prepare all the audio files.
]
if compare[name[basename] is_not constant[None]] begin[:]
if compare[name[basename] in call[name[self].get_timestamps, parameter[]]] begin[:]
if call[name[self].get_verbosity, parameter[]] begin[:]
call[name[print], parameter[constant[File specified was already indexed. Reindexing...]]]
<ast.Delete object at 0x7da1b0472c80>
call[name[self]._filtering_step, parameter[name[basename]]]
call[name[self]._staging_step, parameter[name[basename]]] | keyword[def] identifier[_prepare_audio] ( identifier[self] , identifier[basename] , identifier[replace_already_indexed] = keyword[False] ):
literal[string]
keyword[if] identifier[basename] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[basename] keyword[in] identifier[self] . identifier[get_timestamps] ():
keyword[if] identifier[self] . identifier[get_verbosity] ():
identifier[print] ( literal[string] )
keyword[del] identifier[self] . identifier[__timestamps] [ identifier[basename] ]
identifier[self] . identifier[_filtering_step] ( identifier[basename] )
identifier[self] . identifier[_staging_step] ( identifier[basename] )
keyword[else] :
keyword[for] identifier[audio_basename] keyword[in] identifier[self] . identifier[_list_audio_files] ():
keyword[if] identifier[audio_basename] keyword[in] identifier[self] . identifier[__timestamps] :
keyword[if] identifier[replace_already_indexed] :
keyword[if] identifier[self] . identifier[get_verbosity] ():
identifier[print] ( literal[string] . identifier[format] (
identifier[audio_basename] ))
keyword[del] identifier[self] . identifier[__timestamps] [ identifier[audio_basename] ]
keyword[else] :
keyword[if] identifier[self] . identifier[get_verbosity] ():
identifier[print] ( literal[string] . identifier[format] (
identifier[audio_basename] ))
keyword[continue]
identifier[self] . identifier[_filtering_step] ( identifier[audio_basename] )
identifier[self] . identifier[_staging_step] ( identifier[audio_basename] ) | def _prepare_audio(self, basename, replace_already_indexed=False):
"""
Prepares and stages the audio file to be indexed.
Parameters
----------
basename : str, None
A basename of `/home/random-guy/some-audio-file.wav` is
`some-audio-file.wav`
If basename is `None`, it'll prepare all the audio files.
"""
if basename is not None:
if basename in self.get_timestamps():
if self.get_verbosity():
print('File specified was already indexed. Reindexing...') # depends on [control=['if'], data=[]]
del self.__timestamps[basename] # depends on [control=['if'], data=['basename']]
self._filtering_step(basename)
self._staging_step(basename) # depends on [control=['if'], data=['basename']]
else:
for audio_basename in self._list_audio_files():
if audio_basename in self.__timestamps:
if replace_already_indexed:
if self.get_verbosity():
print('Already indexed {}. Reindexing...'.format(audio_basename)) # depends on [control=['if'], data=[]]
del self.__timestamps[audio_basename] # depends on [control=['if'], data=[]]
else:
if self.get_verbosity():
print('Already indexed {}. Skipping...'.format(audio_basename)) # depends on [control=['if'], data=[]]
continue # depends on [control=['if'], data=['audio_basename']]
self._filtering_step(audio_basename)
self._staging_step(audio_basename) # depends on [control=['for'], data=['audio_basename']] |
def clear(self):
    """
    Removes every rollout item currently contained in this widget.
    Signal emission and repainting are suspended while the children are
    detached so the widget does not notify or redraw once per removed item.
    """
    self.blockSignals(True)
    self.setUpdatesEnabled(False)
    items = self.findChildren(XRolloutItem)
    for item in items:
        # Detach from the widget tree first, then let Qt reclaim the object.
        item.setParent(None)
        item.deleteLater()
    self.setUpdatesEnabled(True)
    self.blockSignals(False)
constant[
Clears out all of the rollout items from the widget.
]
call[name[self].blockSignals, parameter[constant[True]]]
call[name[self].setUpdatesEnabled, parameter[constant[False]]]
for taget[name[child]] in starred[call[name[self].findChildren, parameter[name[XRolloutItem]]]] begin[:]
call[name[child].setParent, parameter[constant[None]]]
call[name[child].deleteLater, parameter[]]
call[name[self].setUpdatesEnabled, parameter[constant[True]]]
call[name[self].blockSignals, parameter[constant[False]]] | keyword[def] identifier[clear] ( identifier[self] ):
literal[string]
identifier[self] . identifier[blockSignals] ( keyword[True] )
identifier[self] . identifier[setUpdatesEnabled] ( keyword[False] )
keyword[for] identifier[child] keyword[in] identifier[self] . identifier[findChildren] ( identifier[XRolloutItem] ):
identifier[child] . identifier[setParent] ( keyword[None] )
identifier[child] . identifier[deleteLater] ()
identifier[self] . identifier[setUpdatesEnabled] ( keyword[True] )
identifier[self] . identifier[blockSignals] ( keyword[False] ) | def clear(self):
"""
Clears out all of the rollout items from the widget.
"""
self.blockSignals(True)
self.setUpdatesEnabled(False)
for child in self.findChildren(XRolloutItem):
child.setParent(None)
child.deleteLater() # depends on [control=['for'], data=['child']]
self.setUpdatesEnabled(True)
self.blockSignals(False) |
def irrad_frac(b, component, solve_for=None, **kwargs):
    """
    Create a constraint to ensure that energy is conserved and all incident
    light is accounted for.
    The bolometric reflected and lost irradiation fractions of *component*
    must sum to one; whichever parameter is being solved for is expressed
    in terms of the other.
    Returns a (lhs, rhs, kwargs) tuple describing the constraint.
    """
    comp_ps = b.get_component(component=component)
    refl = comp_ps.get_parameter(qualifier='irrad_frac_refl_bol')
    lost = comp_ps.get_parameter(qualifier='irrad_frac_lost_bol')
    if solve_for in [lost, None]:
        lhs, rhs = lost, 1.0 - refl
    elif solve_for in [refl]:
        lhs, rhs = refl, 1.0 - lost
    else:
        # Only the two fraction parameters participate in this constraint.
        raise NotImplementedError
    return lhs, rhs, {'component': component}
constant[
Create a constraint to ensure that energy is conserved and all incident
light is accounted for.
]
variable[comp_ps] assign[=] call[name[b].get_component, parameter[]]
variable[irrad_frac_refl_bol] assign[=] call[name[comp_ps].get_parameter, parameter[]]
variable[irrad_frac_lost_bol] assign[=] call[name[comp_ps].get_parameter, parameter[]]
if compare[name[solve_for] in list[[<ast.Name object at 0x7da18f811db0>, <ast.Constant object at 0x7da18f8130a0>]]] begin[:]
variable[lhs] assign[=] name[irrad_frac_lost_bol]
variable[rhs] assign[=] binary_operation[constant[1.0] - name[irrad_frac_refl_bol]]
return[tuple[[<ast.Name object at 0x7da18eb553c0>, <ast.Name object at 0x7da18eb563e0>, <ast.Dict object at 0x7da18eb56440>]]] | keyword[def] identifier[irrad_frac] ( identifier[b] , identifier[component] , identifier[solve_for] = keyword[None] ,** identifier[kwargs] ):
literal[string]
identifier[comp_ps] = identifier[b] . identifier[get_component] ( identifier[component] = identifier[component] )
identifier[irrad_frac_refl_bol] = identifier[comp_ps] . identifier[get_parameter] ( identifier[qualifier] = literal[string] )
identifier[irrad_frac_lost_bol] = identifier[comp_ps] . identifier[get_parameter] ( identifier[qualifier] = literal[string] )
keyword[if] identifier[solve_for] keyword[in] [ identifier[irrad_frac_lost_bol] , keyword[None] ]:
identifier[lhs] = identifier[irrad_frac_lost_bol]
identifier[rhs] = literal[int] - identifier[irrad_frac_refl_bol]
keyword[elif] identifier[solve_for] keyword[in] [ identifier[irrad_frac_refl_bol] ]:
identifier[lhs] = identifier[irrad_frac_refl_bol]
identifier[rhs] = literal[int] - identifier[irrad_frac_lost_bol]
keyword[else] :
keyword[raise] identifier[NotImplementedError]
keyword[return] identifier[lhs] , identifier[rhs] ,{ literal[string] : identifier[component] } | def irrad_frac(b, component, solve_for=None, **kwargs):
"""
Create a constraint to ensure that energy is conserved and all incident
light is accounted for.
"""
comp_ps = b.get_component(component=component)
irrad_frac_refl_bol = comp_ps.get_parameter(qualifier='irrad_frac_refl_bol')
irrad_frac_lost_bol = comp_ps.get_parameter(qualifier='irrad_frac_lost_bol')
if solve_for in [irrad_frac_lost_bol, None]:
lhs = irrad_frac_lost_bol
rhs = 1.0 - irrad_frac_refl_bol # depends on [control=['if'], data=[]]
elif solve_for in [irrad_frac_refl_bol]:
lhs = irrad_frac_refl_bol
rhs = 1.0 - irrad_frac_lost_bol # depends on [control=['if'], data=[]]
else:
raise NotImplementedError
return (lhs, rhs, {'component': component}) |
def delete_agent_profile(self, profile):
    """Delete agent profile doc from LRS
    :param profile: Agent profile document to be deleted
    :type profile: :class:`tincan.documents.agent_profile_document.AgentProfileDocument`
    :return: LRS Response object
    :rtype: :class:`tincan.lrs_response.LRSResponse`
    """
    request = HTTPRequest(method="DELETE", resource="agents/profile")
    request.query_params = {
        "profileId": profile.id,
        "agent": profile.agent.to_json(self.version),
    }
    etag = profile.etag
    if etag is not None:
        # Optimistic concurrency: only delete the document revision we
        # last fetched.
        request.headers["If-Match"] = etag
    return self._send_request(request)
constant[Delete agent profile doc from LRS
:param profile: Agent profile document to be deleted
:type profile: :class:`tincan.documents.agent_profile_document.AgentProfileDocument`
:return: LRS Response object
:rtype: :class:`tincan.lrs_response.LRSResponse`
]
variable[request] assign[=] call[name[HTTPRequest], parameter[]]
name[request].query_params assign[=] dictionary[[<ast.Constant object at 0x7da1b0c65600>, <ast.Constant object at 0x7da1b0c645b0>], [<ast.Attribute object at 0x7da1b0c66920>, <ast.Call object at 0x7da1b0c66f20>]]
if compare[name[profile].etag is_not constant[None]] begin[:]
call[name[request].headers][constant[If-Match]] assign[=] name[profile].etag
return[call[name[self]._send_request, parameter[name[request]]]] | keyword[def] identifier[delete_agent_profile] ( identifier[self] , identifier[profile] ):
literal[string]
identifier[request] = identifier[HTTPRequest] (
identifier[method] = literal[string] ,
identifier[resource] = literal[string]
)
identifier[request] . identifier[query_params] ={
literal[string] : identifier[profile] . identifier[id] ,
literal[string] : identifier[profile] . identifier[agent] . identifier[to_json] ( identifier[self] . identifier[version] )
}
keyword[if] identifier[profile] . identifier[etag] keyword[is] keyword[not] keyword[None] :
identifier[request] . identifier[headers] [ literal[string] ]= identifier[profile] . identifier[etag]
keyword[return] identifier[self] . identifier[_send_request] ( identifier[request] ) | def delete_agent_profile(self, profile):
"""Delete agent profile doc from LRS
:param profile: Agent profile document to be deleted
:type profile: :class:`tincan.documents.agent_profile_document.AgentProfileDocument`
:return: LRS Response object
:rtype: :class:`tincan.lrs_response.LRSResponse`
"""
request = HTTPRequest(method='DELETE', resource='agents/profile')
request.query_params = {'profileId': profile.id, 'agent': profile.agent.to_json(self.version)}
if profile.etag is not None:
request.headers['If-Match'] = profile.etag # depends on [control=['if'], data=[]]
return self._send_request(request) |
def reduce_max(x,
               disable_positional_args=None,
               output_shape=None,
               reduced_dim=None,
               name=None):
  """Max-reduction over 1 or more axes.

  Args:
    x: a Tensor
    disable_positional_args: None (placeholder to forbid positional use)
    output_shape: an optional Shape. Must be a subsequence of x.shape.
    reduced_dim: an optional Dimension
    name: an optional string

  Returns:
    a Tensor
  """
  shape_hint = convert_to_shape(output_shape)
  dim_hint = convert_to_dimension(reduced_dim)
  assert disable_positional_args is None
  result_shape = _reduction_output_shape(x, shape_hint, dim_hint)
  if result_shape is None:
    result_shape = Shape([])
  if result_shape == x.shape:
    # Reducing over nothing: the input already has the requested shape.
    return x
  op = ReduceOperation(x, result_shape, "MAX", name=name or "reduce_max")
  return op.outputs[0]
constant[Reduction on 1 or more axes.
Args:
x: a Tensor
disable_positional_args: None
output_shape: an optional Shape. Must be a subsequence of x.shape.
reduced_dim: an optional Dimension
name: an optional string
Returns:
a Tensor
]
variable[output_shape] assign[=] call[name[convert_to_shape], parameter[name[output_shape]]]
variable[reduced_dim] assign[=] call[name[convert_to_dimension], parameter[name[reduced_dim]]]
assert[compare[name[disable_positional_args] is constant[None]]]
variable[output_shape] assign[=] call[name[_reduction_output_shape], parameter[name[x], name[output_shape], name[reduced_dim]]]
if compare[name[output_shape] is constant[None]] begin[:]
variable[output_shape] assign[=] call[name[Shape], parameter[list[[]]]]
if compare[name[output_shape] equal[==] name[x].shape] begin[:]
return[name[x]]
return[call[call[name[ReduceOperation], parameter[name[x], name[output_shape], constant[MAX]]].outputs][constant[0]]] | keyword[def] identifier[reduce_max] ( identifier[x] ,
identifier[disable_positional_args] = keyword[None] ,
identifier[output_shape] = keyword[None] ,
identifier[reduced_dim] = keyword[None] ,
identifier[name] = keyword[None] ):
literal[string]
identifier[output_shape] = identifier[convert_to_shape] ( identifier[output_shape] )
identifier[reduced_dim] = identifier[convert_to_dimension] ( identifier[reduced_dim] )
keyword[assert] identifier[disable_positional_args] keyword[is] keyword[None]
identifier[output_shape] = identifier[_reduction_output_shape] ( identifier[x] , identifier[output_shape] , identifier[reduced_dim] )
keyword[if] identifier[output_shape] keyword[is] keyword[None] :
identifier[output_shape] = identifier[Shape] ([])
keyword[if] identifier[output_shape] == identifier[x] . identifier[shape] :
keyword[return] identifier[x]
keyword[return] identifier[ReduceOperation] (
identifier[x] , identifier[output_shape] , literal[string] , identifier[name] = identifier[name] keyword[or] literal[string] ). identifier[outputs] [ literal[int] ] | def reduce_max(x, disable_positional_args=None, output_shape=None, reduced_dim=None, name=None):
"""Reduction on 1 or more axes.
Args:
x: a Tensor
disable_positional_args: None
output_shape: an optional Shape. Must be a subsequence of x.shape.
reduced_dim: an optional Dimension
name: an optional string
Returns:
a Tensor
"""
output_shape = convert_to_shape(output_shape)
reduced_dim = convert_to_dimension(reduced_dim)
assert disable_positional_args is None
output_shape = _reduction_output_shape(x, output_shape, reduced_dim)
if output_shape is None:
output_shape = Shape([]) # depends on [control=['if'], data=['output_shape']]
if output_shape == x.shape:
return x # depends on [control=['if'], data=[]]
return ReduceOperation(x, output_shape, 'MAX', name=name or 'reduce_max').outputs[0] |
def get_configuration_set_by_id(self, id):
    '''Finds a configuration set in the component by its ID.
    @param id The ID of the configuration set to search for.
    @return The ConfigurationSet object for the set, or None if it was not
    found.
    '''
    matches = (cs for cs in self.configuration_sets if cs.id == id)
    return next(matches, None)
constant[Finds a configuration set in the component by its ID.
@param id The ID of the configuration set to search for.
@return The ConfigurationSet object for the set, or None if it was not
found.
]
for taget[name[cs]] in starred[name[self].configuration_sets] begin[:]
if compare[name[cs].id equal[==] name[id]] begin[:]
return[name[cs]]
return[constant[None]] | keyword[def] identifier[get_configuration_set_by_id] ( identifier[self] , identifier[id] ):
literal[string]
keyword[for] identifier[cs] keyword[in] identifier[self] . identifier[configuration_sets] :
keyword[if] identifier[cs] . identifier[id] == identifier[id] :
keyword[return] identifier[cs]
keyword[return] keyword[None] | def get_configuration_set_by_id(self, id):
"""Finds a configuration set in the component by its ID.
@param id The ID of the configuration set to search for.
@return The ConfigurationSet object for the set, or None if it was not
found.
"""
for cs in self.configuration_sets:
if cs.id == id:
return cs # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['cs']]
return None |
def signature_unsafe(m: bytes, sk: bytes, pk: bytes) -> bytes:
    """
    Not safe to use with secret keys or secret data.
    See module docstring. This function should be used for testing only.
    Signs message *m* with secret key *sk* and public key *pk*, returning
    the concatenated encoding of the commitment point R and scalar S
    (Ed25519-style R || S layout -- confirm against the module's encodings).
    """
    # Hash the secret key; the digest drives both the secret scalar and the
    # deterministic per-message nonce below.
    h = H(sk)
    # Secret scalar derived from the digest (presumably with the usual
    # Ed25519 bit clamping inside decodecoord -- verify there).
    a = decodecoord(h)
    # Deterministic nonce: hash of the upper half of H(sk) plus the message,
    # so the same (sk, m) always produces the same signature.
    r = Hint(h[b // 8 : b // 4] + m)
    # Commitment point R = r * B (B being the group base point).
    R = scalarmult_B(r)
    # S = (r + H(enc(R) || pk || m) * a) mod l; l is presumably the group
    # order defined at module level.
    S = (r + Hint(encodepoint(R) + pk + m) * a) % l
    return encodepoint(R) + encodeint(S)
constant[
Not safe to use with secret keys or secret data.
See module docstring. This function should be used for testing only.
]
variable[h] assign[=] call[name[H], parameter[name[sk]]]
variable[a] assign[=] call[name[decodecoord], parameter[name[h]]]
variable[r] assign[=] call[name[Hint], parameter[binary_operation[call[name[h]][<ast.Slice object at 0x7da1b07798d0>] + name[m]]]]
variable[R] assign[=] call[name[scalarmult_B], parameter[name[r]]]
variable[S] assign[=] binary_operation[binary_operation[name[r] + binary_operation[call[name[Hint], parameter[binary_operation[binary_operation[call[name[encodepoint], parameter[name[R]]] + name[pk]] + name[m]]]] * name[a]]] <ast.Mod object at 0x7da2590d6920> name[l]]
return[binary_operation[call[name[encodepoint], parameter[name[R]]] + call[name[encodeint], parameter[name[S]]]]] | keyword[def] identifier[signature_unsafe] ( identifier[m] : identifier[bytes] , identifier[sk] : identifier[bytes] , identifier[pk] : identifier[bytes] )-> identifier[bytes] :
literal[string]
identifier[h] = identifier[H] ( identifier[sk] )
identifier[a] = identifier[decodecoord] ( identifier[h] )
identifier[r] = identifier[Hint] ( identifier[h] [ identifier[b] // literal[int] : identifier[b] // literal[int] ]+ identifier[m] )
identifier[R] = identifier[scalarmult_B] ( identifier[r] )
identifier[S] =( identifier[r] + identifier[Hint] ( identifier[encodepoint] ( identifier[R] )+ identifier[pk] + identifier[m] )* identifier[a] )% identifier[l]
keyword[return] identifier[encodepoint] ( identifier[R] )+ identifier[encodeint] ( identifier[S] ) | def signature_unsafe(m: bytes, sk: bytes, pk: bytes) -> bytes:
"""
Not safe to use with secret keys or secret data.
See module docstring. This function should be used for testing only.
"""
h = H(sk)
a = decodecoord(h)
r = Hint(h[b // 8:b // 4] + m)
R = scalarmult_B(r)
S = (r + Hint(encodepoint(R) + pk + m) * a) % l
return encodepoint(R) + encodeint(S) |
def check_status(response):
    '''check the status of the response and if needed raise an APIException'''
    status = response.status_code
    if status < 300:
        # 2xx (and 1xx) responses are considered successful.
        return
    status_to_exception = {
        400: exceptions.BadQuery,
        401: exceptions.InvalidAPIKey,
        403: exceptions.NoPermission,
        404: exceptions.MissingResource,
        429: exceptions.TooManyRequests,
        500: exceptions.ServerError,
    }
    exception = status_to_exception.get(status)
    # A 429 can mean either rate-limiting or an exhausted quota; the body
    # text distinguishes the two.
    if status == 429 and 'quota' in response.text.lower():
        exception = exceptions.OverQuota
    if exception is not None:
        raise exception(response.text)
    # Unmapped error status: fall back to the generic API exception.
    raise exceptions.APIException('%s: %s' % (status, response.text))
constant[check the status of the response and if needed raise an APIException]
variable[status] assign[=] name[response].status_code
if compare[name[status] less[<] constant[300]] begin[:]
return[None]
variable[exception] assign[=] call[dictionary[[<ast.Constant object at 0x7da1b1c38a60>, <ast.Constant object at 0x7da1b1c38a00>, <ast.Constant object at 0x7da1b1c3aec0>, <ast.Constant object at 0x7da1b1c38a30>, <ast.Constant object at 0x7da1b1c39780>, <ast.Constant object at 0x7da1b1c38eb0>], [<ast.Attribute object at 0x7da1b1c3b2b0>, <ast.Attribute object at 0x7da1b1c38430>, <ast.Attribute object at 0x7da1b1c396c0>, <ast.Attribute object at 0x7da1b1c39720>, <ast.Attribute object at 0x7da1b1c3afb0>, <ast.Attribute object at 0x7da1b246c940>]].get, parameter[name[status], constant[None]]]
if <ast.BoolOp object at 0x7da1b246de70> begin[:]
variable[exception] assign[=] name[exceptions].OverQuota
if name[exception] begin[:]
<ast.Raise object at 0x7da1b246df00>
<ast.Raise object at 0x7da1b246c790> | keyword[def] identifier[check_status] ( identifier[response] ):
literal[string]
identifier[status] = identifier[response] . identifier[status_code]
keyword[if] identifier[status] < literal[int] :
keyword[return]
identifier[exception] ={
literal[int] : identifier[exceptions] . identifier[BadQuery] ,
literal[int] : identifier[exceptions] . identifier[InvalidAPIKey] ,
literal[int] : identifier[exceptions] . identifier[NoPermission] ,
literal[int] : identifier[exceptions] . identifier[MissingResource] ,
literal[int] : identifier[exceptions] . identifier[TooManyRequests] ,
literal[int] : identifier[exceptions] . identifier[ServerError]
}. identifier[get] ( identifier[status] , keyword[None] )
keyword[if] identifier[status] == literal[int] keyword[and] literal[string] keyword[in] identifier[response] . identifier[text] . identifier[lower] ():
identifier[exception] = identifier[exceptions] . identifier[OverQuota]
keyword[if] identifier[exception] :
keyword[raise] identifier[exception] ( identifier[response] . identifier[text] )
keyword[raise] identifier[exceptions] . identifier[APIException] ( literal[string] %( identifier[status] , identifier[response] . identifier[text] )) | def check_status(response):
"""check the status of the response and if needed raise an APIException"""
status = response.status_code
if status < 300:
return # depends on [control=['if'], data=[]]
exception = {400: exceptions.BadQuery, 401: exceptions.InvalidAPIKey, 403: exceptions.NoPermission, 404: exceptions.MissingResource, 429: exceptions.TooManyRequests, 500: exceptions.ServerError}.get(status, None)
# differentiate between over quota and rate-limiting
if status == 429 and 'quota' in response.text.lower():
exception = exceptions.OverQuota # depends on [control=['if'], data=[]]
if exception:
raise exception(response.text) # depends on [control=['if'], data=[]]
raise exceptions.APIException('%s: %s' % (status, response.text)) |
def text(self, x, y, text, attr=None):
    u'''Write text at the given position.'''
    # Fall back to the console's current attribute when none is supplied.
    effective_attr = self.attr if attr is None else attr
    pos = self.fixcoord(x, y)
    written = DWORD(0)
    self.WriteConsoleOutputCharacterW(
        self.hout, text, len(text), pos, byref(written))
    # Paint the same cells with the requested colour attribute.
    self.FillConsoleOutputAttribute(
        self.hout, effective_attr, written, pos, byref(written))
constant[Write text at the given position.]
if compare[name[attr] is constant[None]] begin[:]
variable[attr] assign[=] name[self].attr
variable[pos] assign[=] call[name[self].fixcoord, parameter[name[x], name[y]]]
variable[n] assign[=] call[name[DWORD], parameter[constant[0]]]
call[name[self].WriteConsoleOutputCharacterW, parameter[name[self].hout, name[text], call[name[len], parameter[name[text]]], name[pos], call[name[byref], parameter[name[n]]]]]
call[name[self].FillConsoleOutputAttribute, parameter[name[self].hout, name[attr], name[n], name[pos], call[name[byref], parameter[name[n]]]]] | keyword[def] identifier[text] ( identifier[self] , identifier[x] , identifier[y] , identifier[text] , identifier[attr] = keyword[None] ):
literal[string]
keyword[if] identifier[attr] keyword[is] keyword[None] :
identifier[attr] = identifier[self] . identifier[attr]
identifier[pos] = identifier[self] . identifier[fixcoord] ( identifier[x] , identifier[y] )
identifier[n] = identifier[DWORD] ( literal[int] )
identifier[self] . identifier[WriteConsoleOutputCharacterW] ( identifier[self] . identifier[hout] , identifier[text] ,
identifier[len] ( identifier[text] ), identifier[pos] , identifier[byref] ( identifier[n] ))
identifier[self] . identifier[FillConsoleOutputAttribute] ( identifier[self] . identifier[hout] , identifier[attr] , identifier[n] , identifier[pos] , identifier[byref] ( identifier[n] )) | def text(self, x, y, text, attr=None):
u"""Write text at the given position."""
if attr is None:
attr = self.attr # depends on [control=['if'], data=['attr']]
pos = self.fixcoord(x, y)
n = DWORD(0)
self.WriteConsoleOutputCharacterW(self.hout, text, len(text), pos, byref(n))
self.FillConsoleOutputAttribute(self.hout, attr, n, pos, byref(n)) |
def repval(self, limitsok=False):
    """Get a best-effort representative value as a float. This can be
    DANGEROUS because it discards limit information, which is rarely wise."""
    if self.dkind in ('lower', 'upper') and not limitsok:
        # Limits have no single representative value unless the caller
        # explicitly opts in.
        raise LimitError()
    kind = self.dkind
    if kind == 'unif':
        # Uniform interval: represent it by its midpoint.
        lo, hi = (float(v) for v in self.data)
        value = 0.5 * (lo + hi)
    elif kind in _noextra_dkinds:
        value = float(self.data)
    elif kind in _yesextra_dkinds:
        value = float(self.data[0])
    else:
        raise RuntimeError('can\'t happen')
    # Log-valued quantities store the exponent; undo that here.
    return 10**value if self.tkind == 'log10' else value
constant[Get a best-effort representative value as a float. This can be
DANGEROUS because it discards limit information, which is rarely wise.]
if <ast.BoolOp object at 0x7da2054a5120> begin[:]
<ast.Raise object at 0x7da2054a7910>
if compare[name[self].dkind equal[==] constant[unif]] begin[:]
<ast.Tuple object at 0x7da2054a7970> assign[=] call[name[map], parameter[name[float], name[self].data]]
variable[v] assign[=] binary_operation[constant[0.5] * binary_operation[name[lower] + name[upper]]]
if compare[name[self].tkind equal[==] constant[log10]] begin[:]
return[binary_operation[constant[10] ** name[v]]]
return[name[v]] | keyword[def] identifier[repval] ( identifier[self] , identifier[limitsok] = keyword[False] ):
literal[string]
keyword[if] keyword[not] identifier[limitsok] keyword[and] identifier[self] . identifier[dkind] keyword[in] ( literal[string] , literal[string] ):
keyword[raise] identifier[LimitError] ()
keyword[if] identifier[self] . identifier[dkind] == literal[string] :
identifier[lower] , identifier[upper] = identifier[map] ( identifier[float] , identifier[self] . identifier[data] )
identifier[v] = literal[int] *( identifier[lower] + identifier[upper] )
keyword[elif] identifier[self] . identifier[dkind] keyword[in] identifier[_noextra_dkinds] :
identifier[v] = identifier[float] ( identifier[self] . identifier[data] )
keyword[elif] identifier[self] . identifier[dkind] keyword[in] identifier[_yesextra_dkinds] :
identifier[v] = identifier[float] ( identifier[self] . identifier[data] [ literal[int] ])
keyword[else] :
keyword[raise] identifier[RuntimeError] ( literal[string] )
keyword[if] identifier[self] . identifier[tkind] == literal[string] :
keyword[return] literal[int] ** identifier[v]
keyword[return] identifier[v] | def repval(self, limitsok=False):
"""Get a best-effort representative value as a float. This can be
DANGEROUS because it discards limit information, which is rarely wise."""
if not limitsok and self.dkind in ('lower', 'upper'):
raise LimitError() # depends on [control=['if'], data=[]]
if self.dkind == 'unif':
(lower, upper) = map(float, self.data)
v = 0.5 * (lower + upper) # depends on [control=['if'], data=[]]
elif self.dkind in _noextra_dkinds:
v = float(self.data) # depends on [control=['if'], data=[]]
elif self.dkind in _yesextra_dkinds:
v = float(self.data[0]) # depends on [control=['if'], data=[]]
else:
raise RuntimeError("can't happen")
if self.tkind == 'log10':
return 10 ** v # depends on [control=['if'], data=[]]
return v |
def _set_defined_policy(self, v, load=False):
    """
    Setter method for defined_policy, mapped from YANG variable /rbridge_id/secpolicy/defined_policy (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_defined_policy is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_defined_policy() directly.
    YANG Description: Set the defined policy
    """
    # NOTE: generated pyangbind-style setter -- keep the YANGDynClass call
    # and the error payload in sync with the YANG model if regenerated.
    # Unwrap values that arrive already wrapped in a YANG dynamic type.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Re-wrap the value so it carries the container's schema metadata
        # (paths, REST names, CLI extensions) and validates on assignment.
        t = YANGDynClass(v,base=defined_policy.defined_policy, is_container='container', presence=False, yang_name="defined-policy", rest_name="defined-policy", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Defined policy set', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-fc-auth', defining_module='brocade-fc-auth', yang_type='container', is_config=True)
    except (TypeError, ValueError):
        # Surface a structured error describing the expected YANG type.
        raise ValueError({
            'error-string': """defined_policy must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=defined_policy.defined_policy, is_container='container', presence=False, yang_name="defined-policy", rest_name="defined-policy", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Defined policy set', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-fc-auth', defining_module='brocade-fc-auth', yang_type='container', is_config=True)""",
        })
    self.__defined_policy = t
    # Some generated classes define a _set hook to propagate change events.
    if hasattr(self, '_set'):
        self._set()
constant[
Setter method for defined_policy, mapped from YANG variable /rbridge_id/secpolicy/defined_policy (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_defined_policy is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_defined_policy() directly.
YANG Description: Set the defined policy
]
if call[name[hasattr], parameter[name[v], constant[_utype]]] begin[:]
variable[v] assign[=] call[name[v]._utype, parameter[name[v]]]
<ast.Try object at 0x7da18eb55e40>
name[self].__defined_policy assign[=] name[t]
if call[name[hasattr], parameter[name[self], constant[_set]]] begin[:]
call[name[self]._set, parameter[]] | keyword[def] identifier[_set_defined_policy] ( identifier[self] , identifier[v] , identifier[load] = keyword[False] ):
literal[string]
keyword[if] identifier[hasattr] ( identifier[v] , literal[string] ):
identifier[v] = identifier[v] . identifier[_utype] ( identifier[v] )
keyword[try] :
identifier[t] = identifier[YANGDynClass] ( identifier[v] , identifier[base] = identifier[defined_policy] . identifier[defined_policy] , identifier[is_container] = literal[string] , identifier[presence] = keyword[False] , identifier[yang_name] = literal[string] , identifier[rest_name] = literal[string] , identifier[parent] = identifier[self] , identifier[path_helper] = identifier[self] . identifier[_path_helper] , identifier[extmethods] = identifier[self] . identifier[_extmethods] , identifier[register_paths] = keyword[True] , identifier[extensions] ={ literal[string] :{ literal[string] : literal[string] , literal[string] : keyword[None] }}, identifier[namespace] = literal[string] , identifier[defining_module] = literal[string] , identifier[yang_type] = literal[string] , identifier[is_config] = keyword[True] )
keyword[except] ( identifier[TypeError] , identifier[ValueError] ):
keyword[raise] identifier[ValueError] ({
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
})
identifier[self] . identifier[__defined_policy] = identifier[t]
keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ):
identifier[self] . identifier[_set] () | def _set_defined_policy(self, v, load=False):
"""
Setter method for defined_policy, mapped from YANG variable /rbridge_id/secpolicy/defined_policy (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_defined_policy is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_defined_policy() directly.
YANG Description: Set the defined policy
"""
if hasattr(v, '_utype'):
v = v._utype(v) # depends on [control=['if'], data=[]]
try:
t = YANGDynClass(v, base=defined_policy.defined_policy, is_container='container', presence=False, yang_name='defined-policy', rest_name='defined-policy', parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Defined policy set', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-fc-auth', defining_module='brocade-fc-auth', yang_type='container', is_config=True) # depends on [control=['try'], data=[]]
except (TypeError, ValueError):
raise ValueError({'error-string': 'defined_policy must be of a type compatible with container', 'defined-type': 'container', 'generated-type': 'YANGDynClass(base=defined_policy.defined_policy, is_container=\'container\', presence=False, yang_name="defined-policy", rest_name="defined-policy", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u\'tailf-common\': {u\'info\': u\'Defined policy set\', u\'cli-incomplete-no\': None}}, namespace=\'urn:brocade.com:mgmt:brocade-fc-auth\', defining_module=\'brocade-fc-auth\', yang_type=\'container\', is_config=True)'}) # depends on [control=['except'], data=[]]
self.__defined_policy = t
if hasattr(self, '_set'):
self._set() # depends on [control=['if'], data=[]] |
def generate(self, data, width, height, padding=(0, 0, 0, 0), output_format="png", inverted=False):
"""
Generates an identicon image with requested width, height, padding, and
output format, optionally inverting the colours in the indeticon
(swapping background and foreground colours) if requested.
Arguments:
data - Hashed or raw data that will be used for generating the
identicon.
width - Width of resulting identicon image in pixels.
height - Height of resulting identicon image in pixels.
padding - Tuple describing padding around the generated identicon. The
tuple should consist out of four values, where each value is the
number of pixels to use for padding. The order in tuple is: top,
bottom, left, right.
output_format - Output format of resulting identicon image. Supported
formats are anything that is supported by Pillow, plus a special
"ascii" mode.
inverted - Specifies whether the block colours should be inverted or
not. Default is False.
Returns:
Byte representation of an identicon image.
"""
# Calculate the digest, and get byte list.
digest_byte_list = self._data_to_digest_byte_list(data)
# Create the matrix describing which block should be filled-in.
matrix = self._generate_matrix(digest_byte_list)
# Determine the background and foreground colours.
if output_format == "ascii":
foreground = "+"
background = "-"
else:
background = self.background
foreground = self.foreground[digest_byte_list[0] % len(self.foreground)]
# Swtich the colours if inverted image was requested.
if inverted:
foreground, background = background, foreground
# Generate the identicon in requested format.
if output_format == "ascii":
return self._generate_ascii(matrix, foreground, background)
else:
return self._generate_image(matrix, width, height, padding, foreground, background, output_format) | def function[generate, parameter[self, data, width, height, padding, output_format, inverted]]:
constant[
Generates an identicon image with requested width, height, padding, and
output format, optionally inverting the colours in the indeticon
(swapping background and foreground colours) if requested.
Arguments:
data - Hashed or raw data that will be used for generating the
identicon.
width - Width of resulting identicon image in pixels.
height - Height of resulting identicon image in pixels.
padding - Tuple describing padding around the generated identicon. The
tuple should consist out of four values, where each value is the
number of pixels to use for padding. The order in tuple is: top,
bottom, left, right.
output_format - Output format of resulting identicon image. Supported
formats are anything that is supported by Pillow, plus a special
"ascii" mode.
inverted - Specifies whether the block colours should be inverted or
not. Default is False.
Returns:
Byte representation of an identicon image.
]
variable[digest_byte_list] assign[=] call[name[self]._data_to_digest_byte_list, parameter[name[data]]]
variable[matrix] assign[=] call[name[self]._generate_matrix, parameter[name[digest_byte_list]]]
if compare[name[output_format] equal[==] constant[ascii]] begin[:]
variable[foreground] assign[=] constant[+]
variable[background] assign[=] constant[-]
if name[inverted] begin[:]
<ast.Tuple object at 0x7da1b23b0340> assign[=] tuple[[<ast.Name object at 0x7da1b23b0970>, <ast.Name object at 0x7da1b23b0ac0>]]
if compare[name[output_format] equal[==] constant[ascii]] begin[:]
return[call[name[self]._generate_ascii, parameter[name[matrix], name[foreground], name[background]]]] | keyword[def] identifier[generate] ( identifier[self] , identifier[data] , identifier[width] , identifier[height] , identifier[padding] =( literal[int] , literal[int] , literal[int] , literal[int] ), identifier[output_format] = literal[string] , identifier[inverted] = keyword[False] ):
literal[string]
identifier[digest_byte_list] = identifier[self] . identifier[_data_to_digest_byte_list] ( identifier[data] )
identifier[matrix] = identifier[self] . identifier[_generate_matrix] ( identifier[digest_byte_list] )
keyword[if] identifier[output_format] == literal[string] :
identifier[foreground] = literal[string]
identifier[background] = literal[string]
keyword[else] :
identifier[background] = identifier[self] . identifier[background]
identifier[foreground] = identifier[self] . identifier[foreground] [ identifier[digest_byte_list] [ literal[int] ]% identifier[len] ( identifier[self] . identifier[foreground] )]
keyword[if] identifier[inverted] :
identifier[foreground] , identifier[background] = identifier[background] , identifier[foreground]
keyword[if] identifier[output_format] == literal[string] :
keyword[return] identifier[self] . identifier[_generate_ascii] ( identifier[matrix] , identifier[foreground] , identifier[background] )
keyword[else] :
keyword[return] identifier[self] . identifier[_generate_image] ( identifier[matrix] , identifier[width] , identifier[height] , identifier[padding] , identifier[foreground] , identifier[background] , identifier[output_format] ) | def generate(self, data, width, height, padding=(0, 0, 0, 0), output_format='png', inverted=False):
"""
Generates an identicon image with requested width, height, padding, and
output format, optionally inverting the colours in the indeticon
(swapping background and foreground colours) if requested.
Arguments:
data - Hashed or raw data that will be used for generating the
identicon.
width - Width of resulting identicon image in pixels.
height - Height of resulting identicon image in pixels.
padding - Tuple describing padding around the generated identicon. The
tuple should consist out of four values, where each value is the
number of pixels to use for padding. The order in tuple is: top,
bottom, left, right.
output_format - Output format of resulting identicon image. Supported
formats are anything that is supported by Pillow, plus a special
"ascii" mode.
inverted - Specifies whether the block colours should be inverted or
not. Default is False.
Returns:
Byte representation of an identicon image.
"""
# Calculate the digest, and get byte list.
digest_byte_list = self._data_to_digest_byte_list(data)
# Create the matrix describing which block should be filled-in.
matrix = self._generate_matrix(digest_byte_list)
# Determine the background and foreground colours.
if output_format == 'ascii':
foreground = '+'
background = '-' # depends on [control=['if'], data=[]]
else:
background = self.background
foreground = self.foreground[digest_byte_list[0] % len(self.foreground)]
# Swtich the colours if inverted image was requested.
if inverted:
(foreground, background) = (background, foreground) # depends on [control=['if'], data=[]]
# Generate the identicon in requested format.
if output_format == 'ascii':
return self._generate_ascii(matrix, foreground, background) # depends on [control=['if'], data=[]]
else:
return self._generate_image(matrix, width, height, padding, foreground, background, output_format) |
def codes_write(handle, outfile):
# type: (cffi.FFI.CData, T.BinaryIO) -> None
"""
Write a coded message to a file. If the file does not exist, it is created.
:param str path: (optional) the path to the GRIB file;
defaults to the one of the open index.
"""
mess = ffi.new('const void **')
mess_len = ffi.new('size_t*')
codes_get_message = check_return(lib.codes_get_message)
codes_get_message(handle, mess, mess_len)
message = ffi.buffer(mess[0], size=mess_len[0])
outfile.write(message) | def function[codes_write, parameter[handle, outfile]]:
constant[
Write a coded message to a file. If the file does not exist, it is created.
:param str path: (optional) the path to the GRIB file;
defaults to the one of the open index.
]
variable[mess] assign[=] call[name[ffi].new, parameter[constant[const void **]]]
variable[mess_len] assign[=] call[name[ffi].new, parameter[constant[size_t*]]]
variable[codes_get_message] assign[=] call[name[check_return], parameter[name[lib].codes_get_message]]
call[name[codes_get_message], parameter[name[handle], name[mess], name[mess_len]]]
variable[message] assign[=] call[name[ffi].buffer, parameter[call[name[mess]][constant[0]]]]
call[name[outfile].write, parameter[name[message]]] | keyword[def] identifier[codes_write] ( identifier[handle] , identifier[outfile] ):
literal[string]
identifier[mess] = identifier[ffi] . identifier[new] ( literal[string] )
identifier[mess_len] = identifier[ffi] . identifier[new] ( literal[string] )
identifier[codes_get_message] = identifier[check_return] ( identifier[lib] . identifier[codes_get_message] )
identifier[codes_get_message] ( identifier[handle] , identifier[mess] , identifier[mess_len] )
identifier[message] = identifier[ffi] . identifier[buffer] ( identifier[mess] [ literal[int] ], identifier[size] = identifier[mess_len] [ literal[int] ])
identifier[outfile] . identifier[write] ( identifier[message] ) | def codes_write(handle, outfile):
# type: (cffi.FFI.CData, T.BinaryIO) -> None
'\n Write a coded message to a file. If the file does not exist, it is created.\n\n :param str path: (optional) the path to the GRIB file;\n defaults to the one of the open index.\n '
mess = ffi.new('const void **')
mess_len = ffi.new('size_t*')
codes_get_message = check_return(lib.codes_get_message)
codes_get_message(handle, mess, mess_len)
message = ffi.buffer(mess[0], size=mess_len[0])
outfile.write(message) |
def write(self, obj):
""" Print object on output """
accept = self.request.headers.get("Accept")
if "json" in accept:
if JsonDefaultHandler.__parser is None:
JsonDefaultHandler.__parser = Parser()
super(JsonDefaultHandler, self).write(JsonDefaultHandler.__parser.encode(obj))
return
# If we are not in json mode
super(JsonDefaultHandler, self).write(obj) | def function[write, parameter[self, obj]]:
constant[ Print object on output ]
variable[accept] assign[=] call[name[self].request.headers.get, parameter[constant[Accept]]]
if compare[constant[json] in name[accept]] begin[:]
if compare[name[JsonDefaultHandler].__parser is constant[None]] begin[:]
name[JsonDefaultHandler].__parser assign[=] call[name[Parser], parameter[]]
call[call[name[super], parameter[name[JsonDefaultHandler], name[self]]].write, parameter[call[name[JsonDefaultHandler].__parser.encode, parameter[name[obj]]]]]
return[None]
call[call[name[super], parameter[name[JsonDefaultHandler], name[self]]].write, parameter[name[obj]]] | keyword[def] identifier[write] ( identifier[self] , identifier[obj] ):
literal[string]
identifier[accept] = identifier[self] . identifier[request] . identifier[headers] . identifier[get] ( literal[string] )
keyword[if] literal[string] keyword[in] identifier[accept] :
keyword[if] identifier[JsonDefaultHandler] . identifier[__parser] keyword[is] keyword[None] :
identifier[JsonDefaultHandler] . identifier[__parser] = identifier[Parser] ()
identifier[super] ( identifier[JsonDefaultHandler] , identifier[self] ). identifier[write] ( identifier[JsonDefaultHandler] . identifier[__parser] . identifier[encode] ( identifier[obj] ))
keyword[return]
identifier[super] ( identifier[JsonDefaultHandler] , identifier[self] ). identifier[write] ( identifier[obj] ) | def write(self, obj):
""" Print object on output """
accept = self.request.headers.get('Accept')
if 'json' in accept:
if JsonDefaultHandler.__parser is None:
JsonDefaultHandler.__parser = Parser() # depends on [control=['if'], data=[]]
super(JsonDefaultHandler, self).write(JsonDefaultHandler.__parser.encode(obj))
return # depends on [control=['if'], data=[]]
# If we are not in json mode
super(JsonDefaultHandler, self).write(obj) |
def _split_after_delimiter(self, item, indent_amt):
"""Split the line only after a delimiter."""
self._delete_whitespace()
if self.fits_on_current_line(item.size):
return
last_space = None
for item in reversed(self._lines):
if (
last_space and
(not isinstance(item, Atom) or not item.is_colon)
):
break
else:
last_space = None
if isinstance(item, self._Space):
last_space = item
if isinstance(item, (self._LineBreak, self._Indent)):
return
if not last_space:
return
self.add_line_break_at(self._lines.index(last_space), indent_amt) | def function[_split_after_delimiter, parameter[self, item, indent_amt]]:
constant[Split the line only after a delimiter.]
call[name[self]._delete_whitespace, parameter[]]
if call[name[self].fits_on_current_line, parameter[name[item].size]] begin[:]
return[None]
variable[last_space] assign[=] constant[None]
for taget[name[item]] in starred[call[name[reversed], parameter[name[self]._lines]]] begin[:]
if <ast.BoolOp object at 0x7da20c6c5630> begin[:]
break
if call[name[isinstance], parameter[name[item], name[self]._Space]] begin[:]
variable[last_space] assign[=] name[item]
if call[name[isinstance], parameter[name[item], tuple[[<ast.Attribute object at 0x7da20c6c7130>, <ast.Attribute object at 0x7da20c6c4bb0>]]]] begin[:]
return[None]
if <ast.UnaryOp object at 0x7da20c6c49d0> begin[:]
return[None]
call[name[self].add_line_break_at, parameter[call[name[self]._lines.index, parameter[name[last_space]]], name[indent_amt]]] | keyword[def] identifier[_split_after_delimiter] ( identifier[self] , identifier[item] , identifier[indent_amt] ):
literal[string]
identifier[self] . identifier[_delete_whitespace] ()
keyword[if] identifier[self] . identifier[fits_on_current_line] ( identifier[item] . identifier[size] ):
keyword[return]
identifier[last_space] = keyword[None]
keyword[for] identifier[item] keyword[in] identifier[reversed] ( identifier[self] . identifier[_lines] ):
keyword[if] (
identifier[last_space] keyword[and]
( keyword[not] identifier[isinstance] ( identifier[item] , identifier[Atom] ) keyword[or] keyword[not] identifier[item] . identifier[is_colon] )
):
keyword[break]
keyword[else] :
identifier[last_space] = keyword[None]
keyword[if] identifier[isinstance] ( identifier[item] , identifier[self] . identifier[_Space] ):
identifier[last_space] = identifier[item]
keyword[if] identifier[isinstance] ( identifier[item] ,( identifier[self] . identifier[_LineBreak] , identifier[self] . identifier[_Indent] )):
keyword[return]
keyword[if] keyword[not] identifier[last_space] :
keyword[return]
identifier[self] . identifier[add_line_break_at] ( identifier[self] . identifier[_lines] . identifier[index] ( identifier[last_space] ), identifier[indent_amt] ) | def _split_after_delimiter(self, item, indent_amt):
"""Split the line only after a delimiter."""
self._delete_whitespace()
if self.fits_on_current_line(item.size):
return # depends on [control=['if'], data=[]]
last_space = None
for item in reversed(self._lines):
if last_space and (not isinstance(item, Atom) or not item.is_colon):
break # depends on [control=['if'], data=[]]
else:
last_space = None
if isinstance(item, self._Space):
last_space = item # depends on [control=['if'], data=[]]
if isinstance(item, (self._LineBreak, self._Indent)):
return # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['item']]
if not last_space:
return # depends on [control=['if'], data=[]]
self.add_line_break_at(self._lines.index(last_space), indent_amt) |
def get_physical_coordinates(self, i):
"""
For a pixel index i, return the real-world coordinates in nanometers.
This is equivalent to multiplying the image coordinates of the given pixel with the pixel size.
:param i: the pixel index
:return: a tuple of x and y coordinates.
:rtype: Tuple[float]
:raises KeyError: if the .imzML file does not specify the attributes "pixel size x" and "pixel size y"
"""
try:
pixel_size_x = self.imzmldict["pixel size x"]
pixel_size_y = self.imzmldict["pixel size y"]
except KeyError:
raise KeyError("Could not find all pixel size attributes in imzML file")
image_x, image_y = self.coordinates[i][:2]
return image_x * pixel_size_x, image_y * pixel_size_y | def function[get_physical_coordinates, parameter[self, i]]:
constant[
For a pixel index i, return the real-world coordinates in nanometers.
This is equivalent to multiplying the image coordinates of the given pixel with the pixel size.
:param i: the pixel index
:return: a tuple of x and y coordinates.
:rtype: Tuple[float]
:raises KeyError: if the .imzML file does not specify the attributes "pixel size x" and "pixel size y"
]
<ast.Try object at 0x7da1b050f700>
<ast.Tuple object at 0x7da1b050f3d0> assign[=] call[call[name[self].coordinates][name[i]]][<ast.Slice object at 0x7da1b050f7f0>]
return[tuple[[<ast.BinOp object at 0x7da1b050fa90>, <ast.BinOp object at 0x7da1b050d540>]]] | keyword[def] identifier[get_physical_coordinates] ( identifier[self] , identifier[i] ):
literal[string]
keyword[try] :
identifier[pixel_size_x] = identifier[self] . identifier[imzmldict] [ literal[string] ]
identifier[pixel_size_y] = identifier[self] . identifier[imzmldict] [ literal[string] ]
keyword[except] identifier[KeyError] :
keyword[raise] identifier[KeyError] ( literal[string] )
identifier[image_x] , identifier[image_y] = identifier[self] . identifier[coordinates] [ identifier[i] ][: literal[int] ]
keyword[return] identifier[image_x] * identifier[pixel_size_x] , identifier[image_y] * identifier[pixel_size_y] | def get_physical_coordinates(self, i):
"""
For a pixel index i, return the real-world coordinates in nanometers.
This is equivalent to multiplying the image coordinates of the given pixel with the pixel size.
:param i: the pixel index
:return: a tuple of x and y coordinates.
:rtype: Tuple[float]
:raises KeyError: if the .imzML file does not specify the attributes "pixel size x" and "pixel size y"
"""
try:
pixel_size_x = self.imzmldict['pixel size x']
pixel_size_y = self.imzmldict['pixel size y'] # depends on [control=['try'], data=[]]
except KeyError:
raise KeyError('Could not find all pixel size attributes in imzML file') # depends on [control=['except'], data=[]]
(image_x, image_y) = self.coordinates[i][:2]
return (image_x * pixel_size_x, image_y * pixel_size_y) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.