def _build(self, input_sequence, state):
"""Connects the BidirectionalRNN module into the graph.
Args:
input_sequence: tensor (time, batch, [feature_1, ..]). It must be
time_major.
state: tuple of states for the forward and backward cores.
Returns:
A dict with forward/backward states and output sequences:
"outputs":{
"forward": ...,
"backward": ...},
"state": {
"forward": ...,
"backward": ...}
Raises:
ValueError: in case time dimension is not statically known.
"""
input_shape = input_sequence.get_shape()
if input_shape[0] is None:
raise ValueError("Time dimension of input (dim 0) must be statically"
"known.")
seq_length = int(input_shape[0])
forward_state, backward_state = state
# Lists for the forward and backward outputs and states.
output_sequence_f = []
output_sequence_b = []
# Forward pass over the sequence.
with tf.name_scope("forward_rnn"):
core_state = forward_state
for i in six.moves.range(seq_length):
core_output, core_state = self._forward_core(
input_sequence[i, :], core_state)
output_sequence_f.append((core_output, core_state))
output_sequence_f = nest.map_structure(
lambda *vals: tf.stack(vals), *output_sequence_f)
# Backward pass over the sequence.
with tf.name_scope("backward_rnn"):
core_state = backward_state
for i in six.moves.range(seq_length - 1, -1, -1):
core_output, core_state = self._backward_core(
input_sequence[i, :], core_state)
output_sequence_b.append((core_output, core_state))
output_sequence_b = nest.map_structure(
lambda *vals: tf.stack(vals), *output_sequence_b)
# Compose the full output and state sequences.
return {
"outputs": {
"forward": output_sequence_f[0],
"backward": output_sequence_b[0]
},
"state": {
"forward": output_sequence_f[1],
"backward": output_sequence_b[1]
}
}
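# A standalone numpy sketch (hypothetical, not part of this module) of what the
# nest.map_structure(lambda *vals: tf.stack(vals), *sequence) calls above do:
# turn a per-step list of (output, state) tuples into one tuple of time-stacked
# arrays.
import numpy as np

steps = [(np.full(2, t), np.full(3, t)) for t in range(4)]  # (output, state) per step
stacked = tuple(np.stack(vals) for vals in zip(*steps))
print(stacked[0].shape, stacked[1].shape)  # (4, 2) (4, 3)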
def max_cardinality_heuristic(G):
"""Computes an upper bound on the treewidth of graph G based on
the max-cardinality heuristic for the elimination ordering.
Parameters
----------
G : NetworkX graph
The graph on which to compute an upper bound for the treewidth.
Returns
-------
treewidth_upper_bound : int
An upper bound on the treewidth of the graph G.
order : list
An elimination order that induces the treewidth.
Examples
--------
This example computes an upper bound for the treewidth of the :math:`K_4`
complete graph.
>>> import dwave_networkx as dnx
>>> import networkx as nx
>>> K_4 = nx.complete_graph(4)
>>> dnx.max_cardinality_heuristic(K_4)
(3, [3, 1, 0, 2])
References
----------
Based on the algorithm presented in [GD]_
"""
# we need only deal with the adjacency structure of G. We will also
# be manipulating it directly so let's go ahead and make a new one
adj = {v: set(G[v]) for v in G}
num_nodes = len(adj)
# preallocate the return values
order = [0] * num_nodes
upper_bound = 0
# we will need to track the nodes and how many labelled neighbors
# each node has
labelled_neighbors = {v: 0 for v in adj}
# working backwards
for i in range(num_nodes):
# pick the node with the most labelled neighbors; random() breaks ties
v = max(labelled_neighbors, key=lambda u: labelled_neighbors[u] + random())
del labelled_neighbors[v]
# increment all of its neighbors
for u in adj[v]:
if u in labelled_neighbors:
labelled_neighbors[u] += 1
order[-(i + 1)] = v
for v in order:
# if the number of neighbours of v is higher than upper_bound, update
dv = len(adj[v])
if dv > upper_bound:
upper_bound = dv
# make v simplicial by making its neighborhood a clique, then remove it
_elim_adj(adj, v)
return upper_bound, order
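# Usage sketch mirroring the doctest above (requires networkx and
# dwave_networkx; random tie-breaking means the order may differ between runs).
import networkx as nx
import dwave_networkx as dnx

ub, order = dnx.max_cardinality_heuristic(nx.cycle_graph(5))
print(ub)     # 2 -- a cycle graph has treewidth 2
print(order)  # one valid elimination order of the five nodes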
def getActiveSegment(self, c, i, timeStep):
""" For a given cell, return the segment with the strongest _connected_
activation, i.e. sum up the activations of the connected synapses of the
segments only. That is, a segment is active only if it has enough connected
synapses.
"""
# todo: put back preference for sequence segments.
nSegments = len(self.cells[c][i])
bestActivation = self.activationThreshold
which = -1
for j,s in enumerate(self.cells[c][i]):
activity = self.getSegmentActivityLevel(s, self.activeState[timeStep], connectedSynapsesOnly = True)
if activity >= bestActivation:
bestActivation = activity
which = j
if which != -1:
return self.cells[c][i][which]
else:
return None
def basemz(df):
"""
The mz of the most abundant ion.
"""
# m/z of the column with the maximum intensity in each row
d = np.array(df.columns)[df.values.argmax(axis=1)]
return Trace(d, df.index, name='basemz')
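# A minimal pandas illustration of the argmax lookup above (Trace is part of
# the surrounding package, so here we only show the core expression).
import numpy as np
import pandas as pd

df = pd.DataFrame([[1, 5, 2], [7, 0, 3]], index=[0.1, 0.2],
                  columns=[100.0, 150.0, 200.0])  # rows: time, columns: m/z
print(np.array(df.columns)[df.values.argmax(axis=1)])  # [150. 100.]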
def shelter_get(self, **kwargs):
"""
shelter.get wrapper. Given a shelter ID, retrieve its details in
dict form.
:rtype: dict
:returns: The shelter's details.
"""
root = self._do_api_call("shelter.get", kwargs)
shelter = root.find("shelter")
record = {}
for field in shelter:
    record[field.tag] = field.text
return record
def value(self):
"""Retrieve the data value of this attachment.
Returns the filename of the attachment if one is available, otherwise None.
Use save_as in order to download as a file.
Example
-------
>>> file_attachment_property = project.part('Bike').property('file_attachment')
>>> if file_attachment_property.value:
... file_attachment_property.save_as('file.ext')
... else:
... print('file attachment not set, its value is None')
"""
if 'value' in self._json_data and self._json_data['value']:
return "[Attachment: {}]".format(self._json_data['value'].split('/')[-1])
else:
return None
def __calculate_adjacency_lists(graph):
"""Builds an adjacency list representation for the graph, since we can't guarantee that the
internal representation of the graph is stored that way."""
adj = {}
for node in graph.get_all_node_ids():
neighbors = graph.neighbors(node)
adj[node] = neighbors
return adj
def t_doublequote_end(self, t):
r'"'
t.value = t.lexer.string_value
t.type = 'ID'
t.lexer.string_value = None
t.lexer.pop_state()
return t
def infer_getattr(node, context=None):
"""Understand getattr calls
If one of the arguments is an Uninferable object, then the
result will be an Uninferable object. Otherwise, the normal attribute
lookup will be done.
"""
obj, attr = _infer_getattr_args(node, context)
if (
obj is util.Uninferable
or attr is util.Uninferable
or not hasattr(obj, "igetattr")
):
return util.Uninferable
try:
return next(obj.igetattr(attr, context=context))
except (StopIteration, InferenceError, AttributeInferenceError):
if len(node.args) == 3:
# Try to infer the default and return it instead.
try:
return next(node.args[2].infer(context=context))
except InferenceError:
raise UseInferenceDefault
raise UseInferenceDefault
def _send(self, msg, buffers=None):
"""Sends a message to the model in the front-end."""
if self.comm is not None and self.comm.kernel is not None:
self.comm.send(data=msg, buffers=buffers)
def display_initialize(self):
"""Display 'please wait' message, and narrow build warning."""
echo(self.term.home + self.term.clear)
echo(self.term.move_y(self.term.height // 2))
echo(self.term.center('Initializing page data ...').rstrip())
flushout()
if LIMIT_UCS == 0x10000:
echo('\n\n')
echo(self.term.blink_red(self.term.center(
'narrow Python build: upperbound value is {n}.'
.format(n=LIMIT_UCS)).rstrip()))
echo('\n\n')
flushout()
async def get_entity_by_id(self, get_entity_by_id_request):
"""Return one or more user entities.
Searching by phone number only finds entities when their phone number
is in your contacts (and not always even then), and can't be used to
find Google Voice contacts.
"""
response = hangouts_pb2.GetEntityByIdResponse()
await self._pb_request('contacts/getentitybyid',
get_entity_by_id_request, response)
return response
def run(main=None, argv=None, **flags):
"""
:param main: main or sys.modules['__main__'].main
:param argv: argument list used in argument parse
:param flags: flags to define with defaults
:return:
"""
"""Runs the program with an optional 'main' function and 'argv' list."""
import sys as _sys
import inspect
main = main or _sys.modules['__main__'].main
if main.__doc__:
docstring = main.__doc__.split(':param')[0]
_parser.usage = 'from docstring \n {}'.format(docstring) # add_help
# if not flags:
try:
a = inspect.getfullargspec(main)
except AttributeError:
a = inspect.getargspec(main) # namedtuple(args, varargs, keywords, defaults)
if a.defaults:
kwargs = dict(zip(reversed(a.args), reversed(a.defaults)))
add_flag(**kwargs)
else:
kwargs = dict()
# add to command argument
if a.defaults is None:
nargs = len(a.args)
else:
nargs = len(a.args) - len(a.defaults)
# if nargs > 0:
posargs = a.args[:nargs]
flag.add_args(posargs)
add_flag(**flags)
# Extract the args from the optional `argv` list.
args = argv[1:] if argv else None
# Parse the known flags from that list, or from the command
# line otherwise.
unparsed, kw = flag._parse_flags_kw(args=args)
d = flag.__dict__['__flags']
args = [d[k] for k in posargs]
args += unparsed
kwargs.update({k: d[k] for k in kwargs.keys()})
kwargs.update(kw)
# Call the main function, passing through any arguments and the
# parsed flags as kwargs, to the final program.
_sys.exit(main(*args, **kwargs))
def channels_leave(self, room_id, **kwargs):
"""Causes the callee to be removed from the channel."""
return self.__call_api_post('channels.leave', roomId=room_id, kwargs=kwargs)
def _array_type_std_res(self, counts, total, colsum, rowsum):
"""Return ndarray containing standard residuals for array values.
The shape of the return value is the same as that of *counts*.
Array variables require special processing because of the
underlying math. Essentially, it boils down to the fact that the
variable dimensions are mutually independent, and standard residuals
are calculated for each of them separately, and then stacked together
in the resulting array.
"""
if self.mr_dim_ind == 0:
# --This is a special case where broadcasting cannot be
# --automatically done. We need to "inflate" the single dimensional
# --ndarrays, to be able to treat them as "columns" (essentially a
# --Nx1 ndarray). This is needed for subsequent multiplication
# --that needs to happen column wise (rowsum * colsum) / total.
total = total[:, np.newaxis]
rowsum = rowsum[:, np.newaxis]
expected_counts = rowsum * colsum / total
variance = rowsum * colsum * (total - rowsum) * (total - colsum) / total ** 3
return (counts - expected_counts) / np.sqrt(variance)
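# A standalone numpy sketch of the standardized-residual formula used above,
# applied to an ordinary 2-D contingency table (no MR-dimension broadcasting).
import numpy as np

counts = np.array([[20.0, 10.0], [15.0, 25.0]])
total = counts.sum()
rowsum = counts.sum(axis=1)[:, np.newaxis]  # column vector for broadcasting
colsum = counts.sum(axis=0)
expected = rowsum * colsum / total
variance = rowsum * colsum * (total - rowsum) * (total - colsum) / total ** 3
print((counts - expected) / np.sqrt(variance))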
def gradient(self):
r"""Gradient operator of the functional.
The functional is not differentiable in ``x=0``. However, when
evaluating the gradient operator in this point it will return 0.
Notes
-----
The gradient is given by

.. math::
    \left[ \nabla \| \|f\|_1 \|_1 \right]_i = \frac{f_i}{|f_i|}

for ``p = 1``,

.. math::
    \left[ \nabla \| \|f\|_2 \|_1 \right]_i = \frac{f_i}{\|f\|_2}

for ``p = 2``, and otherwise

.. math::
    \left[ \nabla \| \|f\|_p \|_1 \right]_i =
        \frac{|f_i|^{p-2} f_i}{\|f\|_p^{p-1}}
"""
functional = self
class GroupL1Gradient(Operator):
"""The gradient operator of the `GroupL1Norm` functional."""
def __init__(self):
"""Initialize a new instance."""
super(GroupL1Gradient, self).__init__(
functional.domain, functional.domain, linear=False)
def _call(self, x, out):
"""Return ``self(x)``."""
pwnorm_x = functional.pointwise_norm(x)
pwnorm_x.ufuncs.sign(out=pwnorm_x)
functional.pointwise_norm.derivative(x).adjoint(pwnorm_x,
out=out)
return out
return GroupL1Gradient()
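# A quick numerical check (plain numpy, outside the operator API) of the
# general p-norm gradient formula from the Notes; for p = 2 it reduces to
# f / ||f||_2.
import numpy as np

f = np.array([3.0, 4.0])
p = 2
grad = np.abs(f) ** (p - 2) * f / np.linalg.norm(f, p) ** (p - 1)
print(grad)  # [0.6 0.8]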
def _val_to_store_info(self, val):
"""
Transform val to a storable representation, returning a
``(value, flags)`` tuple of the new value and its storage flags.
if isinstance(val, str):
return val, 0
elif isinstance(val, int):
return "%d" % val, Client._FLAG_INTEGER
elif isinstance(val, long):
return "%d" % val, Client._FLAG_LONG
return pickle.dumps(val, protocol=pickle.HIGHEST_PROTOCOL), Client._FLAG_PICKLE
def full_name(self):
"""
Obtains the full name of the actor.
:return: the full name
:rtype: str
"""
if self._full_name is None:
fn = self.name.replace(".", "\\.")
parent = self._parent
if parent is not None:
fn = parent.full_name + "." + fn
self._full_name = fn
return self._full_name
def do_execute(self):
"""
The actual execution of the actor.
:return: None if successful, otherwise error message
:rtype: str
"""
if self.storagehandler is None:
return "No storage handler available!"
expr = str(self.resolve_option("expression")).replace(
"{X}", str(self.storagehandler.storage[str(self.resolve_option("storage_name"))]))
expr = self.storagehandler.expand(expr)
self.storagehandler.storage[self.resolve_option("storage_name")] = eval(expr)
self._output.append(self.input)
return None
def get_contract_factory(self, name: ContractName) -> Contract:
"""
Return the contract factory for a given contract type, generated from the data available
in ``Package.manifest``. Contract factories are accessible from the package class.
.. code:: python
Owned = OwnedPackage.get_contract_factory('owned')
In cases where a contract uses a library, the contract factory will have
unlinked bytecode. The ``ethpm`` package ships with its own subclass of
``web3.contract.Contract``, ``ethpm.contract.LinkableContract`` with a few extra
methods and properties related to bytecode linking.
.. code:: python
>>> math = owned_package.contract_factories.math
>>> math.needs_bytecode_linking
True
>>> linked_math = math.link_bytecode({'MathLib': '0x1234...'})
>>> linked_math.needs_bytecode_linking
False
"""
validate_contract_name(name)
if "contract_types" not in self.manifest:
raise InsufficientAssetsError(
"This package does not contain any contract type data."
)
try:
contract_data = self.manifest["contract_types"][name]
except KeyError:
raise InsufficientAssetsError(
"This package does not contain any package data to generate "
f"a contract factory for contract type: {name}. Available contract types include: "
f"{ list(self.manifest['contract_types'].keys()) }."
)
validate_minimal_contract_factory_data(contract_data)
contract_kwargs = generate_contract_factory_kwargs(contract_data)
contract_factory = self.w3.eth.contract(**contract_kwargs)
return contract_factory
def Run(self):
"""Retrieve all the clients for the AbstractClientStatsCollectors."""
try:
self.stats = {}
self.BeginProcessing()
processed_count = 0
for client_info_batch in _IterateAllClients(
recency_window=self.recency_window):
for client_info in client_info_batch:
self.ProcessClientFullInfo(client_info)
processed_count += len(client_info_batch)
self.Log("Processed %d clients.", processed_count)
self.HeartBeat()
self.FinishProcessing()
for fd in itervalues(self.stats):
fd.Close()
logging.info("%s: processed %d clients.", self.__class__.__name__,
processed_count)
except Exception as e: # pylint: disable=broad-except
logging.exception("Error while calculating stats: %s", e)
raise
def _dict_seq_locus(list_c, loci_obj, seq_obj):
"""
return dict relating sequences shared by multiple clusters to those clusters, scored by best locus coverage
"""
seqs = defaultdict(set)
# n = len(list_c.keys())
for c in list_c.values():
for l in c.loci2seq:
for s in c.loci2seq[l]:
    seqs[s].add(c.id)
common = [s for s in seqs if len(seqs[s]) > 1]
seqs_in_c = defaultdict(float)
for c in list_c.values():
for l in c.loci2seq:
# total = sum([v for v in loci_obj[l].coverage.values()])
for s in c.loci2seq[l]:
if s in common:
pos = seq_obj[s].pos[l]
# cov = 1.0 * loci_obj[l].coverage[pos] / total
cov = 1.0 * loci_obj[l].coverage[pos]
if seqs_in_c[(s, c.id)] < cov:
seqs_in_c[(s, c.id)] = cov
seqs_in_c = _transform(seqs_in_c)
return seqs_in_c
def init_pop(self):
"""initializes population of features as GP stacks."""
pop = Pop(self.population_size)
seed_with_raw_features = False
# make programs
if self.seed_with_ml:
# initial population is the components of the default ml model
if (self.ml_type == 'SVC' or self.ml_type == 'SVR'):
    # this is needed because svm has a bug that throws ValueError
    # on attribute check
    seed_with_raw_features = True
elif (hasattr(self.pipeline.named_steps['ml'],'coef_') or
hasattr(self.pipeline.named_steps['ml'],'feature_importances_')):
# add model components with non-zero coefficients to initial
# population, in order of coefficient size
coef = (self.pipeline.named_steps['ml'].coef_ if
hasattr(self.pipeline.named_steps['ml'],'coef_') else
self.pipeline.named_steps['ml'].feature_importances_)
# compress multiple coefficients for each feature into single
# numbers (occurs with multiclass classification)
if len(coef.shape)>1:
coef = [np.mean(abs(c)) for c in coef.transpose()]
# remove zeros
coef = [c for c in coef if c!=0]
# sort feature locations based on importance/coefficient
locs = np.arange(len(coef))
locs = locs[np.argsort(np.abs(coef))[::-1]]
for i,p in enumerate(pop.individuals):
if i < len(locs):
p.stack = [node('x',loc=locs[i])]
else:
# make program if pop is bigger than n_features
self.make_program(p.stack,self.func_set,self.term_set,
self.random_state.randint(self.min_depth,
self.max_depth+1),
self.otype)
p.stack = list(reversed(p.stack))
else:
seed_with_raw_features = True
# seed with random features if no importance info available
if seed_with_raw_features:
for i,p in enumerate(pop.individuals):
if i < self.n_features:
p.stack = [node('x',
loc=self.random_state.randint(self.n_features))]
else:
# make program if pop is bigger than n_features
self.make_program(p.stack,self.func_set,self.term_set,
self.random_state.randint(self.min_depth,
self.max_depth+1),
self.otype)
p.stack = list(reversed(p.stack))
# print initial population
if self.verbosity > 2:
print("seeded initial population:",
self.stacks_2_eqns(pop.individuals))
else: # don't seed with ML
for I in pop.individuals:
depth = self.random_state.randint(self.min_depth,self.max_depth_init)
self.make_program(I.stack,self.func_set,self.term_set,depth,
self.otype)
#print(I.stack)
I.stack = list(reversed(I.stack))
return pop
def setMAC(self, xEUI):
"""set the extended addresss of Thread device
Args:
xEUI: extended address in hex format
Returns:
True: successful to set the extended address
False: fail to set the extended address
"""
print '%s call setMAC' % self.port
address64 = ''
try:
if not xEUI:
    address64 = self.mac
elif not isinstance(xEUI, str):
    address64 = self.__convertLongToString(xEUI)
# prepend 0 at the beginning
if len(address64) < 16:
address64 = address64.zfill(16)
print address64
else:
address64 = xEUI
cmd = WPANCTL_CMD + 'setprop NCP:MACAddress %s' % address64
if self.__sendCommand(cmd)[0] != 'Fail':
self.mac = address64
return True
else:
return False
except Exception, e:
ModuleHelper.WriteIntoDebugLogger('setMAC() Error: ' + str(e))
def _new_song(self):
'''
Used internally to get a metasong index.
'''
# We'll need this later
s = self.song
if self.shuffle:
# If shuffle is on, we need to (1) get a random song that
# (2) accounts for weighting. This line does both.
self.song = self.shuffles[random.randrange(len(self.shuffles))]
else:
# Nice and easy, just get the next song...
self.song += 1
# But wait! need to make sure it exists!
if self.song >= len(self.loop):
# It doesn't, so start over at the beginning.
self.song = 0
# Set flag if we have the same song as we had before.
self.dif_song = s != self.song
# Reset the position within the metasong
self.pos = 0
def xml(self, url, method='get', params=None, data=None):
"""
Request the given URL and return the response parsed as XML.
:type url: str
:param url: API
:type method: str
:param method: HTTP METHOD
:type params: dict
:param params: query
:type data: dict
:param data: body
:rtype: html.HtmlElement
:return: the parsed XML document
"""
r = self.req(url, method, params, data)
# required to avoid encoding errors caused by utf8mb4 content
return self.to_xml(r.content, base_url=r.url)
def complete_xml_element(self, xmlnode, doc):
"""Complete the XML node with `self` content.
:Parameters:
- `xmlnode`: XML node with the element being built. It has already
right name and namespace, but no attributes or content.
- `doc`: document to which the element belongs.
:Types:
- `xmlnode`: `libxml2.xmlNode`
- `doc`: `libxml2.xmlDoc`"""
ns = xmlnode.ns()
if self.instructions is not None:
xmlnode.newTextChild(ns, "instructions", to_utf8(self.instructions))
if self.form:
self.form.as_xml(xmlnode, doc)
if self.remove:
xmlnode.newChild(ns, "remove", None)
else:
if self.registered:
xmlnode.newChild(ns, "registered", None)
for field in legacy_fields:
value = getattr(self, field)
if value is not None:
xmlnode.newTextChild(ns, field, to_utf8(value))
def __get_parsing_plan_for_multifile_children(self, obj_on_fs: PersistedObject, desired_type: Type[Any],
children_on_fs: Dict[str, PersistedObject], logger: Logger) \
-> Dict[str, Any]:
"""
Simply inspects the required type to find the names and types of its constructor arguments.
Then relies on the inner ParserFinder to parse each of them.
:param obj_on_fs:
:param desired_type:
:param children_on_fs:
:param logger:
:return:
"""
# -- (a) collect pep-484 information in the class constructor to be able to understand what is required
constructor_args_types_and_opt = get_constructor_attributes_types(desired_type)
# -- (b) plan to parse each attribute required by the constructor
children_plan = dict() # results will be put in this object
# --use sorting in order to lead to reproducible results in case of multiple errors
for attribute_name, att_desc in sorted(constructor_args_types_and_opt.items()):
attribute_is_mandatory = att_desc[1]
attribute_type = att_desc[0]
# get the child
if attribute_name in children_on_fs.keys():
child_on_fs = children_on_fs[attribute_name]
# find a parser
t, parser_found = self.parser_finder.build_parser_for_fileobject_and_desiredtype(child_on_fs,
attribute_type,
logger=logger)
# create a parsing plan
children_plan[attribute_name] = parser_found.create_parsing_plan(t, child_on_fs,
logger=logger, _main_call=False)
else:
if attribute_is_mandatory:
raise MissingMandatoryAttributeFiles.create(obj_on_fs, desired_type, attribute_name)
else:
# we don't care: optional attribute
# don't use warning since it does not show up nicely
msg = 'NOT FOUND - This optional constructor attribute for type ' \
+ get_pretty_type_str(desired_type) + ' was not found on file system, but this may be normal'\
' - this message is displayed \'just in case\'.'
if logger.isEnabledFor(DEBUG):
logger.warning('(B) ' + obj_on_fs.get_pretty_child_location(attribute_name,
blank_parent_part=True) + ': '
+ msg)
else:
logger.warning('WARNING parsing [{loc}] as a [{typ}]: optional constructor attribute [{att}] '
'not found on file system. This may be normal - this message is displayed \'just'
' in case\'.'.format(
loc=obj_on_fs.get_pretty_location(blank_parent_part=False, append_file_ext=False),
typ=get_pretty_type_str(desired_type),
att=attribute_name))
return children_plan
def anomalyGetLabels(self, start, end):
"""
Get labels from the anomaly classifier within this model.
:param start: (int) index to start getting labels
:param end: (int) index to end getting labels
"""
return self._getAnomalyClassifier().getSelf().getLabels(start, end)
def get_aux_files(basename):
"""
Look for and return all the aux files that are associated with this filename.
Will look for:
background (_bkg.fits)
rms (_rms.fits)
mask (.mim)
catalogue (_comp.fits)
psf map (_psf.fits)
Returns filenames if they exist, or None where they do not.
Parameters
----------
basename : str
The name/path of the input image.
Returns
-------
aux : dict
Dict of filenames or None with keys (bkg, rms, mask, cat, psf)
"""
base = os.path.splitext(basename)[0]
files = {"bkg": base + "_bkg.fits",
"rms": base + "_rms.fits",
"mask": base + ".mim",
"cat": base + "_comp.fits",
"psf": base + "_psf.fits"}
for k in files.keys():
if not os.path.exists(files[k]):
files[k] = None
return files
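# Usage sketch (hypothetical filename): keys whose companion files are missing
# on disk come back as None.
aux = get_aux_files("image.fits")
print(aux["bkg"])  # "image_bkg.fits" if that file exists, otherwise None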
def cmd(str, print_ret=False, usr_pwd=None, run=True):
"""
Executes a command and throws an exception on error.
in:
str - command
print_ret - print command return
usr_pwd - execute command as another user (user_name, password)
run - really execute command?
out:
returns the command output
"""
if usr_pwd:
str = 'echo {} | sudo -u {} {} '.format(usr_pwd[1], usr_pwd[0], str)
print(' [>] {}'.format(str))
if run:
err, ret = commands.getstatusoutput(str)
else:
err = None
ret = None
if err:
print(' [x] {}'.format(ret))
raise Exception(ret)
if ret and print_ret:
lines = ret.split('\n')
for line in lines:
print(' [<] {}'.format(line))
return ret
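# Usage sketch (Python 2 only, since the `commands` module was removed in
# Python 3):
out = cmd('uname -a', print_ret=True)  # prints " [>] uname -a", then " [<] ..." lines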
def _wait(self, objects, attr, value, wait_interval=None, wait_time=None):
r"""
Calls the ``fetch`` method of each object in ``objects`` periodically
until the ``attr`` attribute of each one equals ``value``, yielding the
final state of each object as soon as it satisfies the condition.
If ``wait_time`` is exceeded, a `WaitTimeoutError` (containing any
remaining in-progress objects) is raised.
If a `KeyboardInterrupt` is caught, any remaining objects are returned
immediately without waiting for completion.
.. versionchanged:: 0.2.0
Raises `WaitTimeoutError` on timeout
:param iterable objects: an iterable of `Resource`\ s with ``fetch``
methods
:param string attr: the attribute to watch
:param value: the value of ``attr`` to wait for
:param number wait_interval: how many seconds to sleep between
requests; defaults to :attr:`wait_interval` if not specified or
`None`
:param number wait_time: the total number of seconds after which the
method will raise an error if any objects have not yet completed,
or a negative number to wait indefinitely; defaults to
:attr:`wait_time` if not specified or `None`
:rtype: generator
:raises DOAPIError: if the API endpoint replies with an error
:raises WaitTimeoutError: if ``wait_time`` is exceeded
"""
objects = list(objects)
if not objects:
return
if wait_interval is None:
wait_interval = self.wait_interval
if wait_time is None:
    wait_time = self.wait_time
if wait_time is None or wait_time < 0:
    end_time = None
else:
    end_time = time() + wait_time
while end_time is None or time() < end_time:
loop_start = time()
next_objs = []
for o in objects:
obj = o.fetch()
if getattr(obj, attr, None) == value:
yield obj
else:
next_objs.append(obj)
objects = next_objs
if not objects:
break
loop_end = time()
time_left = wait_interval - (loop_end - loop_start)
if end_time is not None:
time_left = min(time_left, end_time - loop_end)
if time_left > 0:
try:
sleep(time_left)
except KeyboardInterrupt:
for o in objects:
yield o
return
if objects:
raise WaitTimeoutError(objects, attr, value, wait_interval,
wait_time)
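# A self-contained sketch of the polling pattern above, with a hypothetical
# FakeDroplet standing in for a Resource with a `fetch` method.
from time import sleep, time

class FakeDroplet(object):
    def __init__(self, ticks):
        self.ticks = ticks  # fetches remaining until the droplet is "active"
        self.status = "new"
    def fetch(self):
        self.ticks -= 1
        if self.ticks <= 0:
            self.status = "active"
        return self

def wait_all(objects, attr, value, interval=0.1, timeout=5.0):
    deadline = time() + timeout
    objects = list(objects)
    while objects and time() < deadline:
        objects = [o.fetch() for o in objects]
        for done in [o for o in objects if getattr(o, attr, None) == value]:
            yield done
        objects = [o for o in objects if getattr(o, attr, None) != value]
        if objects:
            sleep(interval)

for obj in wait_all([FakeDroplet(2), FakeDroplet(4)], "status", "active"):
    print(obj.status)  # "active", printed twice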
def next(self):
"""
Return the next available item. If there are no more items in the
local 'results' list, check if there is a 'next_uri' value. If so,
use that to get the next page of results from the API, and return
the first item from that query.
"""
try:
return self.results.pop(0)
except IndexError:
if self.next_uri is None:
raise StopIteration()
else:
if not self.next_uri:
self.results = self.list_method(marker=self.marker,
limit=self.limit, prefix=self.prefix)
else:
args = self.extra_args
self.results = self._list_method(self.next_uri, *args)
if self.results:
last_res = self.results[-1]
self.marker = getattr(last_res, self.marker_att)
# We should have more results.
try:
return self.results.pop(0)
except IndexError:
raise StopIteration()
def remove_xattr(self, path, xattr_name, **kwargs):
"""Remove an xattr of a file or directory."""
kwargs['xattr.name'] = xattr_name
response = self._put(path, 'REMOVEXATTR', **kwargs)
assert not response.content
def add_val(self, val):
"""add value in form of dict"""
if not isinstance(val, type({})):
raise ValueError(type({}))
self.read()
self.config.update(val)
self.save()
def _can_parse(self, content_type):
'''Whether this navigator can parse the given content-type.
Checks that the content_type matches one of the types specified
in the 'Accept' header of the request, if supplied.
If not supplied, matches against the default'''
content_type, content_subtype, content_param = utils.parse_media_type(content_type)
for accepted in self.headers.get('Accept', self.DEFAULT_CONTENT_TYPE).split(','):
type, subtype, param = utils.parse_media_type(accepted)
# if either accepted_type or content_type do not
# contain a parameter section, then it will be
# optimistically ignored
matched = (type == content_type) \
and (subtype == content_subtype) \
and (param == content_param or not (param and content_param))
if matched:
return True
return False
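# A standalone sketch of the Accept-header matching rule above, using a
# hypothetical parse_media_type stand-in (the real one lives in `utils`).
def parse_media_type(media_type):
    type_subtype, _, param = media_type.partition(';')
    type_, _, subtype = type_subtype.strip().partition('/')
    return type_, subtype, param.strip() or None

def can_parse(accept_header, content_type):
    ct_type, ct_subtype, ct_param = parse_media_type(content_type)
    for accepted in accept_header.split(','):
        a_type, a_subtype, a_param = parse_media_type(accepted)
        if (a_type == ct_type and a_subtype == ct_subtype
                and (a_param == ct_param or not (a_param and ct_param))):
            return True
    return False

print(can_parse('application/hal+json', 'application/hal+json'))  # True
print(can_parse('application/hal+json', 'text/html'))             # False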
def get_value_with_source(self, layer=None):
"""Returns a tuple of the value's source and the value at the specified
layer. If no layer is specified then the outer layer is used.
Parameters
----------
layer : str
Name of the layer to use. If None then the outermost where the value
exists will be used.
Raises
------
KeyError
If the value is not set for the specified layer
"""
if layer:
return self._values[layer]
for layer in reversed(self._layers):
if layer in self._values:
return self._values[layer]
raise KeyError(layer)
def info(self, *args, **kwargs):
"""Logs the line of the current thread owns the underlying lock, or
blocks."""
self.lock()
try:
return logger.info(*args, **kwargs)
finally:
self.unlock()
def group_by(resources, key):
"""Return a mapping of key value to resources with the corresponding value.
Key may be specified as dotted form for nested dictionary lookup
"""
resource_map = {}
parts = key.split('.')
for r in resources:
v = r
for k in parts:
v = v.get(k)
if not isinstance(v, dict):
break
resource_map.setdefault(v, []).append(r)
return resource_map
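# Usage sketch with a dotted key for nested dictionary lookup:
resources = [
    {"id": 1, "Tags": {"env": "prod"}},
    {"id": 2, "Tags": {"env": "dev"}},
    {"id": 3, "Tags": {"env": "prod"}},
]
print(group_by(resources, "Tags.env"))
# {'prod': [{'id': 1, ...}, {'id': 3, ...}], 'dev': [{'id': 2, ...}]}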
def build(self):
"""Builds the `HelicalHelix`."""
helical_helix = Polypeptide()
primitive_coords = self.curve_primitive.coordinates
helices = [Helix.from_start_and_end(start=primitive_coords[i],
end=primitive_coords[i + 1],
helix_type=self.minor_helix_type,
aa=1)
for i in range(len(primitive_coords) - 1)]
residues_per_turn = self.minor_residues_per_turn(
minor_repeat=self.minor_repeat)
if residues_per_turn == 0:
residues_per_turn = _helix_parameters[self.minor_helix_type][0]
if self.minor_handedness == 'l':
residues_per_turn *= -1
# initial phi_c_alpha value calculated using the first Helix in helices.
if self.orientation != -1:
initial_angle = dihedral(numpy.array([0, 0, 0]),
primitive_coords[0],
primitive_coords[1],
helices[0][0]['CA'])
else:
initial_angle = dihedral(
numpy.array([0, 0, primitive_coords[0][2]]),
primitive_coords[0],
numpy.array([primitive_coords[0][0],
primitive_coords[0][1], primitive_coords[1][2]]),
helices[0][0]['CA'])
# angle required to achieve desired phi_c_alpha value of self.phi_c_alpha.
addition_angle = self.phi_c_alpha - initial_angle
for i, h in enumerate(helices):
angle = (i * (360.0 / residues_per_turn)) + addition_angle
h.rotate(angle=angle, axis=h.axis.unit_tangent,
point=h.helix_start)
helical_helix.extend(h)
helical_helix.relabel_all()
self._monomers = helical_helix._monomers[:]
for monomer in self._monomers:
monomer.ampal_parent = self
return
def all_subslices(itr):
""" generates every possible slice that can be generated from an iterable """
assert iterable(itr), 'generators.all_subslices only accepts iterable arguments, not {}'.format(itr)
if not hasattr(itr, '__len__'): # if itr isnt materialized, make it a deque
itr = deque(itr)
len_itr = len(itr)
for start,_ in enumerate(itr):
d = deque()
for i in islice(itr, start, len_itr):  # grow this slice one element at a time
d.append(i)
yield tuple(d)
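# Usage sketch: every contiguous slice, grown one element at a time from each
# starting index.
print(list(all_subslices([1, 2, 3])))
# [(1,), (1, 2), (1, 2, 3), (2,), (2, 3), (3,)]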
def fill_x509_data(self, x509_data):
"""
Fills the X509Data Node
:param x509_data: X509Data Node
:type x509_data: lxml.etree.Element
:return: None
"""
x509_issuer_serial = x509_data.find(
'ds:X509IssuerSerial', namespaces=constants.NS_MAP
)
if x509_issuer_serial is not None:
self.fill_x509_issuer_name(x509_issuer_serial)
x509_crl = x509_data.find('ds:X509CRL', namespaces=constants.NS_MAP)
if x509_crl is not None and self.crl is not None:
x509_crl.text = base64.b64encode(
self.crl.public_bytes(serialization.Encoding.DER)
)
x509_subject = x509_data.find(
'ds:X509SubjectName', namespaces=constants.NS_MAP
)
if x509_subject is not None:
x509_subject.text = get_rdns_name(self.x509.subject.rdns)
x509_ski = x509_data.find('ds:X509SKI', namespaces=constants.NS_MAP)
if x509_ski is not None:
x509_ski.text = base64.b64encode(
self.x509.extensions.get_extension_for_oid(
ExtensionOID.SUBJECT_KEY_IDENTIFIER
).value.digest)
x509_certificate = x509_data.find(
'ds:X509Certificate', namespaces=constants.NS_MAP
)
if x509_certificate is not None:
s = base64.b64encode(
self.x509.public_bytes(encoding=serialization.Encoding.DER)
)
x509_certificate.text = b64_print(s)
def decode(input, fallback_encoding, errors='replace'):
"""
Decode a single string.
:param input: A byte string
:param fallback_encoding:
An :class:`Encoding` object or a label string.
The encoding to use if :obj:`input` does not have a BOM.
:param errors: Type of error handling. See :func:`codecs.register`.
:raises: :exc:`~exceptions.LookupError` for an unknown encoding label.
:return:
A ``(output, encoding)`` tuple of a Unicode string
and an :obj:`Encoding`.
"""
# Fail early if `encoding` is an invalid label.
fallback_encoding = _get_encoding(fallback_encoding)
bom_encoding, input = _detect_bom(input)
encoding = bom_encoding or fallback_encoding
return encoding.codec_info.decode(input, errors)[0], encoding
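# Usage sketch, assuming the returned Encoding object exposes a ``name``
# attribute: a UTF-8 BOM overrides the fallback encoding.
output, encoding = decode(b'\xef\xbb\xbfcaf\xc3\xa9', 'latin1')
print(output, encoding.name)  # café utf-8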
def _process_event(self, event):
""" Extend event object with User and Channel objects """
if event.get('user'):
event.user = self.lookup_user(event.get('user'))
if event.get('channel'):
event.channel = self.lookup_channel(event.get('channel'))
if self.user.id in event.mentions:
event.mentions_me = True
event.mentions = [ self.lookup_user(uid) for uid in event.mentions ]
return event
def merge_wavelengths(waveset1, waveset2, threshold=1e-12):
"""Return the union of the two sets of wavelengths using
:func:`numpy.union1d`.
The merged wavelengths may sometimes contain numbers which are nearly
equal but differ at levels as small as 1e-14. Having values this
close together can cause problems down the line. So, here we test
whether any such small differences are present, with a small
difference defined as less than ``threshold``. If a small
difference is present, the lower of the too-close pair is removed.
Parameters
----------
waveset1, waveset2 : array-like or `None`
Wavelength values, assumed to be in the same unit already.
Also see :func:`~synphot.models.get_waveset`.
threshold : float, optional
Merged wavelength values are considered "too close together"
when the difference is smaller than this number.
The default is 1e-12.
Returns
-------
out_wavelengths : array-like or `None`
Merged wavelengths. `None` if undefined.
"""
if waveset1 is None and waveset2 is None:
out_wavelengths = None
elif waveset1 is not None and waveset2 is None:
out_wavelengths = waveset1
elif waveset1 is None and waveset2 is not None:
out_wavelengths = waveset2
else:
out_wavelengths = np.union1d(waveset1, waveset2)
delta = out_wavelengths[1:] - out_wavelengths[:-1]
i_good = np.where(delta > threshold)
# Remove "too close together" duplicates
if len(i_good[0]) < delta.size:
out_wavelengths = np.append(
out_wavelengths[i_good], out_wavelengths[-1])
return out_wavelengths
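# Usage sketch: values closer together than the threshold collapse into one.
import numpy as np

w1 = np.array([1.0, 2.0, 3.0])
w2 = np.array([2.0 + 1e-14, 4.0])
print(merge_wavelengths(w1, w2))  # [1. 2. 3. 4.] -- the too-close pair is merged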
def update(self):
"""
Connect to GitHub API endpoint specified by `_apicall_parameters()`,
postprocess the result using `_apiresult_postprocess()` and trigger
a cache update if the API call was successful.
If an error occurs, cache the empty result generated by
`_apiresult_error()`. Additionally, set up retrying after a certain
time.
Return `True` if the API call was successful, `False` otherwise.
Call this method directly if you want to invalidate the current cache.
Otherwise, just call `data()`, which will automatically call `update()`
if required.
"""
result = self.api.github_api(*self._apicall_parameters())
if result is None:
# an error occurred, try again after BACKOFF
self._next_update = datetime.now() + timedelta(seconds=self.BACKOFF)
# assume an empty result until the error disappears
self._cached_result = self._apiresult_error()
else:
# request successful, cache does not expire
self._next_update = None
# Write the new result into self._cached_result to be picked up by
# _data on `del self._data`.
self._cached_result = self._apiresult_postprocess(result)
# Don't `del self._data` if it has never been cached, that would create
# ugly database entries in the cache table.
if not self._first_lookup:
del self._data
else:
self._first_lookup = False
# signal success or error
return result is not None
def click(self, focus=None, sleep_interval=None):
"""
Perform the click action on the UI element(s) represented by the UI proxy. If this UI proxy represents a set of
UI elements, the first one in the set is clicked and the anchor point of the UI element is used as the default
one. It is also possible to click at an offset point by providing the ``focus`` argument.
See ``CoordinateSystem`` for more details.
Args:
focus (2-:obj:`tuple`/2-:obj:`list`/:obj:`str`): an offset point (x, y) from the top left corner of the UI
element(s), values must be in range of 0~1. This argument can be also specified by 'anchor' or 'center'.
'Center' means to click the center of bounding box of UI element.
sleep_interval: number of seconds to wait after this action. Default is None which is the default sleep
interval. This value can be configured by Poco initialization. See configuration at poco
:py:class:`initialization <poco.pocofw.Poco>` for more details.
Raises:
PocoNoSuchNodeException: raised when the UI element does not exist
"""
focus = focus or self._focus or 'anchor'
pos_in_percentage = self.get_position(focus)
self.poco.pre_action('click', self, pos_in_percentage)
ret = self.poco.click(pos_in_percentage)
if sleep_interval:
time.sleep(sleep_interval)
else:
self.poco.wait_stable()
self.poco.post_action('click', self, pos_in_percentage)
return ret
def register_message_handler(self, callback, *custom_filters, commands=None, regexp=None, content_types=None,
state=None, run_task=None, **kwargs):
"""
Register handler for message
.. code-block:: python3
# This handler works only if state is None (by default).
dp.register_message_handler(cmd_start, commands=['start', 'about'])
dp.register_message_handler(entry_point, commands=['setup'])
# This handler works only if current state is "first_step"
dp.register_message_handler(step_handler_1, state="first_step")
# If you want to handle all states by one handler, use `state="*"`.
dp.register_message_handler(cancel_handler, commands=['cancel'], state="*")
dp.register_message_handler(cancel_handler, lambda msg: msg.text.lower() == 'cancel', state="*")
:param callback:
:param commands: list of commands
:param regexp: REGEXP
:param content_types: List of content types.
:param custom_filters: list of custom filters
:param kwargs:
:param state:
:return: decorated function
"""
filters_set = self.filters_factory.resolve(self.message_handlers,
*custom_filters,
commands=commands,
regexp=regexp,
content_types=content_types,
state=state,
**kwargs)
self.message_handlers.register(self._wrap_async_task(callback, run_task), filters_set)
def get_class(kls):
"""
:param kls - string of fully identified starter function or starter method path
for instance:
- workers.abstract_worker.AbstractWorker.start
- workers.example_script_worker.main
:return tuple (type, object, starter)
for instance:
- (FunctionType, <function_main>, None)
- (type, <Class_...>, 'start')
"""
parts = kls.split('.')
try:
# First, try to import module hosting starter function
module = '.'.join(parts[:-1])
m = __import__(module)
except ImportError:
# Alternatively, try to import module hosting Class with a starter method
module = '.'.join(parts[:-2])
m = __import__(module)
t = None
starter = None
for i in range(1, len(parts)):
comp = parts[i]
starter = parts[i:]
m = getattr(m, comp)
if isinstance(m, class_types):
t = type
starter = None if len(parts[i:]) == 1 else '.'.join(parts[i + 1:])
break
if isinstance(m, types.FunctionType):
t = types.FunctionType
starter = None
break
return t, m, starter
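# Usage sketch with a plain function path (no class in the chain):
import os
import types

t, obj, starter = get_class('os.path.join')
print(t is types.FunctionType)  # True
print(obj is os.path.join)      # True
print(starter)                  # None -- a plain function has no starter method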
def getCocktailSum(e0, e1, eCocktail, uCocktail):
"""get the cocktail sum for a given data bin range"""
# get mask and according indices
mask = (eCocktail >= e0) & (eCocktail <= e1)
# data bin range wider than single cocktail bin
if np.any(mask):
idx = getMaskIndices(mask)
# determine coinciding flags
eCl, eCu = eCocktail[idx[0]], eCocktail[idx[1]]
not_coinc_low, not_coinc_upp = (eCl != e0), (eCu != e1)
# get cocktail sum in data bin (always w/o last bin)
uCocktailSum = fsum(uCocktail[mask[:-1]][:-1])
logging.debug(' sum: {}'.format(uCocktailSum))
# get correction for non-coinciding edges
if not_coinc_low:
eCl_bw = eCl - eCocktail[idx[0]-1]
corr_low = (eCl - e0) / eCl_bw
abs_corr_low = float(corr_low) * uCocktail[idx[0]-1]
uCocktailSum += abs_corr_low
logging.debug((' low: %g == %g -> %g (%g) -> %g -> {} -> {}' % (
e0, eCl, eCl - e0, eCl_bw, corr_low
)).format(abs_corr_low, uCocktailSum))
if not_coinc_upp:
if idx[1]+1 < len(eCocktail):
eCu_bw = eCocktail[idx[1]+1] - eCu
corr_upp = (e1 - eCu) / eCu_bw
abs_corr_upp = float(corr_upp) * uCocktail[idx[1]]
else:# catch last index (quick fix!)
abs_corr_upp = eCu_bw = corr_upp = 0
uCocktailSum += abs_corr_upp
logging.debug((' upp: %g == %g -> %g (%g) -> %g -> {} -> {}' % (
e1, eCu, e1 - eCu, eCu_bw, corr_upp
)).format(abs_corr_upp, uCocktailSum))
else:
mask = (eCocktail >= e0)
idx = getMaskIndices(mask) # only use first index
# catch if already at last index
if idx[0] == idx[1] and idx[0] == len(eCocktail)-1:
corr = (e1 - e0) / (eCocktail[idx[0]] - eCocktail[idx[0]-1])
uCocktailSum = float(corr) * uCocktail[idx[0]-1]
else: # default case
corr = (e1 - e0) / (eCocktail[idx[0]+1] - eCocktail[idx[0]])
uCocktailSum = float(corr) * uCocktail[idx[0]]
logging.debug(' sum: {}'.format(uCocktailSum))
return uCocktailSum
def services(self):
""" returns the services in the current folder """
self._services = []
params = {
"f" : "json"
}
json_dict = self._get(url=self._currentURL,
param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
if "services" in json_dict.keys():
for s in json_dict['services']:
uURL = self._currentURL + "/%s.%s" % (s['serviceName'], s['type'])
self._services.append(
AGSService(url=uURL,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
)
return self._services
def find_donor_catchments(self, include_subject_catchment='auto'):
"""
Find list of suitable donor catchments, ranked by hydrological similarity distance measure. This method is
implicitly called when calling the :meth:`.growth_curve` method unless the attribute :attr:`.donor_catchments`
is set manually.
The results are stored in :attr:`.donor_catchments`. The (list of)
:class:`floodestimation.entities.Catchment` will have an additional attribute :attr:`similarity_dist`.
:param include_subject_catchment: - `auto`: include subject catchment if suitable for pooling and if urbext2000
< 0.03
- `force`: always include subject catchment
- `exclude`: do not include the subject catchment
:type include_subject_catchment: str
"""
# Only if we have access to db with gauged catchment data
if self.gauged_cachments:
self.donor_catchments = self.gauged_cachments. \
most_similar_catchments(subject_catchment=self.catchment,
similarity_dist_function=lambda c1, c2: self._similarity_distance(c1, c2),
include_subject_catchment=include_subject_catchment)
else:
self.donor_catchments = []
def check_bcr_catchup(self):
"""we're exceeding data request speed vs receive + process"""
logger.debug(f"Checking if BlockRequests has caught up {len(BC.Default().BlockRequests)}")
# test, perhaps there's some race condition between slow startup and throttle sync, otherwise blocks will never go down
for peer in self.Peers: # type: NeoNode
peer.stop_block_loop(cancel=False)
peer.stop_peerinfo_loop(cancel=False)
peer.stop_header_loop(cancel=False)
if len(BC.Default().BlockRequests) > 0:
for peer in self.Peers:
peer.keep_alive()
peer.health_check(HEARTBEAT_BLOCKS)
peer_bcr_len = len(peer.myblockrequests)
# if a peer has cleared its queue then reset heartbeat status to avoid timing out when resuming from "check_bcr" if there's 1 or more really slow peer(s)
if peer_bcr_len == 0:
peer.start_outstanding_data_request[HEARTBEAT_BLOCKS] = 0
print(f"{peer.prefix} request count: {peer_bcr_len}")
if peer_bcr_len == 1:
next_hash = BC.Default().GetHeaderHash(self.CurrentBlockheight + 1)
print(f"{peer.prefix} {peer.myblockrequests} {next_hash}")
else:
# we're done catching up. Stop own loop and restart peers
self.stop_check_bcr_loop()
self.check_bcr_loop = None
logger.debug("BlockRequests have caught up...resuming sync")
for peer in self.Peers:
peer.ProtocolReady() # this starts all loops again
# give a little bit of time between startup of peers
time.sleep(2)
|
Check whether we're requesting blocks faster than we can receive and process them.
|
def _sleep(self, seconds):
"""
Sleep between requests, but don't force asynchronous code to wait
:param seconds: The number of seconds to sleep
:return: None
"""
for _ in range(int(seconds)):
if not self.force_stop:
sleep(1)
|
Sleep between requests, but don't force asynchronous code to wait
:param seconds: The number of seconds to sleep
:return: None
|
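A minimal sketch of how this interruptible sleep behaves in context; the Worker class here is hypothetical, carrying only the force_stop flag the method relies on:

from time import sleep

class Worker:
    def __init__(self):
        self.force_stop = False

    def _sleep(self, seconds):
        # Sleep one second at a time so that setting force_stop
        # from another thread cuts the wait short at the next tick.
        for _ in range(int(seconds)):
            if not self.force_stop:
                sleep(1)

w = Worker()
w._sleep(2)       # waits roughly two seconds
w.force_stop = True
w._sleep(60)      # iterates but never sleeps, so it returns almost immediately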
def background_at_centroid(self):
"""
The value of the ``background`` at the position of the source
centroid.
        Background values at fractional positions are determined
        using bilinear interpolation.
"""
from scipy.ndimage import map_coordinates
if self._background is not None:
# centroid can still be NaN if all data values are <= 0
if (self._is_completely_masked or
np.any(~np.isfinite(self.centroid))):
return np.nan * self._background_unit # unit for table
else:
value = map_coordinates(self._background,
[[self.ycentroid.value],
[self.xcentroid.value]], order=1,
mode='nearest')[0]
return value * self._background_unit
else:
return None
|
The value of the ``background`` at the position of the source
centroid.
Background values at fractional positions are determined
using bilinear interpolation.
|
def do_lzop_get(creds, url, path, decrypt, do_retry=True):
"""
Get and decompress a S3 URL
This streams the content directly to lzop; the compressed version
is never stored on disk.
"""
assert url.endswith('.lzo'), 'Expect an lzop-compressed file'
def log_wal_fetch_failures_on_error(exc_tup, exc_processor_cxt):
def standard_detail_message(prefix=''):
return (prefix + ' There have been {n} attempts to fetch wal '
'file {url} so far.'.format(n=exc_processor_cxt, url=url))
typ, value, tb = exc_tup
del exc_tup
# Screen for certain kinds of known-errors to retry from
if issubclass(typ, socket.error):
socketmsg = value[1] if isinstance(value, tuple) else value
logger.info(
msg='Retrying fetch because of a socket error',
detail=standard_detail_message(
"The socket error's message is '{0}'."
.format(socketmsg)))
elif (issubclass(typ, boto.exception.S3ResponseError) and
value.error_code == 'RequestTimeTooSkewed'):
logger.info(msg='Retrying fetch because of a Request Skew time',
detail=standard_detail_message())
else:
# For all otherwise untreated exceptions, report them as a
# warning and retry anyway -- all exceptions that can be
# justified should be treated and have error messages
# listed.
logger.warning(
msg='retrying WAL file fetch from unexpected exception',
detail=standard_detail_message(
'The exception type is {etype} and its value is '
'{evalue} and its traceback is {etraceback}'
.format(etype=typ, evalue=value,
etraceback=''.join(traceback.format_tb(tb)))))
# Help Python GC by resolving possible cycles
del tb
def download():
with files.DeleteOnError(path) as decomp_out:
key = _uri_to_key(creds, url)
with get_download_pipeline(PIPE, decomp_out.f, decrypt) as pl:
g = gevent.spawn(write_and_return_error, key, pl.stdin)
try:
# Raise any exceptions from write_and_return_error
exc = g.get()
if exc is not None:
raise exc
except boto.exception.S3ResponseError as e:
if e.status == 404:
# Do not retry if the key not present, this
# can happen under normal situations.
pl.abort()
logger.info(
msg=('could no longer locate object while '
'performing wal restore'),
detail=('The absolute URI that could not be '
'located is {url}.'.format(url=url)),
hint=('This can be normal when Postgres is trying '
'to detect what timelines are available '
'during restoration.'))
decomp_out.remove_regardless = True
return False
                    elif e.error_code == 'ExpiredToken':
# Do not retry if STS token has expired. It can never
# succeed in the future anyway.
pl.abort()
logger.info(
msg=('could no longer authenticate while '
'performing wal restore'),
detail=('The absolute URI that could not be '
'accessed is {url}.'.format(url=url)),
hint=('This can be normal when using STS '
'credentials.'))
decomp_out.remove_regardless = True
return False
else:
raise
logger.info(
msg='completed download and decompression',
detail='Downloaded and decompressed "{url}" to "{path}"'
.format(url=url, path=path))
return True
if do_retry:
download = retry(
retry_with_count(log_wal_fetch_failures_on_error))(download)
return download()
|
Get and decompress a S3 URL
This streams the content directly to lzop; the compressed version
is never stored on disk.
|
def render(self, progress, width=None, status=None):
"""Render the widget."""
results = [widget.render(progress, width=self._widget_lengths[i], status=status)
for i, widget in enumerate(self._widgets)]
if self._file_mode:
res = ""
for i, result in enumerate(results):
res += result.rendered
if result.length < self._widget_lengths[i] and progress < 1: break
res += " " if i < len(results) - 1 else ""
rendered_str = res[len(self._rendered):]
self._rendered = res
else:
rendered_str = " ".join(r.rendered for r in results)
if self._to_render:
rendered_str = self._to_render + rendered_str
self._to_render = None
next_progress = min(r.next_progress for r in results)
next_time = min(r.next_time for r in results)
return RenderResult(rendered_str, next_progress=next_progress, next_time=next_time)
|
Render the widget.
|
def p_andnode_expression(self, t):
'''andnode_expression : LB identlist RB '''
self.accu.add(Term('vertex', ["and(\""+t[2]+"\")"]))
t[0] = "and(\""+t[2]+"\")"
|
andnode_expression : LB identlist RB
|
def extract_number_oscillations(self, index, amplitude_threshold):
"""!
@brief Extracts number of oscillations of specified oscillator.
@param[in] index (uint): Index of oscillator whose dynamic is considered.
@param[in] amplitude_threshold (double): Amplitude threshold when oscillation is taken into account, for example,
when oscillator amplitude is greater than threshold then oscillation is incremented.
@return (uint) Number of oscillations of specified oscillator.
"""
        return pyclustering.utils.extract_number_oscillations(self.__amplitude, index, amplitude_threshold)
|
!
@brief Extracts number of oscillations of specified oscillator.
@param[in] index (uint): Index of oscillator whose dynamic is considered.
@param[in] amplitude_threshold (double): Amplitude threshold when oscillation is taken into account, for example,
when oscillator amplitude is greater than threshold then oscillation is incremented.
@return (uint) Number of oscillations of specified oscillator.
|
def get_authoryear_from_entry(entry, paren=False):
"""Get and format author-year text from a pybtex entry to emulate
natbib citations.
Parameters
----------
entry : `pybtex.database.Entry`
A pybtex bibliography entry.
    paren : `bool`, optional
Whether to add parentheses around the year. Default is `False`.
Returns
-------
authoryear : `str`
The author-year citation text.
"""
def _format_last(person):
"""Reformat a pybtex Person into a last name.
Joins all parts of a last name and strips "{}" wrappers.
"""
return ' '.join([n.strip('{}') for n in person.last_names])
if len(entry.persons['author']) > 0:
# Grab author list
persons = entry.persons['author']
elif len(entry.persons['editor']) > 0:
# Grab editor list
persons = entry.persons['editor']
else:
raise AuthorYearError
try:
year = entry.fields['year']
except KeyError:
raise AuthorYearError
if paren and len(persons) == 1:
template = '{author} ({year})'
return template.format(author=_format_last(persons[0]),
year=year)
elif not paren and len(persons) == 1:
template = '{author} {year}'
return template.format(author=_format_last(persons[0]),
year=year)
elif paren and len(persons) == 2:
template = '{author1} and {author2} ({year})'
return template.format(author1=_format_last(persons[0]),
author2=_format_last(persons[1]),
year=year)
elif not paren and len(persons) == 2:
template = '{author1} and {author2} {year}'
return template.format(author1=_format_last(persons[0]),
author2=_format_last(persons[1]),
year=year)
elif not paren and len(persons) > 2:
template = '{author} et al {year}'
return template.format(author=_format_last(persons[0]),
year=year)
elif paren and len(persons) > 2:
template = '{author} et al ({year})'
return template.format(author=_format_last(persons[0]),
year=year)
|
Get and format author-year text from a pybtex entry to emulate
natbib citations.
Parameters
----------
entry : `pybtex.database.Entry`
A pybtex bibliography entry.
paren : `bool`, optional
Whether to add parentheses around the year. Default is `False`.
Returns
-------
authoryear : `str`
The author-year citation text.
|
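A short usage sketch, assuming an in-memory entry built with the public pybtex.database API and that get_authoryear_from_entry is importable from the module above:

from pybtex.database import Entry, Person

entry = Entry('article', fields={'year': '2019'})
entry.persons['author'] = [Person('Doe, Jane'), Person('Roe, Richard')]

print(get_authoryear_from_entry(entry))              # Doe and Roe 2019
print(get_authoryear_from_entry(entry, paren=True))  # Doe and Roe (2019)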
def is_activated(self, images, augmenter, parents, default):
"""
Returns whether an augmenter may be executed.
Returns
-------
bool
If True, the augmenter may be executed. If False, it may not be executed.
"""
if self.activator is None:
return default
else:
return self.activator(images, augmenter, parents, default)
|
Returns whether an augmenter may be executed.
Returns
-------
bool
If True, the augmenter may be executed. If False, it may not be executed.
|
def OPTIONS(self, *args, **kwargs):
""" OPTIONS request """
return self._handle_api(self.API_OPTIONS, args, kwargs)
|
OPTIONS request
|
def get_remove_security_group_commands(self, sg_id, profile):
"""Commands for removing ACL from interface"""
return self._get_interface_commands(sg_id, profile, delete=True)
|
Commands for removing ACL from interface
|
def _get_hash(self, file_obj):
"""
Compute hash for the `file_obj`.
        Args:
file_obj (obj): File-like object with ``.write()`` and ``.seek()``.
Returns:
str: Hexdigest of the hash.
"""
size = 0
        hash_builder = self.hash_builder()
        for piece in self._get_file_iterator(file_obj):
            hash_builder.update(piece)
            size += len(piece)
        file_obj.seek(0)
        return "%s_%x" % (hash_builder.hexdigest(), size)
|
Compute hash for the `file_obj`.
Args:
file_obj (obj): File-like object with ``.write()`` and ``.seek()``.
Returns:
str: Hexdigest of the hash.
|
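A self-contained sketch of the same chunked-hashing pattern, with hashlib.md5 standing in for the instance's hash_builder and an assumed 4 KiB chunk size:

import hashlib
import io

def file_hash(file_obj, chunk_size=4096):
    # Stream the file in chunks, track the total size, then rewind
    # so the caller can re-read the file afterwards.
    builder = hashlib.md5()
    size = 0
    for piece in iter(lambda: file_obj.read(chunk_size), b''):
        builder.update(piece)
        size += len(piece)
    file_obj.seek(0)
    return "%s_%x" % (builder.hexdigest(), size)

print(file_hash(io.BytesIO(b"hello world")))  # 5eb63bbbe01eeed093cb22bb8f5acdc3_b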
def _EntriesGenerator(self):
"""Retrieves directory entries.
Since a directory can contain a vast number of entries using
a generator is more memory efficient.
Yields:
SQLiteBlobPathSpec: a path specification.
Raises:
AccessError: if the access to list the directory was denied.
BackEndError: if the directory could not be listed.
"""
table_name = getattr(self.path_spec, 'table_name', None)
column_name = getattr(self.path_spec, 'column_name', None)
if table_name and column_name:
if self._number_of_entries is None:
# Open the first entry to determine how many entries we have.
# TODO: change this when there is a move this to a central temp file
# manager. https://github.com/log2timeline/dfvfs/issues/92
path_spec = sqlite_blob_path_spec.SQLiteBlobPathSpec(
table_name=table_name, column_name=column_name, row_index=0,
parent=self.path_spec.parent)
sub_file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
        if not sub_file_entry:
self._number_of_entries = 0
else:
self._number_of_entries = sub_file_entry.GetNumberOfRows()
for row_index in range(0, self._number_of_entries):
yield sqlite_blob_path_spec.SQLiteBlobPathSpec(
table_name=table_name, column_name=column_name, row_index=row_index,
parent=self.path_spec.parent)
|
Retrieves directory entries.
Since a directory can contain a vast number of entries using
a generator is more memory efficient.
Yields:
SQLiteBlobPathSpec: a path specification.
Raises:
AccessError: if the access to list the directory was denied.
BackEndError: if the directory could not be listed.
|
def autoargs(include=None, # type: Union[str, Tuple[str]]
exclude=None, # type: Union[str, Tuple[str]]
f=DECORATED
):
"""
Defines a decorator with parameters, to automatically assign the inputs of a function to self PRIOR to executing
the function. In other words:
```
@autoargs
def myfunc(a):
print('hello')
```
will create the equivalent of
```
def myfunc(a):
self.a = a
print('hello')
```
Initial code from http://stackoverflow.com/questions/3652851/what-is-the-best-way-to-do-automatic-attribute-assignment-in-python-and-is-it-a#answer-3653049
:param include: a tuple of attribute names to include in the auto-assignment. If None, all arguments will be
included by default
:param exclude: a tuple of attribute names to exclude from the auto-assignment. In such case, include should be None
:return:
"""
return autoargs_decorate(f, include=include, exclude=exclude)
|
Defines a decorator with parameters, to automatically assign the inputs of a function to self PRIOR to executing
the function. In other words:
```
@autoargs
def myfunc(a):
print('hello')
```
will create the equivalent of
```
def myfunc(a):
self.a = a
print('hello')
```
Initial code from http://stackoverflow.com/questions/3652851/what-is-the-best-way-to-do-automatic-attribute-assignment-in-python-and-is-it-a#answer-3653049
:param include: a tuple of attribute names to include in the auto-assignment. If None, all arguments will be
included by default
:param exclude: a tuple of attribute names to exclude from the auto-assignment. In such case, include should be None
:return:
|
def deploy(remote, assets_to_s3):
""" To DEPLOY your application """
header("Deploying...")
if assets_to_s3:
for mod in get_deploy_assets2s3_list(CWD):
_assets2s3(mod)
remote_name = remote or "ALL"
print("Pushing application's content to remote: %s " % remote_name)
hosts = get_deploy_hosts_list(CWD, remote or None)
git_push_to_master(cwd=CWD, hosts=hosts, name=remote_name)
print("Done!")
|
To DEPLOY your application
|
def process_cli(log_level, mets, page_id, tasks):
"""
Process a series of tasks
"""
log = getLogger('ocrd.cli.process')
run_tasks(mets, log_level, page_id, tasks)
log.info("Finished")
|
Process a series of tasks
|
def setPololuProtocol(self):
"""
Set the pololu protocol.
"""
self._compact = False
self._log and self._log.debug("Pololu protocol has been set.")
|
Set the pololu protocol.
|
def getManagers(self):
"""Return all managers of responsible departments
"""
manager_ids = []
manager_list = []
for department in self.getDepartments():
manager = department.getManager()
if manager is None:
continue
manager_id = manager.getId()
if manager_id not in manager_ids:
manager_ids.append(manager_id)
manager_list.append(manager)
return manager_list
|
Return all managers of responsible departments
|
def _normalize(self, flags):
"""Take any format of flags and turn it into a hex string."""
norm = None
if isinstance(flags, MessageFlags):
norm = flags.bytes
elif isinstance(flags, bytearray):
norm = binascii.hexlify(flags)
elif isinstance(flags, int):
norm = bytes([flags])
elif isinstance(flags, bytes):
norm = binascii.hexlify(flags)
elif isinstance(flags, str):
flags = flags[0:2]
norm = binascii.hexlify(binascii.unhexlify(flags.lower()))
elif flags is None:
norm = None
else:
_LOGGER.warning('MessageFlags with unknown type %s: %r',
type(flags), flags)
return norm
|
Take any format of flags and turn it into a hex string.
|
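A condensed, self-contained sketch of the branching above for plain inputs (the library's MessageFlags type is omitted here); note that, exactly as in the original, the int branch yields a raw byte while the other branches yield hex:

import binascii

def normalize_flags(flags):
    # Reduce int / bytes / bytearray / str inputs to a common form.
    if isinstance(flags, int):
        return bytes([flags])
    if isinstance(flags, (bytes, bytearray)):
        return binascii.hexlify(flags)
    if isinstance(flags, str):
        return binascii.hexlify(binascii.unhexlify(flags[0:2].lower()))
    return None

print(normalize_flags(0x2a))     # b'*'
print(normalize_flags(b'\x2a'))  # b'2a'
print(normalize_flags('2A'))     # b'2a'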
def get(key, default=-1):
"""Backport support for original codes."""
if isinstance(key, int):
return Routing(key)
if key not in Routing._member_map_:
extend_enum(Routing, key, default)
return Routing[key]
|
Backport support for original codes.
|
def process_result_value(self, value, dialect):
"""
SQLAlchemy uses this to convert a string into a SourceLocation object.
We separate the fields by a |
"""
if value is None:
return None
        p = value.split("|")
        if p == ['']:
            # an empty string splits to [''], never []; guard it explicitly
            return None
        return SourceLocation(*map(int, p))
|
SQLAlchemy uses this to convert a string into a SourceLocation object.
We separate the fields by a |
|
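A round-trip sketch of the pipe-separated encoding, with a hypothetical three-field SourceLocation standing in for the real class:

from collections import namedtuple

# Hypothetical stand-in; the real SourceLocation's fields may differ.
SourceLocation = namedtuple('SourceLocation', 'line column offset')

def encode(loc):
    return "|".join(str(v) for v in loc)

def decode(value):
    if not value:
        return None
    return SourceLocation(*map(int, value.split("|")))

loc = SourceLocation(12, 4, 391)
assert decode(encode(loc)) == loc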
def validate(name, # type: str
value, # type: Any
enforce_not_none=True, # type: bool
equals=None, # type: Any
instance_of=None, # type: Union[Type, Tuple[Type]]
subclass_of=None, # type: Union[Type, Tuple[Type]]
is_in=None, # type: Container
subset_of=None, # type: Set
contains = None, # type: Union[Any, Iterable]
superset_of=None, # type: Set
min_value=None, # type: Any
min_strict=False, # type: bool
max_value=None, # type: Any
max_strict=False, # type: bool
length=None, # type: int
min_len=None, # type: int
min_len_strict=False, # type: bool
max_len=None, # type: int
max_len_strict=False, # type: bool
custom=None, # type: Callable[[Any], Any]
error_type=None, # type: Type[ValidationError]
help_msg=None, # type: str
**kw_context_args):
"""
A validation function for quick inline validation of `value`, with minimal capabilities:
* None handling: reject None (enforce_not_none=True, default), or accept None silently (enforce_not_none=False)
    * Type validation: `value` should be an instance of any of `instance_of` if provided
    * Value validation:
    * if `is_in` is provided, `value` should be in that set
* if `min_value` (resp. `max_value`) is provided, `value` should be greater than it. Comparison is not strict by
default and can be set to strict by setting `min_strict`, resp. `max_strict`, to `True`
* if `min_len` (resp. `max_len`) is provided, `len(value)` should be greater than it. Comparison is not strict by
default and can be set to strict by setting `min_len_strict`, resp. `max_len_strict`, to `True`
:param name: the applicative name of the checked value, that will be used in error messages
:param value: the value to check
:param enforce_not_none: boolean, default True. Whether to enforce that `value` is not None.
:param equals: an optional value to enforce.
:param instance_of: optional type(s) to enforce. If a tuple of types is provided it is considered alternate types: one
match is enough to succeed. If None, type will not be enforced
:param subclass_of: optional type(s) to enforce. If a tuple of types is provided it is considered alternate types: one
match is enough to succeed. If None, type will not be enforced
:param is_in: an optional set of allowed values.
:param subset_of: an optional superset for the variable
:param contains: an optional value that the variable should contain (value in variable == True)
:param superset_of: an optional subset for the variable
:param min_value: an optional minimum value
:param min_strict: if True, only values strictly greater than `min_value` will be accepted
:param max_value: an optional maximum value
:param max_strict: if True, only values strictly lesser than `max_value` will be accepted
:param length: an optional strict length
:param min_len: an optional minimum length
:param min_len_strict: if True, only values with length strictly greater than `min_len` will be accepted
:param max_len: an optional maximum length
:param max_len_strict: if True, only values with length strictly lesser than `max_len` will be accepted
:param custom: a custom base validation function or list of base validation functions to use. This is the same
    syntax as for valid8 decorators. A callable, a tuple(callable, help_msg_str), a tuple(callable, failure_type),
or a list of several such elements. Nested lists are supported and indicate an implicit `and_`. Tuples indicate
an implicit `_failure_raiser`. [mini_lambda](https://smarie.github.io/python-mini-lambda/) expressions can be
used instead of callables, they will be transformed to functions automatically.
:param error_type: a subclass of `ValidationError` to raise in case of validation failure. By default a
`ValidationError` will be raised with the provided `help_msg`
:param help_msg: an optional help message to be used in the raised error in case of validation failure.
:param kw_context_args: optional contextual information to store in the exception, and that may be also used
to format the help message
:return: nothing in case of success. Otherwise, raises a ValidationError
"""
# backwards compatibility
instance_of = instance_of or (kw_context_args.pop('allowed_types') if 'allowed_types' in kw_context_args else None)
is_in = is_in or (kw_context_args.pop('allowed_values') if 'allowed_values' in kw_context_args else None)
try:
# the following corresponds to an inline version of
# - _none_rejecter in base.py
# - gt/lt in comparables.py
# - is_in/contains/subset_of/superset_of/has_length/minlen/maxlen/is_in in collections.py
# - instance_of/subclass_of in types.py
# try (https://github.com/orf/inliner) to perform the inlining below automatically without code duplication ?
# > maybe not because quite dangerous (AST mod) and below we skip the "return True" everywhere for performance
#
# Another alternative: easy Cython compiling https://github.com/AlanCristhian/statically
# > but this is not py2 compliant
if value is None:
# inlined version of _none_rejecter in base.py
if enforce_not_none:
raise ValueIsNone(wrong_value=value)
# raise MissingMandatoryParameterException('Error, ' + name + '" is mandatory, it should be non-None')
# else do nothing and return
else:
if equals is not None:
if value != equals:
raise NotEqual(wrong_value=value, ref_value=equals)
if instance_of is not None:
assert_instance_of(value, instance_of)
if subclass_of is not None:
assert_subclass_of(value, subclass_of)
if is_in is not None:
# inlined version of is_in(allowed_values=allowed_values)(value) without 'return True'
if value not in is_in:
raise NotInAllowedValues(wrong_value=value, allowed_values=is_in)
if contains is not None:
# inlined version of contains(ref_value=contains)(value) without 'return True'
if contains not in value:
raise DoesNotContainValue(wrong_value=value, ref_value=contains)
if subset_of is not None:
# inlined version of is_subset(reference_set=subset_of)(value)
missing = value - subset_of
if len(missing) != 0:
raise NotSubset(wrong_value=value, reference_set=subset_of, unsupported=missing)
if superset_of is not None:
# inlined version of is_superset(reference_set=superset_of)(value)
missing = superset_of - value
if len(missing) != 0:
raise NotSuperset(wrong_value=value, reference_set=superset_of, missing=missing)
if min_value is not None:
# inlined version of gt(min_value=min_value, strict=min_strict)(value) without 'return True'
if min_strict:
if not value > min_value:
raise TooSmall(wrong_value=value, min_value=min_value, strict=True)
else:
if not value >= min_value:
raise TooSmall(wrong_value=value, min_value=min_value, strict=False)
if max_value is not None:
# inlined version of lt(max_value=max_value, strict=max_strict)(value) without 'return True'
if max_strict:
if not value < max_value:
raise TooBig(wrong_value=value, max_value=max_value, strict=True)
else:
if not value <= max_value:
raise TooBig(wrong_value=value, max_value=max_value, strict=False)
if length is not None:
# inlined version of has_length() without 'return True'
if len(value) != length:
raise WrongLength(wrong_value=value, ref_length=length)
if min_len is not None:
# inlined version of minlen(min_length=min_len, strict=min_len_strict)(value) without 'return True'
if min_len_strict:
if not len(value) > min_len:
raise TooShort(wrong_value=value, min_length=min_len, strict=True)
else:
if not len(value) >= min_len:
raise TooShort(wrong_value=value, min_length=min_len, strict=False)
if max_len is not None:
# inlined version of maxlen(max_length=max_len, strict=max_len_strict)(value) without 'return True'
if max_len_strict:
if not len(value) < max_len:
raise TooLong(wrong_value=value, max_length=max_len, strict=True)
else:
if not len(value) <= max_len:
raise TooLong(wrong_value=value, max_length=max_len, strict=False)
except Exception as e:
err = _QUICK_VALIDATOR._create_validation_error(name, value, validation_outcome=e, error_type=error_type,
help_msg=help_msg, **kw_context_args)
raise_(err)
if custom is not None:
# traditional custom validator
assert_valid(name, value, custom, error_type=error_type, help_msg=help_msg, **kw_context_args)
else:
# basic (and not enough) check to verify that there was no typo leading an argument to be put in kw_context_args
if error_type is None and help_msg is None and len(kw_context_args) > 0:
raise ValueError("Keyword context arguments have been provided but help_msg and error_type are not: {}"
"".format(kw_context_args))
|
A validation function for quick inline validation of `value`, with minimal capabilities:
* None handling: reject None (enforce_not_none=True, default), or accept None silently (enforce_not_none=False)
* Type validation: `value` should be an instance of any of `instance_of` if provided
* Value validation:
* if `is_in` is provided, `value` should be in that set
* if `min_value` (resp. `max_value`) is provided, `value` should be greater than it. Comparison is not strict by
default and can be set to strict by setting `min_strict`, resp. `max_strict`, to `True`
* if `min_len` (resp. `max_len`) is provided, `len(value)` should be greater than it. Comparison is not strict by
default and can be set to strict by setting `min_len_strict`, resp. `max_len_strict`, to `True`
:param name: the applicative name of the checked value, that will be used in error messages
:param value: the value to check
:param enforce_not_none: boolean, default True. Whether to enforce that `value` is not None.
:param equals: an optional value to enforce.
:param instance_of: optional type(s) to enforce. If a tuple of types is provided it is considered alternate types: one
match is enough to succeed. If None, type will not be enforced
:param subclass_of: optional type(s) to enforce. If a tuple of types is provided it is considered alternate types: one
match is enough to succeed. If None, type will not be enforced
:param is_in: an optional set of allowed values.
:param subset_of: an optional superset for the variable
:param contains: an optional value that the variable should contain (value in variable == True)
:param superset_of: an optional subset for the variable
:param min_value: an optional minimum value
:param min_strict: if True, only values strictly greater than `min_value` will be accepted
:param max_value: an optional maximum value
:param max_strict: if True, only values strictly lesser than `max_value` will be accepted
:param length: an optional strict length
:param min_len: an optional minimum length
:param min_len_strict: if True, only values with length strictly greater than `min_len` will be accepted
:param max_len: an optional maximum length
:param max_len_strict: if True, only values with length strictly lesser than `max_len` will be accepted
:param custom: a custom base validation function or list of base validation functions to use. This is the same
syntax as for valid8 decorators. A callable, a tuple(callable, help_msg_str), a tuple(callable, failure_type),
or a list of several such elements. Nested lists are supported and indicate an implicit `and_`. Tuples indicate
an implicit `_failure_raiser`. [mini_lambda](https://smarie.github.io/python-mini-lambda/) expressions can be
used instead of callables, they will be transformed to functions automatically.
:param error_type: a subclass of `ValidationError` to raise in case of validation failure. By default a
`ValidationError` will be raised with the provided `help_msg`
:param help_msg: an optional help message to be used in the raised error in case of validation failure.
:param kw_context_args: optional contextual information to store in the exception, and that may be also used
to format the help message
:return: nothing in case of success. Otherwise, raises a ValidationError
|
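A few usage examples, assuming this is valid8's inline validate and sticking to parameters documented in the signature above:

from valid8 import validate, ValidationError

validate('age', 21, instance_of=int, min_value=0)         # passes silently
validate('color', 'red', is_in={'red', 'green', 'blue'})  # passes silently

try:
    validate('age', -3, min_value=0, help_msg='age must be non-negative')
except ValidationError as err:
    print(err)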
def obj_to_md(self, file_path=None, title_columns=False,
quote_numbers=True):
"""
        This will return a str of a Markdown table.
:param title_columns: bool if True will title all headers
:param file_path: str of the path to the file to write to
:param quote_numbers: bool if True will quote numbers that are strings
:return: str
"""
return self.obj_to_mark_down(file_path=file_path,
title_columns=title_columns,
quote_numbers=quote_numbers)
|
This will return a str of a Markdown table.
:param title_columns: bool if True will title all headers
:param file_path: str of the path to the file to write to
:param quote_numbers: bool if True will quote numbers that are strings
:return: str
|
def unwind(self, values, backend, **kwargs):
'''Unwind expression by applying *values* to the abstract nodes.
The ``kwargs`` dictionary can contain data which can be used
to override values
'''
if not hasattr(self, "_unwind_value"):
self._unwind_value = self._unwind(values, backend, **kwargs)
return self._unwind_value
|
Unwind expression by applying *values* to the abstract nodes.
The ``kwargs`` dictionary can contain data which can be used
to override values
|
def remove_intra(M, contigs):
"""Remove intrachromosomal contacts
Given a contact map and a list attributing each position
to a given chromosome, set all contacts within each
chromosome or contig to zero. Useful to perform
calculations on interchromosomal contacts only.
Parameters
----------
M : array_like
The initial contact map
contigs : list or array_like
A 1D array whose value at index i reflect the contig
label of the row i in the matrix M. The length of
the array must be equal to the (identical) shape
value of the matrix.
Returns
-------
N : numpy.ndarray
The output contact map with no intrachromosomal contacts
"""
N = np.copy(M)
n = len(N)
assert n == len(contigs)
    # Naive implementation for now
for (i, j) in itertools.product(range(n), range(n)):
if contigs[i] == contigs[j]:
N[i, j] = 0
return N
|
Remove intrachromosomal contacts
Given a contact map and a list attributing each position
to a given chromosome, set all contacts within each
chromosome or contig to zero. Useful to perform
calculations on interchromosomal contacts only.
Parameters
----------
M : array_like
The initial contact map
contigs : list or array_like
A 1D array whose value at index i reflect the contig
label of the row i in the matrix M. The length of
the array must be equal to the (identical) shape
value of the matrix.
Returns
-------
N : numpy.ndarray
The output contact map with no intrachromosomal contacts
|
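A tiny worked example; the function is restated verbatim so the snippet runs on its own:

import itertools
import numpy as np

def remove_intra(M, contigs):
    N = np.copy(M)
    n = len(N)
    for (i, j) in itertools.product(range(n), range(n)):
        if contigs[i] == contigs[j]:
            N[i, j] = 0
    return N

M = np.ones((4, 4), dtype=int)
contigs = [0, 0, 1, 1]  # rows/cols 0-1 are contig 0, rows/cols 2-3 are contig 1
print(remove_intra(M, contigs))
# [[0 0 1 1]
#  [0 0 1 1]
#  [1 1 0 0]
#  [1 1 0 0]]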
def flags(self, index):
"""Set flags"""
return Qt.ItemFlags(QAbstractTableModel.flags(self, index) |
Qt.ItemIsEditable)
|
Return the item flags, marking cells as editable.
|
def _ParseHeader(self, parser_mediator, structure):
"""Parses a log header.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
structure (pyparsing.ParseResults): structure of tokens derived from
a line of a text file.
"""
_, month, day, hours, minutes, seconds, year = structure.date_time
month = timelib.MONTH_DICT.get(month.lower(), 0)
time_elements_tuple = (year, month, day, hours, minutes, seconds)
try:
date_time = dfdatetime_time_elements.TimeElements(
time_elements_tuple=time_elements_tuple)
date_time.is_local_time = True
except ValueError:
parser_mediator.ProduceExtractionWarning(
'invalid date time value: {0!s}'.format(structure.date_time))
return
self._last_month = month
event_data = XChatLogEventData()
if structure.log_action[0] == 'BEGIN':
self._xchat_year = year
event_data.text = 'XChat start logging'
elif structure.log_action[0] == 'END':
self._xchat_year = None
event_data.text = 'XChat end logging'
else:
logger.debug('Unknown log action: {0:s}.'.format(
' '.join(structure.log_action)))
return
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_ADDED,
time_zone=parser_mediator.timezone)
parser_mediator.ProduceEventWithEventData(event, event_data)
|
Parses a log header.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
structure (pyparsing.ParseResults): structure of tokens derived from
a line of a text file.
|
def _disable_prometheus_process_collector(self) -> None:
"""
        There is a bug in SDC's Docker implementation and intolerable prometheus_client code, due to which
its process_collector will fail.
See https://github.com/prometheus/client_python/issues/80
"""
logger.info("Removing prometheus process collector")
try:
core.REGISTRY.unregister(PROCESS_COLLECTOR)
except KeyError:
logger.debug("PROCESS_COLLECTOR already removed from prometheus")
|
There is a bug in SDC's Docker implementation and intolerable prometheus_client code, due to which
its process_collector will fail.
See https://github.com/prometheus/client_python/issues/80
|
def check(self, radl):
"""Check the features in this application."""
SIMPLE_FEATURES = {
"name": (str, lambda x, _: bool(x.value)),
"path": (str, lambda x, _: bool(x.value)),
"version": (str, is_version),
"preinstalled": (str, ["YES", "NO"])
}
self.check_simple(SIMPLE_FEATURES, radl)
|
Check the features in this application.
|
def reset(self):
'''
Resets this agent type to prepare it for a new simulation run. This
includes resetting the random number generator and initializing the style
of each agent of this type.
'''
self.resetRNG()
sNow = np.zeros(self.pop_size)
Shk = self.RNG.rand(self.pop_size)
sNow[Shk < self.p_init] = 1
self.sNow = sNow
|
Resets this agent type to prepare it for a new simulation run. This
includes resetting the random number generator and initializing the style
of each agent of this type.
|
def hdf5_col(self, chain=-1):
"""Return a pytables column object.
:Parameters:
chain : integer
The index of the chain.
.. note::
This method is specific to the ``hdf5`` backend.
"""
return self.db._tables[chain].colinstances[self.name]
|
Return a pytables column object.
:Parameters:
chain : integer
The index of the chain.
.. note::
This method is specific to the ``hdf5`` backend.
|
def deepcopy(self, x=None, y=None):
"""
Create a deep copy of the Keypoint object.
Parameters
----------
x : None or number, optional
Coordinate of the keypoint on the x axis.
If ``None``, the instance's value will be copied.
y : None or number, optional
Coordinate of the keypoint on the y axis.
If ``None``, the instance's value will be copied.
Returns
-------
imgaug.Keypoint
Deep copy.
"""
x = self.x if x is None else x
y = self.y if y is None else y
return Keypoint(x=x, y=y)
|
Create a deep copy of the Keypoint object.
Parameters
----------
x : None or number, optional
Coordinate of the keypoint on the x axis.
If ``None``, the instance's value will be copied.
y : None or number, optional
Coordinate of the keypoint on the y axis.
If ``None``, the instance's value will be copied.
Returns
-------
imgaug.Keypoint
Deep copy.
|
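A short usage sketch, assuming imgaug's Keypoint is importable at the package top level:

from imgaug import Keypoint

kp = Keypoint(x=10, y=20)
kp2 = kp.deepcopy(x=15)          # override x, copy y from the instance
assert (kp2.x, kp2.y) == (15, 20)
assert (kp.x, kp.y) == (10, 20)  # the original is untouched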
def get_context_data(self, **kwargs):
"""This adds into the context of strain_list_all (which filters for all alive :class:`~mousedb.animal.models.Animal` objects and active cages) and cages which filters for the number of current cages."""
strain = super(StrainDetail, self).get_object()
context = super(StrainDetail, self).get_context_data(**kwargs)
context['breeding_cages'] = Breeding.objects.filter(Strain=strain)
context['animal_list'] = Animal.objects.filter(Strain=strain).order_by('Background','Genotype')
context['cages'] = Animal.objects.filter(Strain=strain).values("Cage").distinct()
context['active'] = False
return context
|
Add this strain's breeding cages, its :class:`~mousedb.animal.models.Animal` objects (ordered by background and genotype), and the distinct cages housing them to the template context.
|
def _adapt_response(self, response):
"""Convert various error responses to standardized ErrorDetails."""
errors, meta = super(ServerError, self)._adapt_response(response)
return errors[0], meta
|
Convert various error responses to standardized ErrorDetails.
|
def validateOneNamespace(self, doc, elem, prefix, ns, value):
"""Try to validate a single namespace declaration for an
element basically it does the following checks as described
by the XML-1.0 recommendation: - [ VC: Attribute Value Type
] - [ VC: Fixed Attribute Default ] - [ VC: Entity Name ] -
[ VC: Name Token ] - [ VC: ID ] - [ VC: IDREF ] - [ VC:
Entity Name ] - [ VC: Notation Attributes ] The ID/IDREF
uniqueness and matching are done separately """
if doc is None: doc__o = None
else: doc__o = doc._o
if elem is None: elem__o = None
else: elem__o = elem._o
if ns is None: ns__o = None
else: ns__o = ns._o
ret = libxml2mod.xmlValidateOneNamespace(self._o, doc__o, elem__o, prefix, ns__o, value)
return ret
|
Try to validate a single namespace declaration for an
element. Basically it does the following checks as described
by the XML-1.0 recommendation: [ VC: Attribute Value Type ],
[ VC: Fixed Attribute Default ], [ VC: Entity Name ],
[ VC: Name Token ], [ VC: ID ], [ VC: IDREF ],
[ VC: Entity Name ], [ VC: Notation Attributes ].
The ID/IDREF uniqueness and matching are done separately.
|
def _get_service_keys(self, service_name):
"""
Return the service keys for the given service.
"""
guid = self.get_instance_guid(service_name)
uri = "/v2/service_instances/%s/service_keys" % (guid)
return self.api.get(uri)
|
Return the service keys for the given service.
|
def tee(process, filter):
"""Read lines from process.stdout and echo them to sys.stdout.
Returns a list of lines read. Lines are not newline terminated.
The 'filter' is a callable which is invoked for every line,
receiving the line as argument. If the filter returns True, the
line is echoed to sys.stdout.
"""
lines = []
while True:
line = process.stdout.readline()
if line:
if sys.version_info[0] >= 3:
line = decode(line)
stripped_line = line.rstrip()
if filter(stripped_line):
sys.stdout.write(line)
lines.append(stripped_line)
elif process.poll() is not None:
process.stdout.close()
break
return lines
|
Read lines from process.stdout and echo them to sys.stdout.
Returns a list of lines read. Lines are not newline terminated.
The 'filter' is a callable which is invoked for every line,
receiving the line as argument. If the filter returns True, the
line is echoed to sys.stdout.
|
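A runnable sketch of the calling pattern, with a condensed tee inlined for self-containment (the helper above additionally routes decoding through a py2/py3 compatibility shim):

import subprocess
import sys

def tee(process, filter):
    lines = []
    while True:
        line = process.stdout.readline()
        if line:
            line = line.decode()
            stripped = line.rstrip()
            if filter(stripped):
                sys.stdout.write(line)
            lines.append(stripped)
        elif process.poll() is not None:
            process.stdout.close()
            break
    return lines

proc = subprocess.Popen(
    [sys.executable, '-c', 'print("keep me"); print("drop me")'],
    stdout=subprocess.PIPE)
lines = tee(proc, lambda l: 'keep' in l)  # only "keep me" is echoed
assert lines == ['keep me', 'drop me']    # but every line is returned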
def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'score') and self.score is not None:
_dict['score'] = self.score
if hasattr(self, 'sentence') and self.sentence is not None:
_dict['sentence'] = self.sentence
if hasattr(self, 'type') and self.type is not None:
_dict['type'] = self.type
if hasattr(self, 'arguments') and self.arguments is not None:
_dict['arguments'] = [x._to_dict() for x in self.arguments]
return _dict
|
Return a json dictionary representing this model.
|
def get_url_path(self, language=None):
"""Return the URL's path component. Add the language prefix if
``PAGE_USE_LANGUAGE_PREFIX`` setting is set to ``True``.
:param language: the wanted url language.
"""
if self.is_first_root():
# this is used to allow users to change URL of the root
# page. The language prefix is not usable here.
try:
return reverse('pages-root')
except Exception:
pass
url = self.get_complete_slug(language)
if not language:
language = settings.PAGE_DEFAULT_LANGUAGE
if settings.PAGE_USE_LANGUAGE_PREFIX:
return reverse('pages-details-by-path',
args=[language, url])
else:
return reverse('pages-details-by-path', args=[url])
|
Return the URL's path component. Add the language prefix if
``PAGE_USE_LANGUAGE_PREFIX`` setting is set to ``True``.
:param language: the wanted url language.
|
def prep_directory(self, target_dir):
"""
Prepares a new directory to store the file at the provided path, if needed.
"""
dirname = path.dirname(target_dir)
if dirname:
dirname = path.join(settings.BUILD_DIR, dirname)
if not self.fs.exists(dirname):
logger.debug("Creating directory at {}{}".format(self.fs_name, dirname))
self.fs.makedirs(dirname)
|
Prepares a new directory to store the file at the provided path, if needed.
|
def autolink_role(typ, rawtext, etext, lineno, inliner,
options={}, content=[]):
"""Smart linking role.
Expands to ':obj:`text`' if `text` is an object that can be imported;
otherwise expands to '*text*'.
"""
env = inliner.document.settings.env
r = env.get_domain('py').role('obj')(
'obj', rawtext, etext, lineno, inliner, options, content)
pnode = r[0][0]
prefixes = get_import_prefixes_from_env(env)
try:
name, obj, parent = import_by_name(pnode['reftarget'], prefixes)
except ImportError:
content = pnode[0]
r[0][0] = nodes.emphasis(rawtext, content[0].astext(),
classes=content['classes'])
return r
|
Smart linking role.
Expands to ':obj:`text`' if `text` is an object that can be imported;
otherwise expands to '*text*'.
|
def ncr(n, r):
"""
Calculate n choose r.
:param n: n
    :type n: int
    :param r: r
    :type r: int
:return: n choose r as int
"""
r = min(r, n - r)
numer = reduce(op.mul, range(n, n - r, -1), 1)
denom = reduce(op.mul, range(1, r + 1), 1)
return numer // denom
|
Calculate n choose r.
:param n: n
:type n: int
:param r: r
:type r: int
:return: n choose r as int
|
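A self-contained restatement with a few sanity checks:

import operator as op
from functools import reduce

def ncr(n, r):
    r = min(r, n - r)                               # exploit symmetry
    numer = reduce(op.mul, range(n, n - r, -1), 1)  # n * (n-1) * ... * (n-r+1)
    denom = reduce(op.mul, range(1, r + 1), 1)      # r!
    return numer // denom

assert ncr(5, 2) == 10        # 5! / (2! * 3!)
assert ncr(10, 0) == 1
assert ncr(52, 5) == 2598960  # five-card poker hands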
def is_valid(self):
"""Returns a Client object if this is a valid OAuth request."""
        request = self.get_oauth_request()
        client = self.get_client(request)
        self._server.verify_request(request, client, None)
        return client
|
Returns a Client object if this is a valid OAuth request.
|
def generate_insufficient_overlap_message(
e,
exposure_geoextent,
exposure_layer,
hazard_geoextent,
hazard_layer,
viewport_geoextent):
"""Generate insufficient overlap message.
:param e: An exception.
:type e: Exception
:param exposure_geoextent: Extent of the exposure layer in the form
[xmin, ymin, xmax, ymax] in EPSG:4326.
:type exposure_geoextent: list
:param exposure_layer: Exposure layer.
:type exposure_layer: QgsMapLayer
:param hazard_geoextent: Extent of the hazard layer in the form
[xmin, ymin, xmax, ymax] in EPSG:4326.
:type hazard_geoextent: list
:param hazard_layer: Hazard layer instance.
:type hazard_layer: QgsMapLayer
:param viewport_geoextent: Viewport extents
as a list [xmin, ymin, xmax, ymax] in EPSG:4326.
:type viewport_geoextent: list
:return: An InaSAFE message object.
:rtype: safe.messaging.Message
"""
description = tr(
'There was insufficient overlap between the input layers and / or the '
'layers and the viewable area. Please select two overlapping layers '
'and zoom or pan to them or disable viewable area clipping in the '
'options dialog. Full details follow:')
message = m.Message(description)
text = m.Paragraph(tr('Failed to obtain the optimal extent given:'))
message.add(text)
analysis_inputs = m.BulletedList()
# We must use Qt string interpolators for tr to work properly
analysis_inputs.add(tr('Hazard: %s') % (hazard_layer.source()))
analysis_inputs.add(tr('Exposure: %s') % (exposure_layer.source()))
analysis_inputs.add(
tr('Viewable area Geo Extent: %s') % (
viewport_geoextent))
analysis_inputs.add(
tr('Hazard Geo Extent: %s') % (
hazard_geoextent))
analysis_inputs.add(
tr('Exposure Geo Extent: %s') % (
exposure_geoextent))
analysis_inputs.add(
tr('Details: %s') % (
e))
message.add(analysis_inputs)
return message
|
Generate insufficient overlap message.
:param e: An exception.
:type e: Exception
:param exposure_geoextent: Extent of the exposure layer in the form
[xmin, ymin, xmax, ymax] in EPSG:4326.
:type exposure_geoextent: list
:param exposure_layer: Exposure layer.
:type exposure_layer: QgsMapLayer
:param hazard_geoextent: Extent of the hazard layer in the form
[xmin, ymin, xmax, ymax] in EPSG:4326.
:type hazard_geoextent: list
:param hazard_layer: Hazard layer instance.
:type hazard_layer: QgsMapLayer
:param viewport_geoextent: Viewport extents
as a list [xmin, ymin, xmax, ymax] in EPSG:4326.
:type viewport_geoextent: list
:return: An InaSAFE message object.
:rtype: safe.messaging.Message
|
def batch(self, num):
"""
Iterator returning results in batches. When making more general queries
that might have larger results, specify a batch result that should be
returned with each iteration.
:param int num: number of results per iteration
:return: iterator holding list of results
"""
self._params.pop('limit', None) # Limit and batch are mutually exclusive
it = iter(self)
while True:
chunk = list(islice(it, num))
if not chunk:
return
yield chunk
|
Iterator returning results in batches. When making more general queries
that might have larger results, specify a batch result that should be
returned with each iteration.
:param int num: number of results per iteration
:return: iterator holding list of results
|
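The slicing trick in isolation, as a self-contained generator over any iterable (the query-object plumbing above is omitted):

from itertools import islice

def batched(iterable, num):
    # Repeatedly pull num items from a single shared iterator until exhausted.
    it = iter(iterable)
    while True:
        chunk = list(islice(it, num))
        if not chunk:
            return
        yield chunk

print(list(batched(range(7), 3)))  # [[0, 1, 2], [3, 4, 5], [6]]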
def write_memory(self, addr, data, transfer_size=32):
"""! @brief Write a single memory location.
By default the transfer size is a word.
"""
assert transfer_size in (8, 16, 32)
if transfer_size == 32:
self._link.write_mem32(addr, conversion.u32le_list_to_byte_list([data]), self._apsel)
elif transfer_size == 16:
self._link.write_mem16(addr, conversion.u16le_list_to_byte_list([data]), self._apsel)
elif transfer_size == 8:
self._link.write_mem8(addr, [data], self._apsel)
|
! @brief Write a single memory location.
By default the transfer size is a word.
|
def hardware_flexport_id(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
hardware = ET.SubElement(config, "hardware", xmlns="urn:brocade.com:mgmt:brocade-hardware")
flexport = ET.SubElement(hardware, "flexport")
id = ET.SubElement(flexport, "id")
id.text = kwargs.pop('id')
callback = kwargs.pop('callback', self._callback)
return callback(config)
|
Auto Generated Code
|
def download(self, path):
"""downloads a config resource to the path"""
service_get_resp = requests.get(self.location, cookies={"session": self.session})
payload = service_get_resp.json()
download_get_resp = requests.get(payload["content"])
with open(path, "wb") as config_file:
config_file.write(download_get_resp.content)
|
downloads a config resource to the path
|