def estimate_completion(self):
"""
Estimate completion time for a task.
:returns: deferred that when fired returns a datetime object for the
estimated completion time, or the actual completion time if the task
is already complete, or None if we could not estimate a time for this
task method.
"""
if self.completion_ts:
# Task is already complete. Return the exact completion time:
defer.returnValue(self.completed)
# Get the timestamps from the descendent task that's doing the work:
if self.method == 'build' or self.method == 'image':
subtask_completion = yield self.estimate_descendents()
defer.returnValue(subtask_completion)
if self.state == task_states.FREE:
est_completion = yield self._estimate_free()
defer.returnValue(est_completion)
avg_delta = yield self.estimate_duration()
if avg_delta is None:
defer.returnValue(None)
est_completion = self.started + avg_delta
defer.returnValue(est_completion)
def get(self, volume_id):
"""
Get a volume.
Args:
volume_id (str): Volume name.
Returns:
(:py:class:`Volume`): The volume.
Raises:
:py:class:`docker.errors.NotFound`
If the volume does not exist.
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self.prepare_model(self.client.api.inspect_volume(volume_id))
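A minimal usage sketch for the method above, assuming a standard docker-py client; the volume name is illustrative:
import docker
from docker.errors import NotFound

client = docker.from_env()
try:
    volume = client.volumes.get("my-data")  # hypothetical volume name
    print(volume.name)
except NotFound:
    print("volume does not exist")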
def read_obo(cls, path, flatten=True, part_of_cc_only=False):
""" Parse an OBO file and store GO term information.
Parameters
----------
path: str
Path of the OBO file.
flatten: bool, optional
If set to False, do not generate a list of all ancestors and
descendants for each GO term.
part_of_cc_only: bool, optional
Legacy parameter for backwards compatibility. If set to True,
ignore ``part_of`` relations outside the ``cellular_component``
domain.
Notes
-----
The OBO file must end with a line break.
"""
name2id = {}
alt_id = {}
syn2id = {}
terms = []
with open(path) as fh:
n = 0
while True:
try:
nextline = next(fh)
except StopIteration:
break
if nextline == '[Term]\n':
n += 1
id_ = next(fh)[4:-1]
# acc = get_acc(id_)
name = next(fh)[6:-1]
name2id[name] = id_
domain = next(fh)[11:-1]
def_ = None
is_a = set()
part_of = set()
l = next(fh)
while l != '\n':
if l.startswith('alt_id:'):
alt_id[l[8:-1]] = id_
elif l.startswith('def: '):
idx = l[6:].index('"')
def_ = l[6:(idx+6)]
elif l.startswith('is_a:'):
is_a.add(l[6:16])
elif l.startswith('synonym:'):
idx = l[10:].index('"')
if l[(10+idx+2):].startswith("EXACT"):
s = l[10:(10+idx)]
syn2id[s] = id_
elif l.startswith('relationship: part_of'):
if part_of_cc_only:
if domain == 'cellular_component':
part_of.add(l[22:32])
else:
part_of.add(l[22:32])
l = next(fh)
assert def_ is not None
terms.append(GOTerm(id_, name, domain, def_, is_a, part_of))
logger.info('Parsed %d GO term definitions.', n)
ontology = cls(terms, syn2id, alt_id, name2id)
# store children and parts
logger.info('Adding child and part relationships...')
for term in ontology:
for parent in term.is_a:
ontology[parent].children.add(term.id)
for whole in term.part_of:
ontology[whole].parts.add(term.id)
if flatten:
logger.info('Flattening ancestors...')
ontology._flatten_ancestors()
logger.info('Flattening descendants...')
ontology._flatten_descendants()
ontology._flattened = True
return ontology
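A sketch of calling read_obo; the class name `GeneOntology` and the file path are assumptions based on the surrounding code:
# Assumes the enclosing class is named GeneOntology and go-basic.obo exists.
ontology = GeneOntology.read_obo("go-basic.obo", flatten=True)
for term in ontology:  # the loop in read_obo shows the ontology is iterable
    print(term.id, term.name, term.domain)
    break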
def finalize_prov_profile(self, name):
# type: (Optional[Text]) -> List[Identifier]
"""Transfer the provenance related files to the RO."""
# NOTE: Relative posix path
if name is None:
# master workflow, fixed filenames
filename = "primary.cwlprov"
else:
# ASCII-friendly filename, avoiding % as we don't want %2520 in manifest.json
wf_name = urllib.parse.quote(str(name), safe="").replace("%", "_")
# Note that the above could cause overlaps for similarly named
# workflows, but that's OK as we'll also include run uuid
# which also covers the case of this step being run in
# multiple places or iterations
filename = "%s.%s.cwlprov" % (wf_name, self.workflow_run_uuid)
basename = posixpath.join(_posix_path(PROVENANCE), filename)
# TODO: Also support other profiles than CWLProv, e.g. ProvOne
# list of prov identifiers of provenance files
prov_ids = []
# https://www.w3.org/TR/prov-xml/
with self.research_object.write_bag_file(basename + ".xml") as provenance_file:
self.document.serialize(provenance_file, format="xml", indent=4)
prov_ids.append(self.provenance_ns[filename + ".xml"])
# https://www.w3.org/TR/prov-n/
with self.research_object.write_bag_file(basename + ".provn") as provenance_file:
self.document.serialize(provenance_file, format="provn", indent=2)
prov_ids.append(self.provenance_ns[filename + ".provn"])
# https://www.w3.org/Submission/prov-json/
with self.research_object.write_bag_file(basename + ".json") as provenance_file:
self.document.serialize(provenance_file, format="json", indent=2)
prov_ids.append(self.provenance_ns[filename + ".json"])
# "rdf" aka https://www.w3.org/TR/prov-o/
# which can be serialized to ttl/nt/jsonld (and more!)
# https://www.w3.org/TR/turtle/
with self.research_object.write_bag_file(basename + ".ttl") as provenance_file:
self.document.serialize(provenance_file, format="rdf", rdf_format="turtle")
prov_ids.append(self.provenance_ns[filename + ".ttl"])
# https://www.w3.org/TR/n-triples/
with self.research_object.write_bag_file(basename + ".nt") as provenance_file:
self.document.serialize(provenance_file, format="rdf", rdf_format="ntriples")
prov_ids.append(self.provenance_ns[filename + ".nt"])
# https://www.w3.org/TR/json-ld/
# TODO: Use a nice JSON-LD context
# see also https://eprints.soton.ac.uk/395985/
# 404 Not Found on https://provenance.ecs.soton.ac.uk/prov.jsonld :(
with self.research_object.write_bag_file(basename + ".jsonld") as provenance_file:
self.document.serialize(provenance_file, format="rdf", rdf_format="json-ld")
prov_ids.append(self.provenance_ns[filename + ".jsonld"])
_logger.debug(u"[provenance] added provenance: %s", prov_ids)
return prov_ids
def send(self):
"""
Entrypoint to send data to Zabbix.
If debug is enabled, items are sent one by one; otherwise they are sent
in bulk.
Returns a tuple of counters:
(server_success, server_failure, processed, failed, total, time).
"""
if self.logger: # pragma: no cover
self.logger.info("Starting to send %d items" % len(self._items_list))
try:
# Zabbix trapper sends a maximum of 250 items in bulk.
# We have to respect that, in case of enforcement on the Zabbix server side.
# Special case if debug is enabled: we need to send items one by one.
max_value = ZBX_TRAPPER_MAX_VALUE
if self.debug_level >= 4:
max_value = 1
if self.logger: # pragma: no cover
self.logger.debug("Bulk limit is %d items" % max_value)
else:
if self.logger: # pragma: no cover
self.logger.info("Bulk limit is %d items" % max_value)
# Initialize offsets & counters
max_offset = len(self._items_list)
run = 0
start_offset = 0
stop_offset = min(start_offset + max_value, max_offset)
server_success = server_failure = processed = failed = total = time = 0
while start_offset < stop_offset:
run += 1
if self.logger: # pragma: no cover
self.logger.debug(
'run %d: start_offset is %d, stop_offset is %d' %
(run, start_offset, stop_offset)
)
# Extract the items to be sent from the global items list
_items_to_send = self._items_list[start_offset:stop_offset]
# Send extracted items
run_response, run_processed, run_failed, run_total, run_time = self._send_common(_items_to_send)
# Update counters
if run_response == 'success':
server_success += 1
elif run_response == 'failed':
server_failure += 1
processed += run_processed
failed += run_failed
total += run_total
time += run_time
if self.logger: # pragma: no cover
self.logger.info("%d items sent during run %d" % (run_total, run))
self.logger.debug(
'run %d: processed is %d, failed is %d, total is %d' %
(run, run_processed, run_failed, run_total)
)
# Compute next run's offsets
start_offset = stop_offset
stop_offset = min(start_offset + max_value, max_offset)
# Reset socket, which is likely to be closed by server
self._socket_reset()
except:
self._reset()
self._socket_reset()
raise
if self.logger: # pragma: no cover
self.logger.info('All %d items have been sent in %d runs' % (total, run))
self.logger.debug(
'Total run is %d; item processed: %d, failed: %d, total: %d, during %f seconds' %
(run, processed, failed, total, time)
)
# Everything has been sent.
# Reset DataContainer & return results_list
self._reset()
return server_success, server_failure, processed, failed, total, time
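A sketch of driving send(); the container setup is hypothetical, only the return shape is taken from the code above:
# `container` stands in for a configured sender instance.
success, failure, processed, failed, total, elapsed = container.send()
print("%d/%d items processed in %f seconds" % (processed, total, elapsed))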
def receive_data(self, data):
# type: (bytes) -> None
"""
Pass some received data to the connection for handling.
A list of events that the remote peer triggered by sending this data can
be retrieved with :meth:`~wsproto.connection.Connection.events`.
:param data: The data received from the remote peer on the network.
:type data: ``bytes``
"""
if data is None:
# "If _The WebSocket Connection is Closed_ and no Close control
# frame was received by the endpoint (such as could occur if the
# underlying transport connection is lost), _The WebSocket
# Connection Close Code_ is considered to be 1006."
self._events.append(CloseConnection(code=CloseReason.ABNORMAL_CLOSURE))
self._state = ConnectionState.CLOSED
return
if self.state in (ConnectionState.OPEN, ConnectionState.LOCAL_CLOSING):
self._proto.receive_bytes(data)
elif self.state is ConnectionState.CLOSED:
raise LocalProtocolError("Connection already closed.")
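A minimal receive-loop sketch around receive_data(); the socket and the wsproto handshake are assumed to be set up already:
data = sock.recv(4096)         # hypothetical connected socket
ws.receive_data(data or None)  # pass None once the transport has closed
for event in ws.events():
    print(type(event).__name__)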
def main():
"""Start main part of the wait script."""
logger.info('Checking for available topics: %r', REQUIRED_TOPICS)
client = connect_kafka(hosts=KAFKA_HOSTS)
check_topics(client, REQUIRED_TOPICS)
def flip_alleles(genotypes):
"""Flip the alleles of an Genotypes instance."""
warnings.warn("deprecated: use 'Genotypes.flip_coded'", DeprecationWarning)
genotypes.reference, genotypes.coded = (genotypes.coded,
genotypes.reference)
genotypes.genotypes = 2 - genotypes.genotypes
return genotypes
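Per the deprecation warning, the method form is preferred; a sketch assuming `g` is a Genotypes instance:
g = flip_alleles(g)  # legacy helper, emits DeprecationWarning
# g.flip_coded()     # preferred replacement named in the warning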
def bin_spikes(spike_times, binsz):
"""Sort spike times into bins
:param spike_times: times of spike instances
:type spike_times: list
:param binsz: length of time bin to use
:type binsz: float
:returns: list of bin indices, one for each element in spike_times
"""
bins = np.empty((len(spike_times),), dtype=int)
for i, stime in enumerate(spike_times):
# around to fix rounding errors
bins[i] = np.floor(np.around(stime/binsz, 5))
return bins
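A quick worked example; with binsz=0.01 the spike at t=0.05 lands in bin 5 because the rounding guard corrects floating-point error before the floor:
import numpy as np

spike_times = [0.013, 0.021, 0.0499, 0.050]
print(bin_spikes(spike_times, binsz=0.01))  # expected: [1 2 4 5]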
def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'heading') and self.heading is not None:
_dict['heading'] = self.heading._to_dict()
return _dict
def with_units(self, val, ua, ub):
"""Return value with unit.
args:
val (mixed): result
ua (str): 1st unit
ub (str): 2nd unit
raises:
SyntaxError
returns:
str
"""
if not val:
return str(val)
if ua or ub:
if ua and ub:
if ua == ub:
return str(val) + ua
else:
# Nodejs version does not seem to mind mismatched
# units within expressions. So we choose the first
# as they do
# raise SyntaxError("Error in expression %s != %s" % (ua, ub))
return str(val) + ua
elif ua:
return str(val) + ua
elif ub:
return str(val) + ub
return repr(val)
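Expected behaviour, assuming `calc` is an instance of the enclosing class (hypothetical name):
calc.with_units(10, "px", "px")  # -> '10px'
calc.with_units(10, "px", "em")  # -> '10px' (first unit wins, as in Node.js)
calc.with_units(0, "px", None)   # -> '0' (falsy values short-circuit)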
def get_all_for_project(self, name, **kwargs):
"""
Gets the Build Records produced from the BuildConfiguration by name.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_all_for_project(name, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str name: BuildConfiguration name (required)
:param int page_index: Page index
:param int page_size: Pagination size
:param str sort: Sorting RSQL
:param str q: RSQL query
:return: BuildRecordPage
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_all_for_project_with_http_info(name, **kwargs)
else:
(data) = self.get_all_for_project_with_http_info(name, **kwargs)
return data
def build_net(self, is_training):
"""Build the whole neural network for the QA model."""
cfg = self.cfg
with tf.device('/cpu:0'):
word_embed = tf.get_variable(
name='word_embed', initializer=self.embed, dtype=tf.float32, trainable=False)
char_embed = tf.get_variable(name='char_embed',
shape=[cfg.char_vcb_size,
cfg.char_embed_dim],
dtype=tf.float32)
# [query_length, batch_size]
self.query_word = tf.placeholder(dtype=tf.int32,
shape=[None, None],
name='query_word')
self.query_mask = tf.placeholder(dtype=tf.float32,
shape=[None, None],
name='query_mask')
# [batch_size]
self.query_lengths = tf.placeholder(
dtype=tf.int32, shape=[None], name='query_lengths')
# [passage_length, batch_size]
self.passage_word = tf.placeholder(
dtype=tf.int32, shape=[None, None], name='passage_word')
self.passage_mask = tf.placeholder(
dtype=tf.float32, shape=[None, None], name='passage_mask')
# [batch_size]
self.passage_lengths = tf.placeholder(
dtype=tf.int32, shape=[None], name='passage_lengths')
if is_training:
self.answer_begin = tf.placeholder(
dtype=tf.int32, shape=[None], name='answer_begin')
self.answer_end = tf.placeholder(
dtype=tf.int32, shape=[None], name='answer_end')
self.query_char_ids = tf.placeholder(dtype=tf.int32,
shape=[
self.cfg.max_char_length, None, None],
name='query_char_ids')
# sequence_length, batch_size
self.query_char_lengths = tf.placeholder(
dtype=tf.int32, shape=[None, None], name='query_char_lengths')
self.passage_char_ids = tf.placeholder(dtype=tf.int32,
shape=[
self.cfg.max_char_length, None, None],
name='passage_char_ids')
# sequence_length, batch_size
self.passage_char_lengths = tf.placeholder(dtype=tf.int32,
shape=[None, None],
name='passage_char_lengths')
query_char_states = self.build_char_states(char_embed=char_embed,
is_training=is_training,
reuse=False,
char_ids=self.query_char_ids,
char_lengths=self.query_char_lengths)
passage_char_states = self.build_char_states(char_embed=char_embed,
is_training=is_training,
reuse=True,
char_ids=self.passage_char_ids,
char_lengths=self.passage_char_lengths)
with tf.variable_scope("encoding") as scope:
query_states = tf.concat([tf.nn.embedding_lookup(
word_embed, self.query_word), query_char_states], axis=2)
scope.reuse_variables()
passage_states = tf.concat([tf.nn.embedding_lookup(
word_embed, self.passage_word), passage_char_states], axis=2)
passage_states = tf.transpose(passage_states, perm=[1, 0, 2])
query_states = tf.transpose(query_states, perm=[1, 0, 2])
self.passage_states = passage_states
self.query_states = query_states
output, output2 = graph_to_network(passage_states, query_states,
self.passage_lengths, self.query_lengths,
self.graph, self.cfg.dropout,
is_training, num_heads=cfg.num_heads,
rnn_units=cfg.rnn_units)
passage_att_mask = self.passage_mask
batch_size_x = tf.shape(self.query_lengths)
answer_h = tf.zeros(
tf.concat([batch_size_x, tf.constant([cfg.ptr_dim], dtype=tf.int32)], axis=0))
answer_context = tf.reduce_mean(output2, axis=1)
query_init_w = tf.get_variable(
'query_init_w', shape=[output2.get_shape().as_list()[-1], cfg.ptr_dim])
self.query_init = query_init_w
answer_context = tf.matmul(answer_context, query_init_w)
output = tf.transpose(output, perm=[1, 0, 2])
with tf.variable_scope('answer_ptr_layer'):
ptr_att = DotAttention('ptr',
hidden_dim=cfg.ptr_dim,
is_vanilla=self.cfg.att_is_vanilla,
is_identity_transform=self.cfg.att_is_id,
need_padding=self.cfg.att_need_padding)
answer_pre_compute = ptr_att.get_pre_compute(output)
ptr_gru = XGRUCell(hidden_dim=cfg.ptr_dim)
begin_prob, begin_logits = ptr_att.get_prob(output, answer_context, passage_att_mask,
answer_pre_compute, True)
att_state = ptr_att.get_att(output, begin_prob)
(_, answer_h) = ptr_gru.call(inputs=att_state, state=answer_h)
answer_context = answer_h
end_prob, end_logits = ptr_att.get_prob(output, answer_context,
passage_att_mask, answer_pre_compute,
True)
self.begin_prob = tf.transpose(begin_prob, perm=[1, 0])
self.end_prob = tf.transpose(end_prob, perm=[1, 0])
begin_logits = tf.transpose(begin_logits, perm=[1, 0])
end_logits = tf.transpose(end_logits, perm=[1, 0])
if is_training:
def label_smoothing(inputs, masks, epsilon=None):
"""Modify target for label smoothing."""
epsilon = cfg.labelsmoothing  # the config value always overrides the argument
num_of_channel = tf.shape(inputs)[-1] # number of channels
inputs = tf.cast(inputs, tf.float32)
return (((1 - epsilon) * inputs) + (epsilon /
tf.cast(num_of_channel, tf.float32))) * masks
cost1 = tf.reduce_mean(
tf.losses.softmax_cross_entropy(label_smoothing(
tf.one_hot(self.answer_begin,
depth=tf.shape(self.passage_word)[0]),
tf.transpose(self.passage_mask, perm=[1, 0])), begin_logits))
cost2 = tf.reduce_mean(
tf.losses.softmax_cross_entropy(
label_smoothing(tf.one_hot(self.answer_end,
depth=tf.shape(self.passage_word)[0]),
tf.transpose(self.passage_mask, perm=[1, 0])), end_logits))
reg_ws = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
l2_loss = tf.reduce_sum(reg_ws)
loss = cost1 + cost2 + l2_loss
self.loss = loss
optimizer = tf.train.AdamOptimizer(learning_rate=cfg.learning_rate)
self.train_op = optimizer.minimize(self.loss)
return tf.stack([self.begin_prob, self.end_prob])
def split(self, t):
"""returns two segments, whose union is this segment and which join at
self.point(t)."""
pt = self.point(t)
return Line(self.start, pt), Line(pt, self.end)
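A sketch assuming an svgpathtools-style Line with complex-number endpoints:
seg = Line(0 + 0j, 10 + 0j)     # assumes the surrounding Line class
first, second = seg.split(0.25)
print(first.end, second.start)  # both equal seg.point(0.25), i.e. (2.5+0j)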
def embed(args):
"""
%prog embed evidencefile scaffolds.fasta contigs.fasta
Use SSPACE evidencefile to scaffold contigs into existing scaffold
structure, as in `scaffolds.fasta`. Contigs.fasta were used by SSPACE
directly to scaffold.
Rules:
1. Only update existing structure by embedding contigs small enough to fit.
2. Promote singleton contigs only if they are big (>= min_length).
"""
p = OptionParser(embed.__doc__)
p.set_mingap(default=10)
p.add_option("--min_length", default=200, type="int",
help="Minimum length to consider [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 3:
sys.exit(not p.print_help())
evidencefile, scaffolds, contigs = args
min_length = opts.min_length
splitfasta, oagp, cagp = gaps([scaffolds, "--split",
"--mingap={0}".format(opts.mingap)])
agp = AGP(cagp)
p = agp.graph
ef = EvidenceFile(evidencefile, contigs)
sizes = ef.sz
q = ef.graph
logging.debug("Reference graph: {0}".format(p))
logging.debug("Patch graph: {0}".format(q))
newagp = deepcopy(agp)
seen = set()
deleted = set()
for a in agp:
if a.is_gap:
continue
name = a.component_id
object = a.object
if name in deleted:
print("* Skip {0}, already embedded".format(name), file=sys.stderr)
continue
seen.add(name)
target_name, tag = get_target(p, name)
path = q.get_path(name, target_name, tag=tag)
path_size = sum([sizes[x.v] for x, t in path]) if path else None
status = NO_UPDATE
# Heuristic, the patch must not be too long
if path and path_size > min_length and len(path) > 3:
path = None
if not path:
print(name, target_name, path, path_size, status, file=sys.stderr)
continue
backward = False
for x, t in path:
if x.v in seen:
print("* Does not allow backward" \
" patch on {0}".format(x.v), file=sys.stderr)
backward = True
break
if backward:
continue
# Build the path plus the ends
vv = q.get_node(name)
path.appendleft((vv, tag))
if tag == ">":
path.reverse()
status = INSERT_BEFORE
elif target_name is None:
status = INSERT_AFTER
else:
target = q.get_node(target_name)
path.append((target, tag))
status = INSERT_BETWEEN
print(name, target_name, path, path_size, status, file=sys.stderr)
# Trim the ends off from the constructed AGPLines
lines = path_to_agp(q, path, object, sizes, status)
if status == INSERT_BEFORE:
lines = lines[:-1]
td = newagp.insert_lines(name, lines, \
delete=True, verbose=True)
elif status == INSERT_AFTER:
lines = lines[1:]
td = newagp.insert_lines(name, lines, after=True, \
delete=True, verbose=True)
else:
lines = lines[1:-1]
td = newagp.update_between(name, target_name, lines, \
delete=True, verbose=True)
deleted |= td
seen |= td
# Recruit big singleton contigs
CUTOFF = opts.min_length
for ctg, size in sizes.items():
if ctg in seen:
continue
if size < CUTOFF:
continue
newagp.append(AGPLine.cline(ctg, ctg, sizes, "?"))
# Write a new AGP file
newagpfile = "embedded.agp"
newagp.print_to_file(newagpfile, index=True)
tidy([newagpfile, contigs])
def go_from(self, vertex):
"""
Tell the edge to go out from this vertex.
Args:
vertex (Vertex): vertex to go from.
"""
if self.vertex_out:
self.vertex_out.edges_out.remove(self)
self.vertex_out = vertex
vertex.edges_out.add(self)
def format_item(self, item, defaults=None, stencil=None):
""" Format an item.
"""
from pyrobase.osutil import shell_escape
try:
item_text = fmt.to_console(formatting.format_item(self.options.output_format, item, defaults))
except (NameError, ValueError, TypeError) as exc:
self.fatal("Trouble with formatting item %r\n\n FORMAT = %r\n\n REASON =" % (item, self.options.output_format), exc)
raise # in --debug mode
if self.options.shell:
item_text = '\t'.join(shell_escape(i) for i in item_text.split('\t'))
# Justify headers according to stencil
if stencil:
item_text = '\t'.join(i.ljust(len(s)) for i, s in zip(item_text.split('\t'), stencil))
return item_text
def _handle_sdp_target_state_updated(sdp_state: SDPState):
"""Respond to an SDP target state change event.
This function sets the current state of SDP to the target state if that is
possible.
TODO(BMo) This can't be done as a blocking function, as it is here!
"""
LOG.info('Handling SDP target state updated event...')
LOG.info('SDP target state: %s', sdp_state.target_state)
# Map between the SDP target state and the service target state?
if sdp_state.target_state == 'off':
_update_services_target_state('off')
# TODO: Work out if the state of SDP has reached the target state.
# If yes, update the current state.
sdp_state.update_current_state(sdp_state.target_state)
def write(self, learn:Learner, trn_batch:Tuple, val_batch:Tuple, iteration:int, tbwriter:SummaryWriter)->None:
"Writes training and validation batch images to Tensorboard."
self._write_for_dstype(learn=learn, batch=val_batch, iteration=iteration, tbwriter=tbwriter, ds_type=DatasetType.Valid)
self._write_for_dstype(learn=learn, batch=trn_batch, iteration=iteration, tbwriter=tbwriter, ds_type=DatasetType.Train)
def libvlc_video_set_marquee_string(p_mi, option, psz_text):
'''Set a marquee string option.
@param p_mi: libvlc media player.
@param option: marq option to set; see libvlc_video_marquee_string_option_t.
@param psz_text: marq option value.
'''
f = _Cfunctions.get('libvlc_video_set_marquee_string', None) or \
_Cfunction('libvlc_video_set_marquee_string', ((1,), (1,), (1,),), None,
None, MediaPlayer, ctypes.c_uint, ctypes.c_char_p)
return f(p_mi, option, psz_text)
def place_on_gpu(data):
"""Utility to place data on GPU, where data could be a torch.Tensor, a tuple
or list of Tensors, or a tuple or list of tuple or lists of Tensors"""
data_type = type(data)
if data_type in (list, tuple):
data = [place_on_gpu(data[i]) for i in range(len(data))]
data = data_type(data)
return data
elif isinstance(data, torch.Tensor):
return data.cuda()
else:
raise ValueError(f"Data type {type(data)} not recognized.")
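A guarded usage sketch; the nested batch below is arbitrary:
import torch

if torch.cuda.is_available():
    batch = ([torch.zeros(2, 3), torch.ones(2)], torch.arange(4))
    gpu_batch = place_on_gpu(batch)  # tuples/lists rebuilt, tensors moved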
def terminate(self):
"""
Send termination signal to DAG parsing processor manager
and expect it to terminate all DAG file processors.
"""
self.log.info("Sending termination message to manager.")
self._child_signal_conn.send(DagParsingSignal.TERMINATE_MANAGER)
def get_contributors(self, anon=github.GithubObject.NotSet):
"""
:calls: `GET /repos/:owner/:repo/contributors <http://developer.github.com/v3/repos>`_
:param anon: string
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.NamedUser.NamedUser`
"""
url_parameters = dict()
if anon is not github.GithubObject.NotSet:
url_parameters["anon"] = anon
return github.PaginatedList.PaginatedList(
github.NamedUser.NamedUser,
self._requester,
self.url + "/contributors",
url_parameters
)
def main():
"""function to """
# parse arg to find file(s)
parser = argparse.ArgumentParser()
parser.add_argument("-f", "--file",
help="convert the markdown file to HTML")
parser.add_argument("-d", "--directory",
help="convert the markdown files in the directory to HTML")
parser.add_argument("-o", "--output",
help="chose the output filename")
parser.add_argument("--no_browser", nargs='?', const=True,
help="if stated, will prevent browser from opening")
args = parser.parse_args()
print(args)
superMarkdown = SuperMarkdown()
if args.output: # get the new output url
superMarkdown.export_url = args.output
if args.no_browser:
superMarkdown.open_browser = False
if args.directory: # get all files from directory
superMarkdown.add_toc()
files = [os.path.join(args.directory, file)
for file in os.listdir(args.directory)
if not os.path.isdir(os.path.join(args.directory, file))]
superMarkdown.add_content(*files)
elif args.file: # get the file from directory
superMarkdown.add_content(args.file)
else: # get the default markdown file `ressources/test.markdown`
superMarkdown.add_content('SuperMarkdown/ressources/test.md')
superMarkdown.export()
def uri_to_iri(value):
"""
Converts an ASCII URI byte string into a unicode IRI
:param value:
An ASCII-encoded byte string of the URI
:return:
A unicode string of the IRI
"""
if not isinstance(value, byte_cls):
raise TypeError(unwrap(
'''
value must be a byte string, not %s
''',
type_name(value)
))
parsed = urlsplit(value)
scheme = parsed.scheme
if scheme is not None:
scheme = scheme.decode('ascii')
username = _urlunquote(parsed.username, remap=[':', '@'])
password = _urlunquote(parsed.password, remap=[':', '@'])
hostname = parsed.hostname
if hostname:
hostname = hostname.decode('idna')
port = parsed.port
if port and not isinstance(port, int_types):
port = port.decode('ascii')
netloc = ''
if username is not None:
netloc += username
if password:
netloc += ':' + password
netloc += '@'
if hostname is not None:
netloc += hostname
if port is not None:
netloc += ':' + str_cls(port)
path = _urlunquote(parsed.path, remap=['/'], preserve=True)
query = _urlunquote(parsed.query, remap=['&', '='], preserve=True)
fragment = _urlunquote(parsed.fragment)
return urlunsplit((scheme, netloc, path, query, fragment))
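A quick sketch; the example URI is arbitrary. Percent-escapes in the path are unquoted and the IDNA hostname is decoded:
print(uri_to_iri(b'https://xn--nxasmq6b.example/%E4%B8%AD'))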
def _from_binary_ea(cls, binary_stream):
"""See base class."""
_ea_list = []
offset = 0
#_MOD_LOGGER.debug(f"Creating Ea object from binary stream {binary_stream.tobytes()}...")
_MOD_LOGGER.debug("Creating Ea object from binary '%s'...", binary_stream.tobytes())
while True:
entry = EaEntry.create_from_binary(binary_stream[offset:])
offset += entry.offset_next_ea
_ea_list.append(entry)
if offset >= len(binary_stream):
break
nw_obj = cls(_ea_list)
_MOD_LOGGER.debug("Attempted to unpack EA from \"%s\"\nResult: %s", binary_stream.tobytes(), nw_obj)
return nw_obj
def _authn_context_decl_ref(decl_ref, authn_auth=None):
"""
Construct the authn context with a authn context declaration reference
:param decl_ref: The authn context declaration reference
:param authn_auth: Authenticating Authority
:return: An AuthnContext instance
"""
return factory(saml.AuthnContext,
authn_context_decl_ref=decl_ref,
authenticating_authority=factory(
saml.AuthenticatingAuthority, text=authn_auth))
def _logins(users, user_attrs=None):
'''
Return a dict keyed by user login, mapping each attribute requested in
user_attrs to its value for that user.
'''
# FIXME: check for supported attrs
# Supported attrs:
# login # DEFAULT, no auth required
# email
# bio
# company
# created_at
# hireable
# location
# updated_at
# url
# 'login' will be the dict index key; remove it from user_attr columns
user_attrs = user_attrs or []
if 'login' in user_attrs:
# get this out of the remaining attrs to parse
user_attrs.remove('login')
_users = {}
for u in users:
l = u.login
logr.debug('LOGIN: {}'.format(l))
_users[l] = {}
for a in user_attrs:
logr.debug('user: {}'.format(u))
logr.debug('attr: {}'.format(a))
_users[l][a] = getattr(u, a)
return _users
def _updateTransitionMatrix(self):
"""
Updates the hidden-state transition matrix and the initial distribution
"""
# TRANSITION MATRIX
C = self.model.count_matrix() + self.prior_C # posterior count matrix
# check if we work with these options
if self.reversible and not _tmatrix_disconnected.is_connected(C, strong=True):
raise NotImplementedError('Encountered disconnected count matrix with sampling option reversible:\n '
+ str(C) + '\nUse prior to ensure connectivity or use reversible=False.')
# ensure consistent sparsity pattern (P0 might have additional zeros because of underflows)
# TODO: these steps work around a bug in msmtools. Should be fixed there
P0 = msmest.transition_matrix(C, reversible=self.reversible, maxiter=10000, warn_not_converged=False)
zeros = np.where(P0 + P0.T == 0)
C[zeros] = 0
# run sampler
Tij = msmest.sample_tmatrix(C, nsample=1, nsteps=self.transition_matrix_sampling_steps,
reversible=self.reversible)
# INITIAL DISTRIBUTION
if self.stationary: # p0 is consistent with P
p0 = _tmatrix_disconnected.stationary_distribution(Tij, C=C)
else:
n0 = self.model.count_init().astype(float)
first_timestep_counts_with_prior = n0 + self.prior_n0
positive = first_timestep_counts_with_prior > 0
p0 = np.zeros_like(n0)
p0[positive] = np.random.dirichlet(first_timestep_counts_with_prior[positive]) # sample p0 from posterior
# update HMM with new sample
self.model.update(p0, Tij)
def buildPaginationHeader(resultCount, resultsPerPage, pageArg, url):
'''Build link header for result pagination'''
lastPage = -(-resultCount // resultsPerPage)  # ceiling division
if pageArg:
page = int(pageArg)
next_url = re.sub("page=[0-9]+", "page={}".format(page + 1), url)
prev_url = re.sub("page=[0-9]+", "page={}".format(page - 1), url)
first_url = re.sub("page=[0-9]+", "page=1", url)
last_url = re.sub("page=[0-9]+", "page={}".format(lastPage), url)
else:
page = 1
next_url = url + "?page=2"
prev_url = ""
first_url = url + "?page=1"
last_url = url + "?page={}".format(lastPage)
if page == 1:
headerLink = "<{}>; rel=next, <{}>; rel=last".format(next_url, last_url)
elif page == lastPage:
headerLink = "<{}>; rel=prev, <{}>; rel=first".format(prev_url, first_url)
else:
headerLink = "<{}>; rel=next, <{}>; rel=prev, <{}>; rel=first, <{}>; rel=last".format(next_url, prev_url, first_url, last_url)
return headerLink
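Example with 100 results at 10 per page, requesting page 3 (the URL is hypothetical):
url = "https://api.example.com/items?page=3"
print(buildPaginationHeader(100, 10, "3", url))
# <...page=4>; rel=next, <...page=2>; rel=prev, <...page=1>; rel=first, <...page=10>; rel=last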
def _do_shell(self, line):
"""Send a command to the Unix shell.\n==> Usage: shell ls ~"""
if not line:
return
sp = Popen(line,
shell=True,
stdin=PIPE,
stdout=PIPE,
stderr=PIPE,
close_fds=not WINDOWS)
(fo, fe) = (sp.stdout, sp.stderr)
if PY2:
out = fo.read().strip(EOL)
err = fe.read().strip(EOL)
else:
out = fo.read().decode("utf-8")
err = fe.read().decode("utf-8")
if out:
print(out)
return
if err:
print(err.replace('isbn_', ''))
def create_space(self, space_name, add_users=True):
"""
Create a new space with the given name in the current target
organization.
"""
body = {
'name': space_name,
'organization_guid': self.api.config.get_organization_guid()
}
# MAINT: may need to do this more generally later
if add_users:
space_users = []
org_users = self.org.get_users()
for org_user in org_users['resources']:
guid = org_user['metadata']['guid']
space_users.append(guid)
body['manager_guids'] = space_users
body['developer_guids'] = space_users
return self.api.post('/v2/spaces', body)
def eval_string(self, s):
"""
Returns the tristate value of the expression 's', represented as 0, 1,
and 2 for n, m, and y, respectively. Raises KconfigError if syntax
errors are detected in 's'. Warns if undefined symbols are referenced.
As an example, if FOO and BAR are tristate symbols at least one of
which has the value y, then config.eval_string("y && (FOO || BAR)")
returns 2 (y).
To get the string value of non-bool/tristate symbols, use
Symbol.str_value. eval_string() always returns a tristate value, and
all non-bool/tristate symbols have the tristate value 0 (n).
The expression parsing is consistent with how parsing works for
conditional ('if ...') expressions in the configuration, and matches
the C implementation. m is rewritten to 'm && MODULES', so
eval_string("m") will return 0 (n) unless modules are enabled.
"""
# The parser is optimized to be fast when parsing Kconfig files (where
# an expression can never appear at the beginning of a line). We have
# to monkey-patch things a bit here to reuse it.
self._filename = None
# Don't include the "if " from below to avoid giving confusing error
# messages
self._line = s
self._tokens = self._tokenize("if " + s)
self._tokens_i = 1 # Skip the 'if' token
return expr_value(self._expect_expr_and_eol())
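A sketch in the style of Kconfiglib, assuming a Kconfig tree has been loaded from a file in the working directory:
kconf = Kconfig("Kconfig")                     # assumed Kconfig file
print(kconf.eval_string("y && (FOO || BAR)"))  # 0, 1, or 2 for n, m, y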
def animate(self,*args,**kwargs): #pragma: no cover
"""
NAME:
animate
PURPOSE:
animate an Orbit
INPUT:
d1= first dimension to plot ('x', 'y', 'R', 'vR', 'vT', 'z', 'vz', ...); can be list with up to three entries for three subplots
d2= second dimension to plot; can be list with up to three entries for three subplots
width= (600) width of output div in px
height= (400) height of output div in px
json_filename= (None) if set, save the data necessary for the figure in this filename (e.g., json_filename= 'orbit_data/orbit.json'); this path is also used in the output HTML, so needs to be accessible
load_jslibs= (True) if True, load the require and jQuery Javascript libraries (necessary in Jupyterlab, not necessary but harmless in notebooks; if embedding on a webpage one typically wants to load these libraries in the header)
ro= (Object-wide default) physical scale for distances to use to convert
vo= (Object-wide default) physical scale for velocities to use to convert
use_physical= use to override Object-wide default for using a physical scale for output
+kwargs for ra,dec,ll,bb, etc. functions
OUTPUT:
IPython.display.HTML object with code to animate the orbit; can be directly shown in jupyter notebook or embedded in HTML pages; get a text version of the HTML using the _repr_html_() function
HISTORY:
2017-09-17-24 - Written - Bovy (UofT)
2017-11-28 - Allow arbitrary functions of time to be plotted - Bovy (UofT)
"""
try:
from IPython.display import HTML
except ImportError:
raise ImportError("Orbit.animate requires ipython/jupyter to be installed")
if kwargs.get('use_physical',True) \
and kwargs.get('ro',self._roSet):
labeldict= {'t':'t (Gyr)',
'R':'R (kpc)',
'vR':'v_R (km/s)',
'vT':'v_T (km/s)',
'z':'z (kpc)',
'vz':'v_z (km/s)',
'phi':'azimuthal angle',
'r':'r (kpc)',
'x':'x (kpc)',
'y':'y (kpc)',
'vx':'v_x (km/s)',
'vy':'v_y (km/s)',
'E':'E (km^2/s^2)',
'Ez':'E_z (km^2/s^2)',
'ER':'E_R (km^2/s^2)',
'Enorm':'E(t)/E(0.)',
'Eznorm':'E_z(t)/E_z(0.)',
'ERnorm':'E_R(t)/E_R(0.)',
'Jacobi':'E-Omega_p L (km^2/s^2)',
'Jacobinorm':'(E-Omega_p L)(t)/(E-Omega_p L)(0)'}
else:
labeldict= {'t':'t','R':'R','vR':'v_R','vT':'v_T',
'z':'z','vz':'v_z','phi':r'azimuthal angle',
'r':'r',
'x':'x','y':'y','vx':'v_x','vy':'v_y',
'E':'E','Enorm':'E(t)/E(0.)',
'Ez':'E_z','Eznorm':'E_z(t)/E_z(0.)',
'ER':r'E_R','ERnorm':r'E_R(t)/E_R(0.)',
'Jacobi':r'E-Omega_p L',
'Jacobinorm':r'(E-Omega_p L)(t)/(E-Omega_p L)(0)'}
labeldict.update({'ra':'RA (deg)',
'dec':'Dec (deg)',
'll':'Galactic lon (deg)',
'bb':'Galactic lat (deg)',
'dist':'distance (kpc)',
'pmra':'pmRA (mas/yr)',
'pmdec':'pmDec (mas/yr)',
'pmll':'pmGlon (mas/yr)',
'pmbb':'pmGlat (mas/yr)',
'vlos':'line-of-sight vel (km/s)',
'helioX':'X (kpc)',
'helioY':'Y (kpc)',
'helioZ':'Z (kpc)',
'U':'U (km/s)',
'V':'V (km/s)',
'W':'W (km/s)'})
# Cannot be using Quantity output
kwargs['quantity']= False
#Defaults
if not 'd1' in kwargs and not 'd2' in kwargs:
if len(self.vxvv) == 3:
d1= 'R'
d2= 'vR'
elif len(self.vxvv) == 4:
d1= 'x'
d2= 'y'
elif len(self.vxvv) == 2:
d1= 'x'
d2= 'vx'
elif len(self.vxvv) == 5 or len(self.vxvv) == 6:
d1= 'R'
d2= 'z'
elif not 'd1' in kwargs:
d2= kwargs.pop('d2')
d1= 't'
elif not 'd2' in kwargs:
d1= kwargs.pop('d1')
d2= 't'
else:
d1= kwargs.pop('d1')
d2= kwargs.pop('d2')
xs= []
ys= []
xlabels= []
ylabels= []
if isinstance(d1,str) or callable(d1):
d1s= [d1]
d2s= [d2]
else:
d1s= d1
d2s= d2
if len(d1s) > 3:
raise ValueError('Orbit.animate only works for up to three subplots')
all_xlabel= kwargs.get('xlabel',[None for d in d1])
all_ylabel= kwargs.get('ylabel',[None for d in d2])
for d1,d2, xlabel, ylabel in zip(d1s,d2s,all_xlabel,all_ylabel):
#Get x and y for each subplot
x= self._parse_plot_quantity(d1,**kwargs)
y= self._parse_plot_quantity(d2,**kwargs)
xs.append(x)
ys.append(y)
if xlabel is None:
xlabels.append(labeldict.get(d1,r'\mathrm{No\ xlabel\ specified}'))
else:
xlabels.append(xlabel)
if ylabel is None:
ylabels.append(labeldict.get(d2,r'\mathrm{No\ ylabel\ specified}'))
else:
ylabels.append(ylabel)
kwargs.pop('ro',None)
kwargs.pop('vo',None)
kwargs.pop('obs',None)
kwargs.pop('use_physical',None)
kwargs.pop('pot',None)
kwargs.pop('OmegaP',None)
kwargs.pop('quantity',None)
width= kwargs.pop('width',600)
height= kwargs.pop('height',400)
load_jslibs= kwargs.pop('load_jslibs',True)
if load_jslibs:
load_jslibs_code= """</script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/require.js/2.3.5/require.min.js"></script>
<script src="https://ajax.googleapis.com/ajax/libs/jquery/3.3.1/jquery.min.js"></script>
<script>
"""
else:
load_jslibs_code= ""
# Dump data to HTML
nplots= len(xs)
jsonDict= {}
jsonDict['x']= xs[0].tolist()
jsonDict['y']= ys[0].tolist()
for ii in range(1,nplots):
jsonDict['x%i' % (ii+1)]= xs[ii].tolist()
jsonDict['y%i' % (ii+1)]= ys[ii].tolist()
json_filename= kwargs.pop('json_filename',None)
if json_filename is None:
jd= json.dumps(jsonDict)
json_code= """ let data= JSON.parse('{jd}');""".format(jd=jd)
close_json_code= ""
else:
with open(json_filename,'w') as jfile:
json.dump(jsonDict,jfile)
json_code= """Plotly.d3.json('{jfilename}',function(data){{""".format(jfilename=json_filename)
close_json_code= "});"
self.divid= 'galpy-'\
+''.join(choice(ascii_lowercase) for i in range(24))
button_width= 419.51+4.*10.
button_margin_left= int(nu.round((width-button_width)/2.))
if button_margin_left < 0: button_margin_left= 0
# Layout for multiple plots
if len(d1s) == 1:
xmin= [0,0,0]
xmax= [1,1,1]
elif len(d1s) == 2:
xmin= [0,0.55,0]
xmax= [0.45,1,1]
elif len(d1s) == 3:
xmin= [0,0.365,0.73]
xmax= [0.27,0.635,1]
layout= """{{
xaxis: {{
title: '{xlabel}',
domain: [{xmin},{xmax}],
}},
yaxis: {{title: '{ylabel}'}},
margin: {{t: 20}},
hovermode: 'closest',
showlegend: false,
""".format(xlabel=xlabels[0],ylabel=ylabels[0],xmin=xmin[0],xmax=xmax[0])
for ii in range(1,nplots):
layout+= """ xaxis{idx}: {{
title: '{xlabel}',
anchor: 'y{idx}',
domain: [{xmin},{xmax}],
}},
yaxis{idx}: {{
title: '{ylabel}',
anchor: 'x{idx}',
}},
""".format(idx=ii+1,xlabel=xlabels[ii],ylabel=ylabels[ii],
xmin=xmin[ii],xmax=xmax[ii])
layout+="""}"""
# Additional traces for additional plots
if len(d1s) > 1:
setup_trace2= """
let trace3= {{
x: data.x2.slice(0,numPerFrame),
y: data.y2.slice(0,numPerFrame),
xaxis: 'x2',
yaxis: 'y2',
mode: 'lines',
line: {{
shape: 'spline',
width: 0.8,
color: '#1f77b4',
}},
}};
let trace4= {{
x: data.x2.slice(0,numPerFrame),
y: data.y2.slice(0,numPerFrame),
xaxis: 'x2',
yaxis: 'y2',
mode: 'lines',
line: {{
shape: 'spline',
width: 3.,
color: '#d62728',
}},
}};
""".format(divid=self.divid) # not used!
delete_trace4= """Plotly.deleteTraces('{divid}',3);""".format(divid=self.divid)
delete_trace3= """Plotly.deleteTraces('{divid}',0);""".format(divid=self.divid)
update_trace34= """
trace_slice_begin+= trace_slice_len;
Plotly.extendTraces('{divid}', {{
x: [data.x2.slice(trace_slice_begin,trace_slice_end)],
y: [data.y2.slice(trace_slice_begin,trace_slice_end)],
}}, [2]);
trace_slice_begin-= trace_slice_len;
trace4= {{
x: [data.x2.slice(trace_slice_begin,trace_slice_end)],
y: [data.y2.slice(trace_slice_begin,trace_slice_end)],
}},
Plotly.restyle('{divid}',trace4,[3]);
""".format(divid=self.divid)
else:
setup_trace2= """
let traces= [trace1,trace2];
"""
delete_trace4= ""
delete_trace3= ""
update_trace34= ""
if len(d1s) > 2:
setup_trace3= """
let trace5= {{
x: data.x3.slice(0,numPerFrame),
y: data.y3.slice(0,numPerFrame),
xaxis: 'x3',
yaxis: 'y3',
mode: 'lines',
line: {{
shape: 'spline',
width: 0.8,
color: '#1f77b4',
}},
}};
let trace6= {{
x: data.x3.slice(0,numPerFrame),
y: data.y3.slice(0,numPerFrame),
xaxis: 'x3',
yaxis: 'y3',
mode: 'lines',
line: {{
shape: 'spline',
width: 3.,
color: '#d62728',
}},
}};
let traces= [trace1,trace2,trace3,trace4,trace5,trace6];
""".format(divid=self.divid)
delete_trace6= """Plotly.deleteTraces('{divid}',5);""".format(divid=self.divid)
delete_trace5= """Plotly.deleteTraces('{divid}',0);""".format(divid=self.divid)
update_trace56= """
trace_slice_begin+= trace_slice_len;
Plotly.extendTraces('{divid}', {{
x: [data.x3.slice(trace_slice_begin,trace_slice_end)],
y: [data.y3.slice(trace_slice_begin,trace_slice_end)],
}}, [4]);
trace_slice_begin-= trace_slice_len;
trace6= {{
x: [data.x3.slice(trace_slice_begin,trace_slice_end)],
y: [data.y3.slice(trace_slice_begin,trace_slice_end)],
}},
Plotly.restyle('{divid}',trace6,[5]);
""".format(divid=self.divid)
elif len(d1s) > 1:
setup_trace3= """
let traces= [trace1,trace2,trace3,trace4];
"""
delete_trace5= ""
delete_trace6= ""
update_trace56= ""
else:
setup_trace3= ""
delete_trace5= ""
delete_trace6= ""
update_trace56= ""
return HTML("""
<style>
.galpybutton {{
background-color:#ffffff;
-moz-border-radius:16px;
-webkit-border-radius:16px;
border-radius:16px;
border:1px solid #1f77b4;
display:inline-block;
cursor:pointer;
color:#1f77b4;
font-family:Courier;
font-size:17px;
padding:8px 10px;
text-decoration:none;
text-shadow:0px 1px 0px #2f6627;
}}
.galpybutton:hover {{
background-color:#ffffff;
}}
.galpybutton:active {{
position:relative;
top:1px;
}}
.galpybutton:focus{{
outline:0;
}}
</style>
<div id='{divid}' style='width:{width}px;height:{height}px;'></div>
<div class="controlbutton" id="{divid}-play" style="margin-left:{button_margin_left}px;display: inline-block;">
<button class="galpybutton">Play</button></div>
<div class="controlbutton" id="{divid}-pause" style="margin-left:10px;display: inline-block;">
<button class="galpybutton">Pause</button></div>
<div class="controlbutton" id="{divid}-timestwo" style="margin-left:10px;display: inline-block;">
<button class="galpybutton">Speed<font face="Arial"> </font>x<font face="Arial"> </font>2</button></div>
<div class="controlbutton" id="{divid}-timeshalf" style="margin-left:10px;display: inline-block;">
<button class="galpybutton">Speed<font face="Arial"> </font>/<font face="Arial"> </font>2</button></div>
<div class="controlbutton" id="{divid}-replay" style="margin-left:10px;display: inline-block;">
<button class="galpybutton">Replay</button></div>
<script>
require.config({{
paths: {{
Plotly: 'https://cdn.plot.ly/plotly-latest.min',
}}
}});
{load_jslibs_code}
require(['Plotly'], function (Plotly) {{
{json_code}
let layout = {layout};
let numPerFrame= 5;
let cnt= 1;
let interval;
let trace_slice_len;
let trace_slice_begin;
let trace_slice_end;
setup_trace();
$('.controlbutton button').click(function() {{
let button_type= this.parentNode.id;
if ( button_type === '{divid}-play' ) {{
clearInterval(interval);
interval= animate_trace();
}}
else if ( button_type === '{divid}-pause' )
clearInterval(interval);
else if ( button_type === '{divid}-timestwo' ) {{
cnt/= 2;
numPerFrame*= 2;
}}
else if ( button_type === '{divid}-timeshalf' ) {{
cnt*= 2;
numPerFrame/= 2;
}}
else if ( button_type === '{divid}-replay' ) {{
cnt= 1;
try {{ // doesn't exist if animation has already ended
{delete_trace6}
{delete_trace4}
Plotly.deleteTraces('{divid}',1);
}}
catch (err) {{
}}
Plotly.deleteTraces('{divid}',0);
{delete_trace3}
{delete_trace5}
clearInterval(interval);
setup_trace();
interval= animate_trace();
}}
}});
function setup_trace() {{
let trace1= {{
x: data.x.slice(0,numPerFrame),
y: data.y.slice(0,numPerFrame),
mode: 'lines',
line: {{
shape: 'spline',
width: 0.8,
color: '#1f77b4',
}},
}};
let trace2= {{
x: data.x.slice(0,numPerFrame),
y: data.y.slice(0,numPerFrame),
mode: 'lines',
line: {{
shape: 'spline',
width: 3.,
color: '#d62728',
}},
}};
{setup_trace2}
{setup_trace3}
Plotly.plot('{divid}',traces,layout);
}}
function animate_trace() {{
return setInterval(function() {{
// Make sure narrow and thick trace end in the same
// and the highlighted length has constant length
trace_slice_len= Math.floor(numPerFrame);
if ( trace_slice_len < 1) trace_slice_len= 1;
trace_slice_begin= Math.floor(cnt*numPerFrame);
trace_slice_end= Math.floor(Math.min(cnt*numPerFrame+trace_slice_len,data.x.length-1));
Plotly.extendTraces('{divid}', {{
x: [data.x.slice(trace_slice_begin,trace_slice_end)],
y: [data.y.slice(trace_slice_begin,trace_slice_end)],
}}, [0]);
trace_slice_begin-= trace_slice_len;
trace2= {{
x: [data.x.slice(trace_slice_begin,trace_slice_end)],
y: [data.y.slice(trace_slice_begin,trace_slice_end)],
}};
Plotly.restyle('{divid}',trace2,[1]);
{update_trace34}
{update_trace56}
cnt+= 1;
if(cnt*numPerFrame+trace_slice_len > data.x.length) {{
clearInterval(interval);
{delete_trace6}
{delete_trace4}
Plotly.deleteTraces('{divid}',1);
}}
}}, 30);
}}
{close_json_code}}});
</script>""".format(json_code=json_code,close_json_code=close_json_code,
divid=self.divid,width=width,height=height,
button_margin_left=button_margin_left,
layout=layout,load_jslibs_code=load_jslibs_code,
setup_trace2=setup_trace2,setup_trace3=setup_trace3,
delete_trace4=delete_trace4,delete_trace6=delete_trace6,
delete_trace3=delete_trace3,delete_trace5=delete_trace5,
update_trace34=update_trace34,
update_trace56=update_trace56))
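Typical galpy usage, sketched under the assumption of a Jupyter notebook environment:
from galpy.orbit import Orbit
from galpy.potential import MWPotential2014
import numpy

o = Orbit()                        # recent galpy versions default to the Sun
ts = numpy.linspace(0., 10., 1001)
o.integrate(ts, MWPotential2014)
o.animate(d1='x', d2='y')          # returns an IPython.display.HTML object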
def add_user(self, workspace, params={}, **options):
"""The user can be referenced by their globally unique user ID or their email address.
Returns the full user record for the invited user.
Parameters
----------
workspace : {Id} The workspace or organization to invite the user to.
[data] : {Object} Data for the request
- user : {String} An identifier for the user. Can be one of an email address,
the globally unique identifier for the user, or the keyword `me`
to indicate the current user making the request.
"""
path = "/workspaces/%s/addUser" % (workspace)
return self.client.post(path, params, **options)
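A sketch, assuming an authenticated client in the style of the Asana API wrapper; the workspace id is illustrative:
client.workspaces.add_user(12345, {"user": "me"})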
def ensure_dir_exists(f, fullpath=False):
"""
Ensure the existence of the (parent) directory of f
"""
if fullpath is False:
# Get parent directory
d = os.path.dirname(f)
else:
# Create the full path
d = f
if not os.path.exists(d):
os.makedirs(d)
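Both call forms, as a quick illustration:
ensure_dir_exists("/tmp/demo/report.txt")                # creates /tmp/demo if needed
ensure_dir_exists("/tmp/demo/full/tree", fullpath=True)  # creates the whole path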
def preprocess(input_file,
output_file,
defines=None,
options=None,
content_types_db=None,
_preprocessed_files=None,
_depth=0):
"""
Preprocesses the specified file.
:param input_file:
The input file object to preprocess.
:param output_file:
The output file object (NOT a path).
:param defines:
a dictionary of defined variables that will be
understood in preprocessor statements. Keys must be strings and,
currently, only the truth value of any key's value matters.
:param options:
A ``Namespace`` of command-line options.
:param content_types_db:
is an instance of ``ContentTypesDatabase``.
:param _preprocessed_files:
(for internal use only) is used to ensure files
are not recursively preprocessed.
:param _depth:
When the call reaches _depth == 0, the output file is actually
written. For all internal recursive calls _depth == 1.
:return:
Modified dictionary of defines or raises ``PreprocessorError`` if
an error occurred.
"""
# Options that can later be turned into function parameters.
include_paths = options.include_paths
should_keep_lines = options.should_keep_lines
should_substitute = options.should_substitute
default_content_type = options.default_content_type
input_filename = input_file.name
defines = defines or {}
# Guard against recursive #include cycles.
_preprocessed_files = _preprocessed_files or []
input_file_absolute_path = absolute_path(input_filename)
if input_file_absolute_path in _preprocessed_files:
raise PreprocessorError("detected recursive #include of '%s'"\
% input_filename)
_preprocessed_files.append(input_file_absolute_path)
# Determine the content type and comment info for the input file.
comment_groups = content_types_db.get_comment_group_for_path(input_filename, default_content_type)
statement_regexps = get_statement_regexps(comment_groups)
# Process the input file.
# (Would be helpful if I knew anything about lexing and parsing
# simple grammars.)
input_lines = input_file.readlines()
if _depth == 0:
# Only at recursion depth 0 is the temporary buffer created.
temp_output_buffer = StringIO()
else:
# At deeper levels, the temporary buffer is the output file.
temp_output_buffer = output_file
defines['__FILE__'] = input_filename
SKIP, EMIT = range(2) # states
states = [(EMIT, # a state is (<emit-or-skip-lines-in-this-section>,
0, # <have-emitted-in-this-if-block>,
0)] # <have-seen-'else'-in-this-if-block>)
line_number = 0
for line in input_lines:
line_number += 1
logger.debug("line %d: %r", line_number, line)
defines['__LINE__'] = line_number
# Is this line a preprocessor stmt line?
#XXX Could probably speed this up by optimizing common case of
# line NOT being a preprocessor stmt line.
for statement_regexp in statement_regexps:
match = statement_regexp.match(line)
if match:
break
else:
match = None
if match:
op = match.group("op")
logger.debug("%r stmt (states: %r)", op, states)
if op == "define":
if not (states and states[-1][0] == SKIP):
var, val = match.group("var", "val")
if val is not None:
try:
val = eval(val, {}, {})
except:
pass
defines[var] = val
elif op == "undef":
if not (states and states[-1][0] == SKIP):
var = match.group("var")
try:
del defines[var]
except KeyError:
pass
elif op == "include":
if not (states and states[-1][0] == SKIP):
if "var" in match.groupdict():
# This is the second include form: #include VAR
var = match.group("var")
f = defines[var]
else:
# This is the first include form: #include "path"
f = match.group("fname")
for d in [os.path.dirname(input_filename)] + include_paths:
fname = os.path.normpath(os.path.join(d, f))
if os.path.exists(fname):
break
else:
raise PreprocessorError(
"could not find #include'd file "\
"\"%s\" on include path: %r"\
% (f, include_paths))
with open(fname, 'rb') as f:
defines = preprocess(f,
temp_output_buffer,
defines=defines,
options=options,
content_types_db=content_types_db,
_preprocessed_files=_preprocessed_files,
_depth=1)
elif op in ("if", "ifdef", "ifndef"):
if op == "if":
expr = match.group("expr")
elif op == "ifdef":
expr = "defined('%s')" % match.group("expr")
elif op == "ifndef":
expr = "not defined('%s')" % match.group("expr")
try:
if states and states[-1][0] == SKIP:
                    # We are nested in a SKIP-portion of an if-block.
states.append((SKIP, 0, 0))
elif _evaluate(expr, defines):
states.append((EMIT, 1, 0))
else:
states.append((SKIP, 0, 0))
except KeyError:
raise PreprocessorError("use of undefined variable in "\
"#%s stmt" % op, defines['__FILE__']
,
defines['__LINE__'], line)
elif op == "elif":
expr = match.group("expr")
try:
if states[-1][2]: # already had #else in this if-block
raise PreprocessorError("illegal #elif after #else in "\
"same #if block",
defines['__FILE__'],
defines['__LINE__'], line)
elif states[-1][1]: # if have emitted in this if-block
states[-1] = (SKIP, 1, 0)
elif states[:-1] and states[-2][0] == SKIP:
                # We are nested in a SKIP-portion of an if-block.
states[-1] = (SKIP, 0, 0)
elif _evaluate(expr, defines):
states[-1] = (EMIT, 1, 0)
else:
states[-1] = (SKIP, 0, 0)
except IndexError:
raise PreprocessorError("#elif stmt without leading #if "\
"stmt", defines['__FILE__'],
defines['__LINE__'], line)
elif op == "else":
try:
if states[-1][2]: # already had #else in this if-block
raise PreprocessorError("illegal #else after #else in "\
"same #if block",
defines['__FILE__'],
defines['__LINE__'], line)
elif states[-1][1]: # if have emitted in this if-block
states[-1] = (SKIP, 1, 1)
elif states[:-1] and states[-2][0] == SKIP:
                # We are nested in a SKIP-portion of an if-block.
states[-1] = (SKIP, 0, 1)
else:
states[-1] = (EMIT, 1, 1)
except IndexError:
raise PreprocessorError("#else stmt without leading #if "\
"stmt", defines['__FILE__'],
defines['__LINE__'], line)
elif op == "endif":
try:
states.pop()
except IndexError:
raise PreprocessorError("#endif stmt without leading #if"\
"stmt", defines['__FILE__'],
defines['__LINE__'], line)
elif op == "error":
if not (states and states[-1][0] == SKIP):
error = match.group("error")
raise PreprocessorError("#error: " + error,
defines['__FILE__'],
defines['__LINE__'], line)
logger.debug("states: %r", states)
if should_keep_lines:
temp_output_buffer.write("\n")
else:
try:
if states[-1][0] == EMIT:
logger.debug("emit line (%s)" % states[-1][1])
# Substitute all defines into line.
# XXX Should avoid recursive substitutions. But that
# would be a pain right now.
sline = line
if should_substitute:
for name in reversed(sorted(defines, key=len)):
value = defines[name]
sline = sline.replace(name, str(value))
temp_output_buffer.write(sline)
elif should_keep_lines:
logger.debug("keep blank line (%s)" % states[-1][1])
temp_output_buffer.write("\n")
else:
logger.debug("skip line (%s)" % states[-1][1])
except IndexError:
raise PreprocessorError("superfluous #endif before this line",
defines['__FILE__'],
defines['__LINE__'])
if len(states) > 1:
raise PreprocessorError("unterminated #if block", defines['__FILE__'],
defines['__LINE__'])
elif len(states) < 1:
raise PreprocessorError("superfluous #endif on or before this line",
defines['__FILE__'], defines['__LINE__'])
#if temp_output_buffer != output_file:
# temp_output_buffer.close()
if _depth == 0:
output_file.write(temp_output_buffer.getvalue())
temp_output_buffer.close()
return defines
|
Preprocesses the specified file.
:param input_file:
    The input file object (its path is taken from ``input_file.name``).
:param output_file:
    The output file object (NOT a path).
:param defines:
a dictionary of defined variables that will be
understood in preprocessor statements. Keys must be strings and,
currently, only the truth value of any key's value matters.
:param options:
A ``Namespace`` of command-line options.
:param content_types_db:
is an instance of ``ContentTypesDatabase``.
:param _preprocessed_files:
(for internal use only) is used to ensure files
are not recursively preprocessed.
:param _depth:
When the call reaches _depth == 0, the output file is actually
written. For all internal recursive calls _depth == 1.
:return:
Modified dictionary of defines or raises ``PreprocessorError`` if
an error occurred.
|
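A minimal usage sketch for ``preprocess``, assuming ``ctdb`` is a ``ContentTypesDatabase`` built elsewhere and that an ``argparse.Namespace`` stands in for the parsed command-line options the function actually reads:
import argparse

# Only these four option attributes are read by preprocess().
opts = argparse.Namespace(include_paths=[],
                          should_keep_lines=False,
                          should_substitute=True,
                          default_content_type="Text")
with open("template.txt") as fin, open("result.txt", "w") as fout:
    defines = preprocess(fin, fout,
                         defines={"DEBUG": 1},
                         options=opts,
                         content_types_db=ctdb)  # ctdb: assumed instance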
def set_leaf_dist(self, attr_value, dist):
"""
Sets the probability distribution at a leaf node.
"""
assert self.attr_name
assert self.tree.data.is_valid(self.attr_name, attr_value), \
"Value %s is invalid for attribute %s." \
% (attr_value, self.attr_name)
if self.is_continuous_class:
assert isinstance(dist, CDist)
assert self.attr_name
self._attr_value_cdist[self.attr_name][attr_value] = dist.copy()
# self.n += dist.count
else:
assert isinstance(dist, DDist)
# {attr_name:{attr_value:count}}
self._attr_value_counts[self.attr_name][attr_value] += 1
# {attr_name:total}
self._attr_value_count_totals[self.attr_name] += 1
# {attr_name:{attr_value:{class_value:count}}}
for cls_value, cls_count in iteritems(dist.counts):
self._attr_class_value_counts[self.attr_name][attr_value] \
[cls_value] += cls_count
|
Sets the probability distribution at a leaf node.
|
def init_db(self):
"""
Init database and prepare tables
"""
# database file
db_path = self.get_data_file("data.sqlite")
        # connect and create cursor
self.db = sqlite3.connect(db_path)
self.cursor = self.db.cursor()
# prep tables
self.db_exec('''
CREATE TABLE IF NOT EXISTS shortcuts (
id INTEGER PRIMARY KEY AUTOINCREMENT,
name TEXT NOT NULL,
path TEXT NOT NULL,
command TEXT NOT NULL
)
''')
|
Init database and prepare tables
|
def list_build_set_records(id=None, name=None, page_size=200, page_index=0, sort="", q=""):
"""
List all build set records for a BuildConfigurationSet
"""
content = list_build_set_records_raw(id, name, page_size, page_index, sort, q)
if content:
return utils.format_json_list(content)
|
List all build set records for a BuildConfigurationSet
|
def template_instances(cls, dataset, capacity=0):
"""
Uses the Instances as template to create an empty dataset.
:param dataset: the original dataset
:type dataset: Instances
:param capacity: how many data rows to reserve initially (see compactify)
:type capacity: int
:return: the empty dataset
:rtype: Instances
"""
return Instances(
javabridge.make_instance(
"weka/core/Instances", "(Lweka/core/Instances;I)V", dataset.jobject, capacity))
|
Uses the Instances as template to create an empty dataset.
:param dataset: the original dataset
:type dataset: Instances
:param capacity: how many data rows to reserve initially (see compactify)
:type capacity: int
:return: the empty dataset
:rtype: Instances
|
def entry_point():
""" An entry point for setuptools. This is required because
`if __name__ == '__main__'` is not fired when the entry point
is 'main()'. This just wraps the old behavior in a function so
it can be called from setuptools.
"""
try:
mainret = main()
except (EOFError, KeyboardInterrupt):
print_err('\nUser cancelled.\n')
mainret = 2
except BrokenPipeError:
print_err('\nBroken pipe, input/output was interrupted.\n')
mainret = 3
except InvalidArg as exarg:
handle_err(exarg.as_colr())
mainret = 4
except ValueError as exnum:
handle_err(exnum)
mainret = 4
sys.exit(mainret)
|
An entry point for setuptools. This is required because
`if __name__ == '__main__'` is not fired when the entry point
is 'main()'. This just wraps the old behavior in a function so
it can be called from setuptools.
|
def file_ns_handler(importer, path_item, packageName, module):
"""Compute an ns-package subpath for a filesystem or zipfile importer"""
subpath = os.path.join(path_item, packageName.split('.')[-1])
normalized = _normalize_cached(subpath)
for item in module.__path__:
if _normalize_cached(item) == normalized:
break
else:
# Only return the path if it's not already there
return subpath
|
Compute an ns-package subpath for a filesystem or zipfile importer
|
def set_min(self, fmin):
"""
Updates minimum value
"""
if round(100000*fmin) != 100000*fmin:
raise DriverError('utils.widgets.Expose.set_min: ' +
'fmin must be a multiple of 0.00001')
self.fmin = fmin
self.set(self.fmin)
|
Updates minimum value
|
def _from_dict(cls, _dict):
"""Initialize a ListConfigurationsResponse object from a json dictionary."""
args = {}
if 'configurations' in _dict:
args['configurations'] = [
Configuration._from_dict(x)
for x in (_dict.get('configurations'))
]
return cls(**args)
|
Initialize a ListConfigurationsResponse object from a json dictionary.
|
def __protocolize(base_url):
"""Internal add-protocol-to-url helper"""
if not base_url.startswith("http://") and not base_url.startswith("https://"):
base_url = "https://" + base_url
# Some API endpoints can't handle extra /'s in path requests
base_url = base_url.rstrip("/")
return base_url
|
Internal add-protocol-to-url helper
|
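A couple of illustrative calls (the double underscore only triggers name mangling inside a class body, so a module-level helper like this is callable as-is):
__protocolize("mastodon.social/")    # -> 'https://mastodon.social'
__protocolize("http://example.com")  # -> 'http://example.com'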
def U(self):
"""
Property to support lazy evaluation of residuals
"""
if self._U is None:
sinv = N.diag(1/self.singular_values)
self._U = dot(self.arr,self.V.T,sinv)
return self._U
|
Property to support lazy evaluation of residuals
|
def create_user(name,
username,
email,
password,
token_manager=None,
app_url=defaults.APP_URL):
"""
create a new user with the specified name, username email and password
"""
headers = token_manager.get_access_token_headers()
auth_url = environment.get_auth_url(app_url=app_url)
url = "%s/api/v1/accounts" % auth_url
payload = {
'name': name,
'username': username,
'email': email,
'password': password
}
response = requests.post(url,
data=json.dumps(payload),
headers=headers)
if response.status_code == 201:
return response.json()
else:
raise JutException('Error %s: %s' % (response.status_code, response.text))
|
create a new user with the specified name, username email and password
|
def get_repository_search_session(self):
"""Gets the repository search session.
return: (osid.repository.RepositorySearchSession) - a
RepositorySearchSession
raise: OperationFailed - unable to complete request
raise: Unimplemented - supports_repository_search() is false
compliance: optional - This method must be implemented if
supports_repository_search() is true.
"""
if not self.supports_repository_search():
raise Unimplemented()
try:
from . import sessions
except ImportError:
raise # OperationFailed()
try:
session = sessions.RepositorySearchSession(proxy=self._proxy,
runtime=self._runtime)
except AttributeError:
raise # OperationFailed()
return session
|
Gets the repository search session.
return: (osid.repository.RepositorySearchSession) - a
RepositorySearchSession
raise: OperationFailed - unable to complete request
raise: Unimplemented - supports_repository_search() is false
compliance: optional - This method must be implemented if
supports_repository_search() is true.
|
def getResults(uri):
'''
Method that recovers the text for each result in infobel.com
:param uri: Infobel uri
:return: A list of textual information to be processed
'''
# Using i3visio browser to avoid certain issues...
i3Browser = browser.Browser()
data = i3Browser.recoverURL(uri)
# Strings to be searched
regExp = "<!-- Results -->(.*)<!-- /Results -->"
# re.DOTALL is needed to match any character INCLUDING \n
results = re.findall(regExp, data, re.DOTALL)
return results
|
Method that recovers the text for each result in infobel.com
:param uri: Infobel uri
:return: A list of textual information to be processed
|
def search_converted_models(root=None):
"""
Searches for all converted models generated by
unit tests in folders tests and with function
*dump_data_and_model*.
"""
if root is None:
root = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "tests"))
root = os.path.normpath(root)
if not os.path.exists(root):
raise FileNotFoundError("Unable to find '{0}'.".format(root))
founds = glob.iglob("{0}/**/*.model.onnx".format(root), recursive=True)
keep = []
for found in founds:
onnx = found
basename = onnx[:-len(".model.onnx")]
data = basename + ".data.pkl"
expected = basename + ".expected.pkl"
res = dict(onnx=onnx, data=data, expected=expected)
ok = True
for k, v in res.items():
if not os.path.exists(v):
ok = False
if ok:
models = [basename + ".model.pkl", basename + ".model.keras"]
for model in models:
if os.path.exists(model):
res['model'] = model
break
if 'model' in res:
keep.append((basename, res))
keep.sort()
return [_[1] for _ in keep]
|
Searches for all converted models generated by
unit tests in folders tests and with function
*dump_data_and_model*.
|
def compute_tls13_handshake_secrets(self):
"""
Ciphers key and IV are updated accordingly for Handshake data.
self.handshake_messages should be ClientHello...ServerHello.
"""
if self.tls13_early_secret is None:
warning("No early secret. This is abnormal.")
hkdf = self.prcs.hkdf
self.tls13_handshake_secret = hkdf.extract(self.tls13_early_secret,
self.tls13_dhe_secret)
chts = hkdf.derive_secret(self.tls13_handshake_secret,
b"client handshake traffic secret",
b"".join(self.handshake_messages))
self.tls13_derived_secrets["client_handshake_traffic_secret"] = chts
shts = hkdf.derive_secret(self.tls13_handshake_secret,
b"server handshake traffic secret",
b"".join(self.handshake_messages))
self.tls13_derived_secrets["server_handshake_traffic_secret"] = shts
if self.connection_end == "server":
self.prcs.tls13_derive_keys(chts)
self.pwcs.tls13_derive_keys(shts)
elif self.connection_end == "client":
self.pwcs.tls13_derive_keys(chts)
self.prcs.tls13_derive_keys(shts)
|
Ciphers key and IV are updated accordingly for Handshake data.
self.handshake_messages should be ClientHello...ServerHello.
|
def get_output(self):
"""
        :yield: (stdout_line, stderr_line); one of the two is None per yield
Generator that outputs lines captured from stdout and stderr
These can be consumed to output on a widget in an IDE
"""
if self.process.poll() is not None:
self.close()
yield None, None
while not (self.stdout_queue.empty() and self.stderr_queue.empty()):
if not self.stdout_queue.empty():
line = self.stdout_queue.get().decode('utf-8')
yield line, None
if not self.stderr_queue.empty():
line = self.stderr_queue.get().decode('utf-8')
yield None, line
|
:yield: (stdout_line, stderr_line); one of the two is None per yield
Generator that outputs lines captured from stdout and stderr
These can be consumed to output on a widget in an IDE
|
def post_replicate(request):
"""MNReplication.replicate(session, sysmeta, sourceNode) → boolean."""
d1_gmn.app.views.assert_db.post_has_mime_parts(
request, (('field', 'sourceNode'), ('file', 'sysmeta'))
)
sysmeta_pyxb = d1_gmn.app.sysmeta.deserialize(request.FILES['sysmeta'])
d1_gmn.app.local_replica.assert_request_complies_with_replication_policy(
sysmeta_pyxb
)
pid = d1_common.xml.get_req_val(sysmeta_pyxb.identifier)
d1_gmn.app.views.assert_db.is_valid_pid_for_create(pid)
d1_gmn.app.local_replica.add_to_replication_queue(
request.POST['sourceNode'], sysmeta_pyxb
)
return d1_gmn.app.views.util.http_response_with_boolean_true_type()
|
MNReplication.replicate(session, sysmeta, sourceNode) → boolean.
|
def collapse_whitespace(message):
"""Collapses consecutive whitespace into a single space"""
return u' '.join(map(lambda s: s.strip(),
filter(None, message.strip().splitlines())))
|
Collapses consecutive whitespace into a single space
|
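A quick illustration; note that, despite the name, only the line structure is collapsed, while runs of spaces inside a single line are preserved:
collapse_whitespace(u"  first line \n\n   second   line  ")
# -> u'first line second   line'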
def createInput(self):
"""create a random input vector"""
print "-" * 70 + "Creating a random input vector" + "-" * 70
#clear the inputArray to zero before creating a new input vector
self.inputArray[0:] = 0
for i in range(self.inputSize):
#randrange returns 0 or 1
self.inputArray[i] = random.randrange(2)
|
create a random input vector
|
def is_local(self, hadoop_conf=None, hadoop_home=None):
"""\
Is Hadoop configured to run in local mode?
By default, it is. [pseudo-]distributed mode must be
explicitly configured.
"""
conf = self.hadoop_params(hadoop_conf, hadoop_home)
keys = ('mapreduce.framework.name',
'mapreduce.jobtracker.address',
'mapred.job.tracker')
for k in keys:
if conf.get(k, 'local').lower() != 'local':
return False
return True
|
\
Is Hadoop configured to run in local mode?
By default, it is. [pseudo-]distributed mode must be
explicitly configured.
|
def get_status(self, mxit_id, scope='profile/public'):
"""
Retrieve the Mxit user's current status
No user authentication required
"""
status = _get(
token=self.oauth.get_app_token(scope),
uri='/user/public/statusmessage/' + urllib.quote(mxit_id)
)
if status.startswith('"') and status.endswith('"'):
status = status[1:-1]
return status
|
Retrieve the Mxit user's current status
No user authentication required
|
def _create_binary_trigger(trigger):
"""Create an 8-bit binary trigger from an InputTrigger, TrueTrigger, FalseTrigger."""
ops = {
0: ">",
1: "<",
2: ">=",
3: "<=",
4: "==",
5: 'always'
}
op_codes = {y: x for x, y in ops.items()}
source = 0
if isinstance(trigger, TrueTrigger):
op_code = op_codes['always']
elif isinstance(trigger, FalseTrigger):
raise ArgumentError("Cannot express a never trigger in binary descriptor", trigger=trigger)
else:
op_code = op_codes[trigger.comp_string]
if trigger.use_count:
source = 1
return (op_code << 1) | source
|
Create an 8-bit binary trigger from an InputTrigger, TrueTrigger, FalseTrigger.
|
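The encoding packs the comparison op code into bits 1-7 and the count/value flag into bit 0. A worked example, assuming a hypothetical InputTrigger that compares with '>=' on the count:
op_code = 2                            # '>=' per the ops table above
source = 1                             # trigger.use_count was True
descriptor = (op_code << 1) | source   # -> 0b101 == 5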
def retention_policy_get(database,
name,
user=None,
password=None,
host=None,
port=None):
'''
Get an existing retention policy.
database
The database to operate on.
name
Name of the policy to modify.
CLI Example:
.. code-block:: bash
salt '*' influxdb08.retention_policy_get metrics default
'''
client = _client(user=user, password=password, host=host, port=port)
for policy in client.get_list_retention_policies(database):
if policy['name'] == name:
return policy
return None
|
Get an existing retention policy.
database
The database to operate on.
name
Name of the policy to modify.
CLI Example:
.. code-block:: bash
salt '*' influxdb08.retention_policy_get metrics default
|
def get_surveys(self):
"""Gets all surveys in account
Args:
None
Returns:
list: a list of all surveys
"""
payload = {
'Request': 'getSurveys',
'Format': 'JSON'
}
r = self._session.get(QUALTRICS_URL, params=payload)
output = r.json()
return output['Result']['Surveys']
|
Gets all surveys in account
Args:
None
Returns:
list: a list of all surveys
|
def samples(self, gp, Y_metadata=None):
"""
Returns a set of samples of observations based on a given value of the latent variable.
:param gp: latent variable
"""
        orig_shape = gp.shape
        gp = gp.flatten()
Ysim = np.array([np.random.normal(self.gp_link.transf(gpj), scale=np.sqrt(self.variance), size=1) for gpj in gp])
return Ysim.reshape(orig_shape)
|
Returns a set of samples of observations based on a given value of the latent variable.
:param gp: latent variable
|
def add_context(self, err_context, succ_context=None):
""" Prepend msg to add some context information
:param pmsg: context info
:return: None
"""
self.err_context = err_context
self.succ_context = succ_context
|
Prepend msg to add some context information
:param pmsg: context info
:return: None
|
def process(self, data=None):
"""Fetch incoming data from the Flask request object when no data is supplied
to the process method. By default, the RequestHandler expects the
incoming data to be sent as JSON.
"""
return super(RequestHandler, self).process(data=data or self.get_request_data())
|
Fetch incoming data from the Flask request object when no data is supplied
to the process method. By default, the RequestHandler expects the
incoming data to be sent as JSON.
|
def Romeo_2002(Re, eD):
r'''Calculates Darcy friction factor using the method in Romeo (2002)
[2]_ as shown in [1]_.
    .. math::
        \frac{1}{\sqrt{f_d}} = -2\log\left\{\frac{\epsilon}{3.7065D} -
        \frac{5.0272}{Re}\log\left[\frac{\epsilon}{3.827D} -
        \frac{4.567}{Re}\log\left(\left(\frac{\epsilon}{7.7918D}\right)^{0.9924} +
        \left(\frac{5.3326}{208.815+Re}\right)^{0.9345}\right)\right]\right\}
Parameters
----------
Re : float
Reynolds number, [-]
eD : float
Relative roughness, [-]
Returns
-------
fd : float
Darcy friction factor [-]
Notes
-----
Range is 3E3 <= Re <= 1.5E8; 0 <= eD <= 5E-2
Examples
--------
>>> Romeo_2002(1E5, 1E-4)
0.018530291219676177
References
----------
.. [1] Winning, H. and T. Coole. "Explicit Friction Factor Accuracy and
Computational Efficiency for Turbulent Flow in Pipes." Flow, Turbulence
and Combustion 90, no. 1 (January 1, 2013): 1-27.
doi:10.1007/s10494-012-9419-7
.. [2] Romeo, Eva, Carlos Royo, and Antonio Monzon."Improved Explicit
Equations for Estimation of the Friction Factor in Rough and Smooth
Pipes." Chemical Engineering Journal 86, no. 3 (April 28, 2002): 369-74.
doi:10.1016/S1385-8947(01)00254-6.
'''
    fd = (-2*log10(eD/3.7065 - 5.0272/Re*log10(
        eD/3.827 - 4.567/Re*log10(
            (eD/7.7918)**0.9924 + (5.3326/(208.815 + Re))**0.9345))))**-2
return fd
|
r'''Calculates Darcy friction factor using the method in Romeo (2002)
[2]_ as shown in [1]_.
.. math::
    \frac{1}{\sqrt{f_d}} = -2\log\left\{\frac{\epsilon}{3.7065D} -
    \frac{5.0272}{Re}\log\left[\frac{\epsilon}{3.827D} -
    \frac{4.567}{Re}\log\left(\left(\frac{\epsilon}{7.7918D}\right)^{0.9924} +
    \left(\frac{5.3326}{208.815+Re}\right)^{0.9345}\right)\right]\right\}
Parameters
----------
Re : float
Reynolds number, [-]
eD : float
Relative roughness, [-]
Returns
-------
fd : float
Darcy friction factor [-]
Notes
-----
Range is 3E3 <= Re <= 1.5E8; 0 <= eD <= 5E-2
Examples
--------
>>> Romeo_2002(1E5, 1E-4)
0.018530291219676177
References
----------
.. [1] Winning, H. and T. Coole. "Explicit Friction Factor Accuracy and
Computational Efficiency for Turbulent Flow in Pipes." Flow, Turbulence
and Combustion 90, no. 1 (January 1, 2013): 1-27.
doi:10.1007/s10494-012-9419-7
.. [2] Romeo, Eva, Carlos Royo, and Antonio Monzon."Improved Explicit
Equations for Estimation of the Friction Factor in Rough and Smooth
Pipes." Chemical Engineering Journal 86, no. 3 (April 28, 2002): 369-74.
doi:10.1016/S1385-8947(01)00254-6.
|
def get_source(self):
"""returns self._source"""
if self._source is None:
self.emit("}\n")
self._source = "\n".join(self.lines)
del self.lines
return self._source
|
returns self._source
|
def blake2b(data, digest_size=BLAKE2B_BYTES, key=b'',
salt=b'', person=b'',
encoder=nacl.encoding.HexEncoder):
"""
Hashes ``data`` with blake2b.
:param data: the digest input byte sequence
:type data: bytes
:param digest_size: the requested digest size; must be at most
:const:`BLAKE2B_BYTES_MAX`;
the default digest size is
:const:`BLAKE2B_BYTES`
:type digest_size: int
:param key: the key to be set for keyed MAC/PRF usage; if set, the key
must be at most :data:`~nacl.hash.BLAKE2B_KEYBYTES_MAX` long
:type key: bytes
:param salt: an initialization salt at most
:const:`BLAKE2B_SALTBYTES` long;
it will be zero-padded if needed
:type salt: bytes
:param person: a personalization string at most
:const:`BLAKE2B_PERSONALBYTES` long;
it will be zero-padded if needed
:type person: bytes
:param encoder: the encoder to use on returned digest
:type encoder: class
:returns: The hashed message.
:rtype: bytes
"""
digest = _b2b_hash(data, digest_size=digest_size, key=key,
salt=salt, person=person)
return encoder.encode(digest)
|
Hashes ``data`` with blake2b.
:param data: the digest input byte sequence
:type data: bytes
:param digest_size: the requested digest size; must be at most
:const:`BLAKE2B_BYTES_MAX`;
the default digest size is
:const:`BLAKE2B_BYTES`
:type digest_size: int
:param key: the key to be set for keyed MAC/PRF usage; if set, the key
must be at most :data:`~nacl.hash.BLAKE2B_KEYBYTES_MAX` long
:type key: bytes
:param salt: an initialization salt at most
:const:`BLAKE2B_SALTBYTES` long;
it will be zero-padded if needed
:type salt: bytes
:param person: a personalization string at most
:const:`BLAKE2B_PERSONALBYTES` long;
it will be zero-padded if needed
:type person: bytes
:param encoder: the encoder to use on returned digest
:type encoder: class
:returns: The hashed message.
:rtype: bytes
|
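A short usage sketch against PyNaCl's public API, where this function is exposed as ``nacl.hash.blake2b``:
import nacl.hash
import nacl.encoding

msg = b"hello"
# Hex-encoded digest with the default size and encoder.
digest = nacl.hash.blake2b(msg)
# Keyed MAC, returned as raw bytes instead of hex.
mac = nacl.hash.blake2b(msg, key=b"secret-key",
                        encoder=nacl.encoding.RawEncoder)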
def strframe(obj, extended=False):
"""
Return a string with a frame record pretty-formatted.
The record is typically an item in a list generated by `inspect.stack()
<https://docs.python.org/3/library/inspect.html#inspect.stack>`_).
:param obj: Frame record
:type obj: tuple
:param extended: Flag that indicates whether contents of the frame object
are printed (True) or not (False)
:type extended: boolean
:rtype: string
"""
# Stack frame -> (frame object [0], filename [1], line number of current
# line [2], function name [3], list of lines of context from source
# code [4], index of current line within list [5])
fname = normalize_windows_fname(obj[1])
ret = list()
ret.append(pcolor("Frame object ID: {0}".format(hex(id(obj[0]))), "yellow"))
ret.append("File name......: {0}".format(fname))
ret.append("Line number....: {0}".format(obj[2]))
ret.append("Function name..: {0}".format(obj[3]))
ret.append("Context........: {0}".format(obj[4]))
ret.append("Index..........: {0}".format(obj[5]))
if extended:
ret.append("f_back ID......: {0}".format(hex(id(obj[0].f_back))))
ret.append("f_builtins.....: {0}".format(obj[0].f_builtins))
ret.append("f_code.........: {0}".format(obj[0].f_code))
ret.append("f_globals......: {0}".format(obj[0].f_globals))
ret.append("f_lasti........: {0}".format(obj[0].f_lasti))
ret.append("f_lineno.......: {0}".format(obj[0].f_lineno))
ret.append("f_locals.......: {0}".format(obj[0].f_locals))
if hasattr(obj[0], "f_restricted"): # pragma: no cover
ret.append("f_restricted...: {0}".format(obj[0].f_restricted))
ret.append("f_trace........: {0}".format(obj[0].f_trace))
return "\n".join(ret)
|
Return a string with a frame record pretty-formatted.
The record is typically an item in a list generated by `inspect.stack()
<https://docs.python.org/3/library/inspect.html#inspect.stack>`_).
:param obj: Frame record
:type obj: tuple
:param extended: Flag that indicates whether contents of the frame object
are printed (True) or not (False)
:type extended: boolean
:rtype: string
|
def get_corrections_dict(self, entry):
"""
Returns the corrections applied to a particular entry.
Args:
entry: A ComputedEntry object.
Returns:
({correction_name: value})
"""
corrections = {}
for c in self.corrections:
val = c.get_correction(entry)
if val != 0:
corrections[str(c)] = val
return corrections
|
Returns the corrections applied to a particular entry.
Args:
entry: A ComputedEntry object.
Returns:
({correction_name: value})
|
def install_all_labels(stdout=None):
"""
Discover all subclasses of StructuredNode in your application and execute install_labels on each.
    Note: code must be loaded (imported) in order for a class to be discovered.
:param stdout: output stream
:return: None
"""
if not stdout:
stdout = sys.stdout
def subsub(kls): # recursively return all subclasses
return kls.__subclasses__() + [g for s in kls.__subclasses__() for g in subsub(s)]
stdout.write("Setting up indexes and constraints...\n\n")
i = 0
for cls in subsub(StructuredNode):
stdout.write('Found {0}.{1}\n'.format(cls.__module__, cls.__name__))
install_labels(cls, quiet=False, stdout=stdout)
i += 1
if i:
stdout.write('\n')
stdout.write('Finished {0} classes.\n'.format(i))
|
Discover all subclasses of StructuredNode in your application and execute install_labels on each.
Note: code must be loaded (imported) in order for a class to be discovered.
:param stdout: output stream
:return: None
|
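The recursive ``subsub`` helper is a generic way to walk a class hierarchy; a self-contained sketch of the same pattern:
class Base(object): pass
class Child(Base): pass
class Grandchild(Child): pass

def subsub(kls):
    # recursively return all subclasses, direct and indirect
    return kls.__subclasses__() + [g for s in kls.__subclasses__() for g in subsub(s)]

print(subsub(Base))  # [<class 'Child'>, <class 'Grandchild'>]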
def bank_chisq_from_filters(tmplt_snr, tmplt_norm, bank_snrs, bank_norms,
tmplt_bank_matches, indices=None):
""" This function calculates and returns a TimeSeries object containing the
bank veto calculated over a segment.
Parameters
----------
tmplt_snr: TimeSeries
The SNR time series from filtering the segment against the current
search template
tmplt_norm: float
The normalization factor for the search template
bank_snrs: list of TimeSeries
The precomputed list of SNR time series between each of the bank veto
templates and the segment
bank_norms: list of floats
The normalization factors for the list of bank veto templates
(usually this will be the same for all bank veto templates)
tmplt_bank_matches: list of floats
The complex overlap between the search template and each
of the bank templates
indices: {None, Array}, optional
Array of indices into the snr time series. If given, the bank chisq
will only be calculated at these values.
Returns
-------
bank_chisq: TimeSeries of the bank vetos
"""
if indices is not None:
tmplt_snr = Array(tmplt_snr, copy=False)
bank_snrs_tmp = []
for bank_snr in bank_snrs:
bank_snrs_tmp.append(bank_snr.take(indices))
        bank_snrs = bank_snrs_tmp
# Initialise bank_chisq as 0s everywhere
bank_chisq = zeros(len(tmplt_snr), dtype=real_same_precision_as(tmplt_snr))
# Loop over all the bank templates
for i in range(len(bank_snrs)):
bank_match = tmplt_bank_matches[i]
if (abs(bank_match) > 0.99):
# Not much point calculating bank_chisquared if the bank template
# is very close to the filter template. Can also hit numerical
# error due to approximations made in this calculation.
# The value of 2 is the expected addition to the chisq for this
# template
bank_chisq += 2.
continue
bank_norm = sqrt((1 - bank_match*bank_match.conj()).real)
bank_SNR = bank_snrs[i] * (bank_norms[i] / bank_norm)
tmplt_SNR = tmplt_snr * (bank_match.conj() * tmplt_norm / bank_norm)
bank_SNR = Array(bank_SNR, copy=False)
tmplt_SNR = Array(tmplt_SNR, copy=False)
bank_chisq += (bank_SNR - tmplt_SNR).squared_norm()
if indices is not None:
return bank_chisq
else:
return TimeSeries(bank_chisq, delta_t=tmplt_snr.delta_t,
epoch=tmplt_snr.start_time, copy=False)
|
This function calculates and returns a TimeSeries object containing the
bank veto calculated over a segment.
Parameters
----------
tmplt_snr: TimeSeries
The SNR time series from filtering the segment against the current
search template
tmplt_norm: float
The normalization factor for the search template
bank_snrs: list of TimeSeries
The precomputed list of SNR time series between each of the bank veto
templates and the segment
bank_norms: list of floats
The normalization factors for the list of bank veto templates
(usually this will be the same for all bank veto templates)
tmplt_bank_matches: list of floats
The complex overlap between the search template and each
of the bank templates
indices: {None, Array}, optional
Array of indices into the snr time series. If given, the bank chisq
will only be calculated at these values.
Returns
-------
bank_chisq: TimeSeries of the bank vetos
|
def set_slimits(self, row, column, min, max):
"""Set limits for the point sizes.
:param min: point size for the lowest value.
:param max: point size for the highest value.
"""
subplot = self.get_subplot_at(row, column)
subplot.set_slimits(min, max)
|
Set limits for the point sizes.
:param min: point size for the lowest value.
:param max: point size for the highest value.
|
def read_telenor(incoming_cdr, outgoing_cdr, cell_towers, describe=True,
warnings=True):
"""
Load user records from a CSV file in *telenor* format, which is only
applicable for call records.
.. warning:: ``read_telenor`` has been deprecated in bandicoot 0.4.
Parameters
----------
incoming_cdr : str
Path to the CSV file containing incoming records, using the following
scheme: ::
B_PARTY,A_PARTY,DURATION,B_CELL,CALL_DATE,CALL_TIME,CALL_TYPE
outgoing_cdr : str
Path to the CSV file containing outgoing records, using the following
scheme: ::
A_NUMBER,B_NUMBER,DURATION,B_CELL,CALL_DATE,CALL_TIME,CALL_TYPE
    cell_towers : str
        Path to the CSV file containing the positions of all cell towers.
describe : boolean
If describe is True, it will print a description of the loaded user to
the standard output.
"""
log.warn("read_telenor has been deprecated in bandicoot 0.4.")
import itertools
import csv
def parse_direction(code):
if code == 'MOC':
return 'out'
elif code == 'MTC':
return 'in'
else:
raise NotImplementedError
cells = None
with open(cell_towers, 'r') as f:
cell_towers_list = csv.DictReader(f)
cells = {}
for line in cell_towers_list:
if line['LONGITUDE'] != '' and line['LATITUDE'] != '':
latlon = (float(line['LONGITUDE']), float(line['LATITUDE']))
cell_id = line['CELLID_HEX']
cells[cell_id] = latlon
def parse_record(raw):
direction = parse_direction(raw['CALL_TYPE'].strip())
if direction == 'in':
contact = raw.get('A_PARTY', raw.get('A_NUMBER'))
cell_id = raw['B_CELL']
else:
contact = raw.get('B_PARTY', raw.get('B_NUMBER'))
cell_id = raw['A_CELL']
position = Position(antenna=cell_id, location=cells.get(cell_id))
_date_str = raw.get('CDATE', raw.get('CALL_DATE'))
_time_str = raw.get('CTIME', raw.get('CALL_TIME'))
_datetime = datetime.strptime(_date_str + _time_str,
"%Y%m%d%H:%M:%S")
r = Record(interaction='call',
direction=direction,
correspondent_id=contact,
call_duration=float(raw['DURATION'].strip()),
datetime=_datetime,
position=position)
return r
with open(incoming_cdr, 'r') as f_in:
incoming_ = list(map(parse_record, csv.DictReader(f_in)))
with open(outgoing_cdr, 'r') as f:
outgoing_ = list(map(parse_record, csv.DictReader(f)))
records = itertools.chain(incoming_, outgoing_)
name = incoming_cdr
user, errors = load(name, records, cells, warnings=None, describe=False)
if describe:
user.describe()
return user
|
Load user records from a CSV file in *telenor* format, which is only
applicable for call records.
.. warning:: ``read_telenor`` has been deprecated in bandicoot 0.4.
Parameters
----------
incoming_cdr : str
Path to the CSV file containing incoming records, using the following
scheme: ::
B_PARTY,A_PARTY,DURATION,B_CELL,CALL_DATE,CALL_TIME,CALL_TYPE
outgoing_cdr : str
Path to the CSV file containing outgoing records, using the following
scheme: ::
A_NUMBER,B_NUMBER,DURATION,B_CELL,CALL_DATE,CALL_TIME,CALL_TYPE
cell_towers : str
    Path to the CSV file containing the positions of all cell towers.
describe : boolean
If describe is True, it will print a description of the loaded user to
the standard output.
|
def _strip_colors(self, message: str) -> str:
""" Remove all of the color tags from this message. """
for c in self.COLORS:
message = message.replace(c, "")
return message
|
Remove all of the color tags from this message.
|
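A self-contained sketch of the same idea, assuming a hypothetical ``COLORS`` list of tag strings:
COLORS = ["<red>", "<green>", "<reset>"]  # hypothetical tag set

def strip_colors(message):
    # remove every known color tag from the message
    for c in COLORS:
        message = message.replace(c, "")
    return message

strip_colors("<red>error<reset>")  # -> 'error'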
def _apply_bias(inputs, outputs, channel_index, data_format, output_channels,
initializers, partitioners, regularizers):
"""Initialize and apply a bias to the outputs.
Figures out the shape of the bias vector, initialize it, and applies it.
Args:
inputs: A Tensor of shape `data_format`.
outputs: A Tensor of shape `data_format`.
channel_index: The index of the channel dimension in `inputs`.
data_format: Format of `inputs`.
output_channels: Channel dimensionality for `outputs`.
initializers: Optional dict containing ops to initialize the biases
(with key 'b').
partitioners: Optional dict containing partitioners to partition the
biases (with key 'b').
regularizers: Optional dict containing regularizers for the biases
(with key 'b').
Returns:
b: The constructed bias variable.
outputs: The `outputs` argument that has had a bias applied.
"""
bias_shape = (output_channels,)
if "b" not in initializers:
initializers["b"] = create_bias_initializer(bias_shape,
dtype=inputs.dtype)
b = tf.get_variable("b",
shape=bias_shape,
dtype=inputs.dtype,
initializer=initializers["b"],
partitioner=partitioners.get("b", None),
regularizer=regularizers.get("b", None))
# tf.nn.bias_add only supports 2 data formats.
if data_format in (DATA_FORMAT_NHWC, DATA_FORMAT_NCHW):
# Supported as-is.
outputs = tf.nn.bias_add(outputs, b, data_format=data_format)
else:
# Create our own bias vector.
bias_correct_dim = [1] * len(data_format)
bias_correct_dim[channel_index] = output_channels
outputs += tf.reshape(b, bias_correct_dim)
return b, outputs
|
Initialize and apply a bias to the outputs.
Figures out the shape of the bias vector, initialize it, and applies it.
Args:
inputs: A Tensor of shape `data_format`.
outputs: A Tensor of shape `data_format`.
channel_index: The index of the channel dimension in `inputs`.
data_format: Format of `inputs`.
output_channels: Channel dimensionality for `outputs`.
initializers: Optional dict containing ops to initialize the biases
(with key 'b').
partitioners: Optional dict containing partitioners to partition the
biases (with key 'b').
regularizers: Optional dict containing regularizers for the biases
(with key 'b').
Returns:
b: The constructed bias variable.
outputs: The `outputs` argument that has had a bias applied.
|
def lerfcc(x):
"""
Returns the complementary error function erfc(x) with fractional
    error everywhere less than 1.2e-7. Adapted from Numerical Recipes.
Usage: lerfcc(x)
"""
z = abs(x)
t = 1.0 / (1.0+0.5*z)
    ans = t * math.exp(-z*z - 1.26551223 +
                       t*(1.00002368 +
                       t*(0.37409196 +
                       t*(0.09678418 +
                       t*(-0.18628806 +
                       t*(0.27886807 +
                       t*(-1.13520398 +
                       t*(1.48851587 +
                       t*(-0.82215223 + t*0.17087277)))))))))
if x >= 0:
return ans
else:
return 2.0 - ans
|
Returns the complementary error function erfc(x) with fractional
error everywhere less than 1.2e-7. Adapted from Numerical Recipes.
Usage: lerfcc(x)
|
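With ``lerfcc`` in scope, the approximation can be sanity-checked against the standard library (``math.erfc`` exists since Python 2.7); a loose relative tolerance is used here, comfortably above the documented fractional error:
import math

for x in (-2.0, -0.5, 0.0, 1.0, 3.0):
    rel_err = abs(lerfcc(x) / math.erfc(x) - 1.0)
    assert rel_err < 1e-6, (x, rel_err)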
def parse_authn_request_response(self, xmlstr, binding, outstanding=None,
outstanding_certs=None, conv_info=None):
""" Deal with an AuthnResponse
:param xmlstr: The reply as a xml string
:param binding: Which binding that was used for the transport
:param outstanding: A dictionary with session IDs as keys and
the original web request from the user before redirection
as values.
:param outstanding_certs:
:param conv_info: Information about the conversation.
:return: An response.AuthnResponse or None
"""
if not getattr(self.config, 'entityid', None):
raise SAMLError("Missing entity_id specification")
if not xmlstr:
return None
kwargs = {
"outstanding_queries": outstanding,
"outstanding_certs": outstanding_certs,
"allow_unsolicited": self.allow_unsolicited,
"want_assertions_signed": self.want_assertions_signed,
"want_assertions_or_response_signed": self.want_assertions_or_response_signed,
"want_response_signed": self.want_response_signed,
"return_addrs": self.service_urls(binding=binding),
"entity_id": self.config.entityid,
"attribute_converters": self.config.attribute_converters,
"allow_unknown_attributes":
self.config.allow_unknown_attributes,
'conv_info': conv_info
}
try:
resp = self._parse_response(xmlstr, AuthnResponse,
"assertion_consumer_service",
binding, **kwargs)
except StatusError as err:
logger.error("SAML status error: %s", err)
raise
except UnravelError:
return None
except Exception as err:
logger.error("XML parse error: %s", err)
raise
if not isinstance(resp, AuthnResponse):
logger.error("Response type not supported: %s",
saml2.class_name(resp))
return None
if (resp.assertion and len(resp.response.encrypted_assertion) == 0 and
resp.assertion.subject.name_id):
self.users.add_information_about_person(resp.session_info())
logger.info("--- ADDED person info ----")
return resp
|
Deal with an AuthnResponse
:param xmlstr: The reply as a xml string
:param binding: Which binding that was used for the transport
:param outstanding: A dictionary with session IDs as keys and
the original web request from the user before redirection
as values.
:param outstanding_certs:
:param conv_info: Information about the conversation.
:return: An response.AuthnResponse or None
|
def calc_smoothpar_logistic2(metapar):
"""Return the smoothing parameter corresponding to the given meta
parameter when using |smooth_logistic2|.
Calculate the smoothing parameter value corresponding the meta parameter
value 2.5:
>>> from hydpy.auxs.smoothtools import calc_smoothpar_logistic2
>>> smoothpar = calc_smoothpar_logistic2(2.5)
Using this smoothing parameter value, the output of function
|smooth_logistic2| differs by
1 % from the related `true` discontinuous step function for the
input values -2.5 and 2.5 (which are located at a distance of 2.5
from the position of the discontinuity):
>>> from hydpy.cythons import smoothutils
>>> from hydpy import round_
>>> round_(smoothutils.smooth_logistic2(-2.5, smoothpar))
0.01
>>> round_(smoothutils.smooth_logistic2(2.5, smoothpar))
2.51
For zero or negative meta parameter values, a zero smoothing parameter
value is returned:
>>> round_(calc_smoothpar_logistic2(0.0))
0.0
>>> round_(calc_smoothpar_logistic2(-1.0))
0.0
"""
if metapar <= 0.:
return 0.
return optimize.newton(_error_smoothpar_logistic2,
.3 * metapar**.84,
_smooth_logistic2_derivative,
args=(metapar,))
|
Return the smoothing parameter corresponding to the given meta
parameter when using |smooth_logistic2|.
Calculate the smoothing parameter value corresponding the meta parameter
value 2.5:
>>> from hydpy.auxs.smoothtools import calc_smoothpar_logistic2
>>> smoothpar = calc_smoothpar_logistic2(2.5)
Using this smoothing parameter value, the output of function
|smooth_logistic2| differs by
1 % from the related `true` discontinuous step function for the
input values -2.5 and 2.5 (which are located at a distance of 2.5
from the position of the discontinuity):
>>> from hydpy.cythons import smoothutils
>>> from hydpy import round_
>>> round_(smoothutils.smooth_logistic2(-2.5, smoothpar))
0.01
>>> round_(smoothutils.smooth_logistic2(2.5, smoothpar))
2.51
For zero or negative meta parameter values, a zero smoothing parameter
value is returned:
>>> round_(calc_smoothpar_logistic2(0.0))
0.0
>>> round_(calc_smoothpar_logistic2(-1.0))
0.0
|
def _init(frame, log_level=ERROR):
'''
Enables explicit relative import in sub-modules when ran as __main__
:param log_level: module's inner logger level (equivalent to logging pkg)
'''
global _log_level
_log_level = log_level
# now we have access to the module globals
main_globals = frame.f_globals
# If __package__ set or it isn't the __main__, stop and return.
# (in some cases relative_import could be called once from outside
# __main__ if it was not called in __main__)
# (also a reload of relative_import could trigger this function)
pkg = main_globals.get('__package__')
file_ = main_globals.get('__file__')
if pkg or not file_:
_log_debug('Package solved or init was called from interactive '
'console. __package__=%r, __file__=%r' % (pkg, file_))
return
try:
_solve_pkg(main_globals)
except Exception as e:
_print_exc(e)
|
Enables explicit relative import in sub-modules when ran as __main__
:param log_level: module's inner logger level (equivalent to logging pkg)
|
def is_frameshift_len(mut_df):
"""Simply returns a series indicating whether each corresponding mutation
is a frameshift.
    This is based on the length of the indel, so it may be fooled by
    frameshifts at exon-intron boundaries or other odd cases.
Parameters
----------
mut_df : pd.DataFrame
mutation input file as a dataframe in standard format
Returns
-------
is_fs : pd.Series
        pandas series indicating if mutations are frameshifts
"""
# calculate length, 0-based coordinates
#indel_len = mut_df['End_Position'] - mut_df['Start_Position']
if 'indel len' in mut_df.columns:
indel_len = mut_df['indel len']
else:
indel_len = compute_indel_length(mut_df)
# only non multiples of 3 are frameshifts
is_fs = (indel_len%3)>0
# make sure no single base substitutions are counted
is_indel = (mut_df['Reference_Allele']=='-') | (mut_df['Tumor_Allele']=='-')
is_fs[~is_indel] = False
return is_fs
|
Simply returns a series indicating whether each corresponding mutation
is a frameshift.
This is based on the length of the indel, so it may be fooled by
frameshifts at exon-intron boundaries or other odd cases.
Parameters
----------
mut_df : pd.DataFrame
mutation input file as a dataframe in standard format
Returns
-------
is_fs : pd.Series
    pandas series indicating if mutations are frameshifts
|
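A small worked example, assuming the standard-format columns used above:
import pandas as pd

muts = pd.DataFrame({
    'Reference_Allele': ['-',  'ACT', 'A'],
    'Tumor_Allele':     ['AT', '-',   'T'],
    'indel len':        [2,    3,     0],
})
is_frameshift_len(muts)
# 0     True   (2-base insertion, not a multiple of 3)
# 1    False   (3-base deletion, in-frame)
# 2    False   (substitution, not an indel)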
def network_details():
"""
Returns details about the network links
"""
# Get IPv4 details
ipv4_addresses = [
info[4][0]
for info in socket.getaddrinfo(
socket.gethostname(), None, socket.AF_INET
)
]
# Add localhost
ipv4_addresses.extend(
info[4][0]
for info in socket.getaddrinfo("localhost", None, socket.AF_INET)
)
# Filter addresses
ipv4_addresses = sorted(set(ipv4_addresses))
try:
# Get IPv6 details
ipv6_addresses = [
info[4][0]
for info in socket.getaddrinfo(
socket.gethostname(), None, socket.AF_INET6
)
]
# Add localhost
ipv6_addresses.extend(
info[4][0]
for info in socket.getaddrinfo(
"localhost", None, socket.AF_INET6
)
)
# Filter addresses
ipv6_addresses = sorted(set(ipv6_addresses))
except (socket.gaierror, AttributeError):
# AttributeError: AF_INET6 is missing in some versions of Python
ipv6_addresses = None
return {
"IPv4": ipv4_addresses,
"IPv6": ipv6_addresses,
"host.name": socket.gethostname(),
"host.fqdn": socket.getfqdn(),
}
|
Returns details about the network links
|
def is_image(self, key):
"""Return True if variable is a PIL.Image image"""
data = self.model.get_data()
return isinstance(data[key], Image)
|
Return True if variable is a PIL.Image image
|
def apply_heuristic(self, node_a, node_b, heuristic=None):
"""
helper function to apply heuristic
"""
if not heuristic:
heuristic = self.heuristic
return heuristic(
abs(node_a.x - node_b.x),
abs(node_a.y - node_b.y))
|
helper function to apply heuristic
|
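The heuristic is any callable taking the absolute x- and y-distances; a sketch passing a Manhattan-distance heuristic explicitly (``finder`` is assumed to be an instance of the enclosing class, and ``node_a``/``node_b`` to expose ``x``/``y``):
manhattan = lambda dx, dy: dx + dy
finder.apply_heuristic(node_a, node_b, heuristic=manhattan)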
def formatargvalues(args, varargs, varkw, locals,
formatarg=str,
formatvarargs=lambda name: '*' + name,
formatvarkw=lambda name: '**' + name,
formatvalue=lambda value: '=' + repr(value),
join=joinseq):
"""Format an argument spec from the 4 values returned by getargvalues.
The first four arguments are (args, varargs, varkw, locals). The
next four arguments are the corresponding optional formatting functions
that are called to turn names and values into strings. The ninth
argument is an optional function to format the sequence of arguments."""
def convert(name, locals=locals,
formatarg=formatarg, formatvalue=formatvalue):
return formatarg(name) + formatvalue(locals[name])
specs = []
for i in range(len(args)):
specs.append(strseq(args[i], convert, join))
if varargs:
specs.append(formatvarargs(varargs) + formatvalue(locals[varargs]))
if varkw:
specs.append(formatvarkw(varkw) + formatvalue(locals[varkw]))
return '(' + string.join(specs, ', ') + ')'
|
Format an argument spec from the 4 values returned by getargvalues.
The first four arguments are (args, varargs, varkw, locals). The
next four arguments are the corresponding optional formatting functions
that are called to turn names and values into strings. The ninth
argument is an optional function to format the sequence of arguments.
|
def stitch_block_rows(block_list):
'''
Stitches blocks together into a single block rowwise. These blocks are 2D tables usually
generated from tableproc. The final block will be of dimensions (sum(num_rows), max(num_cols)).
'''
stitched = list(itertools.chain(*block_list))
max_length = max(len(row) for row in stitched)
for row in stitched:
if len(row) < max_length:
row += [None] * (max_length - len(row))
return stitched
|
Stitches blocks together into a single block rowwise. These blocks are 2D tables usually
generated from tableproc. The final block will be of dimensions (sum(num_rows), max(num_cols)).
|
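The padding behaviour in a nutshell: shorter rows are right-padded with None up to the widest row.
stitch_block_rows([[[1, 2, 3]],
                   [[4], [5, 6]]])
# -> [[1, 2, 3], [4, None, None], [5, 6, None]]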
def von_mises_strain(self):
"""
Equivalent strain to Von Mises Stress
"""
eps = self - 1/3 * np.trace(self) * np.identity(3)
return np.sqrt(np.sum(eps * eps) * 2/3)
|
Equivalent strain to Von Mises Stress
|
def is_diacritic(char, strict=True):
"""
Check whether the character is a diacritic (as opposed to a letter or a
suprasegmental).
In strict mode return True only if the diacritic is part of the IPA spec.
"""
if char in chart.diacritics:
return True
if not strict:
return (unicodedata.category(char) in ['Lm', 'Mn', 'Sk']) \
and (not is_suprasegmental(char)) \
and (not is_tie_bar(char)) \
and (not 0xA700 <= ord(char) <= 0xA71F)
return False
|
Check whether the character is a diacritic (as opposed to a letter or a
suprasegmental).
In strict mode return True only if the diacritic is part of the IPA spec.
|
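The non-strict branch relies on Unicode general categories; a quick illustration with a combining mark:
import unicodedata

unicodedata.category(u"\u0301")  # 'Mn' (combining acute accent)
unicodedata.category(u"a")       # 'Ll' -- never treated as a diacritic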
def open(self, filename):
# type: (str) -> None
'''
Open up an existing ISO for inspection and modification.
Parameters:
filename - The filename containing the ISO to open up.
Returns:
Nothing.
'''
if self._initialized:
raise pycdlibexception.PyCdlibInvalidInput('This object already has an ISO; either close it or create a new object')
fp = open(filename, 'r+b')
self._managing_fp = True
try:
self._open_fp(fp)
except Exception:
fp.close()
raise
|
Open up an existing ISO for inspection and modification.
Parameters:
filename - The filename containing the ISO to open up.
Returns:
Nothing.
|
def highlight(string, keywords, cls_name='highlighted'):
""" Given an list of words, this function highlights the matched text in the given string. """
if not keywords:
return string
if not string:
return ''
include, exclude = get_text_tokenizer(keywords)
highlighted = highlight_text(include, string, cls_name)
return highlighted
|
Given an list of words, this function highlights the matched text in the given string.
|
def finish_operation(self, conn_or_internal_id, success, *args):
"""Finish an operation on a connection.
Args:
conn_or_internal_id (string, int): Either an integer connection id or a string
internal_id
success (bool): Whether the operation was successful
failure_reason (string): Optional reason why the operation failed
result (dict): Optional dictionary containing the results of the operation
"""
data = {
'id': conn_or_internal_id,
'success': success,
'callback_args': args
}
action = ConnectionAction('finish_operation', data, sync=False)
self._actions.put(action)
|
Finish an operation on a connection.
Args:
conn_or_internal_id (string, int): Either an integer connection id or a string
internal_id
success (bool): Whether the operation was successful
failure_reason (string): Optional reason why the operation failed
result (dict): Optional dictionary containing the results of the operation
|
def load(self):
""" Return the model from the store """
filters = [Filter(self.field, 'eq', self.rid)]
store = goldman.sess.store
self._is_loaded = True
self.models = store.search(self.rtype, filters=filters)
return self.models
|
Return the model from the store
|
def mark_deactivated(self,request,queryset):
"""An admin action for marking several cages as inactive.
This action sets the selected cages as Active=False and Death=today.
This admin action also shows as the output the number of mice sacrificed."""
rows_updated = queryset.update(Active=False, End=datetime.date.today() )
if rows_updated == 1:
message_bit = "1 cage was"
else:
message_bit = "%s cages were" % rows_updated
self.message_user(request, "%s successfully marked as deactivated." % message_bit)
|
An admin action for marking several cages as inactive.
This action sets the selected cages as Active=False and Death=today.
This admin action also shows as the output the number of mice sacrificed.
|
def extract_date(value):
"""
Convert timestamp to datetime and set everything to zero except a date
"""
dtime = value.to_datetime()
dtime = (dtime - timedelta(hours=dtime.hour) - timedelta(minutes=dtime.minute) -
timedelta(seconds=dtime.second) - timedelta(microseconds=dtime.microsecond))
return dtime
|
Convert timestamp to datetime and set everything to zero except a date
|
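The same truncation can be expressed more directly with ``datetime.replace``; a sketch of an equivalent implementation (keeping the original ``value.to_datetime()`` call):
def extract_date(value):
    """Convert timestamp to datetime with the time part zeroed out."""
    dtime = value.to_datetime()
    return dtime.replace(hour=0, minute=0, second=0, microsecond=0)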
def get_metabolite_compartments(self):
"""Return all metabolites' compartments."""
warn('use Model.compartments instead', DeprecationWarning)
return {met.compartment for met in self.metabolites
if met.compartment is not None}
|
Return all metabolites' compartments.
|
def read_configuration(self):
"""Load configuration from Django settings."""
self.configured = True
# Default backend needs to be the database backend for backward
# compatibility.
backend = (getattr(settings, 'CELERY_RESULT_BACKEND', None) or
getattr(settings, 'CELERY_BACKEND', None))
if not backend:
settings.CELERY_RESULT_BACKEND = 'database'
return DictAttribute(settings)
|
Load configuration from Django settings.
|
def retyped(self, new_type):
"""Returns a new node with the same contents as self, but with a new node_type."""
return ParseNode(new_type,
children=list(self.children),
consumed=self.consumed,
position=self.position,
ignored=self.ignored)
|
Returns a new node with the same contents as self, but with a new node_type.
|
def dump_connection_info(engine: Engine, fileobj: TextIO = sys.stdout) -> None:
"""
Dumps some connection info, as an SQL comment. Obscures passwords.
Args:
engine: the SQLAlchemy :class:`Engine` to dump metadata information
from
fileobj: the file-like object (default ``sys.stdout``) to write
information to
"""
meta = MetaData(bind=engine)
writeline_nl(fileobj, sql_comment('Database info: {}'.format(meta)))
|
Dumps some connection info, as an SQL comment. Obscures passwords.
Args:
engine: the SQLAlchemy :class:`Engine` to dump metadata information
from
fileobj: the file-like object (default ``sys.stdout``) to write
information to
|
def _is_dirty(self, xblock):
"""
Return whether this field should be saved when xblock.save() is called
"""
# pylint: disable=protected-access
if self not in xblock._dirty_fields:
return False
baseline = xblock._dirty_fields[self]
return baseline is EXPLICITLY_SET or xblock._field_data_cache[self.name] != baseline
|
Return whether this field should be saved when xblock.save() is called
|
def check_ressources(sess):
"""
    Check the resources of the Fortinet controller.
    All thresholds are currently hard-coded, which should be fine.
"""
# get the data
cpu_value = get_data(sess, cpu_oid, helper)
memory_value = get_data(sess, memory_oid, helper)
filesystem_value = get_data(sess, filesystem_oid, helper)
helper.add_summary("Controller Status")
helper.add_long_output("Controller Ressources - CPU: %s%%" % cpu_value)
helper.add_metric("CPU", cpu_value, "0:90", "0:90", "", "", "%%")
if int(cpu_value) > 90:
helper.status(critical)
helper.add_summary("Controller Ressources - CPU: %s%%" % cpu_value)
helper.add_long_output("Memory: %s%%" % memory_value)
helper.add_metric("Memory", memory_value, "0:90", "0:90", "", "", "%%")
if int(memory_value) > 90:
helper.add_summary("Memory: %s%%" % memory_value)
helper.status(critical)
helper.add_long_output("Filesystem: %s%%" % filesystem_value)
helper.add_metric("Filesystem", filesystem_value, "0:90", "0:90", "", "", "%%")
if int(filesystem_value) > 90:
helper.add_summary("Filesystem: %s%%" % filesystem_value)
helper.status(critical)
|
Check the resources of the Fortinet controller.
All thresholds are currently hard-coded, which should be fine.
|
def delete(self, image_file, delete_thumbnails=True):
"""
Deletes the reference to the ``image_file`` and deletes the references
        to thumbnails as well as thumbnail files if ``delete_thumbnails`` is
        ``True``. Does not delete the ``image_file`` itself.
"""
if delete_thumbnails:
self.delete_thumbnails(image_file)
self._delete(image_file.key)
|
Deletes the reference to the ``image_file`` and deletes the references
to thumbnails as well as thumbnail files if ``delete_thumbnails`` is
``True``. Does not delete the ``image_file`` itself.
|