text stringlengths 89 104k | code_tokens list | avg_line_len float64 7.91 980 | score float64 0 630 |
|---|---|---|---|
def _ScanVolumeSystemRootNode(
self, scan_context, scan_node, auto_recurse=True):
"""Scans a volume system root node for supported formats.
Args:
scan_context (SourceScannerContext): source scanner context.
scan_node (SourceScanNode): source scan node.
auto_recurse (Optional[bool]): True if the scan should automatically
recurse as far as possible.
Raises:
ValueError: if the scan context or scan node is invalid.
"""
if scan_node.type_indicator == definitions.TYPE_INDICATOR_VSHADOW:
# For VSS add a scan node for the current volume.
path_spec = self.ScanForFileSystem(scan_node.path_spec.parent)
if path_spec:
scan_context.AddScanNode(path_spec, scan_node.parent_node)
# Determine the path specifications of the sub file entries.
file_entry = resolver.Resolver.OpenFileEntry(
scan_node.path_spec, resolver_context=self._resolver_context)
for sub_file_entry in file_entry.sub_file_entries:
sub_scan_node = scan_context.AddScanNode(
sub_file_entry.path_spec, scan_node)
if scan_node.type_indicator == definitions.TYPE_INDICATOR_VSHADOW:
# Since scanning for file systems in VSS snapshot volumes can
# be expensive we only do this when explicitly asked for.
continue
if auto_recurse or not scan_context.updated:
self._ScanNode(scan_context, sub_scan_node, auto_recurse=auto_recurse) | [
"def",
"_ScanVolumeSystemRootNode",
"(",
"self",
",",
"scan_context",
",",
"scan_node",
",",
"auto_recurse",
"=",
"True",
")",
":",
"if",
"scan_node",
".",
"type_indicator",
"==",
"definitions",
".",
"TYPE_INDICATOR_VSHADOW",
":",
"# For VSS add a scan node for the curr... | 41.794118 | 23.205882 |
def render_meta(meta, fn="meta.pandas.html",
title="Project Metadata - MSMBuilder", pandas_kwargs=None):
"""Render a metadata dataframe as an html webpage for inspection.
Parameters
----------
meta : pd.Dataframe
The DataFrame of metadata
fn : str
Output filename (should end in html)
title : str
Page title
pandas_kwargs : dict
Arguments to be passed to pandas
"""
if pandas_kwargs is None:
pandas_kwargs = {}
kwargs_with_defaults = {
'classes': ('table', 'table-condensed', 'table-hover'),
}
kwargs_with_defaults.update(**pandas_kwargs)
env = Environment(loader=PackageLoader('msmbuilder', 'io_templates'))
templ = env.get_template("twitter-bootstrap.html")
rendered = templ.render(
title=title,
content=meta.to_html(**kwargs_with_defaults)
)
# Ugh, pandas hardcodes border="1"
rendered = re.sub(r' border="1"', '', rendered)
backup(fn)
with open(fn, 'w') as f:
f.write(rendered) | [
"def",
"render_meta",
"(",
"meta",
",",
"fn",
"=",
"\"meta.pandas.html\"",
",",
"title",
"=",
"\"Project Metadata - MSMBuilder\"",
",",
"pandas_kwargs",
"=",
"None",
")",
":",
"if",
"pandas_kwargs",
"is",
"None",
":",
"pandas_kwargs",
"=",
"{",
"}",
"kwargs_with... | 27.540541 | 20.135135 |
def _validate_config(self, config):
"""
Validates some parts of the module config
:type config: dict[str, dict[str, Any] | str]
:param config: The module config
"""
required_keys = [
self.KEY_IDP_CONFIG,
self.KEY_ENDPOINTS,
]
if not config:
raise ValueError("No configuration given")
for key in required_keys:
try:
_val = config[key]
except KeyError as e:
raise ValueError("Missing configuration key: %s" % key) from e | [
"def",
"_validate_config",
"(",
"self",
",",
"config",
")",
":",
"required_keys",
"=",
"[",
"self",
".",
"KEY_IDP_CONFIG",
",",
"self",
".",
"KEY_ENDPOINTS",
",",
"]",
"if",
"not",
"config",
":",
"raise",
"ValueError",
"(",
"\"No configuration given\"",
")",
... | 29.789474 | 14.947368 |
def wait_socket(_socket, session, timeout=1):
"""Helper function for testing non-blocking mode.
This function blocks the calling thread for <timeout> seconds -
to be used only for testing purposes.
Also available at `ssh2.utils.wait_socket`
"""
directions = session.block_directions()
if directions == 0:
return 0
readfds = [_socket] \
if (directions & LIBSSH2_SESSION_BLOCK_INBOUND) else ()
writefds = [_socket] \
if (directions & LIBSSH2_SESSION_BLOCK_OUTBOUND) else ()
return select(readfds, writefds, (), timeout) | [
"def",
"wait_socket",
"(",
"_socket",
",",
"session",
",",
"timeout",
"=",
"1",
")",
":",
"directions",
"=",
"session",
".",
"block_directions",
"(",
")",
"if",
"directions",
"==",
"0",
":",
"return",
"0",
"readfds",
"=",
"[",
"_socket",
"]",
"if",
"("... | 35.5 | 15.5 |
def cycle(self):
"""
Request one batch of events from Skype, calling :meth:`onEvent` with each event in turn.
Subclasses may override this method to alter loop functionality.
"""
try:
events = self.getEvents()
except requests.ConnectionError:
return
for event in events:
self.onEvent(event)
if self.autoAck:
event.ack() | [
"def",
"cycle",
"(",
"self",
")",
":",
"try",
":",
"events",
"=",
"self",
".",
"getEvents",
"(",
")",
"except",
"requests",
".",
"ConnectionError",
":",
"return",
"for",
"event",
"in",
"events",
":",
"self",
".",
"onEvent",
"(",
"event",
")",
"if",
"... | 30.5 | 17.928571 |
def xminvsks(self, **kwargs):
"""
Plot xmin versus the ks value for derived alpha. This plot can be used
as a diagnostic of whether you have derived the 'best' fit: if there are
multiple local minima, your data set may be well suited to a broken
powerlaw or a different function.
"""
pylab.plot(self._xmins,self._xmin_kstest,'.')
pylab.plot(self._xmin,self._ks,'s')
#pylab.errorbar([self._ks],self._alpha,yerr=self._alphaerr,fmt='+')
ax=pylab.gca()
ax.set_ylabel("KS statistic")
ax.set_xlabel("min(x)")
pylab.draw()
return ax | [
"def",
"xminvsks",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"pylab",
".",
"plot",
"(",
"self",
".",
"_xmins",
",",
"self",
".",
"_xmin_kstest",
",",
"'.'",
")",
"pylab",
".",
"plot",
"(",
"self",
".",
"_xmin",
",",
"self",
".",
"_ks",
",",... | 34.666667 | 20.555556 |
def writeable(value,
allow_empty = False,
**kwargs):
"""Validate that ``value`` is a path to a writeable file.
.. caution::
This validator does **NOT** work correctly on a Windows file system. This
is due to the vagaries of how Windows manages its file system and the
various ways in which it can manage file permission.
If called on a Windows file system, this validator will raise
:class:`NotImplementedError() <python:NotImplementedError>`.
.. caution::
**Use of this validator is an anti-pattern and should be used with caution.**
Validating the writability of a file *before* attempting to write to it
exposes your code to a bug called
`TOCTOU <https://en.wikipedia.org/wiki/Time_of_check_to_time_of_use>`_.
This particular class of bug can expose your code to **security vulnerabilities**
and so this validator should only be used if you are an advanced user.
A better pattern to use when writing to file is to apply the principle of
EAFP ("easier to ask forgiveness than permission"), and simply attempt to
write to the file using a ``try ... except`` block:
.. code-block:: python
try:
with open('path/to/filename.txt', mode = 'a') as file_object:
# write to file here
except (OSError, IOError) as error:
# Handle an error if unable to write.
.. note::
This validator relies on :func:`os.access() <python:os.access>` to check
whether ``value`` is writeable. This function has certain limitations,
most especially that:
* It will **ignore** file-locking (yielding a false-positive) if the file
is locked.
* It focuses on *local operating system permissions*, which means if trying
to access a path over a network you might get a false positive or false
negative (because network paths may have more complicated authentication
methods).
:param value: The path to a file on the local filesystem whose writeability
is to be validated.
:type value: Path-like object
:param allow_empty: If ``True``, returns :obj:`None <python:None>` if
``value`` is empty. If ``False``, raises a
:class:`EmptyValueError <validator_collection.errors.EmptyValueError>`
if ``value`` is empty. Defaults to ``False``.
:type allow_empty: :class:`bool <python:bool>`
:returns: Validated absolute path or :obj:`None <python:None>`
:rtype: Path-like object or :obj:`None <python:None>`
:raises EmptyValueError: if ``allow_empty`` is ``False`` and ``value``
is empty
:raises NotImplementedError: if used on a Windows system
:raises NotPathlikeError: if ``value`` is not a path-like object
:raises NotWriteableError: if ``value`` cannot be opened for writing
"""
if not value and not allow_empty:
raise errors.EmptyValueError('value (%s) was empty' % value)
elif not value:
return None
value = path(value, force_run = True)
if sys.platform in ['win32', 'cygwin']:
raise NotImplementedError('not supported on Windows')
is_valid = os.access(value, mode = os.W_OK)
if not is_valid:
raise errors.NotWriteableError('writing not allowed for file at %s' % value)
return value | [
"def",
"writeable",
"(",
"value",
",",
"allow_empty",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"value",
"and",
"not",
"allow_empty",
":",
"raise",
"errors",
".",
"EmptyValueError",
"(",
"'value (%s) was empty'",
"%",
"value",
")",
"eli... | 37.988372 | 27.825581 |
def encodeCodon(seq_vec, ignore_stop_codons=True, maxlen=None, seq_align="start", encode_type="one_hot"):
"""Convert the Codon sequence into 1-hot-encoding numpy array
# Arguments
seq_vec: List of strings/DNA sequences
ignore_stop_codons: boolean; if True, STOP_CODONS are omitted from one-hot encoding.
maxlen: Maximum sequence length. See `pad_sequences` for more detail
seq_align: How to align the sequences of variable lengths. See `pad_sequences` for more detail
encode_type: can be `"one_hot"` or `token` for token encoding of codons (incremental integer ).
# Returns
numpy.ndarray of shape `(len(seq_vec), maxlen / 3, 61 if ignore_stop_codons else 64)`
"""
if ignore_stop_codons:
vocab = CODONS
neutral_vocab = STOP_CODONS + ["NNN"]
else:
vocab = CODONS + STOP_CODONS
neutral_vocab = ["NNN"]
# replace all U's with A's?
seq_vec = [str(seq).replace("U", "T") for seq in seq_vec]
return encodeSequence(seq_vec,
vocab=vocab,
neutral_vocab=neutral_vocab,
maxlen=maxlen,
seq_align=seq_align,
pad_value="NNN",
encode_type=encode_type) | [
"def",
"encodeCodon",
"(",
"seq_vec",
",",
"ignore_stop_codons",
"=",
"True",
",",
"maxlen",
"=",
"None",
",",
"seq_align",
"=",
"\"start\"",
",",
"encode_type",
"=",
"\"one_hot\"",
")",
":",
"if",
"ignore_stop_codons",
":",
"vocab",
"=",
"CODONS",
"neutral_vo... | 42.733333 | 23.333333 |
def sort_protein_group(pgroup, sortfunctions, sortfunc_index):
"""Recursive function that sorts protein group by a number of sorting
functions."""
pgroup_out = []
subgroups = sortfunctions[sortfunc_index](pgroup)
sortfunc_index += 1
for subgroup in subgroups:
if len(subgroup) > 1 and sortfunc_index < len(sortfunctions):
pgroup_out.extend(sort_protein_group(subgroup,
sortfunctions,
sortfunc_index))
else:
pgroup_out.extend(subgroup)
return pgroup_out | [
"def",
"sort_protein_group",
"(",
"pgroup",
",",
"sortfunctions",
",",
"sortfunc_index",
")",
":",
"pgroup_out",
"=",
"[",
"]",
"subgroups",
"=",
"sortfunctions",
"[",
"sortfunc_index",
"]",
"(",
"pgroup",
")",
"sortfunc_index",
"+=",
"1",
"for",
"subgroup",
"... | 43.214286 | 16.071429 |
def removeall(item, seq):
"""Return a copy of seq (or string) with all occurences of item removed.
>>> removeall(3, [1, 2, 3, 3, 2, 1, 3])
[1, 2, 2, 1]
>>> removeall(4, [1, 2, 3])
[1, 2, 3]
"""
if isinstance(seq, str):
return seq.replace(item, '')
else:
return [x for x in seq if x != item] | [
"def",
"removeall",
"(",
"item",
",",
"seq",
")",
":",
"if",
"isinstance",
"(",
"seq",
",",
"str",
")",
":",
"return",
"seq",
".",
"replace",
"(",
"item",
",",
"''",
")",
"else",
":",
"return",
"[",
"x",
"for",
"x",
"in",
"seq",
"if",
"x",
"!="... | 29.818182 | 11.727273 |
def _get_eligible_broker_pair(self, under_loaded_rg, eligible_partition):
"""Evaluate and return source and destination broker-pair from over-loaded
and under-loaded replication-group if possible, return None otherwise.
Return source broker with maximum partitions and destination broker with
minimum partitions based on following conditions:-
1) At-least one broker in under-loaded group which does not have
victim-partition. This is because a broker cannot have duplicate replica.
2) At-least one broker in over-loaded group which has victim-partition
"""
under_brokers = list(filter(
lambda b: eligible_partition not in b.partitions,
under_loaded_rg.brokers,
))
over_brokers = list(filter(
lambda b: eligible_partition in b.partitions,
self.brokers,
))
# Get source and destination broker
source_broker, dest_broker = None, None
if over_brokers:
source_broker = max(
over_brokers,
key=lambda broker: len(broker.partitions),
)
if under_brokers:
dest_broker = min(
under_brokers,
key=lambda broker: len(broker.partitions),
)
return (source_broker, dest_broker) | [
"def",
"_get_eligible_broker_pair",
"(",
"self",
",",
"under_loaded_rg",
",",
"eligible_partition",
")",
":",
"under_brokers",
"=",
"list",
"(",
"filter",
"(",
"lambda",
"b",
":",
"eligible_partition",
"not",
"in",
"b",
".",
"partitions",
",",
"under_loaded_rg",
... | 41.5 | 19.34375 |
def find_pyd_file():
"""
Return path to .pyd after successful build command.
:return: Path to .pyd file or None.
"""
if not os.path.isdir("./build"):
raise NotADirectoryError
for path, dirs, files in os.walk("./build"):
for file_name in files:
file_name_parts = os.path.splitext(file_name)
if file_name_parts[1] == ".pyd":
return path
return None | [
"def",
"find_pyd_file",
"(",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"\"./build\"",
")",
":",
"raise",
"NotADirectoryError",
"for",
"path",
",",
"dirs",
",",
"files",
"in",
"os",
".",
"walk",
"(",
"\"./build\"",
")",
":",
"for",
... | 27.866667 | 13.6 |
def deviation(reference_intervals, estimated_intervals, trim=False):
"""Compute the median deviations between reference
and estimated boundary times.
Examples
--------
>>> ref_intervals, _ = mir_eval.io.load_labeled_intervals('ref.lab')
>>> est_intervals, _ = mir_eval.io.load_labeled_intervals('est.lab')
>>> r_to_e, e_to_r = mir_eval.boundary.deviation(ref_intervals,
... est_intervals)
Parameters
----------
reference_intervals : np.ndarray, shape=(n, 2)
reference segment intervals, in the format returned by
:func:`mir_eval.io.load_intervals` or
:func:`mir_eval.io.load_labeled_intervals`.
estimated_intervals : np.ndarray, shape=(m, 2)
estimated segment intervals, in the format returned by
:func:`mir_eval.io.load_intervals` or
:func:`mir_eval.io.load_labeled_intervals`.
trim : boolean
if ``True``, the first and last intervals are ignored.
Typically, these denote start (0.0) and end-of-track markers.
(Default value = False)
Returns
-------
reference_to_estimated : float
median time from each reference boundary to the
closest estimated boundary
estimated_to_reference : float
median time from each estimated boundary to the
closest reference boundary
"""
validate_boundary(reference_intervals, estimated_intervals, trim)
# Convert intervals to boundaries
reference_boundaries = util.intervals_to_boundaries(reference_intervals)
estimated_boundaries = util.intervals_to_boundaries(estimated_intervals)
# Suppress the first and last intervals
if trim:
reference_boundaries = reference_boundaries[1:-1]
estimated_boundaries = estimated_boundaries[1:-1]
# If we have no boundaries, we get no score.
if len(reference_boundaries) == 0 or len(estimated_boundaries) == 0:
return np.nan, np.nan
dist = np.abs(np.subtract.outer(reference_boundaries,
estimated_boundaries))
estimated_to_reference = np.median(dist.min(axis=0))
reference_to_estimated = np.median(dist.min(axis=1))
return reference_to_estimated, estimated_to_reference | [
"def",
"deviation",
"(",
"reference_intervals",
",",
"estimated_intervals",
",",
"trim",
"=",
"False",
")",
":",
"validate_boundary",
"(",
"reference_intervals",
",",
"estimated_intervals",
",",
"trim",
")",
"# Convert intervals to boundaries",
"reference_boundaries",
"="... | 37.559322 | 21.949153 |
def parse(self, limit=None):
"""
Override Source.parse()
Args:
:param limit (int, optional) limit the number of rows processed
Returns:
:return None
"""
if limit is not None:
LOG.info("Only parsing first %d rows", limit)
ensembl_file = '/'.join((self.rawdir, self.files['ensembl2pathway']['file']))
self._parse_reactome_association_file(
ensembl_file, limit, subject_prefix='ENSEMBL', object_prefix='REACT')
chebi_file = '/'.join((self.rawdir, self.files['chebi2pathway']['file']))
self._parse_reactome_association_file(
chebi_file, limit, subject_prefix='CHEBI', object_prefix='REACT')
return | [
"def",
"parse",
"(",
"self",
",",
"limit",
"=",
"None",
")",
":",
"if",
"limit",
"is",
"not",
"None",
":",
"LOG",
".",
"info",
"(",
"\"Only parsing first %d rows\"",
",",
"limit",
")",
"ensembl_file",
"=",
"'/'",
".",
"join",
"(",
"(",
"self",
".",
"... | 38.157895 | 22.789474 |
def bundles(ctx):
"""
List discovered bundles.
"""
bundles = _get_bundles(ctx.obj.data['env'])
print_table(('Name', 'Location'),
[(bundle.name, f'{bundle.__module__}.{bundle.__class__.__name__}')
for bundle in bundles]) | [
"def",
"bundles",
"(",
"ctx",
")",
":",
"bundles",
"=",
"_get_bundles",
"(",
"ctx",
".",
"obj",
".",
"data",
"[",
"'env'",
"]",
")",
"print_table",
"(",
"(",
"'Name'",
",",
"'Location'",
")",
",",
"[",
"(",
"bundle",
".",
"name",
",",
"f'{bundle.__mo... | 33.125 | 10.875 |
def debug_string(self, max_debug=MAX_DEBUG_TRIALS):
"""Returns a human readable message for printing to the console."""
messages = self._debug_messages()
states = collections.defaultdict(set)
limit_per_state = collections.Counter()
for t in self._trials:
states[t.status].add(t)
# Show at most max_debug total, but divide the limit fairly
while max_debug > 0:
start_num = max_debug
for s in states:
if limit_per_state[s] >= len(states[s]):
continue
max_debug -= 1
limit_per_state[s] += 1
if max_debug == start_num:
break
for local_dir in sorted({t.local_dir for t in self._trials}):
messages.append("Result logdir: {}".format(local_dir))
num_trials_per_state = {
state: len(trials)
for state, trials in states.items()
}
total_number_of_trials = sum(num_trials_per_state.values())
if total_number_of_trials > 0:
messages.append("Number of trials: {} ({})"
"".format(total_number_of_trials,
num_trials_per_state))
for state, trials in sorted(states.items()):
limit = limit_per_state[state]
messages.append("{} trials:".format(state))
sorted_trials = sorted(
trials, key=lambda t: _naturalize(t.experiment_tag))
if len(trials) > limit:
tail_length = limit // 2
first = sorted_trials[:tail_length]
for t in first:
messages.append(" - {}:\t{}".format(
t, t.progress_string()))
messages.append(
" ... {} not shown".format(len(trials) - tail_length * 2))
last = sorted_trials[-tail_length:]
for t in last:
messages.append(" - {}:\t{}".format(
t, t.progress_string()))
else:
for t in sorted_trials:
messages.append(" - {}:\t{}".format(
t, t.progress_string()))
return "\n".join(messages) + "\n" | [
"def",
"debug_string",
"(",
"self",
",",
"max_debug",
"=",
"MAX_DEBUG_TRIALS",
")",
":",
"messages",
"=",
"self",
".",
"_debug_messages",
"(",
")",
"states",
"=",
"collections",
".",
"defaultdict",
"(",
"set",
")",
"limit_per_state",
"=",
"collections",
".",
... | 40.654545 | 14.636364 |
def pairwise(iterable):
"""
For an iterable, group values into pairs.
Parameters
-----------
iterable : (m, ) list
A sequence of values
Returns
-----------
pairs: (n, 2)
Pairs of sequential values
Example
-----------
In [1]: data
Out[1]: [0, 1, 2, 3, 4, 5, 6]
In [2]: list(trimesh.util.pairwise(data))
Out[2]: [(0, 1), (1, 2), (2, 3), (3, 4), (4, 5), (5, 6)]
"""
# looping through a giant numpy array would be dumb
# so special case ndarrays and use numpy operations
if isinstance(iterable, np.ndarray):
iterable = iterable.reshape(-1)
stacked = np.column_stack((iterable, iterable))
pairs = stacked.reshape(-1)[1:-1].reshape((-1, 2))
return pairs
# if we have a normal iterable use itertools
import itertools
a, b = itertools.tee(iterable)
# pop the first element of the second item
next(b)
return zip(a, b) | [
"def",
"pairwise",
"(",
"iterable",
")",
":",
"# looping through a giant numpy array would be dumb",
"# so special case ndarrays and use numpy operations",
"if",
"isinstance",
"(",
"iterable",
",",
"np",
".",
"ndarray",
")",
":",
"iterable",
"=",
"iterable",
".",
"reshape... | 24.184211 | 19.710526 |
def populate(self, other):
"""Like update, but clears the contents first."""
self.clear()
self.update(other)
self.reset_all_changes() | [
"def",
"populate",
"(",
"self",
",",
"other",
")",
":",
"self",
".",
"clear",
"(",
")",
"self",
".",
"update",
"(",
"other",
")",
"self",
".",
"reset_all_changes",
"(",
")"
] | 32.2 | 11.2 |
def __split_nonleaf_node(self, node):
"""!
@brief Performs splitting of the specified non-leaf node.
@param[in] node (non_leaf_node): Non-leaf node that should be splitted.
@return (list) New pair of non-leaf nodes [non_leaf_node1, non_leaf_node2].
"""
[farthest_node1, farthest_node2] = node.get_farthest_successors(self.__type_measurement);
# create new non-leaf nodes
new_node1 = non_leaf_node(farthest_node1.feature, node.parent, [ farthest_node1 ], None);
new_node2 = non_leaf_node(farthest_node2.feature, node.parent, [ farthest_node2 ], None);
farthest_node1.parent = new_node1;
farthest_node2.parent = new_node2;
# re-insert other successors
for successor in node.successors:
if ( (successor is not farthest_node1) and (successor is not farthest_node2) ):
distance1 = new_node1.get_distance(successor, self.__type_measurement);
distance2 = new_node2.get_distance(successor, self.__type_measurement);
if (distance1 < distance2):
new_node1.insert_successor(successor);
else:
new_node2.insert_successor(successor);
return [new_node1, new_node2]; | [
"def",
"__split_nonleaf_node",
"(",
"self",
",",
"node",
")",
":",
"[",
"farthest_node1",
",",
"farthest_node2",
"]",
"=",
"node",
".",
"get_farthest_successors",
"(",
"self",
".",
"__type_measurement",
")",
"# create new non-leaf nodes\r",
"new_node1",
"=",
"non_le... | 44.096774 | 25.225806 |
def check_inlet(self, helper):
"""
check the Inlets of Raritan PDUs
"""
# walk the data
try:
inlet_values = self.sess.walk_oid(self.oids['oid_inlet_value'])
inlet_units = self.sess.walk_oid(self.oids['oid_inlet_unit'])
inlet_digits = self.sess.walk_oid(self.oids['oid_inlet_digits'])
inlet_states = self.sess.walk_oid(self.oids['oid_inlet_state'])
inlet_warning_uppers = self.sess.walk_oid(self.oids['oid_inlet_warning_upper'])
inlet_critical_uppers = self.sess.walk_oid(self.oids['oid_inlet_critical_upper'])
inlet_critical_lowers = self.sess.walk_oid(self.oids['oid_inlet_critical_lower'])
inlet_warning_lowers = self.sess.walk_oid(self.oids['oid_inlet_warning_lower'])
except health_monitoring_plugins.SnmpException as e:
helper.exit(summary=str(e), exit_code=unknown, perfdata='')
# just print the summary, that the inlet sensors are checked
helper.add_summary("Inlet")
# all list must have the same length, if not something went wrong. that makes it easier and we need less loops
# translate the data in human readable units with help of the dicts
for x in range(len(inlet_values)):
inlet_unit = units[int(inlet_units[x].val)]
inlet_digit = inlet_digits[x].val
inlet_state = states[int(inlet_states[x].val)]
inlet_value = real_value(inlet_values[x].val, inlet_digit)
inlet_warning_upper = real_value(inlet_warning_uppers[x].val, inlet_digit)
inlet_critical_upper = real_value(inlet_critical_uppers[x].val, inlet_digit)
inlet_warning_lower = real_value(inlet_warning_lowers[x].val, inlet_digit)
inlet_critical_lower = real_value(inlet_critical_lowers[x].val, inlet_digit)
if inlet_state != "normal":
# we don't want to use the thresholds. we rely on the state value of the device
helper.add_summary("%s %s is %s" % (inlet_value, inlet_unit, inlet_state))
helper.status(critical)
# we always want to see the values in the long output and in the perf data
helper.add_summary("%s %s" % (inlet_value, inlet_unit))
helper.add_long_output("%s %s: %s" % (inlet_value, inlet_unit, inlet_state))
helper.add_metric("Sensor " + str(x) + " -%s-" % inlet_unit, inlet_value,
inlet_warning_lower +\
":" + inlet_warning_upper, inlet_critical_lower + ":" +\
inlet_critical_upper, "", "", "") | [
"def",
"check_inlet",
"(",
"self",
",",
"helper",
")",
":",
"# walk the data",
"try",
":",
"inlet_values",
"=",
"self",
".",
"sess",
".",
"walk_oid",
"(",
"self",
".",
"oids",
"[",
"'oid_inlet_value'",
"]",
")",
"inlet_units",
"=",
"self",
".",
"sess",
"... | 59.840909 | 31.25 |
def _generate_dir_structure(path):
"""
Internal function intended to generate the biosignalsnotebooks directories in order to the user
can visualise and execute the Notebook created with "notebook" class in Jupyter.
----------
Parameters
----------
path : str
Path where the biosignalsnotebooks environment (files and folders) will be stored.
Returns
-------
out : str
Path of the directory that contains the folders (one folder per category) where the
Notebooks are stored.
"""
# ============================ Creation of the main directory ==================================
current_dir = (path + "\\opensignalsfactory_environment").replace("\\", "/")
if not os.path.isdir(current_dir):
os.makedirs(current_dir)
# ================== Copy of 'images' 'styles' and 'signal_samples' folders ====================
path_cloned_files = (os.path.abspath(__file__).split(os.path.basename(__file__))[0] + \
"\\notebook_files\\osf_files\\").replace("\\", "/")
for var in ["images", "styles", "signal_samples"]:
if os.path.isdir((current_dir + "\\" + var).replace("\\", "/")):
shutil.rmtree((current_dir + "\\" + var).replace("\\", "/"))
src = (path_cloned_files + "\\" + var).replace("\\", "/")
destination = (current_dir + "\\" + var).replace("\\", "/")
shutil.copytree(src, destination)
# =========================== Generation of 'Categories' folder ================================
current_dir += "/Categories"
if not os.path.isdir(current_dir):
os.makedirs(current_dir)
categories = list(NOTEBOOK_KEYS.keys())
for category in categories:
if not os.path.isdir(current_dir + "/" + category):
os.makedirs(current_dir + "/" + category)
return current_dir | [
"def",
"_generate_dir_structure",
"(",
"path",
")",
":",
"# ============================ Creation of the main directory ==================================",
"current_dir",
"=",
"(",
"path",
"+",
"\"\\\\opensignalsfactory_environment\"",
")",
".",
"replace",
"(",
"\"\\\\\"",
",",
... | 40.644444 | 27.933333 |
def get_value(self, symbol):
"""
Hierarchically searches for 'symbol' in the parameters blob if there is one (would have
been retrieved by 'load()'). Order is: default, <env_short>, <env>
Args:
symbol: the key to resolve
Returns:
Hierarchically resolved value for 'symbol' in the environment set by the constructor,
or None if a match is not found or there are no parameters
"""
default = "default"
if not self.parameters:
return None
# Hierarchically lookup the value
result = None
if default in self.parameters and symbol in self.parameters[default]:
result = self.parameters[default][symbol]
if self.env_short in self.parameters and symbol in self.parameters[self.env_short]:
result = self.parameters[self.env_short][symbol]
# This lookup is redundant when env_short == env, but it's also cheap
if self.env in self.parameters and symbol in self.parameters[self.env]:
result = self.parameters[self.env][symbol]
# Finally, convert any list of items into a single \n-delimited string
if isinstance(result, list):
result = "\n".join(result)
return result | [
"def",
"get_value",
"(",
"self",
",",
"symbol",
")",
":",
"default",
"=",
"\"default\"",
"if",
"not",
"self",
".",
"parameters",
":",
"return",
"None",
"# Hierarchically lookup the value",
"result",
"=",
"None",
"if",
"default",
"in",
"self",
".",
"parameters"... | 42.37037 | 22.37037 |
def tick(self):
"""Mark the passage of time and decay the current rate accordingly."""
instant_rate = self.count / float(self.tick_interval_s)
self.count = 0
if self.initialized:
self.rate += (self.alpha * (instant_rate - self.rate))
else:
self.rate = instant_rate
self.initialized = True | [
"def",
"tick",
"(",
"self",
")",
":",
"instant_rate",
"=",
"self",
".",
"count",
"/",
"float",
"(",
"self",
".",
"tick_interval_s",
")",
"self",
".",
"count",
"=",
"0",
"if",
"self",
".",
"initialized",
":",
"self",
".",
"rate",
"+=",
"(",
"self",
... | 39.555556 | 15.555556 |
def write(self, fptr):
"""Write a data entry url box to file.
"""
# Make sure it is written out as null-terminated.
url = self.url
if self.url[-1] != chr(0):
url = url + chr(0)
url = url.encode()
length = 8 + 1 + 3 + len(url)
write_buffer = struct.pack('>I4sBBBB',
length, b'url ',
self.version,
self.flag[0], self.flag[1], self.flag[2])
fptr.write(write_buffer)
fptr.write(url) | [
"def",
"write",
"(",
"self",
",",
"fptr",
")",
":",
"# Make sure it is written out as null-terminated.",
"url",
"=",
"self",
".",
"url",
"if",
"self",
".",
"url",
"[",
"-",
"1",
"]",
"!=",
"chr",
"(",
"0",
")",
":",
"url",
"=",
"url",
"+",
"chr",
"("... | 35.0625 | 13.25 |
def get_frame(self, idx, history_length=1):
""" Return frame from the buffer """
if idx >= self.current_size:
raise VelException("Requested frame beyond the size of the buffer")
if history_length > 1:
assert self.state_buffer.shape[-1] == 1, \
"State buffer must have last dimension of 1 if we want frame history"
accumulator = []
last_frame = self.state_buffer[idx]
accumulator.append(last_frame)
for i in range(history_length - 1):
prev_idx = (idx - 1) % self.buffer_capacity
if prev_idx == self.current_idx:
raise VelException("Cannot provide enough history for the frame")
elif self.dones_buffer[prev_idx]:
# If previous frame was done - just append zeroes
accumulator.append(np.zeros_like(last_frame))
else:
idx = prev_idx
accumulator.append(self.state_buffer[idx])
# We're pushing the elements in reverse order
return np.concatenate(accumulator[::-1], axis=-1) | [
"def",
"get_frame",
"(",
"self",
",",
"idx",
",",
"history_length",
"=",
"1",
")",
":",
"if",
"idx",
">=",
"self",
".",
"current_size",
":",
"raise",
"VelException",
"(",
"\"Requested frame beyond the size of the buffer\"",
")",
"if",
"history_length",
">",
"1",... | 38.75 | 20.392857 |
def sqlCreate(self):
""" Reasonably portable SQL CREATE for defined fields.
Returns:
string: Portable as possible SQL Create for all-reads table.
"""
count = 0
qry_str = "CREATE TABLE Meter_Reads ( \n\r"
qry_str = self.fillCreate(qry_str)
ekm_log(qry_str, 4)
return qry_str | [
"def",
"sqlCreate",
"(",
"self",
")",
":",
"count",
"=",
"0",
"qry_str",
"=",
"\"CREATE TABLE Meter_Reads ( \\n\\r\"",
"qry_str",
"=",
"self",
".",
"fillCreate",
"(",
"qry_str",
")",
"ekm_log",
"(",
"qry_str",
",",
"4",
")",
"return",
"qry_str"
] | 34 | 14.3 |
def _check_data_port_name(self, data_port):
"""Checks the validity of a data port name
Checks whether the name of the given data port is already used by anther data port within the state. Names
must be unique with input data ports and output data ports.
:param rafcon.core.data_port.DataPort data_port: The data port to be checked
:return bool validity, str message: validity is True, when the data port is valid, False else. message gives
more information especially if the data port is not valid
"""
if data_port.data_port_id in self.input_data_ports:
for input_data_port in self.input_data_ports.values():
if data_port.name == input_data_port.name and data_port is not input_data_port:
return False, "data port name already existing in state's input data ports"
elif data_port.data_port_id in self.output_data_ports:
for output_data_port in self.output_data_ports.values():
if data_port.name == output_data_port.name and data_port is not output_data_port:
return False, "data port name already existing in state's output data ports"
return True, "valid" | [
"def",
"_check_data_port_name",
"(",
"self",
",",
"data_port",
")",
":",
"if",
"data_port",
".",
"data_port_id",
"in",
"self",
".",
"input_data_ports",
":",
"for",
"input_data_port",
"in",
"self",
".",
"input_data_ports",
".",
"values",
"(",
")",
":",
"if",
... | 58.095238 | 35.380952 |
def pending(self, start='-', stop='+', count=1000, consumer=None):
"""
List pending messages within the consumer group for this stream.
:param start: start id (or '-' for oldest pending)
:param stop: stop id (or '+' for newest pending)
:param count: limit number of messages returned
:param consumer: restrict message list to the given consumer
:returns: A list containing status for each pending message. Each
pending message returns [id, consumer, idle time, deliveries].
"""
return self.database.xpending_range(self.key, self.group, start, stop,
count, consumer) | [
"def",
"pending",
"(",
"self",
",",
"start",
"=",
"'-'",
",",
"stop",
"=",
"'+'",
",",
"count",
"=",
"1000",
",",
"consumer",
"=",
"None",
")",
":",
"return",
"self",
".",
"database",
".",
"xpending_range",
"(",
"self",
".",
"key",
",",
"self",
"."... | 52.461538 | 23.076923 |
def from_str(cls, s):
""" Accepts both the output of to_simple_str() and __str__(). """
if not isinstance(s, str):
raise TypeError("Expected an str instance, received %r" % (s,))
return cls(cls.bits_from_str(s)) | [
"def",
"from_str",
"(",
"cls",
",",
"s",
")",
":",
"if",
"not",
"isinstance",
"(",
"s",
",",
"str",
")",
":",
"raise",
"TypeError",
"(",
"\"Expected an str instance, received %r\"",
"%",
"(",
"s",
",",
")",
")",
"return",
"cls",
"(",
"cls",
".",
"bits_... | 48.6 | 12 |
def filter_nan(s, o):
"""
this functions removed the data from simulated and observed data
whereever the observed data contains nan
this is used by all other functions, otherwise they will produce nan as
output
"""
data = np.array([s.flatten(), o.flatten()])
data = np.transpose(data)
data = data[~np.isnan(data).any(1)]
return data[:, 0], data[:, 1] | [
"def",
"filter_nan",
"(",
"s",
",",
"o",
")",
":",
"data",
"=",
"np",
".",
"array",
"(",
"[",
"s",
".",
"flatten",
"(",
")",
",",
"o",
".",
"flatten",
"(",
")",
"]",
")",
"data",
"=",
"np",
".",
"transpose",
"(",
"data",
")",
"data",
"=",
"... | 33.416667 | 15.916667 |
def _on_connect(self, participant):
"""Called from the WebSocket consumer. Checks if all players in the group
have connected; runs :meth:`when_all_players_ready` once all connections
are established.
"""
lock = get_redis_lock()
if not lock:
lock = fake_lock()
with lock:
self.refresh_from_db()
if self.ran_ready_function:
return
for player in self.get_players():
if Connection.objects.filter(participant__code=player.participant.code).count() == 0:
return
self.when_all_players_ready()
self.ran_ready_function = timezone.now()
self.save()
self.send('state', 'period_start')
if self.period_length():
# TODO: Should replace this with something like Huey/Celery so it'll survive a server restart.
self._timer = threading.Timer(
self.period_length(),
lambda: self.send('state', 'period_end'))
self._timer.start() | [
"def",
"_on_connect",
"(",
"self",
",",
"participant",
")",
":",
"lock",
"=",
"get_redis_lock",
"(",
")",
"if",
"not",
"lock",
":",
"lock",
"=",
"fake_lock",
"(",
")",
"with",
"lock",
":",
"self",
".",
"refresh_from_db",
"(",
")",
"if",
"self",
".",
... | 38.206897 | 17.586207 |
def subset(train, idx, keep_other=True):
"""Subset the `train=(x, y)` data tuple, each of the form:
- list, np.ndarray
- tuple, np.ndarray
- dictionary, np.ndarray
- np.ndarray, np.ndarray
# Note
In case there are other data present in the tuple:
`(x, y, other1, other2, ...)`, these get passed on as:
`(x_sub, y_sub, other1, other2)`
# Arguments
train: `(x,y, other1, other2, ...)` tuple of data
idx: indices to subset the data with
keep_other: bool; If True, the additional tuple elements `(other1, other2, ...)` are passed
together with `(x, y)` but don't get subsetted.
"""
test_len(train)
y = train[1][idx]
# x split
if isinstance(train[0], (list, tuple)):
x = [x[idx] for x in train[0]]
elif isinstance(train[0], dict):
x = {k: v[idx] for k, v in train[0].items()}
elif isinstance(train[0], np.ndarray):
x = train[0][idx]
else:
raise ValueError("Input can only be of type: list, dict or np.ndarray")
if keep_other:
return (x, y) + train[2:]
else:
return (x, y) | [
"def",
"subset",
"(",
"train",
",",
"idx",
",",
"keep_other",
"=",
"True",
")",
":",
"test_len",
"(",
"train",
")",
"y",
"=",
"train",
"[",
"1",
"]",
"[",
"idx",
"]",
"# x split",
"if",
"isinstance",
"(",
"train",
"[",
"0",
"]",
",",
"(",
"list",... | 31.542857 | 18.828571 |
def get_metric_by_week(self, unique_identifier, metric, from_date, limit=10, **kwargs):
"""
Returns the ``metric`` for ``unique_identifier`` segmented by week
starting from``from_date``
:param unique_identifier: Unique string indetifying the object this metric is for
:param metric: A unique name for the metric you want to track
:param from_date: A python date object
:param limit: The total number of weeks to retrive starting from ``from_date``
"""
conn = kwargs.get("connection", None)
closest_monday_from_date = self._get_closest_week(from_date)
metric_key_date_range = self._get_weekly_date_range(closest_monday_from_date, datetime.timedelta(weeks=limit))
date_generator = (closest_monday_from_date + datetime.timedelta(days=i) for i in itertools.count(step=7))
#generate a list of mondays in between the start date and the end date
series = list(itertools.islice(date_generator, limit))
metric_keys = [self._get_weekly_metric_name(metric, monday_date) for monday_date in series]
metric_func = lambda conn: [conn.hmget(self._get_weekly_metric_key(unique_identifier, \
metric_key_date), metric_keys) for metric_key_date in metric_key_date_range]
if conn is not None:
results = metric_func(conn)
else:
with self._analytics_backend.map() as conn:
results = metric_func(conn)
series, results = self._parse_and_process_metrics(series, results)
return series, results | [
"def",
"get_metric_by_week",
"(",
"self",
",",
"unique_identifier",
",",
"metric",
",",
"from_date",
",",
"limit",
"=",
"10",
",",
"*",
"*",
"kwargs",
")",
":",
"conn",
"=",
"kwargs",
".",
"get",
"(",
"\"connection\"",
",",
"None",
")",
"closest_monday_fro... | 50.419355 | 31.387097 |
def batch_row_ids(data_batch):
""" Generate row ids based on the current mini-batch """
item = data_batch.data[0]
user = data_batch.data[1]
return {'user_weight': user.astype(np.int64),
'item_weight': item.astype(np.int64)} | [
"def",
"batch_row_ids",
"(",
"data_batch",
")",
":",
"item",
"=",
"data_batch",
".",
"data",
"[",
"0",
"]",
"user",
"=",
"data_batch",
".",
"data",
"[",
"1",
"]",
"return",
"{",
"'user_weight'",
":",
"user",
".",
"astype",
"(",
"np",
".",
"int64",
")... | 41 | 8.333333 |
def getName(self, value, defaultName = None):
'''
Get the enumerate name of a specified value.
:param value: the enumerate value
:param defaultName: returns if the enumerate value is not defined
:returns: the corresponding enumerate value or *defaultName* if not found
'''
for k,v in self._values.items():
if v == value:
return k
return defaultName | [
"def",
"getName",
"(",
"self",
",",
"value",
",",
"defaultName",
"=",
"None",
")",
":",
"for",
"k",
",",
"v",
"in",
"self",
".",
"_values",
".",
"items",
"(",
")",
":",
"if",
"v",
"==",
"value",
":",
"return",
"k",
"return",
"defaultName"
] | 39.090909 | 17.636364 |
def inject(self, span_context, format, carrier):
"""Injects `span_context` into `carrier`.
The type of `carrier` is determined by `format`. See the
:class:`Format` class/namespace for the built-in OpenTracing formats.
Implementations *must* raise :exc:`UnsupportedFormatException` if
`format` is unknown or disallowed.
:param span_context: the :class:`SpanContext` instance to inject
:type span_context: SpanContext
:param format: a python object instance that represents a given
carrier format. `format` may be of any type, and `format` equality
is defined by python ``==`` equality.
:type format: Format
:param carrier: the format-specific carrier object to inject into
"""
if format in Tracer._supported_formats:
return
raise UnsupportedFormatException(format) | [
"def",
"inject",
"(",
"self",
",",
"span_context",
",",
"format",
",",
"carrier",
")",
":",
"if",
"format",
"in",
"Tracer",
".",
"_supported_formats",
":",
"return",
"raise",
"UnsupportedFormatException",
"(",
"format",
")"
] | 42.238095 | 21.761905 |
def init_app(self, app, path='templates.yaml'):
"""Initializes Ask app by setting configuration variables, loading templates, and maps Ask route to a flask view.
The Ask instance is given the following configuration variables by calling on Flask's configuration:
`ASK_APPLICATION_ID`:
Turn on application ID verification by setting this variable to an application ID or a
list of allowed application IDs. By default, application ID verification is disabled and a
warning is logged. This variable should be set in production to ensure
requests are being sent by the applications you specify.
Default: None
`ASK_VERIFY_REQUESTS`:
Enables or disables Alexa request verification, which ensures requests sent to your skill
are from Amazon's Alexa service. This setting should not be disabled in production.
It is useful for mocking JSON requests in automated tests.
Default: True
`ASK_VERIFY_TIMESTAMP_DEBUG`:
Turn on request timestamp verification while debugging by setting this to True.
Timestamp verification helps mitigate against replay attacks. It relies on the system clock
being synchronized with an NTP server. This setting should not be enabled in production.
Default: False
`ASK_PRETTY_DEBUG_LOGS`:
Add tabs and linebreaks to the Alexa request and response printed to the debug log.
This improves readability when printing to the console, but breaks formatting when logging to CloudWatch.
Default: False
"""
if self._route is None:
raise TypeError("route is a required argument when app is not None")
self.app = app
app.ask = self
app.add_url_rule(self._route, view_func=self._flask_view_func, methods=['POST'])
app.jinja_loader = ChoiceLoader([app.jinja_loader, YamlLoader(app, path)]) | [
"def",
"init_app",
"(",
"self",
",",
"app",
",",
"path",
"=",
"'templates.yaml'",
")",
":",
"if",
"self",
".",
"_route",
"is",
"None",
":",
"raise",
"TypeError",
"(",
"\"route is a required argument when app is not None\"",
")",
"self",
".",
"app",
"=",
"app",... | 46.952381 | 34.619048 |
def tree_compare(self, othertree, vntree_meta=False):
"""Compare the (sub-)tree rooted at `self` with another tree.
`tree_compare` converts the trees being compared into JSON string
representations, and uses `difflib.SequenceMatcher().ratio()` to
calculate a measure of the similarity of the strings.
:param othertree: the other tree for comparison.
:type othertree: Node
:param vntree_meta: include private vntree metadata in comparison.
:type vntree_meta: bool
:returns: similarity of the trees as a number between 0 and 1.
:rtype: float
"""
return SequenceMatcher(None,
json.dumps(self.to_treedict(vntree_meta=vntree_meta), default=str),
json.dumps(othertree.to_treedict(vntree_meta=vntree_meta), default=str)
).ratio() | [
"def",
"tree_compare",
"(",
"self",
",",
"othertree",
",",
"vntree_meta",
"=",
"False",
")",
":",
"return",
"SequenceMatcher",
"(",
"None",
",",
"json",
".",
"dumps",
"(",
"self",
".",
"to_treedict",
"(",
"vntree_meta",
"=",
"vntree_meta",
")",
",",
"defau... | 47.5 | 22.611111 |
def draw_nodes(self):
"""
Draw nodes to screen.
"""
node_r = self.node_sizes
for i, node in enumerate(self.nodes):
x = self.node_coords["x"][i]
y = self.node_coords["y"][i]
color = self.node_colors[i]
node_patch = patches.Ellipse(
(x, y), node_r[i], node_r[i], lw=0, color=color, zorder=2
)
self.ax.add_patch(node_patch) | [
"def",
"draw_nodes",
"(",
"self",
")",
":",
"node_r",
"=",
"self",
".",
"node_sizes",
"for",
"i",
",",
"node",
"in",
"enumerate",
"(",
"self",
".",
"nodes",
")",
":",
"x",
"=",
"self",
".",
"node_coords",
"[",
"\"x\"",
"]",
"[",
"i",
"]",
"y",
"=... | 33.538462 | 8.153846 |
def _mzmlListAttribToTuple(oldList):
"""Turns the param entries of elements in a list elements into tuples, used
in :func:`MzmlScan._fromJSON()` and :func:`MzmlPrecursor._fromJSON()`.
.. note:: only intended for a list of elements that contain params. For
example the mzML element ``selectedIonList`` or ``scanWindowList``.
:param oldList: [[paramList, paramList, ...], ...]
:returns: [[paramTuple, paramTuple, ...], ...]
"""
newList = list()
for oldParamList in oldList:
newParamLIst = [tuple(param) for param in oldParamList]
newList.append(newParamLIst)
return newList | [
"def",
"_mzmlListAttribToTuple",
"(",
"oldList",
")",
":",
"newList",
"=",
"list",
"(",
")",
"for",
"oldParamList",
"in",
"oldList",
":",
"newParamLIst",
"=",
"[",
"tuple",
"(",
"param",
")",
"for",
"param",
"in",
"oldParamList",
"]",
"newList",
".",
"appe... | 38.6875 | 20.5625 |
def progress(progress):
"""Convert given progress to a JSON object.
Check that progress can be represented as float between 0 and 1 and
return it in JSON of the form:
{"proc.progress": progress}
"""
if isinstance(progress, int) or isinstance(progress, float):
progress = float(progress)
else:
try:
progress = float(json.loads(progress))
except (TypeError, ValueError):
return warning("Progress must be a float.")
if not 0 <= progress <= 1:
return warning("Progress must be a float between 0 and 1.")
return json.dumps({'proc.progress': progress}) | [
"def",
"progress",
"(",
"progress",
")",
":",
"if",
"isinstance",
"(",
"progress",
",",
"int",
")",
"or",
"isinstance",
"(",
"progress",
",",
"float",
")",
":",
"progress",
"=",
"float",
"(",
"progress",
")",
"else",
":",
"try",
":",
"progress",
"=",
... | 29.857143 | 20.047619 |
def add_error(self, group, term, sub_term, value):
"""For records that are not defined as terms, either add it to the
errors list."""
self._errors[(group, term, sub_term)] = value | [
"def",
"add_error",
"(",
"self",
",",
"group",
",",
"term",
",",
"sub_term",
",",
"value",
")",
":",
"self",
".",
"_errors",
"[",
"(",
"group",
",",
"term",
",",
"sub_term",
")",
"]",
"=",
"value"
] | 40 | 12.6 |
def workflow_set_details(object_id, input_params={}, always_retry=True, **kwargs):
"""
Invokes the /workflow-xxxx/setDetails API method.
For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Details-and-Links#API-method%3A-%2Fclass-xxxx%2FsetDetails
"""
return DXHTTPRequest('/%s/setDetails' % object_id, input_params, always_retry=always_retry, **kwargs) | [
"def",
"workflow_set_details",
"(",
"object_id",
",",
"input_params",
"=",
"{",
"}",
",",
"always_retry",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"DXHTTPRequest",
"(",
"'/%s/setDetails'",
"%",
"object_id",
",",
"input_params",
",",
"always_re... | 55.285714 | 36.142857 |
def list_joined_topics(self, start=0):
"""
已加入的所有小组的话题列表
:param start: 翻页
:return: 带下一页的列表
"""
xml = self.api.xml(API_GROUP_HOME, params={'start': start})
return build_list_result(self._parse_topic_table(xml, 'title,comment,created,group'), xml) | [
"def",
"list_joined_topics",
"(",
"self",
",",
"start",
"=",
"0",
")",
":",
"xml",
"=",
"self",
".",
"api",
".",
"xml",
"(",
"API_GROUP_HOME",
",",
"params",
"=",
"{",
"'start'",
":",
"start",
"}",
")",
"return",
"build_list_result",
"(",
"self",
".",
... | 33.555556 | 18.888889 |
def make_ranges(self, file_url):
"""
Divides file_url size into an array of ranges to be downloaded by workers.
:param: file_url: ProjectFileUrl: file url to download
:return: [(int,int)]: array of (start, end) tuples
"""
size = file_url.size
bytes_per_chunk = self.determine_bytes_per_chunk(size)
start = 0
ranges = []
while size > 0:
amount = bytes_per_chunk
if amount > size:
amount = size
ranges.append((start, start + amount - 1))
start += amount
size -= amount
return ranges | [
"def",
"make_ranges",
"(",
"self",
",",
"file_url",
")",
":",
"size",
"=",
"file_url",
".",
"size",
"bytes_per_chunk",
"=",
"self",
".",
"determine_bytes_per_chunk",
"(",
"size",
")",
"start",
"=",
"0",
"ranges",
"=",
"[",
"]",
"while",
"size",
">",
"0",... | 34.833333 | 15.055556 |
def segment_text(text=os.path.join(DATA_PATH, 'goodreads-omniscient-books.txt'),
start=None, stop=r'^Rate\ this', ignore=r'^[\d]'):
""" Split text into segments (sections, paragraphs) using regular expressions to trigger breaks.start
"""
start = start if hasattr(start, 'match') else re.compile(start) if start else None
stop = stop if hasattr(stop, 'match') else re.compile(stop) if stop else None
ignore = ignore if hasattr(ignore, 'match') else re.compile(ignore) if ignore else None
segments = []
segment = []
with open(text) as fin:
for line in fin:
if start is not None and start.match(line):
segments += [segment] if len(segment) else []
segment = [line]
elif stop is not None and stop.match(line):
segments += [segment]
segment = []
elif ignore is not None and ignore.match(line):
continue
else:
segment += [segment] | [
"def",
"segment_text",
"(",
"text",
"=",
"os",
".",
"path",
".",
"join",
"(",
"DATA_PATH",
",",
"'goodreads-omniscient-books.txt'",
")",
",",
"start",
"=",
"None",
",",
"stop",
"=",
"r'^Rate\\ this'",
",",
"ignore",
"=",
"r'^[\\d]'",
")",
":",
"start",
"="... | 45.681818 | 20.772727 |
def _get_directory_stash(self, path):
"""Stashes a directory.
Directories are stashed adjacent to their original location if
possible, or else moved/copied into the user's temp dir."""
try:
save_dir = AdjacentTempDirectory(path)
save_dir.create()
except OSError:
save_dir = TempDirectory(kind="uninstall")
save_dir.create()
self._save_dirs[os.path.normcase(path)] = save_dir
return save_dir.path | [
"def",
"_get_directory_stash",
"(",
"self",
",",
"path",
")",
":",
"try",
":",
"save_dir",
"=",
"AdjacentTempDirectory",
"(",
"path",
")",
"save_dir",
".",
"create",
"(",
")",
"except",
"OSError",
":",
"save_dir",
"=",
"TempDirectory",
"(",
"kind",
"=",
"\... | 32.533333 | 18.266667 |
def is_cnpj(numero, estrito=False):
"""Uma versão conveniente para usar em testes condicionais. Apenas retorna
verdadeiro ou falso, conforme o argumento é validado.
:param bool estrito: Padrão ``False``, indica se apenas os dígitos do
número deverão ser considerados. Se verdadeiro, potenciais caracteres
que formam a máscara serão removidos antes da validação ser realizada.
"""
try:
cnpj(digitos(numero) if not estrito else numero)
return True
except NumeroCNPJError:
pass
return False | [
"def",
"is_cnpj",
"(",
"numero",
",",
"estrito",
"=",
"False",
")",
":",
"try",
":",
"cnpj",
"(",
"digitos",
"(",
"numero",
")",
"if",
"not",
"estrito",
"else",
"numero",
")",
"return",
"True",
"except",
"NumeroCNPJError",
":",
"pass",
"return",
"False"
... | 36.2 | 22.933333 |
def post(self, path, body):
"""POST request."""
return self._make_request('post',
self._format_url(API_ROOT + path), {
'json': body
}) | [
"def",
"post",
"(",
"self",
",",
"path",
",",
"body",
")",
":",
"return",
"self",
".",
"_make_request",
"(",
"'post'",
",",
"self",
".",
"_format_url",
"(",
"API_ROOT",
"+",
"path",
")",
",",
"{",
"'json'",
":",
"body",
"}",
")"
] | 30.833333 | 10 |
def parse_version(package):
"""
Statically parse the version number from __init__.py
CommandLine:
python -c "import setup; print(setup.parse_version('ubelt'))"
"""
from os.path import dirname, join, exists
import ast
# Check if the package is a single-file or multi-file package
_candiates = [
join(dirname(__file__), package + '.py'),
join(dirname(__file__), package, '__init__.py'),
]
_found = [init_fpath for init_fpath in _candiates if exists(init_fpath)]
if len(_found) > 0:
init_fpath = _found[0]
elif len(_found) > 1:
raise Exception('parse_version found multiple init files')
elif len(_found) == 0:
raise Exception('Cannot find package init file')
with open(init_fpath, 'r') as file_:
sourcecode = file_.read()
pt = ast.parse(sourcecode)
class VersionVisitor(ast.NodeVisitor):
def visit_Assign(self, node):
for target in node.targets:
if getattr(target, 'id', None) == '__version__':
self.version = node.value.s
visitor = VersionVisitor()
visitor.visit(pt)
return visitor.version | [
"def",
"parse_version",
"(",
"package",
")",
":",
"from",
"os",
".",
"path",
"import",
"dirname",
",",
"join",
",",
"exists",
"import",
"ast",
"# Check if the package is a single-file or multi-file package",
"_candiates",
"=",
"[",
"join",
"(",
"dirname",
"(",
"__... | 33.676471 | 16.794118 |
def build_interface(iface, iface_type, enabled, **settings):
'''
Build an interface script for a network interface.
CLI Example:
.. code-block:: bash
salt '*' ip.build_interface eth0 eth <settings>
'''
if __grains__['lsb_distrib_id'] == 'nilrt':
raise salt.exceptions.CommandExecutionError('Not supported in this version.')
if iface_type != 'eth':
raise salt.exceptions.CommandExecutionError('Interface type not supported: {0}:'.format(iface_type))
if 'proto' not in settings or settings['proto'] == 'dhcp': # default protocol type used is dhcp
set_dhcp_linklocal_all(iface)
elif settings['proto'] != 'static':
exc_msg = 'Protocol type: {0} is not supported'.format(settings['proto'])
raise salt.exceptions.CommandExecutionError(exc_msg)
else:
address = settings['ipaddr']
netmask = settings['netmask']
gateway = settings['gateway']
dns = []
for key, val in six.iteritems(settings):
if 'dns' in key or 'domain' in key:
dns += val
set_static_all(iface, address, netmask, gateway, dns)
if enabled:
up(iface)
return get_interface(iface) | [
"def",
"build_interface",
"(",
"iface",
",",
"iface_type",
",",
"enabled",
",",
"*",
"*",
"settings",
")",
":",
"if",
"__grains__",
"[",
"'lsb_distrib_id'",
"]",
"==",
"'nilrt'",
":",
"raise",
"salt",
".",
"exceptions",
".",
"CommandExecutionError",
"(",
"'N... | 34.911765 | 24.264706 |
def absolute_path(path=None, base_dir=None):
"""
Return absolute path if path is local.
Parameters:
-----------
path : path to file
base_dir : base directory used for absolute path
Returns:
--------
absolute path
"""
if path_is_remote(path):
return path
else:
if os.path.isabs(path):
return path
else:
if base_dir is None or not os.path.isabs(base_dir):
raise TypeError("base_dir must be an absolute path.")
return os.path.abspath(os.path.join(base_dir, path)) | [
"def",
"absolute_path",
"(",
"path",
"=",
"None",
",",
"base_dir",
"=",
"None",
")",
":",
"if",
"path_is_remote",
"(",
"path",
")",
":",
"return",
"path",
"else",
":",
"if",
"os",
".",
"path",
".",
"isabs",
"(",
"path",
")",
":",
"return",
"path",
... | 25.681818 | 19.863636 |
def standard_sc_expr_str(sc):
"""
Standard symbol/choice printing function. Uses plain Kconfig syntax, and
displays choices as <choice> (or <choice NAME>, for named choices).
See expr_str().
"""
if sc.__class__ is Symbol:
return '"{}"'.format(escape(sc.name)) if sc.is_constant else sc.name
# Choice
return "<choice {}>".format(sc.name) if sc.name else "<choice>" | [
"def",
"standard_sc_expr_str",
"(",
"sc",
")",
":",
"if",
"sc",
".",
"__class__",
"is",
"Symbol",
":",
"return",
"'\"{}\"'",
".",
"format",
"(",
"escape",
"(",
"sc",
".",
"name",
")",
")",
"if",
"sc",
".",
"is_constant",
"else",
"sc",
".",
"name",
"#... | 32.833333 | 23.333333 |
def liked(parser, token):
"""
{% liked objects by user as varname %}
"""
tag, objects, _, user, _, varname = token.split_contents()
return LikedObjectsNode(objects, user, varname) | [
"def",
"liked",
"(",
"parser",
",",
"token",
")",
":",
"tag",
",",
"objects",
",",
"_",
",",
"user",
",",
"_",
",",
"varname",
"=",
"token",
".",
"split_contents",
"(",
")",
"return",
"LikedObjectsNode",
"(",
"objects",
",",
"user",
",",
"varname",
"... | 32.333333 | 8.333333 |
def to_hdf(self, path_or_buf, key, **kwargs):
"""
Write the contained data to an HDF5 file using HDFStore.
Hierarchical Data Format (HDF) is self-describing, allowing an
application to interpret the structure and contents of a file with
no outside information. One HDF file can hold a mix of related objects
which can be accessed as a group or as individual objects.
In order to add another DataFrame or Series to an existing HDF file
please use append mode and a different a key.
For more information see the :ref:`user guide <io.hdf5>`.
Parameters
----------
path_or_buf : str or pandas.HDFStore
File path or HDFStore object.
key : str
Identifier for the group in the store.
mode : {'a', 'w', 'r+'}, default 'a'
Mode to open file:
- 'w': write, a new file is created (an existing file with
the same name would be deleted).
- 'a': append, an existing file is opened for reading and
writing, and if the file does not exist it is created.
- 'r+': similar to 'a', but the file must already exist.
format : {'fixed', 'table'}, default 'fixed'
Possible values:
- 'fixed': Fixed format. Fast writing/reading. Not-appendable,
nor searchable.
- 'table': Table format. Write as a PyTables Table structure
which may perform worse but allow more flexible operations
like searching / selecting subsets of the data.
append : bool, default False
For Table formats, append the input data to the existing.
data_columns : list of columns or True, optional
List of columns to create as indexed data columns for on-disk
queries, or True to use all columns. By default only the axes
of the object are indexed. See :ref:`io.hdf5-query-data-columns`.
Applicable only to format='table'.
complevel : {0-9}, optional
Specifies a compression level for data.
A value of 0 disables compression.
complib : {'zlib', 'lzo', 'bzip2', 'blosc'}, default 'zlib'
Specifies the compression library to be used.
As of v0.20.2 these additional compressors for Blosc are supported
(default if no compressor specified: 'blosc:blosclz'):
{'blosc:blosclz', 'blosc:lz4', 'blosc:lz4hc', 'blosc:snappy',
'blosc:zlib', 'blosc:zstd'}.
Specifying a compression library which is not available issues
a ValueError.
fletcher32 : bool, default False
If applying compression use the fletcher32 checksum.
dropna : bool, default False
If true, ALL nan rows will not be written to store.
errors : str, default 'strict'
Specifies how encoding and decoding errors are to be handled.
See the errors argument for :func:`open` for a full list
of options.
See Also
--------
DataFrame.read_hdf : Read from HDF file.
DataFrame.to_parquet : Write a DataFrame to the binary parquet format.
DataFrame.to_sql : Write to a sql table.
DataFrame.to_feather : Write out feather-format for DataFrames.
DataFrame.to_csv : Write out to a csv file.
Examples
--------
>>> df = pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]},
... index=['a', 'b', 'c'])
>>> df.to_hdf('data.h5', key='df', mode='w')
We can add another object to the same file:
>>> s = pd.Series([1, 2, 3, 4])
>>> s.to_hdf('data.h5', key='s')
Reading from HDF file:
>>> pd.read_hdf('data.h5', 'df')
A B
a 1 4
b 2 5
c 3 6
>>> pd.read_hdf('data.h5', 's')
0 1
1 2
2 3
3 4
dtype: int64
Deleting file with data:
>>> import os
>>> os.remove('data.h5')
"""
from pandas.io import pytables
return pytables.to_hdf(path_or_buf, key, self, **kwargs) | [
"def",
"to_hdf",
"(",
"self",
",",
"path_or_buf",
",",
"key",
",",
"*",
"*",
"kwargs",
")",
":",
"from",
"pandas",
".",
"io",
"import",
"pytables",
"return",
"pytables",
".",
"to_hdf",
"(",
"path_or_buf",
",",
"key",
",",
"self",
",",
"*",
"*",
"kwar... | 39.970874 | 21.485437 |
def execute_before_scenario_steps(self, context):
"""
actions before each scenario
:param context: It’s a clever place where you and behave can store information to share around, automatically managed by behave.
"""
if not self.feature_error:
self.__execute_steps_by_action(context, ACTIONS_BEFORE_SCENARIO)
if context.dyn_env.scenario_error:
# Mark this Scenario as skipped. Steps will not be executed.
context.scenario.mark_skipped() | [
"def",
"execute_before_scenario_steps",
"(",
"self",
",",
"context",
")",
":",
"if",
"not",
"self",
".",
"feature_error",
":",
"self",
".",
"__execute_steps_by_action",
"(",
"context",
",",
"ACTIONS_BEFORE_SCENARIO",
")",
"if",
"context",
".",
"dyn_env",
".",
"s... | 46.363636 | 20.727273 |
def load(path):
"""
Load pickled object from the specified file path.
Parameters
----------
path : string
File path
Returns
-------
unpickled : type of object stored in file
"""
f = open(path, 'rb')
try:
return pickle.load(f)
finally:
f.close() | [
"def",
"load",
"(",
"path",
")",
":",
"f",
"=",
"open",
"(",
"path",
",",
"'rb'",
")",
"try",
":",
"return",
"pickle",
".",
"load",
"(",
"f",
")",
"finally",
":",
"f",
".",
"close",
"(",
")"
] | 16.722222 | 21.611111 |
def connect_forwarder(forward_host=None,
forward_port=None,
max_retries=-1,
sleep_interval=1.0):
"""connect_forwarder
:param forward_host: host for receiving forwarded packets
:param forward_port: port for the forwarded packets
:param max_retries: retries, -1 = infinite
:param sleep_interval: how often to retry in this loop
"""
forward_skt = None
retry_count = 0
if max_retries == -1:
retry_count = -2
if forward_host and forward_port:
while not forward_skt and \
retry_count < max_retries:
try:
forward_skt = socket.socket()
log.info(("connecting to forward={}:{}")
.format(forward_host,
forward_port))
forward_skt.connect((forward_host,
forward_port))
log.debug(("connected to forward={}:{}")
.format(forward_host,
forward_port))
except Exception as s:
forward_skt = None
log.error(("Failed to connect forward address={}:{} "
"with ex={}")
.format(forward_host,
forward_port,
s))
if max_retries == -1:
retry_count = -2
else:
retry_count += 1
# end of try/ex
time.sleep(sleep_interval)
# end of setting up forward
# end forward_host and forward_port
return forward_skt | [
"def",
"connect_forwarder",
"(",
"forward_host",
"=",
"None",
",",
"forward_port",
"=",
"None",
",",
"max_retries",
"=",
"-",
"1",
",",
"sleep_interval",
"=",
"1.0",
")",
":",
"forward_skt",
"=",
"None",
"retry_count",
"=",
"0",
"if",
"max_retries",
"==",
... | 35.595745 | 11.489362 |
def interact_plain(header=UP_LINE, local_ns=None,
module=None, dummy=None,
stack_depth=1, global_ns=None):
"""
Create an interactive python console
"""
frame = sys._getframe(stack_depth)
variables = {}
if local_ns is not None:
variables.update(local_ns)
else:
variables.update(frame.f_locals)
if global_ns is not None:
variables.update(local_ns)
else:
variables.update(frame.f_globals)
shell = code.InteractiveConsole(variables)
return shell.interact(banner=header) | [
"def",
"interact_plain",
"(",
"header",
"=",
"UP_LINE",
",",
"local_ns",
"=",
"None",
",",
"module",
"=",
"None",
",",
"dummy",
"=",
"None",
",",
"stack_depth",
"=",
"1",
",",
"global_ns",
"=",
"None",
")",
":",
"frame",
"=",
"sys",
".",
"_getframe",
... | 25.545455 | 14.090909 |
def get_POST_data(self):
"""
Returns:
dict: POST data, which can be sent to webform using \
:py:mod:`urllib` or similar library
"""
self._postprocess()
# some fields need to be remapped (depends on type of media)
self._apply_mapping(
self.mapping.get(self._POST["P0502010__b"], self.mapping["else"])
)
self._check_required_fields()
return self._POST | [
"def",
"get_POST_data",
"(",
"self",
")",
":",
"self",
".",
"_postprocess",
"(",
")",
"# some fields need to be remapped (depends on type of media)",
"self",
".",
"_apply_mapping",
"(",
"self",
".",
"mapping",
".",
"get",
"(",
"self",
".",
"_POST",
"[",
"\"P050201... | 28.1875 | 21.0625 |
def get_pull_request_files(project, num, auth=False):
"""get list of files in a pull request"""
url = "https://api.github.com/repos/{project}/pulls/{num}/files".format(project=project, num=num)
if auth:
header = make_auth_header()
else:
header = None
return get_paged_request(url, headers=header) | [
"def",
"get_pull_request_files",
"(",
"project",
",",
"num",
",",
"auth",
"=",
"False",
")",
":",
"url",
"=",
"\"https://api.github.com/repos/{project}/pulls/{num}/files\"",
".",
"format",
"(",
"project",
"=",
"project",
",",
"num",
"=",
"num",
")",
"if",
"auth"... | 40.625 | 20.75 |
def whois_domains(self, domains):
"""Calls WHOIS domain end point
Args:
domains: An enumerable of domains
Returns:
A dict of {domain: domain_result}
"""
api_name = 'opendns-whois-domain'
fmt_url_path = u'whois/{0}'
return self._multi_get(api_name, fmt_url_path, domains) | [
"def",
"whois_domains",
"(",
"self",
",",
"domains",
")",
":",
"api_name",
"=",
"'opendns-whois-domain'",
"fmt_url_path",
"=",
"u'whois/{0}'",
"return",
"self",
".",
"_multi_get",
"(",
"api_name",
",",
"fmt_url_path",
",",
"domains",
")"
] | 31 | 12.454545 |
def find(self, i):
        '''
        API:
            find(self, i)
        Description:
            Returns root of set that has i.
        Input:
            i: Item.
        Return:
            Returns root of set that has i.
        '''
        current = i
        edge_list = []
        # Follow parent pointers (stored as directed graph edges) until a
        # node with no successor remains -- that node is the representative.
        while len(self.get_neighbors(current)) != 0:
            successor = self.get_neighbors(current)[0]
            edge_list.append((current, successor))
            current = successor
        if self.optimize:
            # Path compression: re-point every traversed node directly at
            # the root so later find() calls walk a shorter chain.
            for e in edge_list:
                if e[1] != current:
                    self.del_edge((e[0], e[1]))
                    self.add_edge(e[0], current)
        return current
"def",
"find",
"(",
"self",
",",
"i",
")",
":",
"current",
"=",
"i",
"edge_list",
"=",
"[",
"]",
"while",
"len",
"(",
"self",
".",
"get_neighbors",
"(",
"current",
")",
")",
"!=",
"0",
":",
"successor",
"=",
"self",
".",
"get_neighbors",
"(",
"curr... | 29.043478 | 15.913043 |
def _check_action(action):
    """Validate *action* and return it lower-cased.

    Raises:
        SpamCError: if the action is not one of the supported verbs.
    """
    # NOTE: types.StringTypes exists only on Python 2 (str + unicode)
    if isinstance(action, types.StringTypes):
        action = action.lower()
    if action not in ['learn', 'forget', 'report', 'revoke']:
        raise SpamCError('The action option is invalid')
    return action
"def",
"_check_action",
"(",
"action",
")",
":",
"if",
"isinstance",
"(",
"action",
",",
"types",
".",
"StringTypes",
")",
":",
"action",
"=",
"action",
".",
"lower",
"(",
")",
"if",
"action",
"not",
"in",
"[",
"'learn'",
",",
"'forget'",
",",
"'report... | 33.875 | 16 |
def sorted(self, by, **kwargs):
        """Sort array by a column.

        Parameters
        ==========
        by: str
            Name of the column to sort by (e.g. 'time').
        """
        # extra keyword args (e.g. kind=) are passed straight to np.argsort
        sort_idc = np.argsort(self[by], **kwargs)
        # rebuild an instance of the same (sub)class so metadata survives
        return self.__class__(
            self[sort_idc],
            h5loc=self.h5loc,
            split_h5=self.split_h5,
            name=self.name
        )
"def",
"sorted",
"(",
"self",
",",
"by",
",",
"*",
"*",
"kwargs",
")",
":",
"sort_idc",
"=",
"np",
".",
"argsort",
"(",
"self",
"[",
"by",
"]",
",",
"*",
"*",
"kwargs",
")",
"return",
"self",
".",
"__class__",
"(",
"self",
"[",
"sort_idc",
"]",
... | 25.866667 | 15.133333 |
def is_consistent(self) -> bool:
        """
        Returns True if number of nodes are consistent with number of leaves
        """
        # local import -- presumably to avoid a circular dependency with
        # ledger.compact_merkle_tree; confirm before hoisting to module level
        from ledger.compact_merkle_tree import CompactMerkleTree
        return self.nodeCount == CompactMerkleTree.get_expected_node_count(
            self.leafCount)
"def",
"is_consistent",
"(",
"self",
")",
"->",
"bool",
":",
"from",
"ledger",
".",
"compact_merkle_tree",
"import",
"CompactMerkleTree",
"return",
"self",
".",
"nodeCount",
"==",
"CompactMerkleTree",
".",
"get_expected_node_count",
"(",
"self",
".",
"leafCount",
... | 42.285714 | 16.571429 |
def _create_connection(self, future):
        """Create a new PostgreSQL connection

        :param tornado.concurrent.Future future: future for new conn result
        """
        LOGGER.debug('Creating a new connection for %s', self.pid)
        # Create a new PostgreSQL connection
        kwargs = utils.uri_to_kwargs(self._uri)
        try:
            connection = self._psycopg2_connect(kwargs)
        except (psycopg2.Error, OSError, socket.error) as error:
            # connection failed synchronously; report through the future
            future.set_exception(error)
            return
        # Add the connection for use in _poll_connection
        fd = connection.fileno()
        self._connections[fd] = connection
        def on_connected(cf):
            """Invoked by the IOLoop when the future is complete for the
            connection

            :param Future cf: The future for the initial connection
            """
            if cf.exception():
                self._cleanup_fd(fd, True)
                future.set_exception(cf.exception())
            else:
                try:
                    # Add the connection to the pool
                    LOGGER.debug('Connection established for %s', self.pid)
                    self._pool_manager.add(self.pid, connection)
                except (ValueError, pool.PoolException) as err:
                    LOGGER.exception('Failed to add %r to the pool', self.pid)
                    self._cleanup_fd(fd)
                    future.set_exception(err)
                    return
                self._pool_manager.lock(self.pid, connection, self)
                # Added in because psycopg2cffi connects and leaves the
                # connection in a weird state: consts.STATUS_DATESTYLE,
                # returning from Connection._setup without setting the state
                # as const.STATUS_OK
                if utils.PYPY:
                    connection.status = extensions.STATUS_READY
                # Register the custom data types
                self._register_unicode(connection)
                self._register_uuid(connection)
                # Set the future result
                future.set_result(connection)
        # Add a future that fires once connected
        self._futures[fd] = concurrent.Future()
        self._ioloop.add_future(self._futures[fd], on_connected)
        # Add the connection to the IOLoop
        self._ioloop.add_handler(connection.fileno(),
                                 self._on_io_events,
                                 ioloop.IOLoop.WRITE)
"def",
"_create_connection",
"(",
"self",
",",
"future",
")",
":",
"LOGGER",
".",
"debug",
"(",
"'Creating a new connection for %s'",
",",
"self",
".",
"pid",
")",
"# Create a new PostgreSQL connection",
"kwargs",
"=",
"utils",
".",
"uri_to_kwargs",
"(",
"self",
"... | 36.088235 | 20.25 |
def _validate_channel_definition(self, jp2h, colr):
        """Validate the channel definition box."""
        # indices of all 'cdef' boxes inside the JP2 header box
        cdef_lst = [j for (j, box) in enumerate(jp2h.box)
                    if box.box_id == 'cdef']
        if len(cdef_lst) > 1:
            msg = ("Only one channel definition box is allowed in the "
                   "JP2 header.")
            raise IOError(msg)
        elif len(cdef_lst) == 1:
            cdef = jp2h.box[cdef_lst[0]]
            if colr.colorspace == core.SRGB:
                # sRGB: channels 0-2 must all be associated (associations
                # are 1-based) and declared as color channels (type 0)
                if any([chan + 1 not in cdef.association or
                        cdef.channel_type[chan] != 0 for chan in [0, 1, 2]]):
                    msg = ("All color channels must be defined in the "
                           "channel definition box.")
                    raise IOError(msg)
            elif colr.colorspace == core.GREYSCALE:
                # greyscale: at least one channel must be a color channel
                if 0 not in cdef.channel_type:
                    msg = ("All color channels must be defined in the "
                           "channel definition box.")
                    raise IOError(msg)
"def",
"_validate_channel_definition",
"(",
"self",
",",
"jp2h",
",",
"colr",
")",
":",
"cdef_lst",
"=",
"[",
"j",
"for",
"(",
"j",
",",
"box",
")",
"in",
"enumerate",
"(",
"jp2h",
".",
"box",
")",
"if",
"box",
".",
"box_id",
"==",
"'cdef'",
"]",
"... | 49.428571 | 12.761905 |
def convert_ranges(cls, ranges, length):
        """Clamp *ranges* to valid byte ranges for a body of *length* bytes."""
        converted = []
        for begin, stop in ranges:
            if stop is None:
                # open-ended range: from begin through the last byte
                converted.append((begin, length - 1))
            elif begin is None:
                # suffix range: the final `stop` bytes of the body
                offset = length - stop
                converted.append((max(offset, 0), length - 1))
            else:
                converted.append((begin, min(stop, length - 1)))
        return converted
"def",
"convert_ranges",
"(",
"cls",
",",
"ranges",
",",
"length",
")",
":",
"result",
"=",
"[",
"]",
"for",
"start",
",",
"end",
"in",
"ranges",
":",
"if",
"end",
"is",
"None",
":",
"result",
".",
"append",
"(",
"(",
"start",
",",
"length",
"-",
... | 37.5 | 13.916667 |
def next(self):
        """next(self) -> Annot"""
        CheckParent(self)
        val = _fitz.Annot_next(self)
        if val:
            # transfer ownership of the SWIG wrapper to Python
            val.thisown = True
            val.parent = self.parent # copy owning page object from previous annot
            # keep the page's registry of live annot wrappers up to date
            val.parent._annot_refs[id(val)] = val
        return val
"def",
"next",
"(",
"self",
")",
":",
"CheckParent",
"(",
"self",
")",
"val",
"=",
"_fitz",
".",
"Annot_next",
"(",
"self",
")",
"if",
"val",
":",
"val",
".",
"thisown",
"=",
"True",
"val",
".",
"parent",
"=",
"self",
".",
"parent",
"# copy owning pa... | 27.545455 | 21.090909 |
def inspect_built_image(self):
        """
        Return the docker-inspect data for the built image.

        :return: dict
        """
        logger.info("inspecting built image '%s'", self.image_id)
        self.ensure_is_built()
        # dict with lots of data, see man docker-inspect
        return self.tasker.inspect_image(self.image_id)
"def",
"inspect_built_image",
"(",
"self",
")",
":",
"logger",
".",
"info",
"(",
"\"inspecting built image '%s'\"",
",",
"self",
".",
"image_id",
")",
"self",
".",
"ensure_is_built",
"(",
")",
"# dict with lots of data, see man docker-inspect",
"inspect_data",
"=",
"s... | 31 | 15.363636 |
def _merge_with_other_stm(self, other: "IfContainer") -> None:
        """
        Merge other statement to this statement
        """
        merge = self._merge_statement_lists
        newCases = []
        # merge case bodies pairwise; assumes both containers have the
        # same case conditions in the same order -- confirm at call sites
        for (c, caseA), (_, caseB) in zip(self.cases, other.cases):
            newCases.append((c, merge(caseA, caseB)))
        self.cases = newCases
        if self.default is not None:
            self.default = merge(self.default, other.default)
        self._on_merge(other)
"def",
"_merge_with_other_stm",
"(",
"self",
",",
"other",
":",
"\"IfContainer\"",
")",
"->",
"None",
":",
"merge",
"=",
"self",
".",
"_merge_statement_lists",
"newCases",
"=",
"[",
"]",
"for",
"(",
"c",
",",
"caseA",
")",
",",
"(",
"_",
",",
"caseB",
... | 31.333333 | 17.2 |
def is_rdemo(file_name):
    """
    Return True if file_name matches a regexp for an R demo. False otherwise.

    :param file_name: file to test
    """
    packaged_demos = ["h2o.anomaly.R", "h2o.deeplearning.R", "h2o.gbm.R", "h2o.glm.R", "h2o.glrm.R", "h2o.kmeans.R",
                      "h2o.naiveBayes.R", "h2o.prcomp.R", "h2o.randomForest.R"]
    if file_name in packaged_demos:
        return True
    # raw string: the original "\." in a plain literal is an invalid escape
    # sequence (DeprecationWarning on Python 3); the pattern is unchanged
    if re.match(r"^rdemo.*\.(r|R|ipynb)$", file_name):
        return True
    return False
"def",
"is_rdemo",
"(",
"file_name",
")",
":",
"packaged_demos",
"=",
"[",
"\"h2o.anomaly.R\"",
",",
"\"h2o.deeplearning.R\"",
",",
"\"h2o.gbm.R\"",
",",
"\"h2o.glm.R\"",
",",
"\"h2o.glrm.R\"",
",",
"\"h2o.kmeans.R\"",
",",
"\"h2o.naiveBayes.R\"",
",",
"\"h2o.prcomp.R\"... | 47.3 | 23.1 |
def fromxml(node):
        """Return a profile instance from the given XML description. Node can be a string or an etree._Element."""
        if not isinstance(node,ElementTree._Element): #pylint: disable=protected-access
            node = parsexmlstring(node)
        args = []
        if node.tag == 'profile':
            # iterate the children under a distinct name; the original code
            # rebound ``node`` inside this loop, shadowing the parameter
            for child in node:
                if child.tag == 'input':
                    for subnode in child:
                        if subnode.tag.lower() == 'inputtemplate':
                            args.append(InputTemplate.fromxml(subnode))
                elif child.tag == 'output':
                    for subnode in child:
                        if subnode.tag.lower() == 'outputtemplate':
                            args.append(OutputTemplate.fromxml(subnode))
                        elif subnode.tag.lower() == 'parametercondition':
                            args.append(ParameterCondition.fromxml(subnode))
        return Profile(*args)
"def",
"fromxml",
"(",
"node",
")",
":",
"if",
"not",
"isinstance",
"(",
"node",
",",
"ElementTree",
".",
"_Element",
")",
":",
"#pylint: disable=protected-access",
"node",
"=",
"parsexmlstring",
"(",
"node",
")",
"args",
"=",
"[",
"]",
"if",
"node",
".",
... | 47.6 | 19.5 |
def textify(self, nums:Collection[int], sep=' ') -> List[str]:
    "Convert a list of `nums` to their tokens."
    tokens = [self.itos[i] for i in nums]
    return sep.join(tokens) if sep is not None else tokens
"def",
"textify",
"(",
"self",
",",
"nums",
":",
"Collection",
"[",
"int",
"]",
",",
"sep",
"=",
"' '",
")",
"->",
"List",
"[",
"str",
"]",
":",
"return",
"sep",
".",
"join",
"(",
"[",
"self",
".",
"itos",
"[",
"i",
"]",
"for",
"i",
"in",
"nu... | 73 | 33 |
def returner(load):
    '''
    Return data to a postgres server
    '''
    conn = _get_conn()
    if conn is None:
        # no connection available; nothing can be recorded
        return None
    cur = conn.cursor()
    # NOTE(review): "return" is a reserved word in PostgreSQL; this relies
    # on the target schema/tooling accepting it unquoted -- confirm
    sql = '''INSERT INTO salt_returns
            (fun, jid, return, id, success)
            VALUES (%s, %s, %s, %s, %s)'''
    try:
        ret = six.text_type(load['return'])
    except UnicodeDecodeError:
        # fall back to the plain str() when the payload is not decodable
        ret = str(load['return'])
    job_ret = {'return': ret}
    if 'retcode' in load:
        job_ret['retcode'] = load['retcode']
    if 'success' in load:
        job_ret['success'] = load['success']
    cur.execute(
        sql, (
            load['fun'],
            load['jid'],
            salt.utils.json.dumps(job_ret),
            load['id'],
            load.get('success'),
        )
    )
    _close_conn(conn)
"def",
"returner",
"(",
"load",
")",
":",
"conn",
"=",
"_get_conn",
"(",
")",
"if",
"conn",
"is",
"None",
":",
"return",
"None",
"cur",
"=",
"conn",
".",
"cursor",
"(",
")",
"sql",
"=",
"'''INSERT INTO salt_returns\n (fun, jid, return, id, success)\n ... | 25.566667 | 15.7 |
def GroupSizer(field_number, is_repeated, is_packed):
  """Returns a sizer for a group field."""
  # A group is delimited by a start tag and an end tag, hence size * 2.
  tag_size = _TagSize(field_number) * 2
  assert not is_packed
  if not is_repeated:
    def FieldSize(value):
      return tag_size + value.ByteSize()
    return FieldSize

  def RepeatedFieldSize(value):
    total = tag_size * len(value)
    for element in value:
      total += element.ByteSize()
    return total
  return RepeatedFieldSize
"def",
"GroupSizer",
"(",
"field_number",
",",
"is_repeated",
",",
"is_packed",
")",
":",
"tag_size",
"=",
"_TagSize",
"(",
"field_number",
")",
"*",
"2",
"assert",
"not",
"is_packed",
"if",
"is_repeated",
":",
"def",
"RepeatedFieldSize",
"(",
"value",
")",
... | 27.75 | 14 |
def dict_values(src):
    """
    Recursively yield values in dict.

    Unlike the builtin dict.values() function, this method will descend into
    nested dicts, yielding all nested values.

    Arguments:
        src (dict): Source dict.

    Returns:
        Generator over all (possibly nested) values.
    """
    for value in src.values():
        if isinstance(value, dict):
            # recurse into nested dicts; the original rebound the loop
            # variable here, which obscured the control flow
            for nested in dict_values(value):
                yield nested
        else:
            yield value
"def",
"dict_values",
"(",
"src",
")",
":",
"for",
"v",
"in",
"src",
".",
"values",
"(",
")",
":",
"if",
"isinstance",
"(",
"v",
",",
"dict",
")",
":",
"for",
"v",
"in",
"dict_values",
"(",
"v",
")",
":",
"yield",
"v",
"else",
":",
"yield",
"v"... | 22.473684 | 18.473684 |
def _reindex(self):
        """
        Create a case-insensitive index of the paths
        """
        # each entry is a (lowercased path, original path) pair so lookups
        # can be case-insensitive while preserving the real filename
        self.index = []
        for path in self.paths:
            target_path = os.path.normpath(os.path.join(BASE_PATH,
                                                        path))
            for root, subdirs, files in os.walk(target_path):
                for f in files:
                    self.index.append(
                        (os.path.join(root, f).lower(),
                         os.path.join(root, f)))
"def",
"_reindex",
"(",
"self",
")",
":",
"self",
".",
"index",
"=",
"[",
"]",
"for",
"path",
"in",
"self",
".",
"paths",
":",
"target_path",
"=",
"os",
".",
"path",
".",
"normpath",
"(",
"os",
".",
"path",
".",
"join",
"(",
"BASE_PATH",
",",
"pa... | 39.076923 | 12.461538 |
def giant_text_sqltype(dialect: Dialect) -> str:
    """
    Returns the SQL column type used to make very large text columns for a
    given dialect.

    Args:
        dialect: a SQLAlchemy :class:`Dialect`

    Returns:
        the SQL data type of "giant text", typically 'LONGTEXT' for MySQL
        and 'NVARCHAR(MAX)' for SQL Server.

    Raises:
        ValueError: for an unrecognized dialect
    """
    if dialect.name == SqlaDialectName.SQLSERVER:
        return 'NVARCHAR(MAX)'
    if dialect.name == SqlaDialectName.MYSQL:
        return 'LONGTEXT'
    raise ValueError("Unknown dialect: {}".format(dialect.name))
"def",
"giant_text_sqltype",
"(",
"dialect",
":",
"Dialect",
")",
"->",
"str",
":",
"if",
"dialect",
".",
"name",
"==",
"SqlaDialectName",
".",
"SQLSERVER",
":",
"return",
"'NVARCHAR(MAX)'",
"elif",
"dialect",
".",
"name",
"==",
"SqlaDialectName",
".",
"MYSQL"... | 33.235294 | 17.941176 |
def pprint(self, output, prefix=""):
    """
    Pretty-print the encoded output using ascii art.

    :param output: to print
    :param prefix: printed before the header if specified
    """
    # NOTE: Python 2 print statements; trailing commas suppress newlines
    print prefix,
    # description gives (name, offset) pairs; append a sentinel so the last
    # real field still has an end offset
    description = self.getDescription() + [("end", self.getWidth())]
    for i in xrange(len(description) - 1):
        offset = description[i][1]
        nextoffset = description[i+1][1]
        print "%s |" % bitsToString(output[offset:nextoffset]),
    print
"def",
"pprint",
"(",
"self",
",",
"output",
",",
"prefix",
"=",
"\"\"",
")",
":",
"print",
"prefix",
",",
"description",
"=",
"self",
".",
"getDescription",
"(",
")",
"+",
"[",
"(",
"\"end\"",
",",
"self",
".",
"getWidth",
"(",
")",
")",
"]",
"for... | 32.357143 | 14.357143 |
def embedded_tweet(self):
        """
        Get the retweeted Tweet OR the quoted Tweet and return it as a Tweet object

        Returns:
            Tweet (or None, if the Tweet is neither a quote tweet or a Retweet):
            a Tweet representing the quote Tweet or the Retweet
            (see tweet_embeds.get_embedded_tweet, this is that value as a Tweet)

        Raises:
            NotATweetError: if embedded tweet is malformed
        """
        embedded_tweet = tweet_embeds.get_embedded_tweet(self)
        if embedded_tweet is not None:
            try:
                return Tweet(embedded_tweet)
            except NotATweetError as nate:
                # The original concatenated a literal "{}" with an already
                # formatted string, so the payload was never interpolated;
                # format the full message in a single step instead.
                raise NotATweetError(
                    "The embedded tweet payload {} appears malformed."
                    " Failed with '{}'".format(embedded_tweet, nate))
        else:
            return None
"def",
"embedded_tweet",
"(",
"self",
")",
":",
"embedded_tweet",
"=",
"tweet_embeds",
".",
"get_embedded_tweet",
"(",
"self",
")",
"if",
"embedded_tweet",
"is",
"not",
"None",
":",
"try",
":",
"return",
"Tweet",
"(",
"embedded_tweet",
")",
"except",
"NotATwee... | 40.761905 | 23.904762 |
def configure_settings(settings, environment_settings=True):
    '''
    Given a settings object, run automatic configuration of all
    the apps in INSTALLED_APPS.
    '''
    # fixed-point iteration: keep re-applying app configs until one full
    # pass makes no further changes (or MAX_ITERATIONS is exceeded)
    changes = 1
    iterations = 0
    while changes:
        changes = 0
        app_names = ['django_autoconfig'] + list(settings['INSTALLED_APPS'])
        if environment_settings:
            app_names.append('django_autoconfig.environment_settings')
        for app_name in app_names:
            import django_autoconfig.contrib
            if autoconfig_module_exists(app_name):
                module = importlib.import_module("%s.autoconfig" % (app_name,))
            elif app_name in django_autoconfig.contrib.CONTRIB_CONFIGS:
                module = django_autoconfig.contrib.CONTRIB_CONFIGS[app_name]
            else:
                # app ships no autoconfig and has no contrib fallback
                continue
            changes += merge_dictionaries(
                settings,
                getattr(module, 'SETTINGS', {}),
                template_special_case=True,
            )
            changes += merge_dictionaries(
                settings,
                getattr(module, 'DEFAULT_SETTINGS', {}),
                only_defaults=True,
            )
            for relationship in getattr(module, 'RELATIONSHIPS', []):
                changes += relationship.apply_changes(settings)
        if iterations >= MAX_ITERATIONS:
            raise ImproperlyConfigured(
                'Autoconfiguration could not reach a consistent state'
            )
        iterations += 1
    LOGGER.debug("Autoconfiguration took %d iterations.", iterations)
"def",
"configure_settings",
"(",
"settings",
",",
"environment_settings",
"=",
"True",
")",
":",
"changes",
"=",
"1",
"iterations",
"=",
"0",
"while",
"changes",
":",
"changes",
"=",
"0",
"app_names",
"=",
"[",
"'django_autoconfig'",
"]",
"+",
"list",
"(",
... | 38.475 | 20.075 |
def xslt_transformation(xml, template):
    """
    Transform `xml` using XSLT `template`.

    Args:
        xml (str): Filename or XML string. Don't use ``\\n`` in case of
            filename.
        template (str): Filename or XML string. Don't use ``\\n`` in case of
            filename.

    Returns:
        str: Transformed `xml` as string.
    """
    transformer = ET.XSLT(_read_template(template))
    transformed = transformer(_read_marcxml(xml))
    return ET.tostring(transformed, pretty_print=True, encoding="utf-8")
"def",
"xslt_transformation",
"(",
"xml",
",",
"template",
")",
":",
"transformer",
"=",
"ET",
".",
"XSLT",
"(",
"_read_template",
"(",
"template",
")",
")",
"newdom",
"=",
"transformer",
"(",
"_read_marcxml",
"(",
"xml",
")",
")",
"return",
"ET",
".",
"... | 26.238095 | 19.857143 |
def _learn(self, legislator):
        """
        Expects a dictionary with full_name, first_name, last_name and
        middle_name elements as key.

        While this can grow quickly, we should never be dealing with
        more than a few hundred legislators at a time so don't worry about
        it.
        """
        name, obj = legislator, legislator['_id']
        # Determine the chamber: prefer a current 'member' role for this
        # term, otherwise fall back to old_roles for the same term.
        if (legislator['roles'] and legislator['roles'][0]['term'] ==
                self._term and legislator['roles'][0]['type'] == 'member'):
            chamber = legislator['roles'][0]['chamber']
        else:
            try:
                chamber = legislator['old_roles'][self._term][0].get('chamber')
            except KeyError:
                raise ValueError("no role in legislator %s [%s] for term %s" %
                                 (legislator['full_name'], legislator['_id'],
                                  self._term))
        # Index by explicit code, both per-chamber and chamber-agnostic.
        if '_code' in name:
            code = name['_code']
            if code in self._codes[chamber] or code in self._codes[None]:
                raise ValueError("non-unique legislator code [%s] for %s" %
                                 (code, name['full_name']))
            self._codes[chamber][code] = obj
            self._codes[None][code] = obj
        # We throw possible forms of this name into a set because we
        # don't want to try to add the same form twice for the same
        # name
        forms = set()
        def add_form(form):
            forms.add(self._normalize(form))
        add_form(name['full_name'])
        add_form(name['_scraped_name'])
        add_form(name['last_name'])
        if name['first_name']:
            add_form("%s, %s" % (name['last_name'], name['first_name']))
            add_form("%s %s" % (name['first_name'], name['last_name']))
            add_form("%s, %s" % (name['last_name'], name['first_name'][0]))
            add_form("%s (%s)" % (name['last_name'], name['first_name']))
            add_form("%s %s" % (name['first_name'][0], name['last_name']))
            add_form("%s (%s)" % (name['last_name'], name['first_name'][0]))
            if name['middle_name']:
                add_form("%s, %s %s" % (name['last_name'], name['first_name'],
                                        name['middle_name']))
                add_form("%s, %s %s" % (name['last_name'],
                                        name['first_name'][0],
                                        name['middle_name']))
                add_form("%s %s %s" % (name['first_name'],
                                       name['middle_name'],
                                       name['last_name']))
                add_form("%s, %s %s" % (name['last_name'],
                                        name['first_name'][0],
                                        name['middle_name'][0]))
                add_form("%s %s %s" % (name['first_name'],
                                       name['middle_name'][0],
                                       name['last_name']))
                add_form("%s, %s %s" % (name['last_name'],
                                        name['first_name'],
                                        name['middle_name'][0]))
                add_form("%s, %s.%s." % (name['last_name'],
                                         name['first_name'][0],
                                         name['middle_name'][0]))
        # Register each form; a form seen for two different legislators is
        # mapped to None, marking it as ambiguous rather than wrong.
        for form in forms:
            form = self._normalize(form)
            if form in self._names[chamber]:
                self._names[chamber][form] = None
            else:
                self._names[chamber][form] = obj
            if form in self._names[None]:
                self._names[None][form] = None
            else:
                self._names[None][form] = obj
"def",
"_learn",
"(",
"self",
",",
"legislator",
")",
":",
"name",
",",
"obj",
"=",
"legislator",
",",
"legislator",
"[",
"'_id'",
"]",
"if",
"(",
"legislator",
"[",
"'roles'",
"]",
"and",
"legislator",
"[",
"'roles'",
"]",
"[",
"0",
"]",
"[",
"'term... | 44.578313 | 21.46988 |
def read_table(self, table, key_filter=True):
        """
        Yield rows in the [incr tsdb()] *table* that pass any defined
        filters, and with values changed by any applicators. If no
        filters or applicators are defined, the result is the same as
        from ItsdbProfile.read_raw_table().
        """
        # profile-wide filters (keyed by None) apply before table filters
        filters = self.filters[None] + self.filters[table]
        if key_filter:
            for f in self.relations[table]:
                key = f.name
                if f.key and (self._index.get(key) is not None):
                    ids = self._index[key]
                    # Can't keep local variables (like ids) in the scope of
                    # the lambda expression, so make it a default argument.
                    # Source: http://stackoverflow.com/a/938493/1441112
                    function = lambda r, x, ids=ids: x in ids
                    filters.append(([key], function))
        applicators = self.applicators[table]
        rows = self.read_raw_table(table)
        return filter_rows(filters, apply_rows(applicators, rows))
"def",
"read_table",
"(",
"self",
",",
"table",
",",
"key_filter",
"=",
"True",
")",
":",
"filters",
"=",
"self",
".",
"filters",
"[",
"None",
"]",
"+",
"self",
".",
"filters",
"[",
"table",
"]",
"if",
"key_filter",
":",
"for",
"f",
"in",
"self",
"... | 50.380952 | 16 |
def make_clean_visible_file(i_chunk, clean_visible_path):
    '''make a temp file of clean_visible text'''
    # NOTE: Python 2 module (see the ``except Exception, exc`` syntax below)
    _clean = open(clean_visible_path, 'wb')
    _clean.write('<?xml version="1.0" encoding="UTF-8"?>')
    _clean.write('<root>')
    for idx, si in enumerate(i_chunk):
        if si.stream_id is None:
            # create the FILENAME element anyway, so the ordering
            # remains the same as the i_chunk and can be aligned.
            stream_id = ''
        else:
            stream_id = si.stream_id
        doc = lxml.etree.Element("FILENAME", stream_id=stream_id)
        if si.body and si.body.clean_visible:
            try:
                # is UTF-8, and etree wants .text to be unicode
                doc.text = si.body.clean_visible.decode('utf8')
            except ValueError:
                doc.text = drop_invalid_and_upper_utf8_chars(
                    si.body.clean_visible.decode('utf8'))
            except Exception, exc:
                # this should never ever fail, because if it does,
                # then it means that clean_visible (or more likely
                # clean_html) is not what it is supposed to be.
                # Therefore, do not take it lightly:
                logger.critical(traceback.format_exc(exc))
                logger.critical('failed on stream_id=%s to follow:',
                                si.stream_id)
                logger.critical(repr(si.body.clean_visible))
                logger.critical('above was stream_id=%s', si.stream_id)
                # [I don't know who calls this, but note that this
                # will *always* fail if clean_visible isn't valid UTF-8.]
                raise
        else:
            doc.text = ''
        _clean.write(lxml.etree.tostring(doc, encoding='UTF-8'))
    _clean.write('</root>')
    _clean.close()
    logger.info(clean_visible_path)
    # The triple-quoted string below is intentionally dead code kept by the
    # original author as a debugging aid; it is never executed.
    '''
    ## hack to capture html for inspection
    _html = open(clean_visible_path + '-html', 'wb')
    for idx, si in enumerate(i_chunk):
        _html.write('<FILENAME docid="%s">' % si.stream_id)
        if si.body and si.body.clean_html:
            _html.write(si.body.clean_html)
        _html.write('</FILENAME>\n')
        _html.close()

    ## replace this with log.info()
    print clean_visible_path + '-html'
    '''
"def",
"make_clean_visible_file",
"(",
"i_chunk",
",",
"clean_visible_path",
")",
":",
"_clean",
"=",
"open",
"(",
"clean_visible_path",
",",
"'wb'",
")",
"_clean",
".",
"write",
"(",
"'<?xml version=\"1.0\" encoding=\"UTF-8\"?>'",
")",
"_clean",
".",
"write",
"(",
... | 43.230769 | 16.769231 |
def get_doctype(self, index, name):
        """
        Return the doctype stored for ``index``/``name``, or None.
        """
        if index not in self.indices:
            # refresh the cached index map before giving up
            self.get_all_indices()
        doctypes = self.indices.get(index, {})
        return doctypes.get(name, None)
"def",
"get_doctype",
"(",
"self",
",",
"index",
",",
"name",
")",
":",
"if",
"index",
"not",
"in",
"self",
".",
"indices",
":",
"self",
".",
"get_all_indices",
"(",
")",
"return",
"self",
".",
"indices",
".",
"get",
"(",
"index",
",",
"{",
"}",
")... | 33.857143 | 6.142857 |
def unstar(self, login, repo):
        """Remove a star from login/repo

        :param str login: (required), owner of the repo
        :param str repo: (required), name of the repo
        :return: bool
        """
        if not (login and repo):
            return False
        url = self._build_url('user', 'starred', login, repo)
        return self._boolean(self._delete(url), 204, 404)
"def",
"unstar",
"(",
"self",
",",
"login",
",",
"repo",
")",
":",
"resp",
"=",
"False",
"if",
"login",
"and",
"repo",
":",
"url",
"=",
"self",
".",
"_build_url",
"(",
"'user'",
",",
"'starred'",
",",
"login",
",",
"repo",
")",
"resp",
"=",
"self",... | 32.666667 | 16.5 |
def get_object_id_from_graph(access_token=None):
    '''Return the object ID for the Graph user who owns the access token.

    Args:
        access_token (str): A Microsoft Graph access token. (Not an Azure access token.)
            If not provided, attempt to get it from MSI_ENDPOINT.

    Returns:
        An object ID string for a user or service principal.
    '''
    token = access_token if access_token is not None else get_graph_token_from_msi()
    endpoint = 'https://' + GRAPH_RESOURCE_HOST + '/v1.0/me/'
    headers = {'Authorization': 'Bearer ' + token, 'Host': GRAPH_RESOURCE_HOST}
    response = requests.get(endpoint, headers=headers)
    return response.json()['id']
"def",
"get_object_id_from_graph",
"(",
"access_token",
"=",
"None",
")",
":",
"if",
"access_token",
"is",
"None",
":",
"access_token",
"=",
"get_graph_token_from_msi",
"(",
")",
"endpoint",
"=",
"'https://'",
"+",
"GRAPH_RESOURCE_HOST",
"+",
"'/v1.0/me/'",
"headers... | 39.882353 | 27.764706 |
def check_output(self, cmd):
        """Calls a command through SSH and returns its output.

        Raises:
            RemoteCommandFailure: if the command exits with nonzero status.
        """
        ret, output = self._call(cmd, True)
        if ret != 0:  # pragma: no cover
            raise RemoteCommandFailure(command=cmd, ret=ret)
        logger.debug("Output: %r", output)
        return output
"def",
"check_output",
"(",
"self",
",",
"cmd",
")",
":",
"ret",
",",
"output",
"=",
"self",
".",
"_call",
"(",
"cmd",
",",
"True",
")",
"if",
"ret",
"!=",
"0",
":",
"# pragma: no cover",
"raise",
"RemoteCommandFailure",
"(",
"command",
"=",
"cmd",
","... | 38.375 | 7 |
def _git_enable_branch(desired_branch):
    """Enable desired branch name."""
    # NOTE(review): generator used as a context manager -- presumably
    # wrapped by @contextmanager at an unseen decorator; confirm
    preserved_branch = _git_get_current_branch()
    try:
        # switch only if we are not already on the desired branch
        if preserved_branch != desired_branch:
            _tool_run('git checkout ' + desired_branch)
        yield
    finally:
        # restore the original branch even if the wrapped block raised
        if preserved_branch and preserved_branch != desired_branch:
            _tool_run('git checkout ' + preserved_branch)
"def",
"_git_enable_branch",
"(",
"desired_branch",
")",
":",
"preserved_branch",
"=",
"_git_get_current_branch",
"(",
")",
"try",
":",
"if",
"preserved_branch",
"!=",
"desired_branch",
":",
"_tool_run",
"(",
"'git checkout '",
"+",
"desired_branch",
")",
"yield",
"... | 38.2 | 16.1 |
def check_captcha(self, captcha, value, id=None):
        """ http://api.yandex.ru/cleanweb/doc/dg/concepts/check-captcha.xml"""
        params = {'captcha': captcha, 'value': value, 'id': id}
        response = self.request('get', 'http://cleanweb-api.yandex.ru/1.0/check-captcha', params=params)
        root = ET.fromstring(response.content)
        if root.findall('ok'):
            return True
        if root.findall('failed'):
            return False
        # implicitly returns None when neither element is present
"def",
"check_captcha",
"(",
"self",
",",
"captcha",
",",
"value",
",",
"id",
"=",
"None",
")",
":",
"payload",
"=",
"{",
"'captcha'",
":",
"captcha",
",",
"'value'",
":",
"value",
",",
"'id'",
":",
"id",
"}",
"r",
"=",
"self",
".",
"request",
"(",... | 43.181818 | 12.454545 |
def router_add(self, params):
        """add new router (mongos) into existing configuration

        Returns:
            dict with the new router's 'id' and 'hostname'.
        """
        if self.uses_rs_configdb:
            # Replica set configdb.
            rs_id = self._configsvrs[0]
            config_members = ReplicaSets().members(rs_id)
            # configdb string format: "<rs_id>/<host1>,<host2>,..."
            configdb = '%s/%s' % (
                rs_id, ','.join(m['host'] for m in config_members))
        else:
            # standalone config servers: comma-separated hostnames
            configdb = ','.join(Servers().hostname(item)
                                for item in self._configsvrs)
        server_id = params.pop('server_id', None)
        version = params.pop('version', self._version)
        params.update({'configdb': configdb})
        if self.enable_ipv6:
            common.enable_ipv6_single(params)
        # Remove flags that turn auth on.
        params = self._strip_auth(params)
        self._routers.append(Servers().create(
            'mongos', params, sslParams=self.sslParams, autostart=True,
            version=version, server_id=server_id))
        return {'id': self._routers[-1], 'hostname': Servers().hostname(self._routers[-1])}
"def",
"router_add",
"(",
"self",
",",
"params",
")",
":",
"if",
"self",
".",
"uses_rs_configdb",
":",
"# Replica set configdb.",
"rs_id",
"=",
"self",
".",
"_configsvrs",
"[",
"0",
"]",
"config_members",
"=",
"ReplicaSets",
"(",
")",
".",
"members",
"(",
... | 43.75 | 15.125 |
def process_file(self):
        """Deprecated: path of the process file inside the config directory."""
        message = DeprecationWarning("'self.process_file' is deprecated")
        warnings.warn(message)
        raw = self._raw
        return os.path.join(raw["config_dir"], raw["process"])
"def",
"process_file",
"(",
"self",
")",
":",
"warnings",
".",
"warn",
"(",
"DeprecationWarning",
"(",
"\"'self.process_file' is deprecated\"",
")",
")",
"return",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"_raw",
"[",
"\"config_dir\"",
"]",
",",
"se... | 50 | 22.25 |
def _find_address_range(addresses):
"""Find a sequence of sorted deduplicated IPv#Address.
Args:
addresses: a list of IPv#Address objects.
Yields:
A tuple containing the first and last IP addresses in the sequence.
"""
it = iter(addresses)
first = last = next(it)
for ip in it:
if ip._ip != last._ip + 1:
yield first, last
first = ip
last = ip
yield first, last | [
"def",
"_find_address_range",
"(",
"addresses",
")",
":",
"it",
"=",
"iter",
"(",
"addresses",
")",
"first",
"=",
"last",
"=",
"next",
"(",
"it",
")",
"for",
"ip",
"in",
"it",
":",
"if",
"ip",
".",
"_ip",
"!=",
"last",
".",
"_ip",
"+",
"1",
":",
... | 24.166667 | 19.888889 |
def has_file(self, name: str):
        '''
        Return True when a regular file called *name* exists in this directory.
        '''
        candidate = self._path / name
        return os.path.isfile(candidate)
"def",
"has_file",
"(",
"self",
",",
"name",
":",
"str",
")",
":",
"return",
"os",
".",
"path",
".",
"isfile",
"(",
"self",
".",
"_path",
"/",
"name",
")"
] | 31 | 18.2 |
def indicator(self, data):
        """Update the request URI to include the Indicator for specific indicator retrieval.

        Args:
            data (string): The indicator value
        """
        # handle hashes in form md5 : sha1 : sha256 -- get_first_hash
        # presumably extracts the first hash from the string; confirm
        data = self.get_first_hash(data)
        super(File, self).indicator(data)
"def",
"indicator",
"(",
"self",
",",
"data",
")",
":",
"# handle hashes in form md5 : sha1 : sha256",
"data",
"=",
"self",
".",
"get_first_hash",
"(",
"data",
")",
"super",
"(",
"File",
",",
"self",
")",
".",
"indicator",
"(",
"data",
")"
] | 35.555556 | 11 |
def random_sense(ambiguous_word: str, pos=None) -> "wn.Synset":
    """
    Returns a random sense.

    :param ambiguous_word: String, a single word.
    :param pos: String, one of 'a', 'r', 's', 'n', 'v', or None.
    :return: A random Synset.
    """
    candidates = wn.synsets(ambiguous_word) if pos is None else wn.synsets(ambiguous_word, pos)
    return custom_random.choice(candidates)
return custom_random.choice(wn.synsets(ambiguous_word, pos)) | [
"def",
"random_sense",
"(",
"ambiguous_word",
":",
"str",
",",
"pos",
"=",
"None",
")",
"->",
"\"wn.Synset\"",
":",
"if",
"pos",
"is",
"None",
":",
"return",
"custom_random",
".",
"choice",
"(",
"wn",
".",
"synsets",
"(",
"ambiguous_word",
")",
")",
"els... | 31.153846 | 20.230769 |
def hexdump(src, length=8, colorize=False):
    """ Produce a string hexdump of src, for debug output.

    Input: bytestring; output: text string
    """
    if not src:
        return str(src)
    if type(src) is not bytes:
        raise yubico_exception.InputError('Hexdump \'src\' must be bytestring (got %s)' % type(src))
    offset = 0
    result = ''
    # group() splits src into `length`-byte rows -- presumably; confirm
    for this in group(src, length):
        if colorize:
            # the last byte of each row carries status flags; color the
            # rest of the row based on which flag bits are set
            last, this = this[-1], this[:-1]
            colors = DumpColors()
            color = colors.get('RESET')
            if ord_byte(last) & yubikey_defs.RESP_PENDING_FLAG:
                # write to key
                color = colors.get('BLUE')
            elif ord_byte(last) & yubikey_defs.SLOT_WRITE_FLAG:
                color = colors.get('GREEN')
            hex_s = color + ' '.join(["%02x" % ord_byte(x) for x in this]) + colors.get('RESET')
            hex_s += " %02x" % ord_byte(last)
        else:
            hex_s = ' '.join(["%02x" % ord_byte(x) for x in this])
        result += "%04X %s\n" % (offset, hex_s)
        offset += length
    return result
"def",
"hexdump",
"(",
"src",
",",
"length",
"=",
"8",
",",
"colorize",
"=",
"False",
")",
":",
"if",
"not",
"src",
":",
"return",
"str",
"(",
"src",
")",
"if",
"type",
"(",
"src",
")",
"is",
"not",
"bytes",
":",
"raise",
"yubico_exception",
".",
... | 38.178571 | 16.714286 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.