code
stringlengths 75
104k
| docstring
stringlengths 1
46.9k
|
|---|---|
def range_(range, no_border, html):
    """Prints the given range in a formatted table either in a plain ASCII or HTML.
    The only required argument is the range definition, e.g. "A2s+ A5o+ 55+"
    """
    from .hand import Range
    # Build the Range once, then render it in the requested format.
    hand_range = Range(range)
    if html:
        rendered = hand_range.to_html()
    else:
        # The CLI flag is "no border", while to_ascii wants "border".
        rendered = hand_range.to_ascii(not no_border)
    click.echo(rendered)
|
Prints the given range in a formatted table either in a plain ASCII or HTML.
The only required argument is the range definition, e.g. "A2s+ A5o+ 55+"
|
def _compute(self):
"""Compute y min and max and y scale and set labels"""
self.min_ = self._min or 0
self.max_ = self._max or 0
if self.max_ - self.min_ == 0:
self.min_ -= 1
self.max_ += 1
self._box.set_polar_box(0, 1, self.min_, self.max_)
|
Compute y min and max and y scale and set labels
|
def deprecated(replacement=None, message=None):
    """
    Decorator to mark classes or functions as deprecated,
    with a possible replacement.

    Args:
        replacement (callable): A replacement class or method.
        message (str): A warning message to be displayed.

    Returns:
        Original function, but with a warning to use the updated class.
    """
    # Local import keeps this decorator self-contained.
    import functools

    def wrap(old):
        # functools.wraps preserves the decorated callable's __name__,
        # __doc__ etc., so introspection and documentation tools still
        # see the original function rather than "wrapped".
        @functools.wraps(old)
        def wrapped(*args, **kwargs):
            msg = "%s is deprecated" % old.__name__
            if replacement is not None:
                # Unwrap property/classmethod/staticmethod to reach the
                # underlying function whose name/module we can report.
                if isinstance(replacement, property):
                    r = replacement.fget
                elif isinstance(replacement, (classmethod, staticmethod)):
                    r = replacement.__func__
                else:
                    r = replacement
                msg += "; use %s in %s instead." % (r.__name__, r.__module__)
            if message is not None:
                msg += "\n" + message
            # Ensure DeprecationWarning is shown at least once even though
            # Python hides it by default outside __main__.
            warnings.simplefilter('default')
            warnings.warn(msg, DeprecationWarning, stacklevel=2)
            return old(*args, **kwargs)
        return wrapped
    return wrap
|
Decorator to mark classes or functions as deprecated,
with a possible replacement.
Args:
replacement (callable): A replacement class or method.
message (str): A warning message to be displayed.
Returns:
Original function, but with a warning to use the updated class.
|
def hget(self, hashkey, attribute):
    """Emulate hget."""
    # Attribute names are stored encoded, so encode before lookup;
    # dict.get mirrors redis behaviour of returning None when absent.
    return self._get_hash(hashkey, 'HGET').get(self._encode(attribute))
|
Emulate hget.
|
def get_parser():
    "Specifies the arguments and defaults, and returns the parser."
    cli = argparse.ArgumentParser(prog="hiwenet")

    # Required inputs.
    cli.add_argument("-f", "--in_features_path", dest="in_features_path",
                     action="store", required=True,
                     help="Abs. path to file containing features for a given subject")
    cli.add_argument("-g", "--groups_path", dest="groups_path",
                     action="store", required=True,
                     help="path to a file containing element-wise membership into groups/nodes/patches.")

    # Weight estimation and output options.
    cli.add_argument("-w", "--weight_method", dest="weight_method",
                     action="store", required=False,
                     default=default_weight_method,
                     help="Method used to estimate the weight between the pair of nodes. Default : {}".format(
                         default_weight_method))
    cli.add_argument("-o", "--out_weights_path", dest="out_weights_path",
                     action="store", required=False,
                     default=default_out_weights_path,
                     help="Where to save the extracted weight matrix. If networkx output is returned, it would be saved in GraphML format. Default: nothing saved.")

    # Histogram construction.
    cli.add_argument("-n", "--num_bins", dest="num_bins",
                     action="store", required=False,
                     default=default_num_bins,
                     help="Number of bins used to construct the histogram. Default : {}".format(default_num_bins))
    cli.add_argument("-r", "--edge_range", dest="edge_range",
                     action="store", required=False, nargs=2,
                     default=default_edge_range,
                     help="The range of edges (two finite values) within which to bin the given values e.g. --edge_range 1 6 "
                          "This can be helpful to ensure correspondence across multiple invocations of hiwenet (for different subjects),"
                          " in terms of range across all bins as well as individual bin edges. "
                          "Default : {}, to automatically compute from the given values.".format(default_edge_range))

    # Outlier handling.
    cli.add_argument("-t", "--trim_outliers", dest="trim_outliers",
                     action="store", required=False,
                     default=default_trim_behaviour,
                     help="Boolean flag indicating whether to trim the extreme/outlying values. Default True.")
    cli.add_argument("-p", "--trim_percentile", dest="trim_percentile",
                     action="store", required=False,
                     default=default_trim_percentile,
                     help="Small value specifying the percentile of outliers to trim. "
                          "Default: {0}%% , must be in open interval (0, 100).".format(default_trim_percentile))

    cli.add_argument("-x", "--return_networkx_graph", dest="return_networkx_graph",
                     action="store", required=False,
                     default=default_return_networkx_graph,
                     help="Boolean flag indicating whether to return a networkx graph populated with weights computed. Default: False")
    return cli
|
Specifies the arguments and defaults, and returns the parser.
|
def enabled(name, runas=None):
    '''
    Ensure the RabbitMQ plugin is enabled.
    name
        The name of the plugin
    runas
        The user to run the rabbitmq-plugin command as
    '''
    # Standard salt state return structure.
    ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
    try:
        plugin_enabled = __salt__['rabbitmq.plugin_is_enabled'](name, runas=runas)
    except CommandExecutionError as err:
        ret['result'] = False
        ret['comment'] = 'Error: {0}'.format(err)
        return ret
    # Nothing to do if the plugin is already enabled.
    if plugin_enabled:
        ret['comment'] = 'Plugin \'{0}\' is already enabled.'.format(name)
        return ret
    # Only actually enable the plugin outside of test (dry-run) mode.
    if not __opts__['test']:
        try:
            __salt__['rabbitmq.enable_plugin'](name, runas=runas)
        except CommandExecutionError as err:
            ret['result'] = False
            ret['comment'] = 'Error: {0}'.format(err)
            return ret
    # Recorded even in test mode so the branch below can report what
    # would change.
    ret['changes'].update({'old': '', 'new': name})
    # In test mode result=None conventionally means "would change".
    if __opts__['test'] and ret['changes']:
        ret['result'] = None
        ret['comment'] = 'Plugin \'{0}\' is set to be enabled.'.format(name)
        return ret
    ret['comment'] = 'Plugin \'{0}\' was enabled.'.format(name)
    return ret
|
Ensure the RabbitMQ plugin is enabled.
name
The name of the plugin
runas
The user to run the rabbitmq-plugin command as
|
def find_from(path):
    """Find path of an .ensime config, searching recursively upward from path.

    Args:
        path (str): Path of a file or directory from where to start searching.

    Returns:
        str: Canonical path of nearest ``.ensime``, or ``None`` if not found.
    """
    realpath = os.path.realpath(path)
    while True:
        config_path = os.path.join(realpath, '.ensime')
        if os.path.isfile(config_path):
            return config_path
        parent = os.path.dirname(realpath)
        if parent == realpath:
            # Reached the filesystem root without finding a config.
            # BUG FIX: the previous termination test compared against
            # os.path.abspath('/'), which never matches drive roots such
            # as 'C:\\' on Windows, allowing unbounded recursion; dirname
            # of a root returns the root itself, which is a portable stop
            # condition (and the iterative loop avoids recursion depth).
            return None
        realpath = parent
|
Find path of an .ensime config, searching recursively upward from path.
Args:
path (str): Path of a file or directory from where to start searching.
Returns:
str: Canonical path of nearest ``.ensime``, or ``None`` if not found.
|
def segment_intersection1(start0, end0, start1, end1, s):
    """Image for :func:`.segment_intersection` docstring."""
    if NO_IMAGES:
        return
    # Plot both segments on one axis, then mark the intersection point
    # (line0 evaluated at parameter s) with a black dot.
    first = bezier.Curve.from_nodes(stack1d(start0, end0))
    second = bezier.Curve.from_nodes(stack1d(start1, end1))
    axis = first.plot(2)
    second.plot(256, ax=axis)
    (x_val,), (y_val,) = first.evaluate(s)
    axis.plot([x_val], [y_val], color="black", marker="o")
    axis.axis("scaled")
    save_image(axis.figure, "segment_intersection1.png")
|
Image for :func:`.segment_intersection` docstring.
|
def make_tuple(stream, tuple_key, values, roots=None):
    """Creates a HeronTuple

    :param stream: protobuf message ``StreamId``
    :param tuple_key: tuple id
    :param values: a list of values
    :param roots: a list of protobuf message ``RootId``
    """
    # The originating task id comes from the first root, when any exist.
    root_task = roots[0].taskid if roots else None
    return HeronTuple(id=str(tuple_key),
                      component=stream.component_name,
                      stream=stream.id,
                      task=root_task,
                      values=values,
                      creation_time=time.time(),
                      roots=roots)
|
Creates a HeronTuple
:param stream: protobuf message ``StreamId``
:param tuple_key: tuple id
:param values: a list of values
:param roots: a list of protobuf message ``RootId``
|
def elasticsearch_matcher(text_log_error):
    """
    Query Elasticsearch and score the results.

    Uses a filtered search checking test, status, expected, and the message
    as a phrase query with non-alphabet tokens removed.

    Returns [] when Elasticsearch is disabled, None when the failure line
    cannot be matched, otherwise a list of (score, classification) tuples.
    """
    # Note: Elasticsearch is currently disabled in all environments (see bug 1527868).
    if not settings.ELASTICSEARCH_URL:
        return []
    failure_line = text_log_error.metadata.failure_line
    # Only completed test results that carry a message are matchable.
    if failure_line.action != "test_result" or not failure_line.message:
        logger.debug("Skipped elasticsearch matching")
        return
    filters = [
        {'term': {'test': failure_line.test}},
        {'term': {'status': failure_line.status}},
        {'term': {'expected': failure_line.expected}},
        {'exists': {'field': 'best_classification'}}
    ]
    if failure_line.subtest:
        # BUG FIX: list.append returns None; the old code assigned that
        # None to ``query`` which was then immediately overwritten below.
        filters.append({'term': {'subtest': failure_line.subtest}})
    query = {
        'query': {
            'bool': {
                'filter': filters,
                'must': [{
                    'match_phrase': {
                        # Truncate very long messages before querying.
                        'message': failure_line.message[:1024],
                    },
                }],
            },
        },
    }
    try:
        results = search(query)
    except Exception:
        logger.error("Elasticsearch lookup failed: %s %s %s %s %s",
                     failure_line.test, failure_line.subtest, failure_line.status,
                     failure_line.expected, failure_line.message)
        raise
    if len(results) > 1:
        # Lazy %-style args: formatting only happens if the record is emitted.
        logger.info('text_log_error=%i failure_line=%i Elasticsearch produced %i results',
                    text_log_error.id, failure_line.id, len(results))
        newrelic.agent.record_custom_event('es_matches', {
            'num_results': len(results),
            'text_log_error_id': text_log_error.id,
            'failure_line_id': failure_line.id,
        })
    scorer = MatchScorer(failure_line.message)
    matches = [(item, item['message']) for item in results]
    best_match = scorer.best_match(matches)
    if not best_match:
        return
    score, es_result = best_match
    # TODO: score all results and return
    # TODO: just return results with score above cut off?
    return [(score, es_result['best_classification'])]
|
Query Elasticsearch and score the results.
Uses a filtered search checking test, status, expected, and the message
as a phrase query with non-alphabet tokens removed.
|
def _get_dataruns(self):
'''Returns a list of dataruns, in order.
'''
if self._data_runs is None:
raise DataStreamError("Resident datastream don't have dataruns")
if not self._data_runs_sorted:
self._data_runs.sort(key=_itemgetter(0))
self._data_runs_sorted = True
return [data[1] for data in self._data_runs]
|
Returns a list of dataruns, in order.
|
def main(argv=None):
    """script main.
    parses command line options in sys.argv, unless *argv* is given.
    """
    if argv is None:
        argv = sys.argv
    # setup command line parser
    parser = U.OptionParser(version="%prog version: $Id$",
                            usage=usage,
                            description=globals()["__doc__"])
    group = U.OptionGroup(parser, "count_tab-specific options")
    group.add_option("--barcode-separator", dest="bc_sep",
                     type="string", help="separator between read id and UMI "
                     " and (optionally) the cell barcode", default="_")
    group.add_option("--per-cell", dest="per_cell",
                     action="store_true",
                     help="Readname includes cell barcode as well as UMI in "
                     "format: read[sep]UMI[sep]CB")
    parser.add_option_group(group)
    # add common options (-h/--help, ...) and parse command line
    (options, args) = U.Start(parser, argv=argv, add_group_dedup_options=False,
                              add_sam_options=False)
    nInput, nOutput = 0, 0
    # set the method with which to extract umis from reads
    if options.per_cell:
        bc_getter = partial(
            sam_methods.get_cell_umi_read_string, sep=options.bc_sep)
    else:
        bc_getter = partial(
            sam_methods.get_umi_read_string, sep=options.bc_sep)
    # Emit the header row up front; columns depend on per-cell mode.
    if options.per_cell:
        options.stdout.write("%s\t%s\t%s\n" % ("cell", "gene", "count"))
    else:
        options.stdout.write("%s\t%s\n" % ("gene", "count"))
    # set up UMIClusterer functor with methods specific to
    # specified options.method
    processor = network.UMIClusterer(options.method)
    for gene, counts in sam_methods.get_gene_count_tab(
            options.stdin,
            bc_getter=bc_getter):
        for cell in counts.keys():
            # NOTE(review): `umis` is never used below — candidate for removal.
            umis = counts[cell].keys()
            nInput += sum(counts[cell].values())
            # group the umis
            groups = processor(
                counts[cell],
                threshold=options.threshold)
            # One deduplicated count per UMI group.
            gene_count = len(groups)
            if options.per_cell:
                options.stdout.write("%s\t%s\t%i\n" % (cell, gene, gene_count))
            else:
                options.stdout.write("%s\t%i\n" % (gene, gene_count))
            nOutput += gene_count
    U.info("Number of reads counted: %i" % nOutput)
    U.Stop()
|
script main.
parses command line options in sys.argv, unless *argv* is given.
|
def IterateAllClientSnapshots(self, min_last_ping=None, batch_size=50000):
    """Iterates over all available clients and yields client snapshot objects.

    Args:
      min_last_ping: If provided, only snapshots for clients with last-ping
        timestamps newer than (or equal to) the given value will be returned.
      batch_size: Always reads <batch_size> snapshots at a time.
    Yields:
      An rdfvalues.objects.ClientSnapshot object for each client in the db.
    """
    client_ids = self.ReadAllClientIDs(min_last_ping=min_last_ping)
    # Read snapshots in bounded batches to cap memory usage.
    for id_batch in collection.Batch(client_ids, batch_size):
        snapshots = self.MultiReadClientSnapshot(id_batch)
        # Skip clients that have no snapshot (falsy entries).
        for snapshot in (s for s in itervalues(snapshots) if s):
            yield snapshot
|
Iterates over all available clients and yields client snapshot objects.
Args:
min_last_ping: If provided, only snapshots for clients with last-ping
timestamps newer than (or equal to) the given value will be returned.
batch_size: Always reads <batch_size> snapshots at a time.
Yields:
An rdfvalues.objects.ClientSnapshot object for each client in the db.
|
def _sendline(self, line):
"""Send exactly one line to the device
Args:
line str: data send to device
"""
logging.info('%s: sending line', self.port)
# clear buffer
self._lines = []
try:
self._read()
except socket.error:
logging.debug('%s: Nothing cleared', self.port)
print 'sending [%s]' % line
self._write(line + '\r\n')
# wait for write to complete
time.sleep(0.1)
|
Send exactly one line to the device
Args:
line str: data send to device
|
def interp_value(mass, age, feh, icol,
                 grid, mass_col, ages, fehs, grid_Ns):
                 # return_box):
    """mass, age, feh are *single values* at which values are desired
    icol is the column index of desired value
    grid is nfeh x nage x max(nmass) x ncols array
    mass_col is the column index of mass
    ages is grid of ages
    fehs is grid of fehs
    grid_Ns keeps track of nmass in each slice (beyond this are nans)
    """
    Nage = len(ages)
    Nfeh = len(fehs)
    # NOTE(review): `searchsorted` here appears to be a custom helper with
    # signature (array, N, value) — not numpy.searchsorted; confirm against
    # its definition elsewhere in this module.
    ifeh = searchsorted(fehs, Nfeh, feh)
    iage = searchsorted(ages, Nage, age)
    # Gather the 8 corner points (mass, age, feh) of the grid cell that
    # brackets the target: for each of the four (feh, age) slice
    # combinations we take the mass entries at imass and imass-1.
    pts = np.zeros((8,3))
    vals = np.zeros(8)
    # Corners 0/1: slice (ifeh-1, iage-1).
    i_f = ifeh - 1
    i_a = iage - 1
    # Only the first Nmass rows of this slice are valid (rest are NaN).
    Nmass = grid_Ns[i_f, i_a]
    imass = searchsorted(grid[i_f, i_a, :, mass_col], Nmass, mass)
    pts[0, 0] = grid[i_f, i_a, imass, mass_col]
    pts[0, 1] = ages[i_a]
    pts[0, 2] = fehs[i_f]
    vals[0] = grid[i_f, i_a, imass, icol]
    pts[1, 0] = grid[i_f, i_a, imass-1, mass_col]
    pts[1, 1] = ages[i_a]
    pts[1, 2] = fehs[i_f]
    vals[1] = grid[i_f, i_a, imass-1, icol]
    # Corners 2/3: slice (ifeh-1, iage).
    i_f = ifeh - 1
    i_a = iage
    Nmass = grid_Ns[i_f, i_a]
    imass = searchsorted(grid[i_f, i_a, :, mass_col], Nmass, mass)
    pts[2, 0] = grid[i_f, i_a, imass, mass_col]
    pts[2, 1] = ages[i_a]
    pts[2, 2] = fehs[i_f]
    vals[2] = grid[i_f, i_a, imass, icol]
    pts[3, 0] = grid[i_f, i_a, imass-1, mass_col]
    pts[3, 1] = ages[i_a]
    pts[3, 2] = fehs[i_f]
    vals[3] = grid[i_f, i_a, imass-1, icol]
    # Corners 4/5: slice (ifeh, iage-1).
    i_f = ifeh
    i_a = iage - 1
    Nmass = grid_Ns[i_f, i_a]
    imass = searchsorted(grid[i_f, i_a, :, mass_col], Nmass, mass)
    pts[4, 0] = grid[i_f, i_a, imass, mass_col]
    pts[4, 1] = ages[i_a]
    pts[4, 2] = fehs[i_f]
    vals[4] = grid[i_f, i_a, imass, icol]
    pts[5, 0] = grid[i_f, i_a, imass-1, mass_col]
    pts[5, 1] = ages[i_a]
    pts[5, 2] = fehs[i_f]
    vals[5] = grid[i_f, i_a, imass-1, icol]
    # Corners 6/7: slice (ifeh, iage).
    i_f = ifeh
    i_a = iage
    Nmass = grid_Ns[i_f, i_a]
    imass = searchsorted(grid[i_f, i_a, :, mass_col], Nmass, mass)
    pts[6, 0] = grid[i_f, i_a, imass, mass_col]
    pts[6, 1] = ages[i_a]
    pts[6, 2] = fehs[i_f]
    vals[6] = grid[i_f, i_a, imass, icol]
    pts[7, 0] = grid[i_f, i_a, imass-1, mass_col]
    pts[7, 1] = ages[i_a]
    pts[7, 2] = fehs[i_f]
    vals[7] = grid[i_f, i_a, imass-1, icol]
    # if return_box:
    #     return pts, vals
    # else:
    # interp_box (defined elsewhere) interpolates within the 8-corner box.
    return interp_box(mass, age, feh, pts, vals)
|
mass, age, feh are *single values* at which values are desired
icol is the column index of desired value
grid is nfeh x nage x max(nmass) x ncols array
mass_col is the column index of mass
ages is grid of ages
fehs is grid of fehs
grid_Ns keeps track of nmass in each slice (beyond this are nans)
|
def validate_gcs_path(path, require_object):
    """ Check whether a given path is a valid GCS path.

    Args:
        path: the config to check.
        require_object: if True, the path has to be an object path but not bucket path.
    Raises:
        Exception if the path is invalid
    """
    # parse_name returns (bucket, key); key is None for bucket-only paths.
    bucket, key = datalab.storage._bucket.parse_name(path)
    if bucket is None:
        raise Exception('Invalid GCS path "%s"' % path)
    needs_object_but_missing = require_object and key is None
    if needs_object_but_missing:
        raise Exception('It appears the GCS path "%s" is a bucket path but not an object path' % path)
|
Check whether a given path is a valid GCS path.
Args:
path: the config to check.
require_object: if True, the path has to be an object path but not bucket path.
Raises:
Exception if the path is invalid
|
def view_dupl_sources(token, dstore):
    """
    Show the sources with the same ID and the truly duplicated sources
    """
    fields = ['source_id', 'code', 'gidx1', 'gidx2', 'num_ruptures']
    by_id = group_array(dstore['source_info'].value[fields], 'source_id')
    sameid = []
    dupl = []
    for source_id, recs in by_id.items():
        if len(recs) <= 1:
            continue
        # Several records share this ID; rebuild each source to see
        # whether they are byte-for-byte duplicates or merely reuse the ID.
        candidates = [
            Source(source_id, rec['code'],
                   dstore['source_geom'][rec['gidx1']:rec['gidx2']],
                   rec['num_ruptures'])
            for rec in recs]
        if all_equal(candidates):
            dupl.append(source_id)
        sameid.append(source_id)
    if not dupl:
        return ''
    lines = [str(dupl)]
    lines.append('Found %d source(s) with the same ID and %d true duplicate(s)'
                 % (len(sameid), len(dupl)))
    msg = '\n'.join(lines)
    fakedupl = set(sameid) - set(dupl)
    if fakedupl:
        msg += '\nHere is a fake duplicate: %s' % fakedupl.pop()
    return msg
|
Show the sources with the same ID and the truly duplicated sources
|
def open(
    self,
    fs_url,  # type: Text
    writeable=True,  # type: bool
    create=False,  # type: bool
    cwd=".",  # type: Text
    default_protocol="osfs",  # type: Text
):
    # type: (...) -> Tuple[FS, Text]
    """Open a filesystem from a FS URL.

    Returns a tuple of a filesystem object and a path. If there is
    no path in the FS URL, the path value will be `None`.

    Arguments:
        fs_url (str): A filesystem URL.
        writeable (bool, optional): `True` if the filesystem must be
            writeable.
        create (bool, optional): `True` if the filesystem should be
            created if it does not exist.
        cwd (str): The current working directory.

    Returns:
        (FS, str): a tuple of ``(<filesystem>, <path from url>)``
    """
    # A bare path carries no protocol marker; assume the default protocol.
    if "://" not in fs_url:
        fs_url = "{}://{}".format(default_protocol, fs_url)
    parsed = parse_fs_url(fs_url)
    opener = self.get_opener(parsed.protocol)
    filesystem = opener.open_fs(fs_url, parsed, writeable, create, cwd)
    return filesystem, parsed.path
|
Open a filesystem from a FS URL.
Returns a tuple of a filesystem object and a path. If there is
no path in the FS URL, the path value will be `None`.
Arguments:
fs_url (str): A filesystem URL.
writeable (bool, optional): `True` if the filesystem must be
writeable.
create (bool, optional): `True` if the filesystem should be
created if it does not exist.
cwd (str): The current working directory.
Returns:
(FS, str): a tuple of ``(<filesystem>, <path from url>)``
|
def sepBy1(p, sep):
    '''Parse one or more occurrences of `p`, separated by `sep`.

    Returns a list of the values returned by `p`.'''
    # No upper bound on repetitions; the trailing separator is not consumed.
    no_limit = float('inf')
    return separated(p, sep, 1, maxt=no_limit, end=False)
|
`sepBy1(p, sep)` parses one or more occurrences of `p`, separated by
`sep`. Returns a list of values returned by `p`.
|
def _save_json_file(
        self, file, val,
        pretty=False, compact=True, sort=True, encoder=None
):
    """
    Save data to json file

    :param file: Writable file or path to file
    :type file: FileIO | str | unicode
    :param val: Value or struct to save
    :type val: None | int | float | str | list | dict
    :param pretty: Format data to be readable (default: False)
    :type pretty: bool
    :param compact: Format data to be compact (default: True)
    :type compact: bool
    :param sort: Sort keys (default: True)
    :type sort: bool
    :param encoder: Use custom json encoder
    :type encoder: T <= flotils.loadable.DateTimeEncoder
    :rtype: None
    :raises IOError: Failed to save
    """
    try:
        save_json_file(file, val, pretty, compact, sort, encoder)
    except Exception:
        # BUG FIX: was a bare ``except:``, which also intercepted
        # SystemExit/KeyboardInterrupt; only genuine errors should be
        # logged and converted to IOError.
        self.exception("Failed to save to {}".format(file))
        raise IOError("Saving file failed")
|
Save data to json file
:param file: Writable file or path to file
:type file: FileIO | str | unicode
:param val: Value or struct to save
:type val: None | int | float | str | list | dict
:param pretty: Format data to be readable (default: False)
:type pretty: bool
:param compact: Format data to be compact (default: True)
:type compact: bool
:param sort: Sort keys (default: True)
:type sort: bool
:param encoder: Use custom json encoder
:type encoder: T <= flotils.loadable.DateTimeEncoder
:rtype: None
:raises IOError: Failed to save
|
def _format_text(self, text):
"""
Format a paragraph of free-form text for inclusion in the
help output at the current indentation level.
"""
text_width = max(self.width - self.current_indent, 11)
indent = " "*self.current_indent
return textwrap.fill(text,
text_width,
initial_indent=indent,
subsequent_indent=indent)
|
Format a paragraph of free-form text for inclusion in the
help output at the current indentation level.
|
def constant_tuples(self):
    """
    Returns
    -------
    constants: [(String, Constant)]
    """
    # Collect the constants held by each tuple prior (second element of
    # each pair), then append the directly attached constants.
    collected = []
    for tuple_prior in self.tuple_prior_tuples:
        collected.extend(tuple_prior[1].constant_tuples)
    collected.extend(self.direct_constant_tuples)
    return collected
|
Returns
-------
constants: [(String, Constant)]
|
def energy(self, strand, dotparens, temp=37.0, pseudo=False, material=None,
           dangles='some', sodium=1.0, magnesium=0.0):
    '''Calculate the free energy of a sequence with a fixed secondary
    structure by running the \'energy\' command.

    :param strand: Strand on which to run energy (coral.DNA or coral.RNA).
    :type strand: coral.DNA or coral.RNA
    :param dotparens: The structure in dotparens notation.
    :type dotparens: str
    :param temp: Temperature setting for the computation; negative values
                 are not allowed.
    :type temp: float
    :param pseudo: Enable pseudoknots.
    :type pseudo: bool
    :param material: Parameter set to use: 'dna', 'rna' (1995), 'rna1999',
                     or None (default) to infer it from the strand type.
    :type material: str
    :param dangles: Dangle treatment: 'none' (ignored), 'some' (one dangle
                    energy per unpaired base flanking a duplex), or 'all'.
    :type dangles: str
    :param sodium: Sodium concentration in solution (molar); DNA only.
    :type sodium: float
    :param magnesium: Magnesium concentration in solution (molar); DNA only.
    :type magnesium: float
    :returns: The free energy of the sequence with the specified secondary
              structure.
    :rtype: float
    '''
    # Infer the material flag from the strand type unless given explicitly.
    material = self._set_material(strand, material)
    cmd_args = self._prep_cmd_args(temp, dangles, material, pseudo, sodium,
                                   magnesium, multi=False)
    # 'energy' reads sequence + structure from its input lines and reports
    # the result on the second-to-last output line (no STDOUT streaming).
    input_lines = [str(strand), dotparens]
    output_lines = self._run('energy', cmd_args, input_lines).split('\n')
    return float(output_lines[-2])
|
Calculate the free energy of a given sequence structure. Runs the
\'energy\' command.
:param strand: Strand on which to run energy. Strands must be either
coral.DNA or coral.RNA).
:type strand: coral.DNA or coral.RNA
:param dotparens: The structure in dotparens notation.
:type dotparens: str
:param temp: Temperature setting for the computation. Negative values
are not allowed.
:type temp: float
:param pseudo: Enable pseudoknots.
:type pseudo: bool
:param material: The material setting to use in the computation. If set
to None (the default), the material type is inferred
from the strands. Other settings available: 'dna' for
DNA parameters, 'rna' for RNA (1995) parameters, and
'rna1999' for the RNA 1999 parameters.
:type material: str
:param dangles: How to treat dangles in the computation. From the
user guide: For \'none\': Dangle energies are ignored.
For \'some\': \'A dangle energy is incorporated for
each unpaired base flanking a duplex\'. For 'all': all
dangle energy is considered.
:type dangles: str
:param sodium: Sodium concentration in solution (molar), only applies
to DNA.
:type sodium: float
:param magnesium: Magnesium concentration in solution (molar), only
                          applies to DNA.
:type magnesium: float
:returns: The free energy of the sequence with the specified secondary
structure.
:rtype: float
|
def _finalize_ticks(self, axis, element, xticks, yticks, zticks):
    """
    Apply ticks with appropriate offsets.
    """
    yalignments = None
    if xticks is not None:
        # xticks arrives as (position, label, y-alignment) triples; sort by
        # position and split into the (positions, labels) pair the base
        # class expects, keeping the alignments for the adjustment below.
        ticks, labels, yalignments = zip(*sorted(xticks, key=lambda x: x[0]))
        xticks = (list(ticks), list(labels))
    super(BarPlot, self)._finalize_ticks(axis, element, xticks, yticks, zticks)
    if yalignments:
        # Nudge each x tick label vertically by its recorded alignment.
        for t, y in zip(axis.get_xticklabels(), yalignments):
            t.set_y(y)
|
Apply ticks with appropriate offsets.
|
def check_errors(self, response):
    """Check some common errors in an API response.

    On success, stores the decoded content on ``response._content``;
    otherwise raises the most specific matching exception
    (NotFoundError, TargetExistsError, DynectError or GeneralError).
    """
    # Read content.
    content = response.content
    if 'status' not in content:
        raise self.GeneralError('We expect a status field.')
    # Return the decoded content if status is success.
    if content['status'] == 'success':
        response._content = content
        return
    # Expect messages if some kind of error.
    if 'msgs' not in content:
        # BUG FIX: corrected typo in user-facing message ("expcet").
        raise self.GeneralError('We expect messages in case of error.')
    try:
        messages = list(content['msgs'])
    except TypeError:
        # BUG FIX: was a bare ``except:``; list() on a non-iterable
        # raises TypeError, which is the only failure mode here.
        raise self.GeneralError("Messages must be a list.")
    # Try to find common errors in the response.
    for msg in messages:
        if 'LVL' in msg and msg['LVL'] == 'ERROR':
            # Check if is a not found error.
            if msg['ERR_CD'] == 'NOT_FOUND':
                raise self.NotFoundError(msg['INFO'])
            # Duplicated target.
            elif msg['ERR_CD'] == 'TARGET_EXISTS':
                raise self.TargetExistsError(msg['INFO'])
            # Some other error.
            else:
                raise self.DynectError(msg['INFO'])
    raise self.GeneralError("We need at least one error message.")
|
Check some common errors.
|
def applyMassCalMs1(msrunContainer, specfile, dataFit, **kwargs):
    """Applies a correction function to the MS1 ion m/z arrays in order to
    correct for a m/z dependent m/z error.

    :param msrunContainer: instance of :class:`maspy.core.MsrunContainer`,
        containing the :class:`maspy.core.Sai` items of the "specfile".
    :param specfile: filename of an ms-run file to which the m/z calibration
        should be applied
    :param dataFit: a :class:`maspy.auxiliary.DataFit` object, containing
        processed calibration data.
    :param toleranceMode: "relative" or "absolute"
        Specifies how the ``massTolerance`` value is applied, by default
        "relative".
    :raises Exception: if ``toleranceMode`` is neither "relative" nor
        "absolute".
    """
    toleranceMode = kwargs.get('toleranceMode', 'relative')
    # Validate up front so no arrays are modified for an invalid mode
    # (previously this raised only after the mode dispatch, with a
    # placeholder message; Exception type kept for caller compatibility).
    if toleranceMode not in ('relative', 'absolute'):
        raise Exception('toleranceMode must be "relative" or "absolute", '
                        'got %r' % (toleranceMode,))
    # The two modes shared identical iteration logic; only the update of
    # the m/z array differs, so the duplicated loops are merged.
    for si in msrunContainer.getItems(specfile,
                                      selector=lambda si: si.msLevel==1):
        mzArr = msrunContainer.saic[specfile][si.id].arrays['mz']
        corrArr = dataFit.corrArray(mzArr)
        if toleranceMode == 'relative':
            # Relative correction: scale each m/z by (1 + its error term).
            mzArr *= (1 + corrArr)
        else:
            # Absolute correction: shift each m/z by its error term.
            mzArr += corrArr
|
Applies a correction function to the MS1 ion m/z arrays in order to
correct for a m/z dependent m/z error.
    :param msrunContainer: instance of :class:`maspy.core.MsrunContainer`,
containing the :class:`maspy.core.Sai` items of the "specfile".
:param specfile: filename of an ms-run file to which the m/z calibration
should be applied
:param dataFit: a :class:`maspy.auxiliary.DataFit` object, containing
processed calibration data.
:param toleranceMode: "relative" or "absolute"
Specifies how the ``massTolerance`` value is applied, by default
"relative".
|
def _determine_scheduled_actions(scheduled_actions, scheduled_actions_from_pillar):
    '''
    helper method for present, ensure scheduled actions are setup
    '''
    # Start from the pillar-provided actions; deep-copy so the cached
    # config option is never mutated by the merge below.
    merged = copy.deepcopy(
        __salt__['config.option'](scheduled_actions_from_pillar, {})
    )
    # merge with data from state
    if scheduled_actions:
        merged = dictupdate.update(merged, scheduled_actions)
    return merged
|
helper method for present, ensure scheduled actions are setup
|
def toggle_spot_cfg(self):
    """Show the dialog where you select graphics and a name for a place,
    or hide it if already showing.
    """
    if self.app.manager.current == 'spotcfg':
        # The config screen is currently showing: apply the chosen
        # prefix/graphics to the preview widget, then re-insert it.
        dummyplace = self.screendummyplace
        self.ids.placetab.remove_widget(dummyplace)
        dummyplace.clear()
        if self.app.spotcfg.prefix:
            dummyplace.prefix = self.app.spotcfg.prefix
            # NOTE(review): dummynum presumably returns the highest number
            # already used for this prefix, so +1 yields the next free
            # one — confirm against dummynum's definition.
            dummyplace.num = dummynum(
                self.app.character, dummyplace.prefix
            ) + 1
        dummyplace.paths = self.app.spotcfg.imgpaths
        self.ids.placetab.add_widget(dummyplace)
    else:
        # Dialog not showing: seed it with the current dummy's prefix.
        self.app.spotcfg.prefix = self.ids.dummyplace.prefix
    # toggle() flips the dialog's visibility either way.
    self.app.spotcfg.toggle()
|
Show the dialog where you select graphics and a name for a place,
or hide it if already showing.
|
def as_xml(self):
    """Return the XML error representation.

    :returntype: :etree:`ElementTree.Element`"""
    element = ElementTree.Element(self.error_qname)
    # Deep-copy so the stored condition element is never shared with
    # (or mutated through) the returned tree.
    element.append(deepcopy(self.condition))
    if self.text:
        text_element = ElementTree.SubElement(element, self.text_qname)
        text_element.text = self.text
        if self.language:
            text_element.set(XML_LANG_QNAME, self.language)
    return element
|
Return the XML error representation.
:returntype: :etree:`ElementTree.Element`
|
def register(self, lookup: Lookup, encoder: Encoder, decoder: Decoder, label: str=None) -> None:
    """
    Registers the given ``encoder`` and ``decoder`` under the given
    ``lookup``, optionally naming the registration with ``label``.

    :param lookup: A type string, or a matcher predicate called with the
        queried type string.  A registration matches a query either when
        ``lookup == query`` (string form) or when ``lookup(query) is True``
        (predicate form); more than one match raises an exception at query
        time.
    :param encoder: An encoder callable (python value -> ``bytes``) or a
        subclass of :any:`encoding.BaseEncoder` that implements
        :any:`from_type_str` on :any:`base.BaseCoder`.
    :param decoder: A decoder callable (byte stream -> python value) or a
        subclass of :any:`decoding.BaseDecoder` that implements
        :any:`from_type_str` on :any:`base.BaseCoder`.
    :param label: An optional name for this registration, usable with
        :any:`unregister` and its variants.
    """
    # One call keeps the encoder and decoder registries in sync under
    # the same lookup and label.
    self.register_encoder(lookup, encoder, label=label)
    self.register_decoder(lookup, decoder, label=label)
|
Registers the given ``encoder`` and ``decoder`` under the given
``lookup``. A unique string label may be optionally provided that can
be used to refer to the registration by name.
:param lookup: A type string or type string matcher function
(predicate). When the registry is queried with a type string
``query`` to determine which encoder or decoder to use, ``query``
will be checked against every registration in the registry. If a
registration was created with a type string for ``lookup``, it will
be considered a match if ``lookup == query``. If a registration
was created with a matcher function for ``lookup``, it will be
considered a match if ``lookup(query) is True``. If more than one
registration is found to be a match, then an exception is raised.
:param encoder: An encoder callable or class to use if ``lookup``
matches a query. If ``encoder`` is a callable, it must accept a
python value and return a ``bytes`` value. If ``encoder`` is a
class, it must be a valid subclass of :any:`encoding.BaseEncoder`
and must also implement the :any:`from_type_str` method on
:any:`base.BaseCoder`.
:param decoder: A decoder callable or class to use if ``lookup``
matches a query. If ``decoder`` is a callable, it must accept a
stream-like object of bytes and return a python value. If
``decoder`` is a class, it must be a valid subclass of
:any:`decoding.BaseDecoder` and must also implement the
:any:`from_type_str` method on :any:`base.BaseCoder`.
:param label: An optional label that can be used to refer to this
registration by name. This label can be used to unregister an
entry in the registry via the :any:`unregister` method and its
variants.
|
def _wait_for_exec_ready(self):
        """
        Wait for response.

        :return: CliResponse object coming in
        :raises: TestStepTimeout, TestStepError
        """
        # Poll in 1-second slices so timeouts and a dead DUT are noticed
        # while waiting for the response event to be set.
        while not self.response_received.wait(1) and self.query_timeout != 0:
            if self.query_timeout != 0 and self.query_timeout < self.get_time():
                # Recover the command text for the error message if we can.
                if self.prev:
                    cmd = self.prev.cmd
                else:
                    cmd = "???"
                self.logger.error("CMD timeout: "+ cmd)
                self.query_timeout = 0
                raise TestStepTimeout(self.name + " CMD timeout: " + cmd)
            self.logger.debug("Waiting for response... "
                              "timeout=%d", self.query_timeout - self.get_time())
            self._dut_is_alive()
        # -1 appears to be the sentinel for "DUT died before responding".
        if self.response_coming_in == -1:
            if self.query_async_response is not None:
                # fullfill the async response with a dummy response and clean the state
                self.query_async_response.set_response(CliResponse())
                self.query_async_response = None
            # raise and log the error
            self.logger.error("No response received, DUT died")
            raise TestStepError("No response received, DUT "+self.name+" died")
        # if an async response is pending, fullfill it with the result
        if self.query_async_response is not None:
            self.query_async_response.set_response(self.response_coming_in)
            self.query_async_response = None
        self.query_timeout = 0
        return self.response_coming_in
|
Wait for response.
:return: CliResponse object coming in
:raises: TestStepTimeout, TestStepError
|
def gen(self):
        """Return a stable logical ID built from the prefix plus a data hash.

        The ID is deterministic: it changes if and only if either the prefix
        or the underlying data object changes. CloudFormation identifies
        resources by logical ID — if it changes, CFN deletes and recreates
        the resource — so stability here must be preserved for backwards
        compatibility.

        :return: LogicalId that can be used to construct resources
        :rtype: string
        """
        return "{prefix}{hash}".format(prefix=self._prefix, hash=self.get_hash())
|
Generate stable LogicalIds based on the prefix and given data. This method ensures that the logicalId is
deterministic and stable based on input prefix & data object. In other words:
logicalId changes *if and only if* either the `prefix` or `data_obj` changes
Internally we simply use a SHA1 of the data and append to the prefix to create the logicalId.
NOTE: LogicalIDs are how CloudFormation identifies a resource. If this ID changes, CFN will delete and
create a new resource. This can be catastrophic for most resources. So it is important to be *always*
backwards compatible here.
:return: LogicalId that can be used to construct resources
:rtype string
|
def post_signup(self, user, login_user=None, send_email=None):
        """Executes post signup actions: sending the signal, logging in the user and
        sending the welcome email

        :param user: the freshly created user object
        :param login_user: force (True) or suppress (False) auto-login;
            None falls back to the ``login_user_on_signup`` option.
        :param send_email: force (True) or suppress (False) the welcome
            email; None falls back to the ``send_welcome_email`` option.
        """
        self.signup_signal.send(self, user=user)
        if (login_user is None and self.options["login_user_on_signup"]) or login_user:
            self._login(user, user.signup_provider)
        to_email = getattr(user, self.options["email_column"], None)
        if to_email and ((send_email is None and self.options["send_welcome_email"]) or send_email):
            # ``send_welcome_email`` may be True (use the default template) or a
            # template path string — the ``== True`` comparison is deliberate.
            template = "users/welcome.txt" if self.options["send_welcome_email"] == True else self.options["send_welcome_email"]
            current_app.features.emails.send(to_email, template, user=user)
|
Executes post signup actions: sending the signal, logging in the user and
sending the welcome email
|
def get_objectives(self):
        """Return the objective list produced by the search.

        return: (osid.learning.ObjectiveList) - the objective list
        raise:  IllegalState - list already retrieved
        *compliance: mandatory -- This method must be implemented.*
        """
        # The result list may only be consumed once.
        if self.retrieved:
            raise errors.IllegalState('List has already been retrieved.')
        self.retrieved = True
        return objects.ObjectiveList(self._results, runtime=self._runtime)
|
Gets the objective list resulting from the search.
return: (osid.learning.ObjectiveList) - the objective list
raise: IllegalState - list already retrieved
*compliance: mandatory -- This method must be implemented.*
|
def get_unix_ioctl_terminal_size():
    """Get the terminal size of a UNIX terminal using the ioctl UNIX command.

    Tries, in order: ioctl(TIOCGWINSZ) on stdin/stdout/stderr, then on the
    controlling terminal, and finally the LINES/COLUMNS environment
    variables.

    :return: ``(columns, rows)`` tuple, or ``None`` if the size cannot be
        determined.
    """
    def query_fd(fd):
        # TIOCGWINSZ fills a winsize struct; we only need rows and columns.
        try:
            import fcntl
            import termios
            import struct
            return struct.unpack('hh', fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234'))
        except (IOError, OSError):
            return None

    size = next((s for s in map(query_fd, (0, 1, 2)) if s), None)
    if not size:
        try:
            tty = open(os.ctermid())
            size = query_fd(tty.fileno())
            tty.close()
        except (IOError, OSError):
            pass
    if not size:
        try:
            size = (os.environ['LINES'], os.environ['COLUMNS'])
        except KeyError:
            return None
    # ioctl/env give (rows, cols); callers get (cols, rows).
    return int(size[1]), int(size[0])
|
Get the terminal size of a UNIX terminal using the ioctl UNIX command.
|
def upload(self):
        """Dispatch the upload to the handler matching ``self.upload_method``.

        Recognised methods are ``setup``, ``twine`` and ``gemfury``; any
        other value is silently ignored.

        :return:
        """
        handlers = {
            "setup": self.upload_by_setup,
            "twine": self.upload_by_twine,
            "gemfury": self.upload_by_gemfury,
        }
        handler = handlers.get(self.upload_method)
        if handler is not None:
            handler()
|
upload via the method configured
:return:
|
def getChangeSets(self):
        """Fetch every ChangeSet attached to this workitem.

        :return: a :class:`list` containing all the
            :class:`rtcclient.models.ChangeSet` objects
        :rtype: list
        """
        customized_attr = ("rtc_cm:com.ibm.team.filesystem.workitems."
                           "change_set.com.ibm.team.scm.ChangeSet")
        return self.rtc_obj._get_paged_resources(
            "ChangeSet",
            workitem_id=self.identifier,
            customized_attr=customized_attr,
            page_size="10")
|
Get all the ChangeSets of this workitem
:return: a :class:`list` contains all the
:class:`rtcclient.models.ChangeSet` objects
:rtype: list
|
async def get_object(self, Bucket: str, Key: str, **kwargs) -> dict:
        """
        S3 GetObject. Takes same args as Boto3 documentation

        Decrypts any CSE (client-side encryption, marked by the
        ``x-amz-key`` / ``x-amz-key-v2`` object metadata keys).

        :param Bucket: S3 Bucket
        :param Key: S3 Key (filepath)
        :return: returns same response as a normal S3 get_object
        """
        # Lazily create the S3 client on first use.
        if self._s3_client is None:
            await self.setup()
        # Ok so if we are doing a range get. We need to align the range start/end with AES block boundaries
        # 9223372036854775806 is 8EiB so I have no issue with hardcoding it.
        # We pass the actual start, desired start and desired end to the decrypt function so that it can
        # generate the correct IV's for starting decryption at that block and then chop off the start and end of the
        # AES block so it matches what the user is expecting.
        _range = kwargs.get('Range')
        actual_range_start = None
        desired_range_start = None
        desired_range_end = None
        if _range:
            range_match = RANGE_REGEX.match(_range)
            if not range_match:
                raise ValueError('Dont understand this range value {0}'.format(_range))
            desired_range_start = int(range_match.group(1))
            desired_range_end = range_match.group(2)
            # Open-ended range ("bytes=N-"): substitute the 8 EiB sentinel.
            if desired_range_end is None:
                desired_range_end = 9223372036854775806
            else:
                desired_range_end = int(desired_range_end)
            actual_range_start, actual_range_end = _get_adjusted_crypto_range(desired_range_start, desired_range_end)
            # Update range with actual start_end
            kwargs['Range'] = 'bytes={0}-{1}'.format(actual_range_start, actual_range_end)
        s3_response = await self._s3_client.get_object(Bucket=Bucket, Key=Key, **kwargs)
        file_data = await s3_response['Body'].read()
        metadata = s3_response['Metadata']
        whole_file_length = int(s3_response['ResponseMetadata']['HTTPHeaders']['content-length'])
        # Objects without the x-amz-key metadata were never client-side
        # encrypted: return the response untouched.
        if 'x-amz-key' not in metadata and 'x-amz-key-v2' not in metadata:
            # No crypto
            return s3_response
        if 'x-amz-key' in metadata:
            # Crypto V1
            body = await self._decrypt_v1(file_data, metadata, actual_range_start)
        else:
            # Crypto V2
            body = await self._decrypt_v2(file_data, metadata, whole_file_length,
                                          actual_range_start, desired_range_start,
                                          desired_range_end)
        # Replace the raw (encrypted) body with the decrypted bytes, wrapped
        # so callers can still ``await .read()`` it.
        s3_response['Body'] = DummyAIOFile(body)
        return s3_response
|
S3 GetObject. Takes same args as Boto3 documentation
Decrypts any CSE
:param Bucket: S3 Bucket
:param Key: S3 Key (filepath)
:return: returns same response as a normal S3 get_object
|
def wrsamp(self, expanded=False, write_dir=''):
        """
        Write a wfdb header file and any associated dat files from this
        object.

        Parameters
        ----------
        expanded : bool, optional
            Whether to write the expanded signal (e_d_signal) instead
            of the uniform signal (d_signal).
        write_dir : str, optional
            The directory in which to write the files.

        """
        # Header first: this runs the field validity and cohesion checks
        # before any signal data is written.
        self.wrheader(write_dir=write_dir)
        # Only records that actually carry signals need dat files.
        if self.n_sig > 0:
            self.wr_dats(expanded=expanded, write_dir=write_dir)
|
Write a wfdb header file and any associated dat files from this
object.
Parameters
----------
expanded : bool, optional
Whether to write the expanded signal (e_d_signal) instead
of the uniform signal (d_signal).
write_dir : str, optional
The directory in which to write the files.
|
def as_ul(self, show_leaf=True, current_linkable=False, class_current="active_link"):
        """Render the breadcrumb as an HTML ``<ul>`` list.

        Delegates to the internal menu renderer with the ``as_ul`` mode.
        """
        return self.__do_menu("as_ul", show_leaf, current_linkable, class_current)
|
It returns breadcrumb as ul
|
def _fuzzy_custom_query(issn, titles):
    """Yield fuzzy-match filters on journal titles for a boolean "should" query.

    Filters loaded from the journal's customised search template (when such a
    template exists for ``issn``) take precedence; entries from ``titles`` are
    appended only when not already covered by the template.
    """
    custom_queries = journal_titles.load(issn).get('should', [])
    covered = [entry['title'] for entry in custom_queries]
    candidates = [{'title': title} for title in titles if title not in covered]
    candidates.extend(custom_queries)
    for entry in candidates:
        # Skip blank titles outright.
        if not entry['title'].strip():
            continue
        yield {
            "fuzzy": {
                "reference_source_cleaned": {
                    "value": utils.cleanup_string(entry['title']),
                    "fuzziness": entry.get('fuzziness', 3),
                    "max_expansions": 50
                }
            }
        }
|
Este metodo constroi a lista de filtros por título de periódico que
será aplicada na pesquisa boleana como match por similaridade "should".
A lista de filtros é coletada do template de pesquisa customizada
do periódico, quanto este template existir.
|
def public_copy(self):
        """Yield the corresponding public node for this node."""
        # Rebuild via the class constructor so subclasses copy correctly.
        return self.__class__(
            chain_code=self._chain_code,
            depth=self._depth,
            parent_fingerprint=self._parent_fingerprint,
            child_index=self._child_index,
            public_pair=self.public_pair(),
        )
|
Yield the corresponding public node for this node.
|
def get(self, request, bot_id, id, format=None):
        """
        Get hook by id
        ---
        serializer: HookSerializer
        responseMessages:
            - code: 401
              message: Not authenticated
        """
        # Pure delegation: lookup, serialization and permission handling all
        # happen in the parent detail view.
        return super(HookDetail, self).get(request, bot_id, id, format)
|
Get hook by id
---
serializer: HookSerializer
responseMessages:
- code: 401
message: Not authenticated
|
def cmd_show(docid):
    """
    Arguments: <doc_id>

    Show document information (but not its content, see 'dump').
    See 'search' for the document id.

    Possible JSON replies:
    --
    {
        "status": "error", "exception": "yyy",
        "reason": "xxxx", "args": "(xxxx, )"
    }
    --
    {
        "status": "ok",
        "type": "ImgDoc",
        "nb_pages": 3,
        "pages": [
            {"n": 1, "lines": 10, "words": 22},
            {"n": 2, "lines": 20, "words": 22},
            {"n": 3, "lines": 30, "words": 34},
        ],
        "labels": ["aaa", "bbb"],
        "first_line": "vwklsd wldkwq",
    }
    """
    dsearch = get_docsearch()
    doc = dsearch.get(docid)
    pages = []
    for page in doc.pages:
        # Each box is one line of text; its word_boxes are the words on it.
        lines = list(page.boxes)
        pages.append({
            "n": page.page_nb + 1,
            "lines": len(lines),
            "words": sum(len(line.word_boxes) for line in lines),
        })
    reply({
        'type': str(type(doc)),
        'nb_pages': doc.nb_pages,
        'labels': [label.name for label in doc.labels],
        'first_line': _get_first_line(doc),
        'pages': pages,
    })
|
Arguments: <doc_id>
Show document information (but not its content, see 'dump').
See 'search' for the document id.
Possible JSON replies:
--
{
"status": "error", "exception": "yyy",
"reason": "xxxx", "args": "(xxxx, )"
}
--
{
"status": "ok",
"type": "ImgDoc",
"nb_pages": 3,
"pages": [
{"n": 1, "lines": 10, "words": 22},
{"n": 2, "lines": 20, "words": 22},
{"n": 3, "lines": 30, "words": 34},
],
"labels": ["aaa", "bbb"],
"first_line": "vwklsd wldkwq",
}
|
def make_witness_input_and_witness(outpoint, sequence,
                                   stack=None, **kwargs):
    '''
    Outpoint, int, list(bytearray) -> (Input, InputWitness)
    '''
    # Decred needs a richer witness built from the extra keyword arguments.
    if 'decred' in riemann.get_current_network_name():
        witness = make_decred_witness(value=kwargs['value'],
                                      height=kwargs['height'],
                                      index=kwargs['index'],
                                      stack_script=kwargs['stack_script'],
                                      redeem_script=kwargs['redeem_script'])
    else:
        witness = make_witness(stack)
    return (make_witness_input(outpoint, sequence), witness)
|
Outpoint, int, list(bytearray) -> (Input, InputWitness)
|
def result(self):
        """ Concatenate accumulated tensors """
        stacked = {}
        for key, tensors in self.accumulants.items():
            stacked[key] = torch.stack(tensors)
        return stacked
|
Concatenate accumulated tensors
|
def describe_keypairs(self, *keypair_names):
        """Returns information about key pairs available.

        Each requested name is encoded as a numbered ``KeyName.N`` query
        parameter, as the EC2 DescribeKeyPairs action expects.
        """
        other_params = {
            "KeyName.%d" % (position + 1): name
            for position, name in enumerate(keypair_names)
        }
        query = self.query_factory(
            action="DescribeKeyPairs", creds=self.creds,
            endpoint=self.endpoint, other_params=other_params)
        return query.submit().addCallback(self.parser.describe_keypairs)
|
Returns information about key pairs available.
|
def subprogram_prototype(vo):
    '''Generate a canonical prototype string

    Args:
      vo (VhdlFunction, VhdlProcedure): Subprogram object
    Returns:
      Prototype string.
    '''
    params = '; '.join(str(p) for p in vo.parameters)
    if not isinstance(vo, VhdlFunction):
        # Procedures carry no return type.
        return 'procedure {}({});'.format(vo.name, params)
    if vo.parameters:
        return 'function {}({}) return {};'.format(vo.name, params, vo.return_type)
    # Parameterless functions omit the empty parentheses entirely.
    return 'function {} return {};'.format(vo.name, vo.return_type)
|
Generate a canonical prototype string
Args:
vo (VhdlFunction, VhdlProcedure): Subprogram object
Returns:
Prototype string.
|
def create_table(cls):
        """
        create_table

        Manually create a temporary table for model in test data base.
        :return:
        """
        # Django >= 1.7 exposes schema_editor on the connection; prefer it.
        schema_editor = getattr(connection, 'schema_editor', None)
        if schema_editor:
            with schema_editor() as schema_editor:
                schema_editor.create_model(cls)
        else:
            # Legacy fallback (pre-schema_editor Django): build the raw
            # CREATE TABLE SQL ourselves and execute it on a cursor.
            raw_sql, _ = connection.creation.sql_create_model(
                cls,
                no_style(),
                [])
            # Drop any leftover table first so the CREATE cannot collide.
            cls.delete_table()
            cursor = connection.cursor()
            try:
                cursor.execute(*raw_sql)
            finally:
                cursor.close()
|
create_table
Manually create a temporary table for model in test data base.
:return:
|
def find(self, title):
        """Fetch and return the first spreadsheet with the given title.

        Args:
            title(str): title/name of the spreadsheet to return
        Returns:
            SpreadSheet: new SpreadSheet instance
        Raises:
            KeyError: if no spreadsheet with the given ``title`` is found
        """
        # Take the first matching file; the backend may yield several.
        for file_id, _ in backend.iterfiles(self._drive, name=title):
            return self[file_id]
        raise KeyError(title)
|
Fetch and return the first spreadsheet with the given title.
Args:
title(str): title/name of the spreadsheet to return
Returns:
SpreadSheet: new SpreadSheet instance
Raises:
KeyError: if no spreadsheet with the given ``title`` is found
|
def tgread_bool(self):
        """Reads a Telegram boolean value."""
        marker = self.read_int(signed=False)
        if marker == 0x997275b5:  # boolTrue constructor id
            return True
        if marker == 0xbc799737:  # boolFalse constructor id
            return False
        raise RuntimeError('Invalid boolean code {}'.format(hex(marker)))
|
Reads a Telegram boolean value.
|
def delete_queue(queues):
    """Delete the given queues."""
    current_queues.delete(queues=queues)
    # Report what was deleted; an empty selection means "all known queues".
    deleted = queues or current_queues.queues.keys()
    click.secho(
        'Queues {} have been deleted.'.format(deleted),
        fg='green'
    )
|
Delete the given queues.
|
def random_filter(objects, reduction_factor, seed=42):
    """Return a random sublist of *objects*.

    Each element is kept independently with probability ``reduction_factor``
    (0 < reduction_factor <= 1). The RNG is seeded, so the selection is
    reproducible for a given seed.
    """
    assert 0 < reduction_factor <= 1, reduction_factor
    rnd = random.Random(seed)
    # rnd.random() is drawn once per element, in input order, exactly as a
    # manual append loop would.
    return [obj for obj in objects if rnd.random() <= reduction_factor]
|
Given a list of objects, returns a sublist by extracting randomly
some elements. The reduction factor (< 1) tells how small is the extracted
list compared to the original list.
|
def pos(self):
        """ The position of this event in the local coordinate system of the
        visual.
        """
        # Computed lazily and cached on first access.
        if self._pos is None:
            transform = self.visual.get_transform('canvas', 'visual')
            self._pos = transform.map(self.mouse_event.pos)
        return self._pos
|
The position of this event in the local coordinate system of the
visual.
|
def _get_owner_cover_photo_upload_server(session, group_id, crop_x=0, crop_y=0, crop_x2=795, crop_y2=200):
    """Return the upload URL for a community cover photo.

    https://vk.com/dev/photos.getOwnerCoverPhotoUploadServer
    """
    # The API wants a positive community id even when callers pass the
    # conventional negative owner id.
    response = session.fetch(
        "photos.getOwnerCoverPhotoUploadServer",
        group_id=abs(group_id),
        crop_x=crop_x,
        crop_y=crop_y,
        crop_x2=crop_x2,
        crop_y2=crop_y2,
    )
    return response['upload_url']
|
https://vk.com/dev/photos.getOwnerCoverPhotoUploadServer
|
def parse(self, rrstr):
        # type: (bytes) -> None
        '''
        Parse a Rock Ridge Sharing Protocol record out of a string.

        Parameters:
         rrstr - The string to parse the record out of.
        Returns:
         Nothing.
        '''
        if self._initialized:
            raise pycdlibexception.PyCdlibInternalError('SP record already initialized!')
        # Five unsigned bytes starting at offset 2 (past the 'SP' signature):
        # su_len, su_entry_version, check byte 1, check byte 2, bytes_to_skip.
        (su_len, su_entry_version_unused, check_byte1, check_byte2,
         self.bytes_to_skip) = struct.unpack_from('=BBBBB', rrstr[:7], 2)
        # We assume that the caller has already checked the su_entry_version,
        # so we don't bother.
        if su_len != RRSPRecord.length():
            raise pycdlibexception.PyCdlibInvalidISO('Invalid length on rock ridge extension')
        # SP records carry the fixed check bytes 0xbe 0xef per SUSP.
        if check_byte1 != 0xbe or check_byte2 != 0xef:
            raise pycdlibexception.PyCdlibInvalidISO('Invalid check bytes on rock ridge extension')
        self._initialized = True
|
Parse a Rock Ridge Sharing Protocol record out of a string.
Parameters:
rrstr - The string to parse the record out of.
Returns:
Nothing.
|
def _get_aggregated_node_list(self, data):
        """
        Returns list of main and secondary mac addresses.

        Each output entry is the node's primary address followed by any
        secondary addresses it declares.
        """
        aggregated = []
        for node in data:
            addresses = [node['primary']]
            addresses.extend(node.get('secondary', []))
            aggregated.append(addresses)
        return aggregated
|
Returns list of main and secondary mac addresses.
|
def color_from_hls(hue, light, sat):
    """Convert an HLS colour to the bulb's hue value.

    Bulbs use a BGR order instead of RGB, hence the hue inversion and
    translation. Returns 256 for near-white (too bright) and -1 (off)
    for near-black (too dark).
    """
    if light > 0.95:
        # Too bright: just switch to white.
        return 256
    if light < 0.05:
        # Too dark: shut it off.
        return -1
    # Invert the hue and translate by 2/3 to map RGB onto BGR ordering.
    adjusted = (-hue + 1 + 2.0 / 3.0) % 1
    return int(floor(adjusted * 256))
|
Takes a hls color and converts to proper hue
Bulbs use a BGR order instead of RGB
|
def delay(self, seconds=0, minutes=0):
        """Pause the robot in place for the given amount of time.

        Parameters
        ----------
        seconds: float
            The number of seconds to freeze in place.
        minutes: int
            Additional whole minutes to wait.
        """
        # Normalise the total wait into seconds.
        minutes += int(seconds / 60)
        seconds = seconds % 60
        total_seconds = seconds + float(minutes * 60)
        self.robot.pause()
        # Simulations skip the real sleep entirely.
        if not self.robot.is_simulating():
            _sleep(total_seconds)
        self.robot.resume()
        return self
|
Parameters
----------
seconds: float
The number of seconds to freeze in place.
|
def __request_mark_sent(self, requestId):
        """Set send time & clear exception from request if set, ignoring non-existent requests"""
        # ``self.__requests`` appears to double as its own lock (entered via
        # ``with``) guarding the request table — confirm against its class.
        with self.__requests:
            try:
                req = self.__requests[requestId]
            except KeyError:
                # request might have had a response already have been removed by receiving thread
                pass
            else:
                req.exception = None
                req._send_time = monotonic()
|
Set send time & clear exception from request if set, ignoring non-existent requests
|
async def build_proof_req_json(self, cd_id2spec: dict, cache_only: bool = False) -> str:
        """
        Build and return indy-sdk proof request for input attributes and timestamps by cred def id.

        Raise AbsentInterval if caller specifies cache_only and default non-revocation intervals, but
        revocation cache does not have delta frames for any revocation registries on a specified cred def.

        :param cd_id2spec: dict mapping cred def ids to:

            - (optionally) 'attrs': lists of names of attributes of interest (omit for all, empty list or None for none)
            - (optionally) 'minima': (pred) integer lower-bounds of interest (omit, empty list, or None for none)
            - (optionally), 'interval': (2-tuple) pair of epoch second counts marking 'from' and 'to' timestamps,
              or single epoch second count to set 'from' and 'to' the same: default (now, now) if cache_only
              is clear, or latest values from cache if cache_only is set.

            e.g.,

            ::

                {
                    'Vx4E82R17q...:3:CL:16:0': {
                        'attrs': [  # request attrs 'name' and 'favouriteDrink' from this cred def's schema
                            'name',
                            'favouriteDrink'
                        ],
                        'minima': {  # request predicate score>=80 from this cred def
                            'score': 80
                        }
                        'interval': 1528116008  # same instant for all attrs and preds of corresponding schema
                    },
                    'R17v42T4pk...:3:CL:19:0': None,  # request all attrs, no preds, default intervals on all attrs
                    'e3vc5K168n...:3:CL:23:0': {},  # request all attrs, no preds, default intervals on all attrs
                    'Z9ccax812j...:3:CL:27:0': {  # request all attrs, no preds, this interval on all attrs
                        'interval': (1528112408, 1528116008)
                    },
                    '9cHbp54C8n...:3:CL:37:0': {  # request no attrs, one pred, specify interval on pred
                        'attrs': [],  # or equivalently, 'attrs': None
                        'minima': {
                            'employees': '50'  # nicety: implementation converts to int for caller
                        },
                        'interval': (1528029608, 1528116008)
                    },
                    '6caBcmLi33...:3:CL:41:0': {  # all attrs, one pred, default intervals to now on attrs & pred
                        'minima': {
                            'regEpoch': 1514782800
                        }
                    }
                    ...
                }

        :param cache_only: (True) take default intervals (per cred def id) from latest cached deltas, or
            (default False) use current time
        :return: indy-sdk proof request json
        """
        LOGGER.debug('HolderProver.build_proof_req_json >>> cd_id2spec: %s, cache_only: %s', cd_id2spec, cache_only)
        cd_id2schema = {}
        # Freeze "now" once so every default interval in this request agrees.
        now = int(time())
        # NOTE(review): nonce is epoch seconds — low entropy; presumably
        # adequate for this protocol, confirm against indy-sdk requirements.
        proof_req = {
            'nonce': str(int(time())),
            'name': 'proof_req',
            'version': '0.0',
            'requested_attributes': {},
            'requested_predicates': {}
        }
        for cd_id in cd_id2spec:
            interval = None
            cred_def = json.loads(await self.get_cred_def(cd_id))
            seq_no = cred_def_id2seq_no(cd_id)
            cd_id2schema[cd_id] = json.loads(await self.get_schema(seq_no))
            # Only revocable cred defs get a non-revocation interval.
            if 'revocation' in cred_def['value']:
                if cache_only and not (cd_id2spec.get(cd_id, {}) or {}).get('interval', None):
                    # Default interval from the revocation cache; absence of
                    # cached deltas is a hard error in cache-only mode.
                    with REVO_CACHE.lock:
                        (fro, to) = REVO_CACHE.dflt_interval(cd_id)
                        if not (fro and to):
                            LOGGER.debug(
                                'HolderProver.build_proof_req_json: <!< no cached delta for non-revoc interval on %s',
                                cd_id)
                            raise AbsentInterval('No cached delta for non-revoc interval on {}'.format(cd_id))
                        interval = {
                            'from': fro,
                            'to': to
                        }
                else:
                    # Caller-specified interval may be a scalar or a 2-tuple.
                    fro_to = cd_id2spec[cd_id].get('interval', (now, now)) if cd_id2spec[cd_id] else (now, now)
                    interval = {
                        'from': fro_to if isinstance(fro_to, int) else min(fro_to),
                        'to': fro_to if isinstance(fro_to, int) else max(fro_to)
                    }
            for attr in (cd_id2spec[cd_id].get('attrs', cd_id2schema[cd_id]['attrNames']) or []
                    if cd_id2spec[cd_id] else cd_id2schema[cd_id]['attrNames']):
                attr_uuid = '{}_{}_uuid'.format(seq_no, attr)
                proof_req['requested_attributes'][attr_uuid] = {
                    'name': attr,
                    'restrictions': [{
                        'cred_def_id': cd_id
                    }]
                }
                if interval:
                    proof_req['requested_attributes'][attr_uuid]['non_revoked'] = interval
            for attr in (cd_id2spec[cd_id].get('minima', {}) or {} if cd_id2spec[cd_id] else {}):
                pred_uuid = '{}_{}_uuid'.format(seq_no, attr)
                try:
                    proof_req['requested_predicates'][pred_uuid] = {
                        'name': attr,
                        'p_type': '>=',
                        'p_value': int(cd_id2spec[cd_id]['minima'][attr]),
                        'restrictions': [{
                            'cred_def_id': cd_id
                        }]
                    }
                except ValueError:
                    LOGGER.info(
                        'cannot build predicate on non-int minimum %s for %s',
                        cd_id2spec[cd_id]['minima'][attr],
                        attr)
                    continue  # int conversion failed - reject candidate
                if interval:
                    proof_req['requested_predicates'][pred_uuid]['non_revoked'] = interval
        rv_json = json.dumps(proof_req)
        LOGGER.debug('HolderProver.build_proof_req_json <<< %s', rv_json)
        return rv_json
|
Build and return indy-sdk proof request for input attributes and timestamps by cred def id.
Raise AbsentInterval if caller specifies cache_only and default non-revocation intervals, but
revocation cache does not have delta frames for any revocation registries on a specified cred def.
:param cd_id2spec: dict mapping cred def ids to:
- (optionally) 'attrs': lists of names of attributes of interest (omit for all, empty list or None for none)
- (optionally) 'minima': (pred) integer lower-bounds of interest (omit, empty list, or None for none)
- (optionally), 'interval': (2-tuple) pair of epoch second counts marking 'from' and 'to' timestamps,
or single epoch second count to set 'from' and 'to' the same: default (now, now) if cache_only
is clear, or latest values from cache if cache_only is set.
e.g.,
::
{
'Vx4E82R17q...:3:CL:16:0': {
'attrs': [ # request attrs 'name' and 'favouriteDrink' from this cred def's schema
'name',
'favouriteDrink'
],
'minima': { # request predicate score>=80 from this cred def
'score': 80
}
'interval': 1528116008 # same instant for all attrs and preds of corresponding schema
},
'R17v42T4pk...:3:CL:19:0': None, # request all attrs, no preds, default intervals on all attrs
'e3vc5K168n...:3:CL:23:0': {}, # request all attrs, no preds, default intervals on all attrs
'Z9ccax812j...:3:CL:27:0': { # request all attrs, no preds, this interval on all attrs
'interval': (1528112408, 1528116008)
},
'9cHbp54C8n...:3:CL:37:0': { # request no attrs, one pred, specify interval on pred
'attrs': [], # or equivalently, 'attrs': None
'minima': {
'employees': '50' # nicety: implementation converts to int for caller
},
'interval': (1528029608, 1528116008)
},
'6caBcmLi33...:3:CL:41:0': { # all attrs, one pred, default intervals to now on attrs & pred
'minima': {
'regEpoch': 1514782800
}
}
...
}
:param cache_only: (True) take default intervals (per cred def id) from latest cached deltas, or
(default False) use current time
:return: indy-sdk proof request json
|
def result(self, state, action):
    '''Return the resulting state after moving a piece to the empty space.
    (the "action" parameter contains the piece to move)
    '''
    rows = string_to_list(state)
    empty_row, empty_col = find_location(rows, 'e')
    piece_row, piece_col = find_location(rows, action)
    # Swap the chosen piece with the empty slot.
    rows[empty_row][empty_col], rows[piece_row][piece_col] = \
        rows[piece_row][piece_col], rows[empty_row][empty_col]
    return list_to_string(rows)
|
Return the resulting state after moving a piece to the empty space.
(the "action" parameter contains the piece to move)
|
def _iso_handler(obj):
    """Return the ISO representation of *obj* for the JSON encoder.

    Intended for datetime instances, but works with any object exposing an
    ``isoformat`` method.

    :param obj: object to transform into it's ISO format
    :return: the ISO format of the object
    :raises TypeError: if the object has no ``isoformat`` method (signals
        the JSON parser that the object is unserializable)
    """
    if not hasattr(obj, 'isoformat'):
        raise TypeError("Unserializable object {} of type {}".format(obj,
                                                                     type(obj)))
    return obj.isoformat()
|
Transforms an object into it's ISO format, if possible.
If the object can't be transformed, then an error is raised for the JSON
parser.
This is meant to be used on datetime instances, but will work with any
object having a method called isoformat.
:param obj: object to transform into it's ISO format
:return: the ISO format of the object
|
def get_advances_declines(self, as_json=False):
        """
        :return: a list of dictionaries with advance decline data
        :raises: URLError, HTTPError
        """
        req = Request(self.advances_declines_url, None, self.headers)
        # opener.open raises URLError or HTTPError on failure.
        raw = self.opener.open(req)
        # py3 compat: convert the byte stream into a str file-like object
        # before handing it to the JSON parser.
        resp_dict = json.load(byte_adaptor(raw))
        cleaned = [self.clean_server_response(record)
                   for record in resp_dict['data']]
        return self.render_response(cleaned, as_json)
|
:return: a list of dictionaries with advance decline data
:raises: URLError, HTTPError
|
def is_avro(path_or_buffer):
    """Return True if path (or buffer) points to an Avro file.

    Parameters
    ----------
    path_or_buffer: path to file or file-like object
        Path to file
    """
    if is_str(path_or_buffer):
        # We opened the file ourselves, so we are responsible for closing it.
        fp = open(path_or_buffer, 'rb')
        try:
            return fp.read(len(MAGIC)) == MAGIC
        finally:
            fp.close()
    # Caller-supplied buffer: read but never close it.
    return path_or_buffer.read(len(MAGIC)) == MAGIC
|
Return True if path (or buffer) points to an Avro file.
Parameters
----------
path_or_buffer: path to file or file-like object
Path to file
|
def sendhello(self):
        """Exchange NETCONF <hello> messages with the server.

        Sends the client capabilities advertisement, records both the sent
        and the received hello message in the connection parameters, and
        exits the process if the exchange fails.
        """
        try:
            # send hello
            cli_hello_msg = ("<hello>\n"
                             "  <capabilities>\n"
                             "    <capability>urn:ietf:params:netconf:base:1.0</capability>\n"
                             "  </capabilities>\n"
                             "</hello>\n")
            self._cParams.set('cli_hello', cli_hello_msg)
            self._hConn.sendmsg(cli_hello_msg)
            # recv hello
            ser_hello_msg = self._hConn.recvmsg()
            self._cParams.set('ser_hello', ser_hello_msg)
        except Exception:
            # Ported from Python 2: the original used a bare ``except`` and a
            # ``print`` statement (a syntax error under Python 3). Narrowed to
            # Exception so KeyboardInterrupt/SystemExit still propagate.
            print('BNClient: Call sendhello fail')
            sys.exit()
|
end of function exchgcaps
|
def field(self, name):
"""
Returns the field on this struct with the given name. Will try to find this
name on all ancestors if this struct extends another.
If found, returns a dict with keys: 'name', 'comment', 'type', 'is_array'
If not found, returns None
:Parameters:
name
string name of field to lookup
"""
if self.fields.has_key(name):
return self.fields[name]
elif self.extends:
if not self.parent:
self.parent = self.contract.struct(self.extends)
return self.parent.field(name)
else:
return None
|
Returns the field on this struct with the given name. Will try to find this
name on all ancestors if this struct extends another.
If found, returns a dict with keys: 'name', 'comment', 'type', 'is_array'
If not found, returns None
:Parameters:
name
string name of field to lookup
|
def create_marking_iobject(self,
                           uid=None,
                           timestamp=None,
                           metadata_dict=None,
                           id_namespace_uri=DINGOS_DEFAULT_ID_NAMESPACE_URI,
                           iobject_family_name=DINGOS_IOBJECT_FAMILY_NAME,
                           iobject_family_revison_name=DINGOS_REVISION_NAME,
                           iobject_type_name=DINGOS_DEFAULT_IMPORT_MARKING_TYPE_NAME,
                           iobject_type_namespace_uri=DINGOS_NAMESPACE_URI,
                           iobject_type_revision_name=DINGOS_REVISION_NAME,
                           ):
        """
        A specialized version of create_iobject with defaults set such that a
        default marking object is created.

        :param uid: identifier; a fresh uuid1 is generated when omitted.
        :param timestamp: creation timestamp; defaults to the current time
            *at call time*.
        """
        # BUG FIX: the previous default ``timestamp=timezone.now()`` was
        # evaluated once at import time, so every call relying on the default
        # shared the module-load timestamp. Compute it per call instead.
        if timestamp is None:
            timestamp = timezone.now()
        if not uid:
            uid = uuid.uuid1()
        iobject, created = self.create_iobject(iobject_family_name=iobject_family_name,
                                               iobject_family_revision_name=iobject_family_revison_name,
                                               iobject_type_name=iobject_type_name,
                                               iobject_type_namespace_uri=iobject_type_namespace_uri,
                                               iobject_type_revision_name=iobject_type_revision_name,
                                               iobject_data=metadata_dict,
                                               uid=uid,
                                               identifier_ns_uri=id_namespace_uri,
                                               timestamp=timestamp,
                                               )
        return iobject
|
A specialized version of create_iobject with defaults set such that a default marking object is created.
|
def _check_samples_line(klass, arr):
        """Perform additional check on samples line.

        ``arr`` is the tab-split ``#CHROM`` header line. Raises
        ``exceptions.IncorrectVCFFormat`` when the line matches neither the
        no-sample header nor the with-samples header prefix.
        """
        if len(arr) <= len(REQUIRE_NO_SAMPLE_HEADER):
            # Short line: must be exactly the no-sample header.
            if tuple(arr) != REQUIRE_NO_SAMPLE_HEADER:
                raise exceptions.IncorrectVCFFormat(
                    "Sample header line indicates no sample but does not "
                    "equal required prefix {}".format("\t".join(REQUIRE_NO_SAMPLE_HEADER))
                )
        elif tuple(arr[: len(REQUIRE_SAMPLE_HEADER)]) != REQUIRE_SAMPLE_HEADER:
            raise exceptions.IncorrectVCFFormat(
                'Sample header line (starting with "#CHROM") does not '
                "start with required prefix {}".format("\t".join(REQUIRE_SAMPLE_HEADER))
            )
|
Perform additional check on samples line
|
def user_lookup(self, ids, id_type="user_id"):
        """
        A generator that returns users for supplied user ids, screen_names,
        or an iterator of user_ids of either. Use the id_type to indicate
        which you are supplying (user_id or screen_name)
        """
        if id_type not in ['user_id', 'screen_name']:
            raise RuntimeError("id_type must be user_id or screen_name")
        if not isinstance(ids, types.GeneratorType):
            ids = iter(ids)
        # TODO: this is similar to hydrate, maybe they could share code?
        lookup_ids = []
        def do_lookup():
            # One POST-style lookup for the currently accumulated batch.
            ids_str = ",".join(lookup_ids)
            log.info("looking up users %s", ids_str)
            url = 'https://api.twitter.com/1.1/users/lookup.json'
            params = {id_type: ids_str}
            try:
                resp = self.get(url, params=params, allow_404=True)
            except requests.exceptions.HTTPError as e:
                # 404 means none of the batch matched; log it, but re-raise
                # either way so callers see the failure.
                if e.response.status_code == 404:
                    log.warning("no users matching %s", ids_str)
                raise e
            return resp.json()
        for id in ids:
            lookup_ids.append(id.strip())
            # The lookup endpoint accepts at most 100 ids per request.
            if len(lookup_ids) == 100:
                for u in do_lookup():
                    yield u
                lookup_ids = []
        # Flush the final partial batch, if any.
        if len(lookup_ids) > 0:
            for u in do_lookup():
                yield u
|
A generator that returns users for supplied user ids, screen_names,
or an iterator of user_ids of either. Use the id_type to indicate
which you are supplying (user_id or screen_name)
|
def _generate_type_code_query(self, value):
    """Generate a type-code query.

    If ``value`` has an entry in
    ``TYPECODE_VALUE_TO_FIELD_AND_VALUE_PAIRS_MAPPING``, query the mapped
    field with the mapped value (see
    https://github.com/inspirehep/inspire-query-parser/issues/79).
    Otherwise, query both ``document_type`` and ``publication_type``.
    """
    mapping = self.TYPECODE_VALUE_TO_FIELD_AND_VALUE_PAIRS_MAPPING.get(value, None)
    if mapping:
        return generate_match_query(*mapping, with_operator_and=True)
    # No explicit mapping: match either of the two generic fields.
    alternatives = [
        generate_match_query(field, value, with_operator_and=True)
        for field in ('document_type', 'publication_type')
    ]
    return {
        'bool': {
            'minimum_should_match': 1,
            'should': alternatives,
        }
    }
|
Generate type-code queries.
Notes:
If the value of the type-code query exists in `TYPECODE_VALUE_TO_FIELD_AND_VALUE_PAIRS_MAPPING, then we
query the specified field, along with the given value according to the mapping.
See: https://github.com/inspirehep/inspire-query-parser/issues/79
Otherwise, we query both ``document_type`` and ``publication_info``.
|
def convert_rect(self, rect):
    '''
    Translate the relative position of @rect into an absolute position.

    Intended for event handling; blitting is handled directly by the
    Container().
    '''
    shifted = rect.move(self.pos)
    return self.container.convert_rect(shifted)
|
Converts the relative position of @rect into an absolute position.To be
used for event considerations, blitting is handled directly by the
Container().
|
def inspiral_range(psd, snr=8, mass1=1.4, mass2=1.4, fmin=None, fmax=None,
                   horizon=False):
    """Calculate the inspiral sensitive distance from a GW strain PSD

    The method returns the distance (in megaparsecs) to which a compact
    binary inspiral with the given component masses would be detectable
    given the instrumental PSD. The calculation is as defined in:

    https://dcc.ligo.org/LIGO-T030276/public

    Parameters
    ----------
    psd : `~gwpy.frequencyseries.FrequencySeries`
        the instrumental power-spectral-density data

    snr : `float`, optional
        the signal-to-noise ratio for which to calculate range,
        default: `8`

    mass1 : `float`, `~astropy.units.Quantity`, optional
        the mass (`float` assumed in solar masses) of the first binary
        component, default: `1.4`

    mass2 : `float`, `~astropy.units.Quantity`, optional
        the mass (`float` assumed in solar masses) of the second binary
        component, default: `1.4`

    fmin : `float`, optional
        the lower frequency cut-off of the integral, default: `psd.df`

    fmax : `float`, optional
        the maximum frequency limit of the integral, defaults to
        innermost stable circular orbit (ISCO) frequency

    horizon : `bool`, optional
        if `True`, return the maximal 'horizon' sensitive distance, otherwise
        return the angle-averaged range, default: `False`

    Returns
    -------
    range : `~astropy.units.Quantity`
        the calculated inspiral range [Mpc]

    Examples
    --------
    Grab some data for LIGO-Livingston around GW150914 and generate a PSD

    >>> from gwpy.timeseries import TimeSeries
    >>> hoft = TimeSeries.fetch_open_data('H1', 1126259446, 1126259478)
    >>> hoff = hoft.psd(fftlength=4)

    Now we can calculate the :func:`inspiral_range`:

    >>> from gwpy.astro import inspiral_range
    >>> r = inspiral_range(hoff, fmin=30)
    >>> print(r)
    70.4612102889 Mpc
    """
    # normalise masses to SI units so the ISCO formula below is dimensionless
    mass1 = units.Quantity(mass1, 'solMass').to('kg')
    mass2 = units.Quantity(mass2, 'solMass').to('kg')
    mtotal = mass1 + mass2

    # compute ISCO (innermost stable circular orbit) frequency
    fisco = (constants.c ** 3 / (constants.G * 6**1.5 * pi * mtotal)).to('Hz')

    # format frequency limits; the integral is physically meaningless
    # above the ISCO, so cap fmax there
    fmax = units.Quantity(fmax or fisco, 'Hz')
    if fmax > fisco:
        warnings.warn("Upper frequency bound greater than %s-%s ISCO "
                      "frequency of %s, using ISCO" % (mass1, mass2, fisco))
        fmax = fisco
    if fmin is None:
        fmin = psd.df  # avoid using 0 as lower limit
    fmin = units.Quantity(fmin, 'Hz')

    # integrate the range integrand over the restricted frequency band
    f = psd.frequencies.to('Hz')
    condition = (f >= fmin) & (f < fmax)
    integrand = inspiral_range_psd(psd[condition], snr=snr, mass1=mass1,
                                   mass2=mass2, horizon=horizon)
    result = units.Quantity(
        integrate.trapz(integrand.value, f.value[condition]),
        unit=integrand.unit * units.Hertz)

    # the integral yields range squared; take the root and convert to Mpc
    return (result ** (1/2.)).to('Mpc')
|
Calculate the inspiral sensitive distance from a GW strain PSD
The method returns the distance (in megaparsecs) to which an compact
binary inspiral with the given component masses would be detectable
given the instrumental PSD. The calculation is as defined in:
https://dcc.ligo.org/LIGO-T030276/public
Parameters
----------
psd : `~gwpy.frequencyseries.FrequencySeries`
the instrumental power-spectral-density data
snr : `float`, optional
the signal-to-noise ratio for which to calculate range,
default: `8`
mass1 : `float`, `~astropy.units.Quantity`, optional
the mass (`float` assumed in solar masses) of the first binary
component, default: `1.4`
mass2 : `float`, `~astropy.units.Quantity`, optional
the mass (`float` assumed in solar masses) of the second binary
component, default: `1.4`
fmin : `float`, optional
the lower frequency cut-off of the integral, default: `psd.df`
fmax : `float`, optional
the maximum frequency limit of the integral, defaults to
innermost stable circular orbit (ISCO) frequency
horizon : `bool`, optional
if `True`, return the maximal 'horizon' sensitive distance, otherwise
return the angle-averaged range, default: `False`
Returns
-------
range : `~astropy.units.Quantity`
the calculated inspiral range [Mpc]
Examples
--------
Grab some data for LIGO-Livingston around GW150914 and generate a PSD
>>> from gwpy.timeseries import TimeSeries
>>> hoft = TimeSeries.fetch_open_data('H1', 1126259446, 1126259478)
>>> hoff = hoft.psd(fftlength=4)
Now we can calculate the :func:`inspiral_range`:
>>> from gwpy.astro import inspiral_range
>>> r = inspiral_range(hoff, fmin=30)
>>> print(r)
70.4612102889 Mpc
|
def do_stack(self,args):
    """Go to the specified stack. stack -h for detailed help.

    The argument may be either a numeric index into the stack list or a
    stack name.
    """
    parser = CommandArgumentParser("stack")
    parser.add_argument(dest='stack',help='stack index or name');
    args = vars(parser.parse_args(args))
    print "loading stack {}".format(args['stack'])
    # Try the numeric-index interpretation first (EAFP); fall back to a
    # name lookup when the argument is not an integer.
    try:
        index = int(args['stack'])
        stackSummary = self.wrappedStack['resourcesByTypeIndex']['AWS::CloudFormation::Stack'][index]
    except ValueError:
        stackSummary = self.wrappedStack['resourcesByTypeName']['AWS::CloudFormation::Stack'][args['stack']]
    self.stackResource(stackSummary.stack_name,stackSummary.logical_id)
|
Go to the specified stack. stack -h for detailed help.
|
def all_instr(self, start, end, instr, target=None, include_beyond_target=False):
    """
    Find all `instr` in the block from start to end.

    `instr` is any Python opcode or a list of opcodes.
    If `instr` is an opcode with a target (like a jump), a target
    destination can be specified which must match precisely.

    Return a list with indexes to them or [] if none found.
    """
    code = self.code
    assert start >= 0 and end <= len(code)

    # Normalize a single opcode into a list so the membership test below
    # works.  Only TypeError is expected here (the original bare `except`
    # could mask unrelated errors).
    try:
        None in instr
    except TypeError:
        instr = [instr]

    result = []
    extended_arg = 0
    for offset in self.op_range(start, end):
        op = code[offset]

        if op == self.opc.EXTENDED_ARG:
            # Accumulate the extended argument; it applies to the next
            # real opcode, so keep it across the `continue`.
            arg = code2num(code, offset + 1) | extended_arg
            extended_arg = extended_arg_val(self.opc, arg)
            continue

        if op in instr:
            if target is None:
                result.append(offset)
            else:
                t = self.get_target(offset, extended_arg)
                if include_beyond_target and t >= target:
                    result.append(offset)
                elif t == target:
                    result.append(offset)

        # Any non-EXTENDED_ARG opcode consumes the accumulated argument.
        extended_arg = 0
    return result
|
Find all `instr` in the block from start to end.
`instr` is any Python opcode or a list of opcodes
If `instr` is an opcode with a target (like a jump), a target
destination can be specified which must match precisely.
Return a list with indexes to them or [] if none found.
|
def update_config(self, d):
    """
    Updates the config object from a plain dict (e.g. parsed config file).

    :param d: dict mapping config attribute names to new values; keys that
        are not existing attributes of this config are ignored.
    """
    for key, value in d.items():
        if hasattr(self, key):
            if key == "requirements":
                # Requirement entries may be plain path strings or
                # single-entry dicts {path: {pin, compile, update}}.
                items, value = value, []
                for item in items:
                    if isinstance(item, basestring):
                        req = RequirementConfig(path=item)
                    elif isinstance(item, dict):
                        # NOTE: popitem() mutates the entry; it is assumed
                        # to contain exactly one path -> options pair.
                        path, item = item.popitem()
                        req = RequirementConfig(
                            path=path,
                            pin=item.get("pin", None),
                            compile=item.get("compile", False),
                            update=item.get("update", Config.UPDATE_ALL)
                        )
                    value.append(req)
                    # add constraint requirement files to config
                    if req.compile:
                        for spec in req.compile.specs:
                            value.append(RequirementConfig(path=spec, pin=False))
            elif key == "assignees":
                # assignees can be a string or a list. If it's a string, convert it to a list
                # to make things consistent
                if isinstance(value, basestring):
                    value = [value, ]
            elif key == 'gitlab':
                value = GitlabConfig(**value)
            elif key == 'pr_prefix':
                # make sure that pr prefixes don't contain a PIPE
                if "|" in value:
                    continue
            # cast ints and floats to str
            if isinstance(value, (int, float)) and not isinstance(value, bool):
                value = str(value)
            setattr(self, key, value)
|
Updates the config object.
:param d: dict
|
def list_all_zip_codes_geo_zones(cls, **kwargs):
    """List ZipCodesGeoZones

    Return a list of ZipCodesGeoZones.
    By default this performs a synchronous HTTP request; pass async=True
    to receive the request thread instead.

    >>> thread = api.list_all_zip_codes_geo_zones(async=True)
    >>> result = thread.get()

    :param async bool
    :param int page: page number
    :param int size: page size
    :param str sort: page order
    :return: page[ZipCodesGeoZone]
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async'):
        # Asynchronous: hand back the request thread directly.
        return cls._list_all_zip_codes_geo_zones_with_http_info(**kwargs)
    data = cls._list_all_zip_codes_geo_zones_with_http_info(**kwargs)
    return data
|
List ZipCodesGeoZones
Return a list of ZipCodesGeoZones
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.list_all_zip_codes_geo_zones(async=True)
>>> result = thread.get()
:param async bool
:param int page: page number
:param int size: page size
:param str sort: page order
:return: page[ZipCodesGeoZone]
If the method is called asynchronously,
returns the request thread.
|
def _register_elements(self, elements):
    """Create the abstract base model shared by all backend metadata models
    from the elements declared on the metadata class.
    """
    self.elements = elements

    for name, element in elements.items():
        element.contribute_to_class(self.metadata, name)

    # Build the common Django fields for every editable element.
    fields = {}
    for name, element in elements.items():
        if not element.editable:
            continue
        field = element.get_field()
        # Fall back to the bulk help text when the field defines none.
        if not field.help_text and name in self.bulk_help_text:
            field.help_text = self.bulk_help_text[name]
        fields[name] = field

    # 0. Abstract base model with common fields
    parent_meta = type('Meta', (), self.original_meta)

    class BaseMeta(parent_meta):
        abstract = True
        app_label = 'seo'

    fields['Meta'] = BaseMeta
    # Do we need this?
    fields['__module__'] = __name__
    self.MetadataBaseModel = type('%sBase' % self.name, (models.Model,), fields)
|
Takes elements from the metadata class and creates a base model for all backend models .
|
def tlog_inv(y, th=1, r=_display_max, d=_l_mmax):
    """
    Inverse truncated log10 transform.

    Parameters
    ----------
    y : num | num iterable
        values to be transformed.
    th : num
        Inverse values below th are transformed to th. Must be positive.
    r : num (default = 10**4)
        maximal transformed value.
    d : num (default = log10(2**18))
        log10 of maximal possible measured value, so that
        tlog_inv(r) = 10**d.

    Returns
    -------
    Array of transformed values.
    """
    if th <= 0:
        raise ValueError('Threshold value must be positive. %s given.' % th)
    result = 10 ** (y * 1. * d / r)
    # Clamp values below the threshold.  Scalars do not support boolean
    # indexing, so fall back to a plain comparison for them.
    try:
        result[result < th] = th
    except TypeError:
        if result < th:
            result = th
    return result
|
Inverse truncated log10 transform.
Values
Parameters
----------
y : num | num iterable
values to be transformed.
th : num
Inverse values below th are transformed to th.
Must be positive.
r : num (default = 10**4)
maximal transformed value.
d : num (default = log10(2**18))
log10 of maximal possible measured value.
tlog_inv(r) = 10**d
Returns
-------
Array of transformed values.
|
def adjustWPPointer(self):
    '''Update the position and orientation of the waypoint pointer.'''
    self.headingWPText.set_size(self.fontSize)
    # Rotate the pointer so it indicates the waypoint bearing relative to
    # the current heading.
    rotation = mpl.transforms.Affine2D().rotate_deg_around(
        0.0, 0.0, -self.wpBearing + self.heading) + self.axes.transData
    self.headingWPText.set_transform(rotation)
    angle = self.wpBearing - self.heading
    if angle < 0:
        angle += 360
    # Flip the label when the pointer is on the lower half so the text
    # stays readable.
    headRot = angle - 180 if (90 < angle < 270) else angle
    self.headingWPText.set_rotation(-headRot)
    self.headingWPTri.set_transform(rotation)
    self.headingWPText.set_text('%.f' % (angle))
|
Adjust the position and orientation of
the waypoint pointer.
|
def get_summarizer(self, name):
    '''
    Return the summarize function for *name*, importing it on demand.
    '''
    if name not in self.summarizers:
        # Import lazily so unused backends carry no import cost.
        if name == 'lexrank':
            from . import lexrank
            self.summarizers[name] = lexrank.summarize
        elif name == 'mcp':
            from . import mcp_summ
            self.summarizers[name] = mcp_summ.summarize
    return self.summarizers[name]
|
import summarizers on-demand
|
def _get_object_from_python_path(python_path):
"""Method that will fetch a Marshmallow schema from a path to it.
Args:
python_path (str): The string path to the Marshmallow schema.
Returns:
marshmallow.Schema: The schema matching the provided path.
Raises:
TypeError: This is raised if the specified object isn't
a Marshmallow schema.
"""
# Dissect the path
python_path = python_path.split('.')
module_path = python_path[:-1]
object_class = python_path[-1]
if isinstance(module_path, list):
module_path = '.'.join(module_path)
# Grab the object
module = import_module(module_path)
schema = getattr(module, object_class)
if isclass(schema):
schema = schema()
return schema
|
Method that will fetch a Marshmallow schema from a path to it.
Args:
python_path (str): The string path to the Marshmallow schema.
Returns:
marshmallow.Schema: The schema matching the provided path.
Raises:
TypeError: This is raised if the specified object isn't
a Marshmallow schema.
|
def _Reg2Py(data, size, data_type):
    """Converts a Windows Registry value to the corresponding Python data type."""
    if data_type == winreg.REG_DWORD:
        if size == 0:
            return 0
        # DWORD is an unsigned 32-bit integer, see:
        # https://docs.microsoft.com/en-us/openspecs/windows_protocols/ms-dtyp/262627d8-3418-4627-9218-4ffe110850b2
        return ctypes.cast(data, ctypes.POINTER(ctypes.c_uint32)).contents.value
    if data_type in (winreg.REG_SZ, winreg.REG_EXPAND_SZ):
        # Size is in bytes; wide characters are two bytes each.
        return ctypes.wstring_at(data, size // 2).rstrip(u"\x00")
    if data_type == winreg.REG_MULTI_SZ:
        # NUL-separated list of strings, with a trailing terminator.
        return ctypes.wstring_at(data, size // 2).rstrip(u"\x00").split(u"\x00")
    # Anything else is returned as raw bytes (None when empty).
    if size == 0:
        return None
    return ctypes.string_at(data, size)
|
Converts a Windows Registry value to the corresponding Python data type.
|
def get_instance(self, payload):
    """
    Build an instance of ReservationInstance

    :param dict payload: Payload response from the API

    :returns: twilio.rest.taskrouter.v1.workspace.task.reservation.ReservationInstance
    :rtype: twilio.rest.taskrouter.v1.workspace.task.reservation.ReservationInstance
    """
    solution = self._solution
    return ReservationInstance(
        self._version,
        payload,
        workspace_sid=solution['workspace_sid'],
        task_sid=solution['task_sid'],
    )
|
Build an instance of ReservationInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.taskrouter.v1.workspace.task.reservation.ReservationInstance
:rtype: twilio.rest.taskrouter.v1.workspace.task.reservation.ReservationInstance
|
def fit(self, X):
    """Compute the Robust Shared Response Model

    Parameters
    ----------
    X : list of 2D arrays, element i has shape=[voxels_i, timepoints]
        Each element in the list contains the fMRI data of one subject.
    """
    logger.info('Starting RSRM')

    # The regularization parameter must be strictly positive.
    if self.lam <= 0.0:
        raise ValueError("Gamma parameter should be positive.")

    # At least two subjects are needed to estimate a shared response.
    if len(X) <= 1:
        raise ValueError("There are not enough subjects in the input "
                         "data to train the model.")

    # Enough timepoints are required to estimate the requested features.
    if X[0].shape[1] < self.features:
        raise ValueError(
            "There are not enough timepoints to train the model with "
            "{0:d} features.".format(self.features))

    # Every subject must contain finite data and the same number of TRs.
    number_trs = X[0].shape[1]
    for subject_data in X:
        assert_all_finite(subject_data)
        if subject_data.shape[1] != number_trs:
            raise ValueError("Different number of alignment timepoints "
                             "between subjects.")

    # Fresh random state so training is reproducible for a given seed.
    self.random_state_ = np.random.RandomState(self.rand_seed)

    # Run RSRM
    self.w_, self.r_, self.s_ = self._rsrm(X)
    return self
|
Compute the Robust Shared Response Model
Parameters
----------
X : list of 2D arrays, element i has shape=[voxels_i, timepoints]
Each element in the list contains the fMRI data of one subject.
|
def read_saved_screenshot_to_array(self, screen_id, bitmap_format):
    """Screenshot in requested format is retrieved to an array of bytes.

    in screen_id of type int
        Saved guest screen to read from.

    in bitmap_format of type :class:`BitmapFormat`
        The requested format.

    out width of type int
        Image width.

    out height of type int
        Image height.

    return data of type str
        Array with resulting image data.
    """
    # Validate argument types before crossing the API boundary.
    if not isinstance(screen_id, baseinteger):
        raise TypeError("screen_id can only be an instance of type baseinteger")
    if not isinstance(bitmap_format, BitmapFormat):
        raise TypeError("bitmap_format can only be an instance of type BitmapFormat")
    data, width, height = self._call(
        "readSavedScreenshotToArray", in_p=[screen_id, bitmap_format])
    return (data, width, height)
|
Screenshot in requested format is retrieved to an array of bytes.
in screen_id of type int
Saved guest screen to read from.
in bitmap_format of type :class:`BitmapFormat`
The requested format.
out width of type int
Image width.
out height of type int
Image height.
return data of type str
Array with resulting image data.
|
def modulate(data):
    '''
    Generate Bell 202 AFSK samples for the given symbol generator

    Consumes raw wire symbols and produces the corresponding AFSK samples.
    '''
    seconds_per_sample = 1.0 / audiogen.sampler.FRAME_RATE
    phase, seconds, bits = 0, 0, 0

    # Symbol clock boundaries (in seconds) and the tone for each symbol.
    clock = (symbol_index / BAUD_RATE for symbol_index in itertools.count(1))
    tones = (MARK_HZ if bit else SPACE_HZ for bit in data)

    for boundary, frequency in itertools.izip(clock, tones):
        # The tone frequency fixes how far the phase advances each frame.
        phase_change_per_sample = TWO_PI / (audiogen.sampler.FRAME_RATE / frequency)

        # Emit samples until the next clock boundary, keeping the phase
        # continuous across symbol transitions.
        while seconds < boundary:
            yield math.sin(phase)

            seconds += seconds_per_sample
            phase += phase_change_per_sample

            if phase > TWO_PI:
                phase -= TWO_PI

        bits += 1
        logger.debug("bits = %d, time = %.7f ms, expected time = %.7f ms, error = %.7f ms, baud rate = %.6f Hz" \
            % (bits, 1000 * seconds, 1000 * bits / BAUD_RATE, 1000 * (seconds - bits / BAUD_RATE), bits / seconds))
|
Generate Bell 202 AFSK samples for the given symbol generator
Consumes raw wire symbols and produces the corresponding AFSK samples.
|
def assemble(
    iterable, patterns=None, minimum_items=2, case_sensitive=True,
    assume_padded_when_ambiguous=False
):
    '''Assemble items in *iterable* into discrete collections.

    *patterns* may be specified as a list of regular expressions to limit
    the returned collection possibilities. Use this when interested in
    collections that only match specific patterns. Each pattern must contain
    the expression from :py:data:`DIGITS_PATTERN` exactly once.

    A selection of common expressions are available in :py:data:`PATTERNS`.

    .. note::

        If a pattern is supplied as a string it will be automatically compiled
        to a :py:class:`re.RegexObject` instance for convenience.

    When *patterns* is not specified, collections are formed by examining all
    possible groupings of the items in *iterable* based around common numerical
    components.

    *minimum_items* dictates the minimum number of items a collection must have
    in order to be included in the result. The default is 2, filtering out
    single item collections.

    If *case_sensitive* is False, then items will be treated as part of the same
    collection when they only differ in casing. To avoid ambiguity, the
    resulting collection will always be lowercase. For example, "item.0001.dpx"
    and "Item.0002.dpx" would be part of the same collection, "item.%04d.dpx".

    .. note::

        Any compiled *patterns* will also respect the set case sensitivity.

    For certain collections it may be ambiguous whether they are padded or not.
    For example, 1000-1010 can be considered either an unpadded collection or a
    four padded collection. By default, Clique is conservative and assumes that
    the collection is unpadded. To change this behaviour, set
    *assume_padded_when_ambiguous* to True and any ambiguous collection will have
    a relevant padding set.

    .. note::

        *assume_padded_when_ambiguous* has no effect on collections that are
        unambiguous. For example, 1-100 will always be considered unpadded
        regardless of the *assume_padded_when_ambiguous* setting.

    Return tuple of two lists (collections, remainder) where 'collections' is a
    list of assembled :py:class:`~clique.collection.Collection` instances and
    'remainder' is a list of items that did not belong to any collection.
    '''
    collection_map = defaultdict(set)
    collections = []
    remainder = []

    # Compile patterns.
    flags = 0
    if not case_sensitive:
        flags |= re.IGNORECASE

    compiled_patterns = []

    if patterns is not None:
        # An explicitly empty pattern list means nothing can match.
        if not patterns:
            return collections, list(iterable)

        for pattern in patterns:
            if isinstance(pattern, basestring):
                compiled_patterns.append(re.compile(pattern, flags=flags))
            else:
                compiled_patterns.append(pattern)

    else:
        compiled_patterns.append(re.compile(DIGITS_PATTERN, flags=flags))

    # Process iterable.  Each numeric match buckets the item under a
    # (head, tail, padding) key; an item with no match at all goes straight
    # to the remainder.
    for item in iterable:
        matched = False

        for pattern in compiled_patterns:
            for match in pattern.finditer(item):
                index = match.group('index')

                head = item[:match.start('index')]
                tail = item[match.end('index'):]

                if not case_sensitive:
                    head = head.lower()
                    tail = tail.lower()

                padding = match.group('padding')
                if padding:
                    padding = len(index)
                else:
                    padding = 0

                key = (head, tail, padding)
                collection_map[key].add(int(index))
                matched = True

        if not matched:
            remainder.append(item)

    # Form collections.
    merge_candidates = []
    for (head, tail, padding), indexes in collection_map.items():
        collection = Collection(head, tail, padding, indexes)
        collections.append(collection)

        if collection.padding == 0:
            merge_candidates.append(collection)

    # Merge together collections that align on padding boundaries. For example,
    # 0998-0999 and 1000-1001 can be merged into 0998-1001. Note that only
    # indexes within the padding width limit are merged. If a collection is
    # entirely merged into another then it will not be included as a separate
    # collection in the results.
    fully_merged = []
    for collection in collections:
        if collection.padding == 0:
            continue

        for candidate in merge_candidates:
            if (
                candidate.head == collection.head and
                candidate.tail == collection.tail
            ):
                merged_index_count = 0
                for index in candidate.indexes:
                    if len(str(abs(index))) == collection.padding:
                        collection.indexes.add(index)
                        merged_index_count += 1

                if merged_index_count == len(candidate.indexes):
                    fully_merged.append(candidate)

    # Filter out fully merged collections.
    collections = [collection for collection in collections
                   if collection not in fully_merged]

    # Filter out collections that do not have at least as many indexes as
    # minimum_items. In addition, add any members of a filtered collection,
    # which are not members of an unfiltered collection, to the remainder.
    filtered = []
    remainder_candidates = []
    for collection in collections:
        if len(collection.indexes) >= minimum_items:
            filtered.append(collection)
        else:
            for member in collection:
                remainder_candidates.append(member)

    for candidate in remainder_candidates:
        # Check if candidate has already been added to remainder to avoid
        # duplicate entries.
        if candidate in remainder:
            continue

        has_membership = False

        for collection in filtered:
            if candidate in collection:
                has_membership = True
                break

        if not has_membership:
            remainder.append(candidate)

    # Set padding for all ambiguous collections according to the
    # assume_padded_when_ambiguous setting.
    if assume_padded_when_ambiguous:
        for collection in filtered:
            if (
                not collection.padding and collection.indexes
            ):
                indexes = list(collection.indexes)
                first_index_width = len(str(indexes[0]))
                last_index_width = len(str(indexes[-1]))
                # Ambiguous only when first and last index have the same
                # width (e.g. 1000-1010); 1-100 stays unpadded.
                if first_index_width == last_index_width:
                    collection.padding = first_index_width

    return filtered, remainder
|
Assemble items in *iterable* into discrete collections.
*patterns* may be specified as a list of regular expressions to limit
the returned collection possibilities. Use this when interested in
collections that only match specific patterns. Each pattern must contain
the expression from :py:data:`DIGITS_PATTERN` exactly once.
A selection of common expressions are available in :py:data:`PATTERNS`.
.. note::
If a pattern is supplied as a string it will be automatically compiled
to a :py:class:`re.RegexObject` instance for convenience.
When *patterns* is not specified, collections are formed by examining all
possible groupings of the items in *iterable* based around common numerical
components.
*minimum_items* dictates the minimum number of items a collection must have
in order to be included in the result. The default is 2, filtering out
single item collections.
If *case_sensitive* is False, then items will be treated as part of the same
collection when they only differ in casing. To avoid ambiguity, the
resulting collection will always be lowercase. For example, "item.0001.dpx"
and "Item.0002.dpx" would be part of the same collection, "item.%04d.dpx".
.. note::
Any compiled *patterns* will also respect the set case sensitivity.
For certain collections it may be ambiguous whether they are padded or not.
For example, 1000-1010 can be considered either an unpadded collection or a
four padded collection. By default, Clique is conservative and assumes that
the collection is unpadded. To change this behaviour, set
*assume_padded_when_ambiguous* to True and any ambiguous collection will have
a relevant padding set.
.. note::
*assume_padded_when_ambiguous* has no effect on collections that are
unambiguous. For example, 1-100 will always be considered unpadded
regardless of the *assume_padded_when_ambiguous* setting.
Return tuple of two lists (collections, remainder) where 'collections' is a
list of assembled :py:class:`~clique.collection.Collection` instances and
'remainder' is a list of items that did not belong to any collection.
|
def initialize_segment_register_x64(self, state, concrete_target):
    """
    Set the gs register in the angr to the value of the fs register in the concrete process

    :param state: state which will be modified
    :param concrete_target: concrete target that will be used to read the fs register
    :return: None
    """
    _l.debug("Synchronizing gs segment register")
    gs_value = self._read_gs_register_x64(concrete_target)
    state.regs.gs = gs_value
|
Set the gs register in the angr to the value of the fs register in the concrete process
:param state: state which will be modified
:param concrete_target: concrete target that will be used to read the fs register
:return: None
|
def get_rlzs_by_gsim(oqparam):
    """
    Return an ordered dictionary gsim -> [realization index]. Work for
    gsim logic trees with a single tectonic region type.
    """
    cinfo = source.CompositionInfo.fake(get_gsim_lt(oqparam))
    assoc = cinfo.get_rlzs_assoc()
    # Single tectonic region type: each realization maps the '*' gsim.
    return {
        gsim_by_trt['*']: [rlzi]
        for rlzi, gsim_by_trt in enumerate(assoc.gsim_by_trt)
    }
|
Return an ordered dictionary gsim -> [realization index]. Work for
gsim logic trees with a single tectonic region type.
|
def create(self, date_at=None, minutes=0, note='', user_id=None,
           project_id=None, service_id=None):
    """Create a new time entry.

    date_at - date of time entry. Format YYYY-MM-DD. default: today
    minutes - default: 0
    note - default: '' (empty string)
    user_id - default: actual user id (only admin users can edit this)
    project_id - default: None
    service_id - default: None
    """
    entry = {
        'date_at': date_at,
        'minutes': minutes,
        'note': note,
        'user_id': user_id,
        'project_id': project_id,
        'service_id': service_id,
    }
    # The API expects the attributes nested under a "time_entry" key.
    payload = {'time_entry': entry}
    # The original built ``partial(_path, self.adapter)`` and immediately
    # discarded it; that dead statement has been removed.
    path = _path(self.adapter)
    return self._post(path, **payload)
|
date_at - date of time entry. Format YYYY-MM-DD. default: today
minutes - default: 0
note - default: '' (empty string)
user_id - default: actual user id (only admin users can edit this)
project_id - default: None
service_id - default: None
|
def resource_get(self, resource_name):
    """
    Return resource info

    :param resource_name: Resource name as returned by resource_get_list()
    :type resource_name: str
    :return: Resource information (empty if not found)
        name: Resource name
        hash: Resource hash
        path: Path to resource
        checked: Last time information was updated
    :rtype: dict[str, str]
    """
    # Look the resource up under the lock; unknown names yield an empty
    # dict rather than raising.
    with self._resource_lock:
        if resource_name not in self._resources:
            return {}
        return self._resources[resource_name]
|
Return resource info
:param resource_name: Resource name as returned by resource_get_list()
:type resource_name: str
:return: Resource information (empty if not found)
name: Resource name
hash: Resource hash
path: Path to resource
checked: Last time information was updated
:rtype: dict[str, str]
|
def lambda_handler(event, context):
    """Main handler."""
    # Only admins may list events.
    auth = check_auth(event, role=["admin"])
    if not auth['success']:
        return auth

    table = boto3.resource("dynamodb").Table(os.environ['database'])
    items = table.scan().get('Items', list())

    return {
        'success': True,
        'events': list(items),
        'eventsCount': len(items),
    }
|
Main handler.
|
def user_segment(self):
    """
    | Comment: The user segment to which this section belongs, looked up
      from its id (None when no api or no id is set).
    """
    if not (self.api and self.user_segment_id):
        return None
    return self.api._get_user_segment(self.user_segment_id)
|
| Comment: The id of the user segment to which this section belongs
|
def parse_sv_frequencies(variant):
    """Parse custom structural-variant frequency annotations.

    These are very specific at the moment; this will hopefully get better
    over time when the field of structural variants is more developed.

    Args:
        variant(cyvcf2.Variant)

    Returns:
        sv_frequencies(dict)
    """
    frequency_keys = (
        'clingen_cgh_benignAF',
        'clingen_cgh_benign',
        'clingen_cgh_pathogenicAF',
        'clingen_cgh_pathogenic',
        'clingen_ngi',
        'clingen_ngiAF',
        'swegen',
        'swegenAF',
        'decipherAF',
        'decipher',
    )
    sv_frequencies = {}
    for key in frequency_keys:
        raw = variant.INFO.get(key, 0)
        # Keys containing 'AF' hold allele frequencies (floats); the rest
        # are observation counts (ints).  Zero values are omitted.
        value = float(raw) if 'AF' in key else int(raw)
        if value > 0:
            sv_frequencies[key] = value
    return sv_frequencies
|
Parsing of some custom sv frequencies
These are very specific at the moment, this will hopefully get better over time when the
field of structural variants is more developed.
Args:
variant(cyvcf2.Variant)
Returns:
sv_frequencies(dict)
|
def iter_issue_events(self, number=-1, etag=None):
    """Iterate over issue events on this repository.

    :param int number: (optional), number of events to return. Default: -1
        returns all available events
    :param str etag: (optional), ETag from a previous request to the same
        endpoint
    :returns: generator of
        :class:`IssueEvent <github3.issues.event.IssueEvent>`\ s
    """
    count = int(number)
    events_url = self._build_url('issues', 'events', base_url=self._api)
    return self._iter(count, events_url, IssueEvent, etag=etag)
|
Iterates over issue events on this repository.
:param int number: (optional), number of events to return. Default: -1
returns all available events
:param str etag: (optional), ETag from a previous request to the same
endpoint
:returns: generator of
:class:`IssueEvent <github3.issues.event.IssueEvent>`\ s
|
def usage(ecode, msg=''):
    """
    Print the module docstring (and an optional message) to stderr,
    then exit with the given status code.

    :param ecode: process exit status passed to ``sys.exit``.
    :param msg: optional extra message printed after the usage text.
    """
    # sys.stderr.write behaves identically on Python 2 and 3, unlike the
    # old ``print >> sys.stderr`` statement, which is a SyntaxError on 3.
    sys.stderr.write('%s\n' % (__doc__,))
    if msg:
        sys.stderr.write('%s\n' % (msg,))
    sys.exit(ecode)
|
Print usage and msg and exit with given code.
|
def set_coords(self, names, inplace=None):
    """Given names of one or more variables, set them as coordinates

    Parameters
    ----------
    names : str or list of str
        Name(s) of variables in this dataset to convert into coordinates.
    inplace : bool, optional
        If True, modify this dataset inplace. Otherwise, create a new
        object.

    Returns
    -------
    Dataset

    See also
    --------
    Dataset.swap_dims
    """
    # TODO: allow inserting new coordinates with this method, like
    # DataFrame.set_index?
    # nb. check in self._variables, not self.data_vars to insure that the
    # operation is idempotent
    inplace = _check_inplace(inplace)
    # Accept a bare string as shorthand for a single-element list.
    names = [names] if isinstance(names, str) else names
    self._assert_all_in_dataset(names)
    target = self if inplace else self.copy()
    target._coord_names.update(names)
    return target
|
Given names of one or more variables, set them as coordinates
Parameters
----------
names : str or list of str
Name(s) of variables in this dataset to convert into coordinates.
inplace : bool, optional
If True, modify this dataset inplace. Otherwise, create a new
object.
Returns
-------
Dataset
See also
--------
Dataset.swap_dims
|
def add_consumer_tag(self, tag):
    """Register a consumer tag on this channel (ignoring duplicates).

    :param str tag: Consumer tag.
    :raises AMQPChannelError: if the tag is not a string.
    :return:
    """
    if not is_string(tag):
        raise AMQPChannelError('consumer tag needs to be a string')
    known_tags = self._consumer_tags
    if tag not in known_tags:
        known_tags.append(tag)
|
Add a Consumer tag.
:param str tag: Consumer tag.
:return:
|
def file_or_filename(input):
    """
    Return a file-like object ready to be read from the beginning. `input` is either
    a filename (gz/bz2 also supported) or a file-like object supporting seek.
    """
    if not isinstance(input, string_types):
        # already a file-like object: rewind so callers read from the start
        input.seek(0)
        yield input
    else:
        # a filename was given: open it (smart_open handles gz/bz2)
        yield smart_open(input)
|
Return a file-like object ready to be read from the beginning. `input` is either
a filename (gz/bz2 also supported) or a file-like object supporting seek.
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.