text stringlengths 89 104k | code_tokens list | avg_line_len float64 7.91 980 | score float64 0 630 |
|---|---|---|---|
def JP(cpu, target):
"""
Jumps short if parity.
:param cpu: current CPU.
:param target: destination operand.
"""
cpu.PC = Operators.ITEBV(cpu.address_bit_size, cpu.PF, target.read(), cpu.PC) | [
"def",
"JP",
"(",
"cpu",
",",
"target",
")",
":",
"cpu",
".",
"PC",
"=",
"Operators",
".",
"ITEBV",
"(",
"cpu",
".",
"address_bit_size",
",",
"cpu",
".",
"PF",
",",
"target",
".",
"read",
"(",
")",
",",
"cpu",
".",
"PC",
")"
] | 29 | 15.75 |
def create_file(self, path, fp, force=False, update=False):
"""Store a new file at `path` in this storage.
The contents of the file descriptor `fp` (opened in 'rb' mode)
will be uploaded to `path` which is the full path at
which to store the file.
To force overwrite of an existing file, set `force=True`.
To overwrite an existing file only if the files differ, set `update=True`
"""
if 'b' not in fp.mode:
raise ValueError("File has to be opened in binary mode.")
# all paths are assumed to be absolute
path = norm_remote_path(path)
directory, fname = os.path.split(path)
directories = directory.split(os.path.sep)
# navigate to the right parent object for our file
parent = self
for directory in directories:
# skip empty directory names
if directory:
parent = parent.create_folder(directory, exist_ok=True)
url = parent._new_file_url
# When uploading a large file (>a few MB) that already exists
# we sometimes get a ConnectionError instead of a status == 409.
connection_error = False
# peek at the file to check if it is an empty file which needs special
# handling in requests. If we pass a file like object to data that
# turns out to be of length zero then no file is created on the OSF.
# See: https://github.com/osfclient/osfclient/pull/135
if file_empty(fp):
response = self._put(url, params={'name': fname}, data=b'')
else:
try:
response = self._put(url, params={'name': fname}, data=fp)
except ConnectionError:
connection_error = True
if connection_error or response.status_code == 409:
if not force and not update:
# one-liner to get file size from file pointer from
# https://stackoverflow.com/a/283719/2680824
file_size_bytes = get_local_file_size(fp)
large_file_cutoff = 2**20 # 1 MB in bytes
if connection_error and file_size_bytes < large_file_cutoff:
msg = (
"There was a connection error which might mean {} " +
"already exists. Try again with the `--force` flag " +
"specified."
).format(path)
raise RuntimeError(msg)
else:
# note in case of connection error, we are making an inference here
raise FileExistsError(path)
else:
# find the upload URL for the file we are trying to update
for file_ in self.files:
if norm_remote_path(file_.path) == path:
if not force:
if checksum(path) == file_.hashes.get('md5'):
# If the hashes are equal and force is False,
# we're done here
break
# in the process of attempting to upload the file we
# moved through it -> reset read position to beginning
# of the file
fp.seek(0)
file_.update(fp)
break
else:
raise RuntimeError("Could not create a new file at "
"({}) nor update it.".format(path)) | [
"def",
"create_file",
"(",
"self",
",",
"path",
",",
"fp",
",",
"force",
"=",
"False",
",",
"update",
"=",
"False",
")",
":",
"if",
"'b'",
"not",
"in",
"fp",
".",
"mode",
":",
"raise",
"ValueError",
"(",
"\"File has to be opened in binary mode.\"",
")",
... | 45.282051 | 20.987179 |
def sort(self):
"""Sort the data so that x is monotonically increasing and contains
no duplicates.
"""
if 'wavelength' in self.rsr:
# Only one detector apparently:
self.rsr['wavelength'], self.rsr['response'] = \
sort_data(self.rsr['wavelength'], self.rsr['response'])
else:
for detector_name in self.rsr:
(self.rsr[detector_name]['wavelength'],
self.rsr[detector_name]['response']) = \
sort_data(self.rsr[detector_name]['wavelength'],
self.rsr[detector_name]['response']) | [
"def",
"sort",
"(",
"self",
")",
":",
"if",
"'wavelength'",
"in",
"self",
".",
"rsr",
":",
"# Only one detector apparently:",
"self",
".",
"rsr",
"[",
"'wavelength'",
"]",
",",
"self",
".",
"rsr",
"[",
"'response'",
"]",
"=",
"sort_data",
"(",
"self",
".... | 45.285714 | 15.428571 |
def namedspace(typename, required_fields=(), optional_fields=(), mutable_fields=(),
default_values=frozendict(), default_value_factories=frozendict(),
return_none=False):
"""Builds a new class that encapsulates a namespace and provides
various ways to access it.
The typename argument is required and is the name of the
namedspace class that will be generated.
The required_fields and optional_fields arguments can be a string
or sequence of strings and together specify the fields that
instances of the namedspace class have.
Values for the required fields must be provided somehow when the
instance is created. Values for optional fields may be provided
later, or maybe not at all.
If an optional field is queried before its value has been set,
an AttributeError will be raised. This behavior can be altered
to cause None to be returned instead by setting the return_none
keyword argument to True.
The mutable_fields argument specifies which fields will be mutable,
if any. By default, all fields are immutable and all instances are
hashable and can be used as dictionary keys. If any fields are set
as mutable, all instances are not hashable and cannot be used as
dictionary keys.
The default_values mapping provides simple default values for the
fields.
The default_value_factories mapping provides a more flexible, but
more complex, mechanism for providing default values. The value of
each item is a callable that takes a single argument, the
namedspace instance, and returns the default value for the field.
The default_values_factories mapping is only consulted if there
is no default value for the field in the default_values mapping.
Here is a simple example, using only the required fields argument:
>>> SimpleNS = namedspace("SimpleNS", ("id", "name", "description"))
>>> SimpleNS
<class 'namedspace.SimpleNS'>
There are built-in properties to access collections and iterators
associated with the namespace class.
>>> SimpleNS._field_names
('id', 'name', 'description')
>>> tuple(SimpleNS._field_names_iter)
('id', 'name', 'description')
Once the class has been created, it can be instantiated like any
other class. However, a value for all of the required fields must
be provided.
>>> simple_ns = SimpleNS(id=1, description="Simple Description")
Traceback (most recent call last):
<snip/>
ValueError: A value for field 'name' is required.
>>> simple_ns = SimpleNS(id=1, name="Simple Name", description="Simple Description")
>>> simple_ns
SimpleNS(id=1, name='Simple Name', description='Simple Description')
An instance of a namedspace class provides standard attribute
access to its fields.
>>> simple_ns.id
1
>>> simple_ns.name
'Simple Name'
>>> simple_ns.description
'Simple Description'
In addition to standard attribute access, instances of a namedspace
class implement a MutableMapping interface.
>>> 'id' in simple_ns
True
>>> for field_name in simple_ns:
... print field_name
id
name
description
>>> len(simple_ns)
3
>>> simple_ns["id"]
1
>>> simple_ns["name"]
'Simple Name'
>>> simple_ns["description"]
'Simple Description'
There are built-in properties to access collections and iterators
associated with the namespace.
The namespace encapsulated by a namedspace class is stored in an
OrderedDict, so order of the collections is the same as the order
that the fields were specified.
All of these properties use the standard "non-public" naming
convention in order to not pollute the public namespace.
>>> simple_ns._field_names
('id', 'name', 'description')
>>> tuple(simple_ns._field_names_iter)
('id', 'name', 'description')
>>> simple_ns._field_values
(1, 'Simple Name', 'Simple Description')
>>> tuple(simple_ns._field_values_iter)
(1, 'Simple Name', 'Simple Description')
>>> simple_ns._field_items
[('id', 1), ('name', 'Simple Name'), ('description', 'Simple Description')]
>>> list(simple_ns._field_items_iter)
[('id', 1), ('name', 'Simple Name'), ('description', 'Simple Description')]
>>> simple_ns._as_dict
OrderedDict([('id', 1), ('name', 'Simple Name'), ('description', 'Simple Description')])
Here is a more complex example, using most of the other arguments:
>>> from itertools import count
>>> ComplexNS = namedspace("ComplexNS", "id", optional_fields=("name", "description", "extra"),
... mutable_fields=("description", "extra"), default_values={"description": "None available"},
... default_value_factories={"id": lambda self, counter=count(start=1): counter.next(),
... "name": lambda self: "Name for id={id}".format(id=self.id)})
>>> complex_ns1 = ComplexNS()
>>> complex_ns1.id
1
The value of 1 was automatically assigned by the
default_value_factory for the 'id' field, in this case a lambda
closure that hooks up an instance of itertools.count.
>>> complex_ns1.name
'Name for id=1'
This value was also generated by a default value factory. In this
case, the factory for the 'name' attribute uses the value of the
'id' attribute to compute the default value.
>>> complex_ns1.description
'None available'
This value came from the default_values mapping.
The description field was set as a mutable field, which allows
it to be modified.
>>> complex_ns1.description = "Some fancy description"
>>> complex_ns1.description
'Some fancy description'
Its value can also be deleted.
>>> del complex_ns1.description
>>> complex_ns1.description
'None available'
Since its modified value was deleted, and it has a default value,
it has reverted to its default value.
The extra field is a valid field in this namedspace, but it has not
yet been assigned a value and does not have a default.
>>> complex_ns1.extra
Traceback (most recent call last):
<snip/>
AttributeError: "Field 'extra' does not yet exist in this ComplexNS namedspace instance."
Sometimes, having an exception raised if an optional field is
missing, and being forced to handle it, is annoying. A namedspace
class can be configured at creation time to return None instead of
raising exceptions for optional fields by setting the `return_none`
parameter to `True`. Here is a trivial example:
>>> QuietNS = namedspace("QuietNS", optional_fields=("might_be_none",), return_none=True)
>>> quiet_ns1 = QuietNS(might_be_none="Nope, not this time")
>>> quiet_ns1.might_be_none
'Nope, not this time'
>>> quiet_ns2 = QuietNS()
>>> quiet_ns2.might_be_none
>>>
Having the namedspace quietly return `None` makes sense in some
situations. But be careful. Understand the full implications of
this alternate behavior on the code that uses it. Subtle data-
dependent bugs can be introduced by this behavior, which is why it
is not enabled by default.
Now, back to our "complex" example.
Since the 'extra' field is one of the mutable fields, we can give it a value.
>>> complex_ns1.extra = "Lasts a long, long time"
>>> complex_ns1.extra
'Lasts a long, long time'
Only fields that have been declared as either required or optional
are allowed.
>>> complex_ns1.some_other_field = "some other value"
Traceback (most recent call last):
<snip/>
FieldNameError: "Field 'some_other_field' does not exist in ComplexNS namedspace."
Finally, to illustrate that our counter is working as it should, if
we instantiate another instance, our id field will get the next
counter value.
>>> complex_ns2 = ComplexNS()
>>> complex_ns2.id
2
A common use case for a namedspace class is as a base class for
another custom class that has additional members such as properties
and methods. This way, the custom class gets all of the namedspace
behavior through declarative configuration, instead of having
to re-define that behavior imperatively.
The following is an example where one of the required fields is
generated at instantiation time, and the values for the two
optional fields are calculated values provided by properties in
the subclass.
>>> from collections import Counter
>>> class Widget(namedspace("_Widget", ("mfg_code", "model_code", "serial_number"), optional_fields=("sku", "pk"),
... return_none=True)):
... _sn_map = Counter()
... def __init__(self, *args, **kwargs):
... sn_key = (kwargs["mfg_code"], kwargs["model_code"])
... self._sn_map[sn_key] += 1
... kwargs["serial_number"] = "{:010}".format(self._sn_map[sn_key])
... super(Widget, self).__init__(*args, **kwargs)
... @property
... def sku(self):
... return "{}_{}".format(self.mfg_code, self.model_code)
... @property
... def pk(self):
... return "{}_{}".format(self.sku, self.serial_number)
>>> widget1 = Widget(mfg_code="ACME", model_code="X-500")
>>> widget1
Widget(mfg_code='ACME', model_code='X-500', serial_number='0000000001', sku='ACME_X-500', pk='ACME_X-500_0000000001')
>>> widget1._as_dict
OrderedDict([('mfg_code', 'ACME'), ('model_code', 'X-500'), ('serial_number', '0000000001'), ('sku', 'ACME_X-500'), ('pk', 'ACME_X-500_0000000001')])
>>> widget2 = Widget(mfg_code="ACME", model_code="X-500")
>>> widget2
Widget(mfg_code='ACME', model_code='X-500', serial_number='0000000002', sku='ACME_X-500', pk='ACME_X-500_0000000002')
>>> widget2._as_dict
OrderedDict([('mfg_code', 'ACME'), ('model_code', 'X-500'), ('serial_number', '0000000002'), ('sku', 'ACME_X-500'), ('pk', 'ACME_X-500_0000000002')])
"""
# Initialize the list of arguments that will get put into the
# doc string of the generated class
arg_list_items = []
#
# Validate parameters
#
for arg_name in ("required_fields", "optional_fields", "mutable_fields"):
arg_value = locals()[arg_name]
if isinstance(arg_value, basestring):
arg_value = (arg_value,)
exec "{arg_name} = arg_value".format(arg_name=arg_name)
elif not isinstance(arg_value, Container):
raise ValueError("Value for argument '{arg_name}' must be a string or container of strings.".format(
arg_name=arg_name))
for field_name in arg_value:
if not isinstance(field_name, basestring):
raise ValueError("Items of container argument '{arg_name}' must be strings.".format(arg_name=arg_name))
if len(arg_value) != len(frozenset(arg_value)):
raise ValueError("Value for argument '{arg_name}' contains duplicate fields.".format(
arg_name=arg_name))
arg_list_items.append("{arg_name}={arg_value!r}".format(arg_name=arg_name, arg_value=tuple(arg_value)))
exec "{arg_name}_set = frozenset(arg_value)".format(arg_name=arg_name)
all_fields = tuple(required_fields + optional_fields)
if not all_fields:
raise ValueError("At least one required or optional field must be provided.")
all_fields_set = frozenset(all_fields)
for field_name in mutable_fields:
if field_name not in all_fields_set:
raise ValueError("Mutable field '{field_name}' is not a required or optional field.".format(
field_name=field_name))
for arg_name in ("default_values", "default_value_factories"):
arg_value = locals()[arg_name]
if not isinstance(arg_value, Mapping):
raise ValueError("Value for argument '{arg_name}' must be a mapping.".format(arg_name=arg_name))
default_field_names = frozenset(arg_value.iterkeys())
if not default_field_names.issubset(all_fields_set):
bad_default_field_names = default_field_names - all_fields_set
raise ValueError("Value for argument '{arg_name}' contains invalid field(s) '{field_names}'.".format(
arg_name=arg_name, field_names=", ".join(bad_default_field_names)))
arg_list_items.append("{arg_name}={arg_value!r}".format(arg_name=arg_name, arg_value=dict(arg_value)))
exec "{arg_name} = frozendict(arg_value)".format(arg_name=arg_name)
for field_name, factory in default_value_factories.iteritems():
if not callable(factory):
raise ValueError("Default value factory for '{field_name}' is not callable.".format(field_name=field_name))
# Fill-in the class template
class_definition = _class_template.format(
typename=typename,
arg_list=", ".join(arg_list_items),
)
# Execute the template string in a temporary namespace and support
# tracing utilities by setting a value for frame.f_globals['__name__']
namespace = dict(
__name__='namedspace_{typename}'.format(typename=typename),
all_fields=all_fields,
all_fields_set=all_fields_set,
required_fields_set=locals()["required_fields_set"],
mutable_fields_set=locals()["mutable_fields_set"],
default_values=default_values,
default_value_factories=default_value_factories,
Hashable=Hashable,
MutableMapping=MutableMapping,
OrderedDict=OrderedDict,
return_none=return_none,
NamedspaceMeta=NamedspaceMeta,
)
#
# Code from here down copied verbatim from namedtuple
#
try:
exec class_definition in namespace
except SyntaxError as e:
raise SyntaxError(e.message + ':\n' + class_definition)
result = namespace[typename]
# For pickling to work, the __module__ variable needs to be set to the frame
# where the named tuple is created. Bypass this step in enviroments where
# sys._getframe is not defined (Jython for example) or sys._getframe is not
# defined for arguments greater than 0 (IronPython).
try:
result.__module__ = _sys._getframe(1).f_globals.get('__name__', '__main__')
except (AttributeError, ValueError):
pass
return result | [
"def",
"namedspace",
"(",
"typename",
",",
"required_fields",
"=",
"(",
")",
",",
"optional_fields",
"=",
"(",
")",
",",
"mutable_fields",
"=",
"(",
")",
",",
"default_values",
"=",
"frozendict",
"(",
")",
",",
"default_value_factories",
"=",
"frozendict",
"... | 36.528646 | 27.390625 |
def _validate_dependencies_met():
"""
Verifies that PyOpenSSL's package-level dependencies have been met.
Throws `ImportError` if they are not met.
"""
# Method added in `cryptography==1.1`; not available in older versions
from cryptography.x509.extensions import Extensions
if getattr(Extensions, "get_extension_for_class", None) is None:
raise ImportError("'cryptography' module missing required functionality. "
"Try upgrading to v1.3.4 or newer.")
# pyOpenSSL 0.14 and above use cryptography for OpenSSL bindings. The _x509
# attribute is only present on those versions.
from OpenSSL.crypto import X509
x509 = X509()
if getattr(x509, "_x509", None) is None:
raise ImportError("'pyOpenSSL' module missing required functionality. "
"Try upgrading to v0.14 or newer.") | [
"def",
"_validate_dependencies_met",
"(",
")",
":",
"# Method added in `cryptography==1.1`; not available in older versions",
"from",
"cryptography",
".",
"x509",
".",
"extensions",
"import",
"Extensions",
"if",
"getattr",
"(",
"Extensions",
",",
"\"get_extension_for_class\"",
... | 48.333333 | 20.333333 |
def get_infobox(ptree, boxterm="box"):
"""
Returns parse tree template with title containing <boxterm> as dict:
<box> = {<name>: <value>, ...}
If simple transform fails, attempts more general assembly:
<box> = {'boxes': [{<title>: <parts>}, ...],
'count': <len(boxes)>}
"""
boxes = []
for item in lxml.etree.fromstring(ptree).xpath("//template"):
title = item.find('title').text
if title and boxterm in title:
box = template_to_dict(item)
if box:
return box
alt = template_to_dict_alt(item, title)
if alt:
boxes.append(alt)
if boxes:
return {'boxes': boxes, 'count': len(boxes)} | [
"def",
"get_infobox",
"(",
"ptree",
",",
"boxterm",
"=",
"\"box\"",
")",
":",
"boxes",
"=",
"[",
"]",
"for",
"item",
"in",
"lxml",
".",
"etree",
".",
"fromstring",
"(",
"ptree",
")",
".",
"xpath",
"(",
"\"//template\"",
")",
":",
"title",
"=",
"item"... | 26.814815 | 19.185185 |
def tree(path, load_path=None):
'''
Returns recursively the complete tree of a node
CLI Example:
.. code-block:: bash
salt '*' augeas.tree /files/etc/
path
The base of the recursive listing
.. versionadded:: 2016.3.0
load_path
A colon-spearated list of directories that modules should be searched
in. This is in addition to the standard load path and the directories
in AUGEAS_LENS_LIB.
'''
load_path = _check_load_paths(load_path)
aug = _Augeas(loadpath=load_path)
path = path.rstrip('/') + '/'
match_path = path
return dict([i for i in _recurmatch(match_path, aug)]) | [
"def",
"tree",
"(",
"path",
",",
"load_path",
"=",
"None",
")",
":",
"load_path",
"=",
"_check_load_paths",
"(",
"load_path",
")",
"aug",
"=",
"_Augeas",
"(",
"loadpath",
"=",
"load_path",
")",
"path",
"=",
"path",
".",
"rstrip",
"(",
"'/'",
")",
"+",
... | 23.777778 | 24.222222 |
def isempty(path):
"""Returns True if the given file or directory path is empty.
**Examples**:
::
auxly.filesys.isempty("foo.txt") # Works on files...
auxly.filesys.isempty("bar") # ...or directories!
"""
if op.isdir(path):
return [] == os.listdir(path)
elif op.isfile(path):
return 0 == os.stat(path).st_size
return None | [
"def",
"isempty",
"(",
"path",
")",
":",
"if",
"op",
".",
"isdir",
"(",
"path",
")",
":",
"return",
"[",
"]",
"==",
"os",
".",
"listdir",
"(",
"path",
")",
"elif",
"op",
".",
"isfile",
"(",
"path",
")",
":",
"return",
"0",
"==",
"os",
".",
"s... | 28.615385 | 16.923077 |
def __skeleton_difference(graph, image, boundary_term, spacing):
"""
A skeleton for the calculation of intensity difference based boundary terms.
Iterates over the images dimensions and generates for each an array of absolute
neighbouring voxel :math:`(p, q)` intensity differences :math:`|I_p, I_q|`. These are
then passed to the supplied function :math:`g(\cdot)` for for boundary term
computation. Finally the returned edge weights are added to the graph.
Formally for each edge :math:`(p, q)` of the image, their edge weight is computed as
.. math::
w(p,q) = g(|I_p - I_q|)
,where :math:`g(\cdot)` is the supplied boundary term function.
The boundary term function has to take an array of intensity differences as only
parameter and return an array of the same shape containing the edge weights. For the
implemented function the condition :math:`g(\cdot)\in(0, 1]` must hold true, i.e., it
has to be strictly positive with :math:`1` as the upper limit.
@note the underlying neighbourhood connectivity is 4 for 2D, 6 for 3D, etc.
@note This function is able to work with images of arbitrary dimensions, but was only
tested for 2D and 3D cases.
@param graph An initialized graph.GCGraph object
@type graph.GCGraph
@param image The image to compute on
@type image numpy.ndarray
@param boundary_term A function to compute the boundary term over an array of
absolute intensity differences
@type boundary_term function
@param spacing A sequence containing the slice spacing used for weighting the
computed neighbourhood weight value for different dimensions. If
False, no distance based weighting of the graph edges is performed.
@param spacing sequence | False
"""
def intensity_difference(neighbour_one, neighbour_two):
"""
Takes two voxel arrays constituting neighbours and computes the absolute
intensity differences.
"""
return scipy.absolute(neighbour_one - neighbour_two)
__skeleton_base(graph, image, boundary_term, intensity_difference, spacing) | [
"def",
"__skeleton_difference",
"(",
"graph",
",",
"image",
",",
"boundary_term",
",",
"spacing",
")",
":",
"def",
"intensity_difference",
"(",
"neighbour_one",
",",
"neighbour_two",
")",
":",
"\"\"\"\n Takes two voxel arrays constituting neighbours and computes the ab... | 46.468085 | 28.170213 |
def to_picard_basecalling_params(
self,
directory: Union[str, Path],
bam_prefix: Union[str, Path],
lanes: Union[int, List[int]],
) -> None:
"""Writes sample and library information to a set of files for a given
set of lanes.
**BARCODE PARAMETERS FILES**: Store information regarding the sample
index sequences, sample index names, and, optionally, the library name.
These files are used by Picard's `CollectIlluminaBasecallingMetrics`
and Picard's `ExtractIlluminaBarcodes`. The output tab-seperated files
are formatted as:
``<directory>/barcode_params.<lane>.txt``
**LIBRARY PARAMETERS FILES**: Store information regarding the sample
index sequences, sample index names, and optionally sample library and
descriptions. A path to the resulting demultiplexed BAM file is also
stored which is used by Picard's `IlluminaBasecallsToSam`. The output
tab-seperated files are formatted as:
``<directory>/library_params.<lane>.txt``
The format of the BAM file output paths in the library parameter files
are formatted as:
``<bam_prefix>/<Sample_Name>.<Sample_Library>/<Sample_Name>.<index><index2>.<lane>.bam``
Two files will be written to ``directory`` for all ``lanes`` specified.
If the path to ``directory`` does not exist, it will be created.
Args:
directory: File path to the directory to write the parameter files.
bam_prefix: Where the demultiplexed BAMs should be written.
lanes: The lanes to write basecalling parameters for.
"""
if len(self.samples) == 0:
raise ValueError('No samples in sample sheet')
if not (
isinstance(lanes, int)
or isinstance(lanes, (list, tuple))
and len(lanes) > 0
and all(isinstance(lane, int) for lane in lanes)
):
raise ValueError(f'Lanes must be an int or list of ints: {lanes}')
if len(set(len(sample.index or '') for sample in self.samples)) != 1:
raise ValueError('I7 indexes have differing lengths.')
if len(set(len(sample.index2 or '') for sample in self.samples)) != 1:
raise ValueError('I5 indexes have differing lengths.')
for attr in ('Sample_Name', 'Library_ID', 'index'):
if any(getattr(sample, attr) is None for sample in self.samples):
raise ValueError(
'Samples must have at least `Sample_Name`, '
'`Sample_Library`, and `index` attributes'
)
# Make lanes iterable if only an int was provided.
lanes = [lanes] if isinstance(lanes, int) else lanes
# Resolve path to basecalling parameter files.
prefix = Path(directory).expanduser().resolve()
prefix.mkdir(exist_ok=True, parents=True)
# Promote bam_prefix to Path object.
bam_prefix = Path(bam_prefix).expanduser().resolve()
# Both headers are one column larger if an ``index2`` attribute is
# present on all samples. Use list splatting to unpack the options.
barcode_header = [
*(
['barcode_sequence_1']
if not self.samples_have_index2
else ['barcode_sequence_1', 'barcode_sequence_2']
),
'barcode_name',
'library_name',
]
# TODO: Remove description if none is provided on all samples.
library_header = [
*(
['BARCODE_1']
if not self.samples_have_index2
else ['BARCODE_1', 'BARCODE_2']
),
'OUTPUT',
'SAMPLE_ALIAS',
'LIBRARY_NAME',
'DS',
]
for lane in lanes:
barcode_out = prefix / f'barcode_params.{lane}.txt'
library_out = prefix / f'library_params.{lane}.txt'
# Enter into a writing context for both library and barcode params.
with ExitStack() as stack:
barcode_writer = csv.writer(
stack.enter_context(barcode_out.open('w')), delimiter='\t'
)
library_writer = csv.writer(
stack.enter_context(library_out.open('w')), delimiter='\t'
)
barcode_writer.writerow(barcode_header)
library_writer.writerow(library_header)
for sample in self.samples:
# The long name of a sample is a combination of the sample
# ID and the sample library.
long_name = '.'.join(
[sample.Sample_Name, sample.Library_ID]
)
# The barcode name is all sample indexes concatenated.
barcode_name = sample.index + (sample.index2 or '')
library_name = sample.Library_ID or ''
# Assemble the path to the future BAM file.
bam_file = (
bam_prefix
/ long_name
/ f'{sample.Sample_Name}.{barcode_name}.{lane}.bam'
)
# Use list splatting to build the contents of the library
# and barcodes parameter files.
barcode_line = [
*(
[sample.index]
if not self.samples_have_index2
else [sample.index, sample.index2]
),
barcode_name,
library_name,
]
library_line = [
*(
[sample.index]
if not self.samples_have_index2
else [sample.index, sample.index2]
),
bam_file,
sample.Sample_Name,
sample.Library_ID,
sample.Description or '',
]
barcode_writer.writerow(map(str, barcode_line))
library_writer.writerow(map(str, library_line))
# Dempultiplexing relys on an umatched file so append that,
# but only to the library parameters file.
unmatched_file = bam_prefix / f'unmatched.{lane}.bam'
library_line = [
*(['N'] if not self.samples_have_index2 else ['N', 'N']),
unmatched_file,
'unmatched',
'unmatchedunmatched',
'',
]
library_writer.writerow(map(str, library_line)) | [
"def",
"to_picard_basecalling_params",
"(",
"self",
",",
"directory",
":",
"Union",
"[",
"str",
",",
"Path",
"]",
",",
"bam_prefix",
":",
"Union",
"[",
"str",
",",
"Path",
"]",
",",
"lanes",
":",
"Union",
"[",
"int",
",",
"List",
"[",
"int",
"]",
"]"... | 40.890909 | 22.266667 |
def send_to_cloudshark(self, id, seq, intf, inline=False): # pylint: disable=invalid-name,redefined-builtin
"""Send a capture to a CloudShark Appliance. Both
cloudshark_appliance_url and cloudshark_appliance_token must
be properly configured via system preferences.
:param id: Result ID as an int.
:param seq: TestResult sequence ID as an int.
:param intf: Interface name as string.
:param inline: (optional) Use inline version of capture file.
:return: :class:`captures.CloudShark <captures.CloudShark>` object
:rtype: captures.CloudShark
"""
schema = CloudSharkSchema()
resp = self.service.post(self._base(id, seq)+str(intf)+'/cloudshark/', params={'inline': inline})
return self.service.decode(schema, resp) | [
"def",
"send_to_cloudshark",
"(",
"self",
",",
"id",
",",
"seq",
",",
"intf",
",",
"inline",
"=",
"False",
")",
":",
"# pylint: disable=invalid-name,redefined-builtin",
"schema",
"=",
"CloudSharkSchema",
"(",
")",
"resp",
"=",
"self",
".",
"service",
".",
"pos... | 53.4 | 21 |
def put_http_meta(self, key, value):
"""
Add http related metadata.
:param str key: Currently supported keys are:
* url
* method
* user_agent
* client_ip
* status
* content_length
:param value: status and content_length are int and for other
supported keys string should be used.
"""
self._check_ended()
if value is None:
return
if key == http.STATUS:
if isinstance(value, string_types):
value = int(value)
self.apply_status_code(value)
if key in http.request_keys:
if 'request' not in self.http:
self.http['request'] = {}
self.http['request'][key] = value
elif key in http.response_keys:
if 'response' not in self.http:
self.http['response'] = {}
self.http['response'][key] = value
else:
log.warning("ignoring unsupported key %s in http meta.", key) | [
"def",
"put_http_meta",
"(",
"self",
",",
"key",
",",
"value",
")",
":",
"self",
".",
"_check_ended",
"(",
")",
"if",
"value",
"is",
"None",
":",
"return",
"if",
"key",
"==",
"http",
".",
"STATUS",
":",
"if",
"isinstance",
"(",
"value",
",",
"string_... | 30.5 | 14.323529 |
def format(self, tokensource, outfile):
"""
Format ``tokensource``, an iterable of ``(tokentype, tokenstring)``
tuples and write it into ``outfile``.
"""
if self.encoding:
# wrap the outfile in a StreamWriter
outfile = codecs.lookup(self.encoding)[3](outfile)
return self.format_unencoded(tokensource, outfile) | [
"def",
"format",
"(",
"self",
",",
"tokensource",
",",
"outfile",
")",
":",
"if",
"self",
".",
"encoding",
":",
"# wrap the outfile in a StreamWriter",
"outfile",
"=",
"codecs",
".",
"lookup",
"(",
"self",
".",
"encoding",
")",
"[",
"3",
"]",
"(",
"outfile... | 41.555556 | 11.555556 |
def filter_nremoved(self, filt=True, quiet=False):
"""
Report how many data are removed by the active filters.
"""
rminfo = {}
for n in self.subsets['All_Samples']:
s = self.data[n]
rminfo[n] = s.filt_nremoved(filt)
if not quiet:
maxL = max([len(s) for s in rminfo.keys()])
print('{string:{number}s}'.format(string='Sample ', number=maxL + 3) +
'{total:4s}'.format(total='tot') +
'{removed:4s}'.format(removed='flt') +
'{percent:4s}'.format(percent='%rm'))
for k, (ntot, nfilt, pcrm) in rminfo.items():
print('{string:{number}s}'.format(string=k, number=maxL + 3) +
'{total:4.0f}'.format(total=ntot) +
'{removed:4.0f}'.format(removed=nfilt) +
'{percent:4.0f}'.format(percent=pcrm))
return rminfo | [
"def",
"filter_nremoved",
"(",
"self",
",",
"filt",
"=",
"True",
",",
"quiet",
"=",
"False",
")",
":",
"rminfo",
"=",
"{",
"}",
"for",
"n",
"in",
"self",
".",
"subsets",
"[",
"'All_Samples'",
"]",
":",
"s",
"=",
"self",
".",
"data",
"[",
"n",
"]"... | 44.190476 | 17.52381 |
def Start(self, Minimized=False, Nosplash=False):
"""Starts Skype application.
:Parameters:
Minimized : bool
If True, Skype is started minimized in system tray.
Nosplash : bool
If True, no splash screen is displayed upon startup.
"""
self._Skype._Api.startup(Minimized, Nosplash) | [
"def",
"Start",
"(",
"self",
",",
"Minimized",
"=",
"False",
",",
"Nosplash",
"=",
"False",
")",
":",
"self",
".",
"_Skype",
".",
"_Api",
".",
"startup",
"(",
"Minimized",
",",
"Nosplash",
")"
] | 34.7 | 15.8 |
def main():
"""
Zebrafish:
1. Map ENSP to ZFIN Ids using Intermine
2. Map deprecated ENSP IDs to ensembl genes
by querying the ensembl database then use
intermine to resolve to gene IDs
Mouse: Map deprecated ENSP IDs to ensembl genes
by querying the ensembl database then use
intermine to resolve to MGI IDs
Fly: ENSP IDs appear as xrefs on translation IDs
Worm: Use UniProt Mapping file provided by String
"""
parser = argparse.ArgumentParser(usage=__doc__)
parser.add_argument('--config', '-c', required=True, help='JSON configuration file')
parser.add_argument('--out', '-o', required=False, help='output directory', default="./")
parser.add_argument('--use_cache', '-cached', action="store_true",
required=False, help='use cached files', default=False)
args = parser.parse_args()
# Hardcoded dir for raw files
out_path = Path(args.out)
raw_dir = out_path / "out"
raw_dir.mkdir(parents=True, exist_ok=True)
# Hardcoded unmapped file
VERSION = 'v10.5'
STRING_BASE = "http://string-db.org/download/" \
"protein.links.detailed.{}".format(VERSION)
config_file = open(args.config, 'r')
config = yaml.load(config_file)
config_file.close()
out_unmapped_file = out_path / "unmapped_ids.tsv"
unmapped_file = out_unmapped_file.open("w")
# Connect to ensembl
connection = connect_to_database(host=config['database']['host'],
username=config['database']['username'],
port=config['database']['port'])
cursor = connection.cursor()
# Process MGI eqs #
####################
taxon = config['taxa_specific']['mouse']['tax_id']
# IO
dump_file = raw_dir / '{}.protein.links.detailed.{}.txt.gz' \
.format(taxon, VERSION)
mouse_map_file = out_path / config['taxa_specific']['mouse']['output_file']
mouse_file = mouse_map_file.open('w')
path = '{}/{}.protein.links.detailed.{}.txt.gz' \
.format(STRING_BASE, taxon, VERSION)
if not args.use_cache:
download_file(path, dump_file)
ensembl = Ensembl("rdf_graph", True)
p2gene_map = ensembl.fetch_protein_gene_map(taxon)
fh = gzip.open(str(dump_file), 'rb')
df = pd.read_csv(fh, sep='\s+')
fh.close()
proteins = pd.unique(df[['protein1', 'protein2']].values.ravel())
logger.info("Processing {} proteins".format(len(proteins)))
for protein in proteins:
prot = protein.replace('{}.'.format(str(taxon)), '')
try:
ens_gene = p2gene_map[prot]
ens_curie = "ENSEMBL:{}".format(ens_gene)
mouse_file.write("{}\t{}\n".format(prot, ens_curie))
continue
except KeyError:
pass
ens_gene = get_deprecated_protein_gene_rel(
cursor, prot, config['taxa_specific']['mouse']['ensembl'],
config)
intermine_resp = query_mousemine(
config['taxa_specific']['mouse']['intermine'], ens_gene)
if intermine_resp.is_successful:
mouse_file.write("{}\t{}\n".format(prot, intermine_resp.gene_id))
else:
unmapped_file.write("{}\t{}\t{}\n".format(prot, ens_gene, taxon))
mouse_file.close()
# Process Fly eqs #
####################
taxon = config['taxa_specific']['fly']['tax_id']
# IO
dump_file = raw_dir / '{}.protein.links.detailed.{}.txt.gz' \
.format(taxon, VERSION)
fly_map_file = out_path / config['taxa_specific']['fly']['output_file']
fly_file = fly_map_file.open('w')
path = '{}/{}.protein.links.detailed.{}.txt.gz' \
.format(STRING_BASE, taxon, VERSION)
if not args.use_cache:
download_file(path, dump_file)
ensembl = Ensembl("rdf_graph", True)
p2gene_map = ensembl.fetch_protein_gene_map(taxon)
fh = gzip.open(str(dump_file), 'rb')
df = pd.read_csv(fh, sep='\s+')
fh.close()
proteins = pd.unique(df[['protein1', 'protein2']].values.ravel())
logger.info("Processing {} proteins".format(len(proteins)))
for protein in proteins:
prot = protein.replace('{}.'.format(str(taxon)), '')
try:
ens_gene = p2gene_map[prot]
ens_curie = "ENSEMBL:{}".format(ens_gene)
fly_file.write("{}\t{}\n".format(prot, ens_curie))
continue
except KeyError:
pass
ens_gene = get_xref_protein_gene_rel(
cursor, prot, config['taxa_specific']['fly']['ensembl'],
config, taxon)
if ens_gene is not None:
fly_file.write("{}\t{}\n".format(prot, "ENSEMBL:{}".format(ens_gene)))
else:
unmapped_file.write("{}\t{}\t{}\n".format(prot, '', taxon))
fly_file.close()
# Process Worm eqs #
####################
taxon = config['taxa_specific']['worm']['tax_id']
# IO
dump_file = raw_dir / '{}.protein.links.detailed.{}.txt.gz' \
.format(taxon, VERSION)
uniprot_file = raw_dir / config['taxa_specific']['worm']['uniprot_file']
worm_map_file = out_path / config['taxa_specific']['worm']['output_file']
worm_file = worm_map_file.open('w')
path = '{}/{}.protein.links.detailed.{}.txt.gz' \
.format(STRING_BASE, taxon, VERSION)
if not args.use_cache:
download_file(path, dump_file)
download_file(config['taxa_specific']['worm']['uniprot_mappings'],
uniprot_file)
ensembl = Ensembl("rdf_graph", True)
p2gene_map = ensembl.fetch_protein_gene_map(taxon)
uni2gene_map = ensembl.fetch_uniprot_gene_map(taxon)
fh = gzip.open(str(uniprot_file), 'rb')
df = pd.read_csv(fh, sep='\s+')
fh.close()
string_uniprot_map = {}
for index, row in df.iterrows():
uniprot_ac = row['uniprot_ac|uniprot_id'].split('|')[0]
string_uniprot_map[row['string_id']] = uniprot_ac
fh = gzip.open(str(dump_file), 'rb')
df = pd.read_csv(fh, sep='\s+')
fh.close()
proteins = pd.unique(df[['protein1', 'protein2']].values.ravel())
logger.info("Processing {} proteins".format(len(proteins)))
for protein in proteins:
prot = protein.replace('{}.'.format(str(taxon)), '')
try:
ens_gene = p2gene_map[prot]
ens_curie = "ENSEMBL:{}".format(ens_gene)
worm_file.write("{}\t{}\n".format(prot, ens_curie))
continue
except KeyError:
pass
try:
uniprot_ac = string_uniprot_map[prot]
ens_gene = uni2gene_map[uniprot_ac]
ens_curie = "ENSEMBL:{}".format(ens_gene)
worm_file.write("{}\t{}\n".format(prot, ens_curie))
continue
except KeyError:
pass
unmapped_file.write("{}\t{}\t{}\n".format(prot, '', taxon))
worm_file.close()
# Process ZFIN eqs #
####################
taxon = config['taxa_specific']['zebrafish']['tax_id']
# IO
dump_file = raw_dir / '{}.protein.links.detailed.{}.txt.gz' \
.format(taxon, VERSION)
zfin_map_file = out_path / config['taxa_specific']['zebrafish']['output_file']
zfin_file = zfin_map_file.open('w')
path = '{}/{}.protein.links.detailed.{}.txt.gz' \
.format(STRING_BASE, taxon, VERSION)
if not args.use_cache:
download_file(path, dump_file)
ensembl = Ensembl("rdf_graph", True)
p2gene_map = ensembl.fetch_protein_gene_map(taxon)
# in 3.6 gzip accepts Paths
fh = gzip.open(str(dump_file), 'rb')
df = pd.read_csv(fh, sep='\s+')
fh.close()
proteins = pd.unique(df[['protein1', 'protein2']].values.ravel())
logger.info("Processing {} proteins".format(len(proteins)))
for protein in proteins:
prot = protein.replace('{}.'.format(str(taxon)), '')
try:
ens_gene = p2gene_map[prot]
ens_curie = "ENSEMBL:{}".format(ens_gene)
zfin_file.write("{}\t{}\n".format(prot, ens_curie))
continue
except KeyError:
pass
intermine_resp = query_fishmine(
config['taxa_specific']['zebrafish']['intermine'], prot)
if intermine_resp.is_successful:
zfin_file.write("{}\t{}\n".format(prot, intermine_resp.gene_id))
continue
ens_gene = get_deprecated_protein_gene_rel(
cursor, prot, config['taxa_specific']['zebrafish']['ensembl'],
config)
intermine_resp = query_fishmine(
config['taxa_specific']['zebrafish']['intermine'], ens_gene)
if intermine_resp.is_successful:
zfin_file.write("{}\t{}\n".format(prot, intermine_resp.gene_id))
continue
intermine_resp = query_fishmine(
config['taxa_specific']['zebrafish']['intermine'],
ens_gene, "Pseudogene")
if intermine_resp.is_successful:
zfin_file.write("{}\t{}\n".format(prot, intermine_resp.gene_id))
else:
unmapped_file.write("{}\t{}\t{}\n".format(prot, ens_gene, taxon))
zfin_file.close()
unmapped_file.close()
connection.close()
logger.info("ID Map Finished") | [
"def",
"main",
"(",
")",
":",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"usage",
"=",
"__doc__",
")",
"parser",
".",
"add_argument",
"(",
"'--config'",
",",
"'-c'",
",",
"required",
"=",
"True",
",",
"help",
"=",
"'JSON configuration file'",
"... | 34.175573 | 20.954198 |
def shutdown(self, reason = ConnectionClosed()):
"""Shutdown the socket server.
The socket server will stop accepting incoming connections.
All connections will be dropped.
"""
if self._shutdown:
raise ShutdownError()
self.stop()
self._closing = True
for connection in self.connections:
connection.close()
self.connections = set()
self._shutdown = True
if isinstance(reason, ConnectionClosed):
logger.info("server shutdown")
else:
logger.warn("server shutdown, reason %s" % str(reason)) | [
"def",
"shutdown",
"(",
"self",
",",
"reason",
"=",
"ConnectionClosed",
"(",
")",
")",
":",
"if",
"self",
".",
"_shutdown",
":",
"raise",
"ShutdownError",
"(",
")",
"self",
".",
"stop",
"(",
")",
"self",
".",
"_closing",
"=",
"True",
"for",
"connection... | 28.590909 | 16.818182 |
def add_default_module_dir(self):
"""
Add directory to store built-in plugins to `module_dir` parameter.
Default directory to store plugins is `BLACKBIRD_INSTALL_DIR/plugins`.
:rtype: None
:return: None
"""
default_module_dir = os.path.join(
os.path.abspath(os.path.curdir),
'plugins'
)
module_dir_params = {
'module_dir': [default_module_dir]
}
if 'module_dir' in self.config['global']:
module_dir_params['module_dir'].append(
self.config['global']['module_dir']
)
self.config['global'].update(
module_dir_params
) | [
"def",
"add_default_module_dir",
"(",
"self",
")",
":",
"default_module_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"curdir",
")",
",",
"'plugins'",
")",
"module_dir_params",
"=",
"{",
... | 31.227273 | 16.590909 |
def string_asset(class_obj: type) -> type:
"""
Decorator to annotate the StringAsset class. Registers the decorated class
as the StringAsset known type.
"""
assert isinstance(class_obj, type), "class_obj is not a Class"
global _string_asset_resource_type
_string_asset_resource_type = class_obj
return class_obj | [
"def",
"string_asset",
"(",
"class_obj",
":",
"type",
")",
"->",
"type",
":",
"assert",
"isinstance",
"(",
"class_obj",
",",
"type",
")",
",",
"\"class_obj is not a Class\"",
"global",
"_string_asset_resource_type",
"_string_asset_resource_type",
"=",
"class_obj",
"re... | 37.222222 | 10.777778 |
def open_channel_with_funding(
self,
registry_address_hex,
token_address_hex,
peer_address_hex,
total_deposit,
settle_timeout=None,
):
""" Convenience method to open a channel.
Args:
registry_address_hex (str): hex encoded address of the registry for the channel.
token_address_hex (str): hex encoded address of the token for the channel.
peer_address_hex (str): hex encoded address of the channel peer.
total_deposit (int): amount of total funding for the channel.
settle_timeout (int): amount of blocks for the settle time (if None use app defaults).
Return:
netting_channel: the (newly opened) netting channel object.
"""
# Check, if peer is discoverable
registry_address = decode_hex(registry_address_hex)
peer_address = decode_hex(peer_address_hex)
token_address = decode_hex(token_address_hex)
try:
self._discovery.get(peer_address)
except KeyError:
print('Error: peer {} not found in discovery'.format(peer_address_hex))
return None
self._api.channel_open(
registry_address,
token_address,
peer_address,
settle_timeout=settle_timeout,
)
return self._api.set_total_channel_deposit(
registry_address,
token_address,
peer_address,
total_deposit,
) | [
"def",
"open_channel_with_funding",
"(",
"self",
",",
"registry_address_hex",
",",
"token_address_hex",
",",
"peer_address_hex",
",",
"total_deposit",
",",
"settle_timeout",
"=",
"None",
",",
")",
":",
"# Check, if peer is discoverable",
"registry_address",
"=",
"decode_h... | 35 | 21.27907 |
def patch_module_function(module, target, aspect, force_name=None, bag=BrokenBag, **options):
"""
Low-level patcher for one function from a specified module.
.. warning:: You should not use this directly.
:returns: An :obj:`aspectlib.Rollback` object.
"""
logdebug("patch_module_function (module=%s, target=%s, aspect=%s, force_name=%s, **options=%s",
module, target, aspect, force_name, options)
name = force_name or target.__name__
return patch_module(module, name, _checked_apply(aspect, target, module=module), original=target, **options) | [
"def",
"patch_module_function",
"(",
"module",
",",
"target",
",",
"aspect",
",",
"force_name",
"=",
"None",
",",
"bag",
"=",
"BrokenBag",
",",
"*",
"*",
"options",
")",
":",
"logdebug",
"(",
"\"patch_module_function (module=%s, target=%s, aspect=%s, force_name=%s, **... | 48.166667 | 27 |
def get_stdin_data():
""" Helper function that returns data send to stdin or False if nothing is send """
# STDIN can only be 3 different types of things ("modes")
# 1. An interactive terminal device (i.e. a TTY -> sys.stdin.isatty() or stat.S_ISCHR)
# 2. A (named) pipe (stat.S_ISFIFO)
# 3. A regular file (stat.S_ISREG)
# Technically, STDIN can also be other device type like a named unix socket (stat.S_ISSOCK), but we don't
# support that in gitlint (at least not today).
#
# Now, the behavior that we want is the following:
# If someone sends something directly to gitlint via a pipe or a regular file, read it. If not, read from the
# local repository.
# Note that we don't care about whether STDIN is a TTY or not, we only care whether data is via a pipe or regular
# file.
# However, in case STDIN is not a TTY, it HAS to be one of the 2 other things (pipe or regular file), even if
# no-one is actually sending anything to gitlint over them. In this case, we still want to read from the local
# repository.
# To support this use-case (which is common in CI runners such as Jenkins and Gitlab), we need to actually attempt
# to read from STDIN in case it's a pipe or regular file. In case that fails, then we'll fall back to reading
# from the local repo.
mode = os.fstat(sys.stdin.fileno()).st_mode
stdin_is_pipe_or_file = stat.S_ISFIFO(mode) or stat.S_ISREG(mode)
if stdin_is_pipe_or_file:
input_data = sys.stdin.read()
# Only return the input data if there's actually something passed
# i.e. don't consider empty piped data
if input_data:
return ustr(input_data)
return False | [
"def",
"get_stdin_data",
"(",
")",
":",
"# STDIN can only be 3 different types of things (\"modes\")",
"# 1. An interactive terminal device (i.e. a TTY -> sys.stdin.isatty() or stat.S_ISCHR)",
"# 2. A (named) pipe (stat.S_ISFIFO)",
"# 3. A regular file (stat.S_ISREG)",
"# Technically, STDIN can... | 56.566667 | 30.933333 |
def render(self, is_unicode=False, **kwargs):
"""Render the graph, and return the svg string"""
self.setup(**kwargs)
svg = self.svg.render(
is_unicode=is_unicode, pretty_print=self.pretty_print
)
self.teardown()
return svg | [
"def",
"render",
"(",
"self",
",",
"is_unicode",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"setup",
"(",
"*",
"*",
"kwargs",
")",
"svg",
"=",
"self",
".",
"svg",
".",
"render",
"(",
"is_unicode",
"=",
"is_unicode",
",",
"pretty_... | 34.375 | 15.25 |
def int_to_bit(self, x_int, num_bits, base=2):
"""Turn x_int representing numbers into a bitwise (lower-endian) tensor.
Args:
x_int: Tensor containing integer to be converted into base
notation.
num_bits: Number of bits in the representation.
base: Base of the representation.
Returns:
Corresponding number expressed in base.
"""
x_l = tf.to_int32(tf.expand_dims(x_int, axis=-1))
# pylint: disable=g-complex-comprehension
x_labels = [
tf.floormod(
tf.floordiv(tf.to_int32(x_l),
tf.to_int32(base)**i), tf.to_int32(base))
for i in range(num_bits)]
res = tf.concat(x_labels, axis=-1)
return tf.to_float(res) | [
"def",
"int_to_bit",
"(",
"self",
",",
"x_int",
",",
"num_bits",
",",
"base",
"=",
"2",
")",
":",
"x_l",
"=",
"tf",
".",
"to_int32",
"(",
"tf",
".",
"expand_dims",
"(",
"x_int",
",",
"axis",
"=",
"-",
"1",
")",
")",
"# pylint: disable=g-complex-compreh... | 34 | 15.571429 |
def __update_state(self):
"""Fetches most up to date state from db."""
# Only if the job was not in a terminal state.
if self._state.active:
self._state = self.__get_state_by_id(self.job_config.job_id) | [
"def",
"__update_state",
"(",
"self",
")",
":",
"# Only if the job was not in a terminal state.",
"if",
"self",
".",
"_state",
".",
"active",
":",
"self",
".",
"_state",
"=",
"self",
".",
"__get_state_by_id",
"(",
"self",
".",
"job_config",
".",
"job_id",
")"
] | 43 | 13 |
def changeLayerSize(self, layername, newsize):
"""
Changes layer size. Newsize must be greater than zero.
"""
# for all connection from to this layer, change matrix:
if self.sharedWeights:
raise AttributeError("shared weights broken")
for connection in self.connections:
if connection.fromLayer.name == layername:
connection.changeSize( newsize, connection.toLayer.size )
if connection.toLayer.name == layername:
connection.changeSize( connection.fromLayer.size, newsize )
# then, change the actual layer size:
self.getLayer(layername).changeSize(newsize) | [
"def",
"changeLayerSize",
"(",
"self",
",",
"layername",
",",
"newsize",
")",
":",
"# for all connection from to this layer, change matrix:",
"if",
"self",
".",
"sharedWeights",
":",
"raise",
"AttributeError",
"(",
"\"shared weights broken\"",
")",
"for",
"connection",
... | 48.214286 | 13.785714 |
def is_satisfied_by(self, candidate: Any, **kwds: Any) -> bool:
"""Return True if `candidate` satisfies the specification."""
candidate_name = self._candidate_name
context = self._context
if context:
if candidate_name in kwds:
raise ValueError(f"Candidate name '{candidate_name}' must "
"not be given as keyword.")
context.update(kwds)
context[candidate_name] = candidate
try:
code = self._code
except AttributeError:
self._code = code = compile(self._ast_expr, '<str>', mode='eval')
return eval(code, context) | [
"def",
"is_satisfied_by",
"(",
"self",
",",
"candidate",
":",
"Any",
",",
"*",
"*",
"kwds",
":",
"Any",
")",
"->",
"bool",
":",
"candidate_name",
"=",
"self",
".",
"_candidate_name",
"context",
"=",
"self",
".",
"_context",
"if",
"context",
":",
"if",
... | 43.8 | 14.533333 |
def _tidy2xhtml5(html):
"""Tidy up a html4/5 soup to a parsable valid XHTML5.
Requires tidy-html5 from https://github.com/w3c/tidy-html5
Installation: http://goo.gl/FG27n
"""
html = _io2string(html)
html = _pre_tidy(html) # Pre-process
xhtml5, errors =\
tidy_document(html,
options={
# do not merge nested div elements
# - preserve semantic block structrues
'merge-divs': 0,
# create xml output
'output-xml': 1,
# Don't use indent, adds extra linespace or linefeed
# which are big problems
'indent': 0,
# No tidy meta tag in output
'tidy-mark': 0,
# No wrapping
'wrap': 0,
# Help ensure validation
'alt-text': '',
# No sense in transitional for tool-generated markup
'doctype': 'strict',
# May not get what you expect,
# but you will get something
'force-output': 1,
# remove HTML entities like e.g. nbsp
'numeric-entities': 1,
# remove
'clean': 1,
'bare': 1,
'word-2000': 1,
'drop-proprietary-attributes': 1,
# enclose text in body always with <p>...</p>
'enclose-text': 1,
# transforms <i> and <b> to <em> and <strong>
'logical-emphasis': 1,
# do not tidy all MathML elements!
# List of MathML 3.0 elements from
# http://www.w3.org/TR/MathML3/appendixi.html#index.elem
'new-inline-tags': 'abs, and, annotation, '
'annotation-xml, apply, approx, arccos, arccosh, '
'arccot, arccoth, arccsc, arccsch, arcsec, arcsech, '
'arcsin, arcsinh, arctan, arctanh, arg, bind, bvar, '
'card, cartesianproduct, cbytes, ceiling, cerror, '
'ci, cn, codomain, complexes, compose, condition, '
'conjugate, cos, cosh, cot, coth, cs, csc, csch, '
'csymbol, curl, declare, degree, determinant, diff, '
'divergence, divide, domain, domainofapplication, '
'el, emptyset, eq, equivalent, eulergamma, exists, '
'exp, exponentiale, factorial, factorof, false, '
'floor, fn, forall, gcd, geq, grad, gt, ident, '
'image, imaginary, imaginaryi, implies, in, '
'infinity, int, integers, intersect, interval, '
'inverse, lambda, laplacian, lcm, leq, limit, list, '
'ln, log, logbase, lowlimit, lt, maction, malign, '
'maligngroup, malignmark, malignscope, math, '
'matrix, matrixrow, max, mean, median, menclose, '
'merror, mfenced, mfrac, mfraction, mglyph, mi, '
'min, minus, mlabeledtr, mlongdiv, mmultiscripts, '
'mn, mo, mode, moment, momentabout, mover, mpadded, '
'mphantom, mprescripts, mroot, mrow, ms, mscarries, '
'mscarry, msgroup, msline, mspace, msqrt, msrow, '
'mstack, mstyle, msub, msubsup, msup, mtable, mtd, '
'mtext, mtr, munder, munderover, naturalnumbers, '
'neq, none, not, notanumber, note, notin, '
'notprsubset, notsubset, or, otherwise, '
'outerproduct, partialdiff, pi, piece, piecewise, '
'plus, power, primes, product, prsubset, quotient, '
'rationals, real, reals, reln, rem, root, '
'scalarproduct, sdev, sec, sech, selector, '
'semantics, sep, set, setdiff, share, sin, sinh, '
'subset, sum, tan, tanh, tendsto, times, transpose, '
'true, union, uplimit, variance, vector, '
'vectorproduct, xor',
'doctype': 'html5',
})
# return xhtml5
# return the tree itself, there is another modification below to avoid
# another parse
return _post_tidy(xhtml5) | [
"def",
"_tidy2xhtml5",
"(",
"html",
")",
":",
"html",
"=",
"_io2string",
"(",
"html",
")",
"html",
"=",
"_pre_tidy",
"(",
"html",
")",
"# Pre-process",
"xhtml5",
",",
"errors",
"=",
"tidy_document",
"(",
"html",
",",
"options",
"=",
"{",
"# do not merge ne... | 57.223529 | 22.094118 |
def opls_notation(atom_key):
"""Return element for OPLS forcefield atom key."""
# warning for Ne, He, Na types overlap
conflicts = ['ne', 'he', 'na']
if atom_key in conflicts:
raise _AtomKeyConflict((
"One of the OPLS conflicting "
"atom_keys has occured '{0}'. "
"For how to solve this issue see the manual or "
"MolecularSystem._atom_key_swap() doc string.").format(atom_key))
for element in opls_atom_keys:
if atom_key in opls_atom_keys[element]:
return element
# In case if atom_key was not found in the OPLS keys dictionary
raise _AtomKeyError((
"OPLS atom key {0} was not found in OPLS keys dictionary.").format(
atom_key)) | [
"def",
"opls_notation",
"(",
"atom_key",
")",
":",
"# warning for Ne, He, Na types overlap",
"conflicts",
"=",
"[",
"'ne'",
",",
"'he'",
",",
"'na'",
"]",
"if",
"atom_key",
"in",
"conflicts",
":",
"raise",
"_AtomKeyConflict",
"(",
"(",
"\"One of the OPLS conflicting... | 43.352941 | 13.117647 |
def intersect(self, other):
"""Constructs an unminimized DFA recognizing
the intersection of the languages of two given DFAs.
Args:
other (DFA): The other DFA that will be used
for the intersect operation
Returns:
Returns:
DFA: The resulting DFA
"""
self.automaton = fst.intersect(self.automaton, other.automaton)
return self | [
"def",
"intersect",
"(",
"self",
",",
"other",
")",
":",
"self",
".",
"automaton",
"=",
"fst",
".",
"intersect",
"(",
"self",
".",
"automaton",
",",
"other",
".",
"automaton",
")",
"return",
"self"
] | 35.666667 | 16.083333 |
def find_all_mappings(
self,
other_lattice: "Lattice",
ltol: float = 1e-5,
atol: float = 1,
skip_rotation_matrix: bool = False,
) -> Iterator[Tuple["Lattice", Optional[np.ndarray], np.ndarray]]:
"""
Finds all mappings between current lattice and another lattice.
Args:
other_lattice (Lattice): Another lattice that is equivalent to
this one.
ltol (float): Tolerance for matching lengths. Defaults to 1e-5.
atol (float): Tolerance for matching angles. Defaults to 1.
skip_rotation_matrix (bool): Whether to skip calculation of the
rotation matrix
Yields:
(aligned_lattice, rotation_matrix, scale_matrix) if a mapping is
found. aligned_lattice is a rotated version of other_lattice that
has the same lattice parameters, but which is aligned in the
coordinate system of this lattice so that translational points
match up in 3D. rotation_matrix is the rotation that has to be
applied to other_lattice to obtain aligned_lattice, i.e.,
aligned_matrix = np.inner(other_lattice, rotation_matrix) and
op = SymmOp.from_rotation_and_translation(rotation_matrix)
aligned_matrix = op.operate_multi(latt.matrix)
Finally, scale_matrix is the integer matrix that expresses
aligned_matrix as a linear combination of this
lattice, i.e., aligned_matrix = np.dot(scale_matrix, self.matrix)
None is returned if no matches are found.
"""
(lengths, angles) = other_lattice.lengths_and_angles
(alpha, beta, gamma) = angles
frac, dist, _, _ = self.get_points_in_sphere(
[[0, 0, 0]], [0, 0, 0], max(lengths) * (1 + ltol), zip_results=False
)
cart = self.get_cartesian_coords(frac)
# this can't be broadcast because they're different lengths
inds = [
np.logical_and(dist / l < 1 + ltol, dist / l > 1 / (1 + ltol))
for l in lengths
]
c_a, c_b, c_c = (cart[i] for i in inds)
f_a, f_b, f_c = (frac[i] for i in inds)
l_a, l_b, l_c = (np.sum(c ** 2, axis=-1) ** 0.5 for c in (c_a, c_b, c_c))
def get_angles(v1, v2, l1, l2):
x = np.inner(v1, v2) / l1[:, None] / l2
x[x > 1] = 1
x[x < -1] = -1
angles = np.arccos(x) * 180.0 / pi
return angles
alphab = np.abs(get_angles(c_b, c_c, l_b, l_c) - alpha) < atol
betab = np.abs(get_angles(c_a, c_c, l_a, l_c) - beta) < atol
gammab = np.abs(get_angles(c_a, c_b, l_a, l_b) - gamma) < atol
for i, all_j in enumerate(gammab):
inds = np.logical_and(
all_j[:, None], np.logical_and(alphab, betab[i][None, :])
)
for j, k in np.argwhere(inds):
scale_m = np.array((f_a[i], f_b[j], f_c[k]), dtype=np.int)
if abs(np.linalg.det(scale_m)) < 1e-8:
continue
aligned_m = np.array((c_a[i], c_b[j], c_c[k]))
if skip_rotation_matrix:
rotation_m = None
else:
rotation_m = np.linalg.solve(aligned_m, other_lattice.matrix)
yield Lattice(aligned_m), rotation_m, scale_m | [
"def",
"find_all_mappings",
"(",
"self",
",",
"other_lattice",
":",
"\"Lattice\"",
",",
"ltol",
":",
"float",
"=",
"1e-5",
",",
"atol",
":",
"float",
"=",
"1",
",",
"skip_rotation_matrix",
":",
"bool",
"=",
"False",
",",
")",
"->",
"Iterator",
"[",
"Tupl... | 42.820513 | 23.333333 |
def update_model_in_repo_based_on_filename(self, model):
""" Adds a model to the repo (not initially visible)
Args:
model: the model to be added. If the model
has no filename, a name is invented
Returns: the filename of the model added to the repo
"""
if model._tx_filename is None:
for fn in self.all_models.filename_to_model:
if self.all_models.filename_to_model[fn] == model:
return fn
i = 0
while self.all_models.has_model("anonymous{}".format(i)):
i += 1
myfilename = "anonymous{}".format(i)
self.all_models.filename_to_model[myfilename] = model
else:
myfilename = model._tx_filename
if (not self.all_models.has_model(myfilename)):
self.all_models.filename_to_model[myfilename] = model
return myfilename | [
"def",
"update_model_in_repo_based_on_filename",
"(",
"self",
",",
"model",
")",
":",
"if",
"model",
".",
"_tx_filename",
"is",
"None",
":",
"for",
"fn",
"in",
"self",
".",
"all_models",
".",
"filename_to_model",
":",
"if",
"self",
".",
"all_models",
".",
"f... | 43.809524 | 15.952381 |
def parse_variable(self, variable):
"""Method to parse an input or output variable.
**Example Variable**::
#App:1234:output!String
Args:
variable (string): The variable name to parse.
Returns:
(dictionary): Result of parsed string.
"""
data = None
if variable is not None:
variable = variable.strip()
if re.match(self._variable_match, variable):
var = re.search(self._variable_parse, variable)
data = {
'root': var.group(0),
'job_id': var.group(2),
'name': var.group(3),
'type': var.group(4),
}
return data | [
"def",
"parse_variable",
"(",
"self",
",",
"variable",
")",
":",
"data",
"=",
"None",
"if",
"variable",
"is",
"not",
"None",
":",
"variable",
"=",
"variable",
".",
"strip",
"(",
")",
"if",
"re",
".",
"match",
"(",
"self",
".",
"_variable_match",
",",
... | 29.36 | 15.92 |
def checkArgs(args):
"""Checks the arguments and options.
:param args: an object containing the options of the program.
:type args: argparse.Namespace
:returns: ``True`` if everything was OK.
If there is a problem with an option, an exception is raised using the
:py:class:`ProgramError` class, a message is printed to the
:class:`sys.stderr` and the program exists with code 1.
"""
# Check the "before" file
if not args.before.endswith(".bim"):
msg = "%s: not a BIM file (extension must be .bim)" % args.before
raise ProgramError(msg)
elif not os.path.isfile(args.before):
msg = "%s: no such file" % args.before
raise ProgramError(msg)
# Check the "after" file
if not args.after.endswith(".bim"):
msg = "%s: not a BIM file (extension must be .bim)" % args.after
raise ProgramError(msg)
elif not os.path.isfile(args.after):
msg = "%s: no such file" % args.after
raise ProgramError(msg)
return True | [
"def",
"checkArgs",
"(",
"args",
")",
":",
"# Check the \"before\" file",
"if",
"not",
"args",
".",
"before",
".",
"endswith",
"(",
"\".bim\"",
")",
":",
"msg",
"=",
"\"%s: not a BIM file (extension must be .bim)\"",
"%",
"args",
".",
"before",
"raise",
"ProgramEr... | 32.193548 | 18.483871 |
def clear(self):
"""Remove all key-value pairs."""
for i in range(self.maxlevel):
self._head[2+i] = self._tail
self._tail[-1] = 0
self._level = 1 | [
"def",
"clear",
"(",
"self",
")",
":",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"maxlevel",
")",
":",
"self",
".",
"_head",
"[",
"2",
"+",
"i",
"]",
"=",
"self",
".",
"_tail",
"self",
".",
"_tail",
"[",
"-",
"1",
"]",
"=",
"0",
"self",
... | 31.333333 | 8.833333 |
def socket_reader(connection: socket, buffer_size: int = 1024):
""" read data from adb socket """
while connection is not None:
try:
buffer = connection.recv(buffer_size)
# no output
if not len(buffer):
raise ConnectionAbortedError
except ConnectionAbortedError:
# socket closed
print('connection aborted')
connection.close()
yield None
except OSError:
# still operate connection after it was closed
print('socket closed')
connection.close()
yield None
else:
yield buffer | [
"def",
"socket_reader",
"(",
"connection",
":",
"socket",
",",
"buffer_size",
":",
"int",
"=",
"1024",
")",
":",
"while",
"connection",
"is",
"not",
"None",
":",
"try",
":",
"buffer",
"=",
"connection",
".",
"recv",
"(",
"buffer_size",
")",
"# no output",
... | 32.6 | 12.65 |
def runner(self, fun, **kwargs):
'''
Wrap RunnerClient for executing :ref:`runner modules <all-salt.runners>`
'''
return self.pool.fire_async(self.client_cache['runner'].low, args=(fun, kwargs)) | [
"def",
"runner",
"(",
"self",
",",
"fun",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"self",
".",
"pool",
".",
"fire_async",
"(",
"self",
".",
"client_cache",
"[",
"'runner'",
"]",
".",
"low",
",",
"args",
"=",
"(",
"fun",
",",
"kwargs",
")",
"... | 44.4 | 30.8 |
def _compute_needed_metrics(self, instance, available_metrics):
""" Compare the available metrics for one MOR we have computed and intersect them
with the set of metrics we want to report
"""
i_key = self._instance_key(instance)
if self.in_compatibility_mode(instance):
if instance.get('all_metrics', False):
return available_metrics
wanted_metrics = []
# Get only the basic metrics
for counter_id in available_metrics:
# No cache yet, skip it for now
if not self.metadata_cache.contains(i_key, counter_id):
self.log.debug(
"No metadata found for counter {}, will not collect it".format(ensure_unicode(counter_id))
)
continue
metadata = self.metadata_cache.get_metadata(i_key, counter_id)
if metadata.get('name') in BASIC_METRICS:
wanted_metrics.append(vim.PerformanceManager.MetricId(counterId=counter_id, instance="*"))
return wanted_metrics
else:
# The metadata cache contains only metrics of the desired level, so use it to filter the metrics to keep
return [
vim.PerformanceManager.MetricId(counterId=counter_id, instance="*")
for counter_id in available_metrics
if self.metadata_cache.contains(i_key, counter_id)
] | [
"def",
"_compute_needed_metrics",
"(",
"self",
",",
"instance",
",",
"available_metrics",
")",
":",
"i_key",
"=",
"self",
".",
"_instance_key",
"(",
"instance",
")",
"if",
"self",
".",
"in_compatibility_mode",
"(",
"instance",
")",
":",
"if",
"instance",
".",
... | 48.966667 | 22.033333 |
def add_global_request_interceptor(self, request_interceptor):
# type: (AbstractRequestInterceptor) -> None
"""Register input to the global request interceptors list.
:param request_interceptor: Request Interceptor instance to be
registered.
:type request_interceptor: AbstractRequestInterceptor
:return: None
"""
if request_interceptor is None:
raise RuntimeConfigException(
"Valid Request Interceptor instance to be provided")
if not isinstance(request_interceptor, AbstractRequestInterceptor):
raise RuntimeConfigException(
"Input should be a RequestInterceptor instance")
self.global_request_interceptors.append(request_interceptor) | [
"def",
"add_global_request_interceptor",
"(",
"self",
",",
"request_interceptor",
")",
":",
"# type: (AbstractRequestInterceptor) -> None",
"if",
"request_interceptor",
"is",
"None",
":",
"raise",
"RuntimeConfigException",
"(",
"\"Valid Request Interceptor instance to be provided\"... | 42.333333 | 19.944444 |
def pygmentify(value, **kwargs):
"""Return a highlighted code block with Pygments."""
soup = BeautifulSoup(value, 'html.parser')
for pre in soup.find_all('pre'):
# Get code
code = ''.join([to_string(item) for item in pre.contents])
code = code.replace('<', '<')
code = code.replace('>', '>')
code = code.replace(''', "'")
code = code.replace('"', '"')
code = code.replace('&', '&')
# Get lexer by language
class_list = pre.get('class', [])
lexers = []
options = {
'stripall': True
}
# Collect all found lexers
for c in class_list:
try:
lexers.append(get_lexer_by_name(c, **options))
except ClassNotFound:
pass
# Get first lexer match or none
try:
lexer = lexers[0]
except IndexError:
lexer = None
# If no lexer, try guessing
if lexer is None:
try:
lexer = guess_lexer(pre.text, **options)
class_list += [alias for alias in lexer.aliases]
except ClassNotFound:
pass
if lexer is not None:
# Get formatter
formatter = HtmlFormatter(**kwargs)
# Highlight code
highlighted = highlight(code, lexer, formatter)
class_string = ' '.join([c for c in class_list])
highlighted = highlighted.replace(
'<div class="%s"><pre>' % kwargs['cssclass'],
'<div class="%s"><pre class="%s">' % (kwargs['cssclass'], class_string)
)
pre.replace_with(highlighted)
return soup.decode(formatter=None).strip() | [
"def",
"pygmentify",
"(",
"value",
",",
"*",
"*",
"kwargs",
")",
":",
"soup",
"=",
"BeautifulSoup",
"(",
"value",
",",
"'html.parser'",
")",
"for",
"pre",
"in",
"soup",
".",
"find_all",
"(",
"'pre'",
")",
":",
"# Get code",
"code",
"=",
"''",
".",
"j... | 30.732143 | 17.625 |
def get_catalog_lookup_session(self):
"""Gets the catalog lookup session.
return: (osid.cataloging.CatalogLookupSession) - a
``CatalogLookupSession``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_catalog_lookup()`` is
``false``
*compliance: optional -- This method must be implemented if
``supports_catalog_lookup()`` is ``true``.*
"""
if not self.supports_catalog_lookup():
raise errors.Unimplemented()
# pylint: disable=no-member
return sessions.CatalogLookupSession(runtime=self._runtime) | [
"def",
"get_catalog_lookup_session",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"supports_catalog_lookup",
"(",
")",
":",
"raise",
"errors",
".",
"Unimplemented",
"(",
")",
"# pylint: disable=no-member",
"return",
"sessions",
".",
"CatalogLookupSession",
"(",
... | 40.25 | 14.75 |
def prtcols(items, vpad=6):
'''
After computing the size of our rows and columns based on the terminal size
and length of the largest element, use zip to aggregate our column lists
into row lists and then iterate over the row lists and print them.
'''
from os import get_terminal_size
items = list(items) # copy list so we don't mutate it
width, height = get_terminal_size()
height -= vpad # customize vertical padding
pad = mkpad(items)
rows = mkrows(items, pad, width, height)
cols = mkcols(items, rows)
# * operator in conjunction with zip, unzips the list
for c in zip(*cols):
row_format = '{:<{pad}}' * len(cols)
print(row_format.format(*c, pad=pad)) | [
"def",
"prtcols",
"(",
"items",
",",
"vpad",
"=",
"6",
")",
":",
"from",
"os",
"import",
"get_terminal_size",
"items",
"=",
"list",
"(",
"items",
")",
"# copy list so we don't mutate it",
"width",
",",
"height",
"=",
"get_terminal_size",
"(",
")",
"height",
... | 41.941176 | 17 |
def clean_path_from_deprecated_naming(base_path):
""" Checks if the base path includes deprecated characters/format and returns corrected version
The state machine folder name should be according the universal RAFCON path format. In case the state machine path
is inside a mounted library_root_path also the library_path has to have this format. The library path is a
partial path of the state machine path. This rules are followed to always provide secure paths for RAFCON and all
operating systems.
:param base_path:
:return: cleaned base_path
:rtype: str
"""
def warning_logger_message(insert_string):
not_allowed_characters = "'" + "', '".join(REPLACED_CHARACTERS_FOR_NO_OS_LIMITATION.keys()) + "'"
logger.warning("Deprecated {2} in {0}. Please avoid to use the following characters {1}."
"".format(base_path, not_allowed_characters, insert_string))
from rafcon.core.singleton import library_manager
if library_manager.is_os_path_within_library_root_paths(base_path):
library_path, library_name = library_manager.get_library_path_and_name_for_os_path(base_path)
clean_library_path = clean_path(library_path)
clean_library_name = clean_path(library_name)
if library_name != clean_library_name or library_path != clean_library_path:
warning_logger_message("library path")
library_root_key = library_manager._get_library_root_key_for_os_path(base_path)
library_root_path = library_manager._library_root_paths[library_root_key]
clean_base_path = os.path.join(library_root_path, clean_library_path, clean_library_name)
else:
path_elements = base_path.split(os.path.sep)
state_machine_folder_name = base_path.split(os.path.sep)[-1]
path_elements[-1] = clean_path(state_machine_folder_name)
if not state_machine_folder_name == path_elements[-1]:
warning_logger_message("state machine folder name")
clean_base_path = os.path.sep.join(path_elements)
return clean_base_path | [
"def",
"clean_path_from_deprecated_naming",
"(",
"base_path",
")",
":",
"def",
"warning_logger_message",
"(",
"insert_string",
")",
":",
"not_allowed_characters",
"=",
"\"'\"",
"+",
"\"', '\"",
".",
"join",
"(",
"REPLACED_CHARACTERS_FOR_NO_OS_LIMITATION",
".",
"keys",
"... | 60.323529 | 31.147059 |
def _get_time_stamp(entry):
"""Return datetime object from a timex constraint start/end entry.
Example string format to convert: 2018-01-01T00:00
"""
if not entry or entry == 'Undef':
return None
try:
dt = datetime.datetime.strptime(entry, '%Y-%m-%dT%H:%M')
except Exception as e:
logger.debug('Could not parse %s format' % entry)
return None
return dt | [
"def",
"_get_time_stamp",
"(",
"entry",
")",
":",
"if",
"not",
"entry",
"or",
"entry",
"==",
"'Undef'",
":",
"return",
"None",
"try",
":",
"dt",
"=",
"datetime",
".",
"datetime",
".",
"strptime",
"(",
"entry",
",",
"'%Y-%m-%dT%H:%M'",
")",
"except",
"Exc... | 30.846154 | 17.384615 |
def write_task_options(self, **kw):
"""
Write an options line for a task definition::
writer.write_task_options(
start_time=time(12, 34, 56),
task_time=timedelta(hours=1, minutes=45, seconds=12),
waypoint_distance=False,
distance_tolerance=(0.7, 'km'),
altitude_tolerance=300.0,
)
# -> Options,NoStart=12:34:56,TaskTime=01:45:12,WpDis=False,NearDis=0.7km,NearAlt=300.0m
:param start_time: opening time of the start line as
:class:`datetime.time` or string
:param task_time: designated time for the task as
:class:`datetime.timedelta` or string
:param waypoint_distance: task distance calculation (``False``: use
fixes, ``True``: use waypoints)
:param distance_tolerance: distance tolerance in meters or as
``(distance, unit)`` tuple
:param altitude_tolerance: altitude tolerance in meters or as
``(distance, unit)`` tuple
:param min_distance: "uncompleted leg (``False``: calculate maximum
distance from last observation zone)"
:param random_order: if ``True``, then Random order of waypoints is
checked
:param max_points: maximum number of points
:param before_points: number of mandatory waypoints at the beginning.
``1`` means start line only, ``2`` means start line plus first
point in task sequence (Task line).
:param after_points: number of mandatory waypoints at the end. ``1``
means finish line only, ``2`` means finish line and one point
before finish in task sequence (Task line).
:param bonus: bonus for crossing the finish line
"""
if not self.in_task_section:
raise RuntimeError(
u'Task options have to be written in task section')
fields = ['Options']
if 'start_time' in kw:
fields.append(u'NoStart=' + self.format_time(kw['start_time']))
if 'task_time' in kw:
fields.append(u'TaskTime=' + self.format_timedelta(kw['task_time']))
if 'waypoint_distance' in kw:
fields.append(u'WpDis=%s' % kw['waypoint_distance'])
if 'distance_tolerance' in kw:
fields.append(
u'NearDis=' + self.format_distance(kw['distance_tolerance']))
if 'altitude_tolerance' in kw:
fields.append(
u'NearAlt=' + self.format_distance(kw['altitude_tolerance']))
if 'min_distance' in kw:
fields.append(u'MinDis=%s' % kw['min_distance'])
if 'random_order' in kw:
fields.append(u'RandomOrder=%s' % kw['random_order'])
if 'max_points' in kw:
fields.append(u'MaxPts=%d' % kw['max_points'])
if 'before_points' in kw:
fields.append(u'BeforePts=%d' % kw['before_points'])
if 'after_points' in kw:
fields.append(u'AfterPts=%d' % kw['after_points'])
if 'bonus' in kw:
fields.append(u'Bonus=%d' % kw['bonus'])
self.write_fields(fields) | [
"def",
"write_task_options",
"(",
"self",
",",
"*",
"*",
"kw",
")",
":",
"if",
"not",
"self",
".",
"in_task_section",
":",
"raise",
"RuntimeError",
"(",
"u'Task options have to be written in task section'",
")",
"fields",
"=",
"[",
"'Options'",
"]",
"if",
"'star... | 38.85 | 21.65 |
def camelHump(text):
"""
Converts the inputted text to camel humps by joining all
capital letters toegether (The Quick, Brown,
Fox.Tail -> TheQuickBrownFoxTail)
:param: text <str> text to be changed
:return: <str>
:usage: |import projex.text
|print projex.text.camelHump('The,Quick, Brown, Fox.Tail')
"""
# make sure the first letter is upper case
output = ''.join([word[0].upper() + word[1:] for word in words(text)])
if output:
output = output[0].lower() + output[1:]
return output | [
"def",
"camelHump",
"(",
"text",
")",
":",
"# make sure the first letter is upper case",
"output",
"=",
"''",
".",
"join",
"(",
"[",
"word",
"[",
"0",
"]",
".",
"upper",
"(",
")",
"+",
"word",
"[",
"1",
":",
"]",
"for",
"word",
"in",
"words",
"(",
"t... | 32.277778 | 18.388889 |
def __execute_from_archive(self, cmd):
"""Execute gerrit command against the archive"""
cmd = self.sanitize_for_archive(cmd)
response = self.archive.retrieve(cmd, None, None)
if isinstance(response, RuntimeError):
raise response
return response | [
"def",
"__execute_from_archive",
"(",
"self",
",",
"cmd",
")",
":",
"cmd",
"=",
"self",
".",
"sanitize_for_archive",
"(",
"cmd",
")",
"response",
"=",
"self",
".",
"archive",
".",
"retrieve",
"(",
"cmd",
",",
"None",
",",
"None",
")",
"if",
"isinstance",... | 29 | 18 |
def createCertRequest(pkey, digest="sha256"):
"""
Create a certificate request.
Arguments: pkey - The key to associate with the request
digest - Digestion method to use for signing, default is sha256
**name - The name of the subject of the request, possible
arguments are:
C - Country name
ST - State or province name
L - Locality name
O - Organization name
OU - Organizational unit name
CN - Common name
emailAddress - E-mail address
Returns: The certificate request in an X509Req object
"""
req = crypto.X509Req()
req.get_subject().C = "FR"
req.get_subject().ST = "IDF"
req.get_subject().L = "Paris"
req.get_subject().O = "RedHat" # noqa
req.get_subject().OU = "DCI"
req.get_subject().CN = "DCI-remoteCI"
req.set_pubkey(pkey)
req.sign(pkey, digest)
return req | [
"def",
"createCertRequest",
"(",
"pkey",
",",
"digest",
"=",
"\"sha256\"",
")",
":",
"req",
"=",
"crypto",
".",
"X509Req",
"(",
")",
"req",
".",
"get_subject",
"(",
")",
".",
"C",
"=",
"\"FR\"",
"req",
".",
"get_subject",
"(",
")",
".",
"ST",
"=",
... | 37.785714 | 13.857143 |
def filter_stacks(data, sidx, hslice):
"""
Grab a chunk of loci from the HDF5 database. Apply filters and fill the
the filters boolean array.
The design of the filtering steps intentionally sacrifices some performance
for an increase in readability, and extensibility. Calling multiple filter
functions ends up running through the sequences per stack several times,
but I felt this design made more sense, and also will easily allow us to
add more filters in the future.
"""
LOGGER.info("Entering filter_stacks")
## open h5 handles
io5 = h5py.File(data.clust_database, 'r')
co5 = h5py.File(data.database, 'r')
## get a chunk (hslice) of loci for the selected samples (sidx)
#superseqs = io5["seqs"][hslice[0]:hslice[1], sidx,]
## get an int view of the seq array
#superints = io5["seqs"][hslice[0]:hslice[1], sidx, :].view(np.int8)
## we need to use upper to skip lowercase allele storage
## this slows down the rate of loading in data by a ton.
superints = np.char.upper(io5["seqs"][hslice[0]:hslice[1], sidx,]).view(np.int8)
LOGGER.info("superints shape {}".format(superints.shape))
## fill edge filter
## get edges of superseqs and supercats, since edges need to be trimmed
## before counting hets, snps, inds. Technically, this could edge trim
## clusters to the point that they are below the minlen, and so this
## also constitutes a filter, though one that is uncommon. For this
## reason we have another filter called edgfilter.
splits = co5["edges"][hslice[0]:hslice[1], 4]
edgfilter, edgearr = get_edges(data, superints, splits)
del splits
LOGGER.info('passed edges %s', hslice[0])
## minsamp coverages filtered from superseqs
minfilter = filter_minsamp(data, superints)
LOGGER.info('passed minfilt %s', hslice[0])
## maxhets per site column from superseqs after trimming edges
hetfilter = filter_maxhet(data, superints, edgearr)
LOGGER.info('passed minhet %s', hslice[0])
## ploidy filter
pldfilter = io5["nalleles"][hslice[0]:hslice[1]].max(axis=1) > \
data.paramsdict["max_alleles_consens"]
## indel filter, needs a fresh superints b/c get_edges does (-)->(N)
indfilter = filter_indels(data, superints, edgearr)
LOGGER.info('passed minind %s', hslice[0])
## Build the .loci snpstring as an array (snps)
## shape = (chunk, 1) dtype=S1, or should it be (chunk, 2) for [-,*] ?
snpfilter, snpsarr = filter_maxsnp(data, superints, edgearr)
LOGGER.info("edg %s", edgfilter.sum())
LOGGER.info("min %s", minfilter.sum())
LOGGER.info("het %s", hetfilter.sum())
LOGGER.info("pld %s", pldfilter.sum())
LOGGER.info("snp %s", snpfilter.sum())
LOGGER.info("ind %s", indfilter.sum())
## SAVE FILTERS AND INFO TO DISK BY SLICE NUMBER (.0.tmp.h5)
chunkdir = os.path.join(data.dirs.outfiles, data.name+"_tmpchunks")
handle = os.path.join(chunkdir, "edgf.{}.npy".format(hslice[0]))
with open(handle, 'w') as out:
np.save(out, edgfilter)
handle = os.path.join(chunkdir, "minf.{}.npy".format(hslice[0]))
with open(handle, 'w') as out:
np.save(out, minfilter)
handle = os.path.join(chunkdir, "hetf.{}.npy".format(hslice[0]))
with open(handle, 'w') as out:
np.save(out, hetfilter)
handle = os.path.join(chunkdir, "snpf.{}.npy".format(hslice[0]))
with open(handle, 'w') as out:
np.save(out, snpfilter)
handle = os.path.join(chunkdir, "pldf.{}.npy".format(hslice[0]))
with open(handle, 'w') as out:
np.save(out, pldfilter)
handle = os.path.join(chunkdir, "indf.{}.npy".format(hslice[0]))
with open(handle, 'w') as out:
np.save(out, indfilter)
handle = os.path.join(chunkdir, "snpsarr.{}.npy".format(hslice[0]))
with open(handle, 'w') as out:
np.save(out, snpsarr)
handle = os.path.join(chunkdir, "edgearr.{}.npy".format(hslice[0]))
with open(handle, 'w') as out:
np.save(out, edgearr)
io5.close()
co5.close() | [
"def",
"filter_stacks",
"(",
"data",
",",
"sidx",
",",
"hslice",
")",
":",
"LOGGER",
".",
"info",
"(",
"\"Entering filter_stacks\"",
")",
"## open h5 handles",
"io5",
"=",
"h5py",
".",
"File",
"(",
"data",
".",
"clust_database",
",",
"'r'",
")",
"co5",
"="... | 39.643564 | 21.564356 |
def fromMimeData(self, data):
"""
Paste the clipboard data at the current cursor position.
This method also adds another undo-object to the undo-stack.
..note: This method forcefully interrupts the ``QsciInternal``
pasting mechnism by returning an empty MIME data element.
This is not an elegant implementation, but the best I
could come up with at the moment.
"""
# Only insert the element if it is available in plain text.
if data.hasText():
self.insert(data.text())
# Tell the underlying QsciScintilla object that the MIME data
# object was indeed empty.
return (QtCore.QByteArray(), False) | [
"def",
"fromMimeData",
"(",
"self",
",",
"data",
")",
":",
"# Only insert the element if it is available in plain text.",
"if",
"data",
".",
"hasText",
"(",
")",
":",
"self",
".",
"insert",
"(",
"data",
".",
"text",
"(",
")",
")",
"# Tell the underlying QsciScinti... | 37.842105 | 21.421053 |
def argv_to_cmdline(argv):
"""
Convert a list of arguments to a single command line string.
@type argv: list( str )
@param argv: List of argument strings.
The first element is the program to execute.
@rtype: str
@return: Command line string.
"""
cmdline = list()
for token in argv:
if not token:
token = '""'
else:
if '"' in token:
token = token.replace('"', '\\"')
if ' ' in token or \
'\t' in token or \
'\n' in token or \
'\r' in token:
token = '"%s"' % token
cmdline.append(token)
return ' '.join(cmdline) | [
"def",
"argv_to_cmdline",
"(",
"argv",
")",
":",
"cmdline",
"=",
"list",
"(",
")",
"for",
"token",
"in",
"argv",
":",
"if",
"not",
"token",
":",
"token",
"=",
"'\"\"'",
"else",
":",
"if",
"'\"'",
"in",
"token",
":",
"token",
"=",
"token",
".",
"rep... | 30.84 | 12.36 |
def xyplot(points, title="", c="b", corner=1, lines=False):
"""
Return a ``vtkXYPlotActor`` that is a plot of `x` versus `y`,
where `points` is a list of `(x,y)` points.
:param int corner: assign position:
- 1, topleft,
- 2, topright,
- 3, bottomleft,
- 4, bottomright.
.. hint:: Example: |fitspheres1.py|_
"""
c = vc.getColor(c) # allow different codings
array_x = vtk.vtkFloatArray()
array_y = vtk.vtkFloatArray()
array_x.SetNumberOfTuples(len(points))
array_y.SetNumberOfTuples(len(points))
for i, p in enumerate(points):
array_x.InsertValue(i, p[0])
array_y.InsertValue(i, p[1])
field = vtk.vtkFieldData()
field.AddArray(array_x)
field.AddArray(array_y)
data = vtk.vtkDataObject()
data.SetFieldData(field)
plot = vtk.vtkXYPlotActor()
plot.AddDataObjectInput(data)
plot.SetDataObjectXComponent(0, 0)
plot.SetDataObjectYComponent(0, 1)
plot.SetXValuesToValue()
plot.SetXTitle(title)
plot.SetYTitle("")
plot.ExchangeAxesOff()
plot.PlotPointsOn()
if not lines:
plot.PlotLinesOff()
plot.GetProperty().SetPointSize(5)
plot.GetProperty().SetLineWidth(2)
plot.SetNumberOfXLabels(3) # not working
plot.GetProperty().SetColor(0, 0, 0)
plot.GetProperty().SetOpacity(0.7)
plot.SetPlotColor(0, c[0], c[1], c[2])
tprop = plot.GetAxisLabelTextProperty()
tprop.SetColor(0, 0, 0)
tprop.SetOpacity(0.7)
tprop.SetFontFamily(0)
tprop.BoldOff()
tprop.ItalicOff()
tprop.ShadowOff()
tprop.SetFontSize(3) # not working
plot.SetAxisTitleTextProperty(tprop)
plot.SetAxisLabelTextProperty(tprop)
plot.SetTitleTextProperty(tprop)
if corner == 1:
plot.GetPositionCoordinate().SetValue(0.0, 0.8, 0)
if corner == 2:
plot.GetPositionCoordinate().SetValue(0.7, 0.8, 0)
if corner == 3:
plot.GetPositionCoordinate().SetValue(0.0, 0.0, 0)
if corner == 4:
plot.GetPositionCoordinate().SetValue(0.7, 0.0, 0)
plot.GetPosition2Coordinate().SetValue(0.3, 0.2, 0)
return plot | [
"def",
"xyplot",
"(",
"points",
",",
"title",
"=",
"\"\"",
",",
"c",
"=",
"\"b\"",
",",
"corner",
"=",
"1",
",",
"lines",
"=",
"False",
")",
":",
"c",
"=",
"vc",
".",
"getColor",
"(",
"c",
")",
"# allow different codings",
"array_x",
"=",
"vtk",
".... | 30.397059 | 13.367647 |
def generate_swagger_html(swagger_static_root, swagger_json_url):
"""
given a root directory for the swagger statics, and
a swagger json path, return back a swagger html designed
to use those values.
"""
tmpl = _get_template("swagger.html")
return tmpl.render(
swagger_root=swagger_static_root, swagger_json_url=swagger_json_url
) | [
"def",
"generate_swagger_html",
"(",
"swagger_static_root",
",",
"swagger_json_url",
")",
":",
"tmpl",
"=",
"_get_template",
"(",
"\"swagger.html\"",
")",
"return",
"tmpl",
".",
"render",
"(",
"swagger_root",
"=",
"swagger_static_root",
",",
"swagger_json_url",
"=",
... | 36.1 | 16.3 |
def _make_retry_fields(file_name, metadata, tags, project):
"""Generate fields to send to init_multipart_upload in the case that a Sample upload via
fastx-proxy fails.
Parameters
----------
file_name : `string`
The file_name you wish to associate this fastx file with at One Codex.
metadata : `dict`, optional
tags : `list`, optional
project : `string`, optional
UUID of project to associate this sample with.
Returns
-------
`dict`
Contains metadata fields that will be integrated into the Sample model created when
init_multipart_upload is called.
"""
upload_args = {"filename": file_name}
if metadata:
# format metadata keys as snake case
new_metadata = {}
for md_key, md_val in metadata.items():
new_metadata[snake_case(md_key)] = md_val
upload_args["metadata"] = new_metadata
if tags:
upload_args["tags"] = tags
if project:
upload_args["project"] = getattr(project, "id", project)
return upload_args | [
"def",
"_make_retry_fields",
"(",
"file_name",
",",
"metadata",
",",
"tags",
",",
"project",
")",
":",
"upload_args",
"=",
"{",
"\"filename\"",
":",
"file_name",
"}",
"if",
"metadata",
":",
"# format metadata keys as snake case",
"new_metadata",
"=",
"{",
"}",
"... | 28 | 22.081081 |
def cli(**args):
""" Shakedown is a DC/OS test-harness wrapper for the pytest tool.
"""
import shakedown
# Read configuration options from ~/.shakedown (if exists)
args = read_config(args)
# Set configuration defaults
args = set_config_defaults(args)
if args['quiet']:
shakedown.cli.quiet = True
if not args['dcos_url']:
try:
args['dcos_url'] = dcos_url()
except:
click.secho('error: cluster URL not set, use --dcos-url or see --help for more information.', fg='red', bold=True)
sys.exit(1)
if not args['dcos_url']:
click.secho('error: --dcos-url is a required option; see --help for more information.', fg='red', bold=True)
sys.exit(1)
if args['ssh_key_file']:
shakedown.cli.ssh_key_file = args['ssh_key_file']
if args['ssh_user']:
shakedown.cli.ssh_user = args['ssh_user']
if not args['no_banner']:
echo(banner(), n=False)
echo('Running pre-flight checks...', d='step-maj')
# required modules and their 'version' method
imported = {}
requirements = {
'pytest': '__version__',
'dcos': 'version'
}
for req in requirements:
ver = requirements[req]
echo("Checking for {} library...".format(req), d='step-min', n=False)
try:
imported[req] = importlib.import_module(req, package=None)
except ImportError:
click.secho("error: {p} is not installed; run 'pip install {p}'.".format(p=req), fg='red', bold=True)
sys.exit(1)
echo(getattr(imported[req], requirements[req]))
if shakedown.attach_cluster(args['dcos_url']):
echo('Checking DC/OS cluster version...', d='step-min', n=False)
echo(shakedown.dcos_version())
else:
with imported['dcos'].cluster.setup_directory() as temp_path:
imported['dcos'].cluster.set_attached(temp_path)
imported['dcos'].config.set_val('core.dcos_url', args['dcos_url'])
if args['ssl_no_verify']:
imported['dcos'].config.set_val('core.ssl_verify', 'False')
try:
imported['dcos'].cluster.setup_cluster_config(args['dcos_url'], temp_path, False)
except:
echo('Authenticating with DC/OS cluster...', d='step-min')
authenticated = False
token = imported['dcos'].config.get_config_val("core.dcos_acs_token")
if token is not None:
echo('trying existing ACS token...', d='step-min', n=False)
try:
shakedown.dcos_leader()
authenticated = True
echo(fchr('PP'), d='pass')
except imported['dcos'].errors.DCOSException:
echo(fchr('FF'), d='fail')
if not authenticated and args['oauth_token']:
try:
echo('trying OAuth token...', d='item-maj', n=False)
token = shakedown.authenticate_oauth(args['oauth_token'])
with stdchannel_redirected(sys.stderr, os.devnull):
imported['dcos'].config.set_val('core.dcos_acs_token', token)
authenticated = True
echo(fchr('PP'), d='pass')
except:
echo(fchr('FF'), d='fail')
if not authenticated and args['username'] and args['password']:
try:
echo('trying username and password...', d='item-maj', n=False)
token = shakedown.authenticate(args['username'], args['password'])
with stdchannel_redirected(sys.stderr, os.devnull):
imported['dcos'].config.set_val('core.dcos_acs_token', token)
authenticated = True
echo(fchr('PP'), d='pass')
except:
echo(fchr('FF'), d='fail')
if authenticated:
imported['dcos'].cluster.setup_cluster_config(args['dcos_url'], temp_path, False)
echo('Checking DC/OS cluster version...', d='step-min', n=False)
echo(shakedown.dcos_version())
else:
click.secho("error: no authentication credentials or token found.", fg='red', bold=True)
sys.exit(1)
class shakedown:
""" This encapsulates a PyTest wrapper plugin
"""
state = {}
stdout = []
tests = {
'file': {},
'test': {}
}
report_stats = {
'passed':[],
'skipped':[],
'failed':[],
'total_passed':0,
'total_skipped':0,
'total_failed':0,
}
def output(title, state, text, status=True):
""" Capture and display stdout/stderr output
:param title: the title of the output box (eg. test name)
:type title: str
:param state: state of the result (pass, fail)
:type state: str
:param text: the stdout/stderr output
:type text: str
:param status: whether to output a status marker
:type status: bool
"""
if state == 'fail':
schr = fchr('FF')
elif state == 'pass':
schr = fchr('PP')
elif state == 'skip':
schr = fchr('SK')
else:
schr = ''
if status:
if not args['stdout_inline']:
if state == 'fail':
echo(schr, d='fail')
elif state == 'pass':
echo(schr, d='pass')
else:
if not text:
if state == 'fail':
echo(schr, d='fail')
elif state == 'pass':
if '::' in title:
echo(title.split('::')[-1], d='item-min', n=False)
echo(schr, d='pass')
if text and args['stdout'] in [state, 'all']:
o = decorate(schr + ': ', 'quote-head-' + state)
o += click.style(decorate(title, style=state), bold=True) + "\n"
o += decorate(str(text).strip(), style='quote-' + state)
if args['stdout_inline']:
echo(o)
else:
shakedown.stdout.append(o)
def pytest_collectreport(self, report):
""" Collect and validate individual test files
"""
if not 'collect' in shakedown.state:
shakedown.state['collect'] = 1
echo('Collecting and validating test files...', d='step-min')
if report.nodeid:
echo(report.nodeid, d='item-maj', n=False)
state = None
if report.failed:
state = 'fail'
if report.passed:
state = 'pass'
if report.skipped:
state = 'skip'
if state:
if report.longrepr:
shakedown.output(report.nodeid, state, report.longrepr)
else:
shakedown.output(report.nodeid, state, None)
def pytest_sessionstart(self):
""" Tests have been collected, begin running them...
"""
echo('Initiating testing phase...', d='step-maj')
def pytest_report_teststatus(self, report):
""" Print report results to the console as they are run
"""
try:
report_file, report_test = report.nodeid.split('::', 1)
except ValueError:
return
if not 'test' in shakedown.state:
shakedown.state['test'] = 1
echo('Running individual tests...', d='step-min')
if not report_file in shakedown.tests['file']:
shakedown.tests['file'][report_file] = 1
echo(report_file, d='item-maj')
if not report.nodeid in shakedown.tests['test']:
shakedown.tests['test'][report.nodeid] = {}
if args['stdout_inline']:
echo('')
echo(report_test + ':', d='item-min')
else:
echo(report_test, d='item-min', n=False)
if report.failed:
shakedown.tests['test'][report.nodeid]['fail'] = True
if report.when == 'teardown' and not 'tested' in shakedown.tests['test'][report.nodeid]:
shakedown.output(report.nodeid, 'pass', None)
# Suppress excess terminal output
return report.outcome, None, None
def pytest_runtest_logreport(self, report):
""" Log the [stdout, stderr] results of tests if desired
"""
state = None
for secname, content in report.sections:
if report.failed:
state = 'fail'
if report.passed:
state = 'pass'
if report.skipped:
state = 'skip'
if state and secname != 'Captured stdout call':
module = report.nodeid.split('::', 1)[0]
cap_type = secname.split(' ')[-1]
if not 'setup' in shakedown.tests['test'][report.nodeid]:
shakedown.tests['test'][report.nodeid]['setup'] = True
shakedown.output(module + ' ' + cap_type, state, content, False)
elif cap_type == 'teardown':
shakedown.output(module + ' ' + cap_type, state, content, False)
elif state and report.when == 'call':
if 'tested' in shakedown.tests['test'][report.nodeid]:
shakedown.output(report.nodeid, state, content, False)
else:
shakedown.tests['test'][report.nodeid]['tested'] = True
shakedown.output(report.nodeid, state, content)
# Capture execution crashes
if hasattr(report.longrepr, 'reprcrash'):
longreport = report.longrepr
if 'tested' in shakedown.tests['test'][report.nodeid]:
shakedown.output(report.nodeid, 'fail', 'error: ' + str(longreport.reprcrash), False)
else:
shakedown.tests['test'][report.nodeid]['tested'] = True
shakedown.output(report.nodeid, 'fail', 'error: ' + str(longreport.reprcrash))
def pytest_sessionfinish(self, session, exitstatus):
""" Testing phase is complete; print extra reports (stdout/stderr, JSON) as requested
"""
echo('Test phase completed.', d='step-maj')
if ('stdout' in args and args['stdout']) and shakedown.stdout:
for output in shakedown.stdout:
echo(output)
opts = ['-q', '--tb=no', "--timeout={}".format(args['timeout'])]
if args['fail'] == 'fast':
opts.append('-x')
if args['pytest_option']:
for opt in args['pytest_option']:
opts.append(opt)
if args['stdout_inline']:
opts.append('-s')
if args['tests']:
tests_to_run = []
for test in args['tests']:
tests_to_run.extend(test.split())
for test in tests_to_run:
opts.append(test)
exitstatus = imported['pytest'].main(opts, plugins=[shakedown()])
sys.exit(exitstatus) | [
"def",
"cli",
"(",
"*",
"*",
"args",
")",
":",
"import",
"shakedown",
"# Read configuration options from ~/.shakedown (if exists)",
"args",
"=",
"read_config",
"(",
"args",
")",
"# Set configuration defaults",
"args",
"=",
"set_config_defaults",
"(",
"args",
")",
"if"... | 35.659509 | 22.490798 |
def run(self, *args, **kwargs):
"""Run the command; args/kwargs are added or replace the ones given to the constructor."""
_args, _kwargs = self._combine_arglist(args, kwargs)
results, p = self._run_command(*_args, **_kwargs)
return results | [
"def",
"run",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"_args",
",",
"_kwargs",
"=",
"self",
".",
"_combine_arglist",
"(",
"args",
",",
"kwargs",
")",
"results",
",",
"p",
"=",
"self",
".",
"_run_command",
"(",
"*",
"_args"... | 53.6 | 12.8 |
def inrypl(vertex, direct, plane):
"""
Find the intersection of a ray and a plane.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/inrypl_c.html
:param vertex: Vertex vector of ray.
:type vertex: 3-Element Array of floats
:param direct: Direction vector of ray.
:type direct: 3-Element Array of floats
:param plane: A SPICE plane.
:type plane: spiceypy.utils.support_types.Plane
:return:
Number of intersection points of ray and plane,
Intersection point,
if nxpts == 1.
:rtype: tuple
"""
assert (isinstance(plane, stypes.Plane))
vertex = stypes.toDoubleVector(vertex)
direct = stypes.toDoubleVector(direct)
nxpts = ctypes.c_int()
xpt = stypes.emptyDoubleVector(3)
libspice.inrypl_c(vertex, direct, ctypes.byref(plane), ctypes.byref(nxpts),
xpt)
return nxpts.value, stypes.cVectorToPython(xpt) | [
"def",
"inrypl",
"(",
"vertex",
",",
"direct",
",",
"plane",
")",
":",
"assert",
"(",
"isinstance",
"(",
"plane",
",",
"stypes",
".",
"Plane",
")",
")",
"vertex",
"=",
"stypes",
".",
"toDoubleVector",
"(",
"vertex",
")",
"direct",
"=",
"stypes",
".",
... | 35.076923 | 12.923077 |
def listdir(self, directory_path=None, hidden_files=False):
"""
Return a list of files and directories in a given directory.
:param directory_path: Optional str (defaults to current directory)
:param hidden_files: Include hidden files
:return: Directory listing
"""
# Change current directory if a directory path is specified, otherwise use current
if directory_path:
self.chdir(directory_path)
# Exclude hidden files
if not hidden_files:
return [path for path in self.session.nlst() if not path.startswith('.')]
# Include hidden files
else:
return self.session.nlst() | [
"def",
"listdir",
"(",
"self",
",",
"directory_path",
"=",
"None",
",",
"hidden_files",
"=",
"False",
")",
":",
"# Change current directory if a directory path is specified, otherwise use current",
"if",
"directory_path",
":",
"self",
".",
"chdir",
"(",
"directory_path",
... | 36.052632 | 20.473684 |
def contains_non_repeat_actions(self):
'''
Because repeating repeat actions can get ugly real fast
'''
for action in self.actions:
if not isinstance(action, (int, dynamic.RepeatCommand)):
return True
return False | [
"def",
"contains_non_repeat_actions",
"(",
"self",
")",
":",
"for",
"action",
"in",
"self",
".",
"actions",
":",
"if",
"not",
"isinstance",
"(",
"action",
",",
"(",
"int",
",",
"dynamic",
".",
"RepeatCommand",
")",
")",
":",
"return",
"True",
"return",
"... | 34.125 | 18.625 |
def get_3d_markers_no_label(
self, component_info=None, data=None, component_position=None
):
"""Get 3D markers without label."""
return self._get_3d_markers(
RT3DMarkerPositionNoLabel, component_info, data, component_position
) | [
"def",
"get_3d_markers_no_label",
"(",
"self",
",",
"component_info",
"=",
"None",
",",
"data",
"=",
"None",
",",
"component_position",
"=",
"None",
")",
":",
"return",
"self",
".",
"_get_3d_markers",
"(",
"RT3DMarkerPositionNoLabel",
",",
"component_info",
",",
... | 38.571429 | 21.285714 |
def create_timestamp_anti_leech_url(host, file_name, query_string, encrypt_key, deadline):
"""
创建时间戳防盗链
Args:
host: 带访问协议的域名
file_name: 原始文件名,不需要urlencode
query_string: 查询参数,不需要urlencode
encrypt_key: 时间戳防盗链密钥
deadline: 链接有效期时间戳(以秒为单位)
Returns:
带时间戳防盗链鉴权访问链接
"""
if query_string:
url_to_sign = '{0}/{1}?{2}'.format(host, urlencode(file_name), query_string)
else:
url_to_sign = '{0}/{1}'.format(host, urlencode(file_name))
path = '/{0}'.format(urlencode(file_name))
expire_hex = str(hex(deadline))[2:]
str_to_sign = '{0}{1}{2}'.format(encrypt_key, path, expire_hex).encode()
sign_str = hashlib.md5(str_to_sign).hexdigest()
if query_string:
signed_url = '{0}&sign={1}&t={2}'.format(url_to_sign, sign_str, expire_hex)
else:
signed_url = '{0}?sign={1}&t={2}'.format(url_to_sign, sign_str, expire_hex)
return signed_url | [
"def",
"create_timestamp_anti_leech_url",
"(",
"host",
",",
"file_name",
",",
"query_string",
",",
"encrypt_key",
",",
"deadline",
")",
":",
"if",
"query_string",
":",
"url_to_sign",
"=",
"'{0}/{1}?{2}'",
".",
"format",
"(",
"host",
",",
"urlencode",
"(",
"file_... | 32.2 | 23.6 |
def p_expr_BOR_expr(p):
""" expr : expr BOR expr
"""
p[0] = make_binary(p.lineno(2), 'BOR', p[1], p[3], lambda x, y: x | y) | [
"def",
"p_expr_BOR_expr",
"(",
"p",
")",
":",
"p",
"[",
"0",
"]",
"=",
"make_binary",
"(",
"p",
".",
"lineno",
"(",
"2",
")",
",",
"'BOR'",
",",
"p",
"[",
"1",
"]",
",",
"p",
"[",
"3",
"]",
",",
"lambda",
"x",
",",
"y",
":",
"x",
"|",
"y"... | 33 | 12.75 |
def colindex_by_colname(self, colname):
"""Return column index whose name is :param:`column`
:raises: `ValueError` when no column with :param:`colname` found
"""
for i, coldef in enumerate(self): # iterate each column's definition
if coldef.name == colname:
return i
raise ValueError('No column named "%s" found' % (colname)) | [
"def",
"colindex_by_colname",
"(",
"self",
",",
"colname",
")",
":",
"for",
"i",
",",
"coldef",
"in",
"enumerate",
"(",
"self",
")",
":",
"# iterate each column's definition",
"if",
"coldef",
".",
"name",
"==",
"colname",
":",
"return",
"i",
"raise",
"ValueE... | 43.222222 | 17.333333 |
def dependents(self):
"""
:API: public
:return: targets that depend on this target
:rtype: list of Target
"""
return [self._build_graph.get_target(dep_address)
for dep_address in self._build_graph.dependents_of(self.address)] | [
"def",
"dependents",
"(",
"self",
")",
":",
"return",
"[",
"self",
".",
"_build_graph",
".",
"get_target",
"(",
"dep_address",
")",
"for",
"dep_address",
"in",
"self",
".",
"_build_graph",
".",
"dependents_of",
"(",
"self",
".",
"address",
")",
"]"
] | 28.222222 | 17.111111 |
def GetExtractionStatusUpdateCallback(self):
"""Retrieves the extraction status update callback function.
Returns:
function: status update callback function or None if not available.
"""
if self._mode == self.MODE_LINEAR:
return self._PrintExtractionStatusUpdateLinear
if self._mode == self.MODE_WINDOW:
return self._PrintExtractionStatusUpdateWindow
return None | [
"def",
"GetExtractionStatusUpdateCallback",
"(",
"self",
")",
":",
"if",
"self",
".",
"_mode",
"==",
"self",
".",
"MODE_LINEAR",
":",
"return",
"self",
".",
"_PrintExtractionStatusUpdateLinear",
"if",
"self",
".",
"_mode",
"==",
"self",
".",
"MODE_WINDOW",
":",
... | 30.384615 | 18.307692 |
def trade(self, pair, type_, rate, amount):
"""
The basic method that can be used for creating orders and trading on the exchange.
To use this method you need an API key privilege to trade.
You can only create limit orders using this method, but you can emulate market orders using rate parameters.
E.g. using rate=0.1 you can sell at the best market price.
Each pair has a different limit on the minimum / maximum amounts, the minimum amount and the number of digits
after the decimal point. All limitations can be obtained using the info method in PublicAPI v3.
:param str pair: pair (ex. 'btc_usd')
:param str type_: order type ('buy' or 'sell')
:param float rate: the rate at which you need to buy/sell
:param float amount: the amount you need to buy/sell
"""
return self._trade_api_call('Trade', pair=pair, type_=type_, rate=rate, amount=amount) | [
"def",
"trade",
"(",
"self",
",",
"pair",
",",
"type_",
",",
"rate",
",",
"amount",
")",
":",
"return",
"self",
".",
"_trade_api_call",
"(",
"'Trade'",
",",
"pair",
"=",
"pair",
",",
"type_",
"=",
"type_",
",",
"rate",
"=",
"rate",
",",
"amount",
"... | 62.733333 | 31.933333 |
def set_bucket_notification(self, bucket_name, notifications):
"""
Set the given notifications on the bucket.
:param bucket_name: Bucket name.
:param notifications: Notifications structure
"""
is_valid_bucket_name(bucket_name)
is_valid_bucket_notification_config(notifications)
content = xml_marshal_bucket_notifications(notifications)
headers = {
'Content-Length': str(len(content)),
'Content-Md5': get_md5_base64digest(content)
}
content_sha256_hex = get_sha256_hexdigest(content)
self._url_open(
'PUT',
bucket_name=bucket_name,
query={"notification": ""},
headers=headers,
body=content,
content_sha256=content_sha256_hex
) | [
"def",
"set_bucket_notification",
"(",
"self",
",",
"bucket_name",
",",
"notifications",
")",
":",
"is_valid_bucket_name",
"(",
"bucket_name",
")",
"is_valid_bucket_notification_config",
"(",
"notifications",
")",
"content",
"=",
"xml_marshal_bucket_notifications",
"(",
"... | 33.5 | 15.416667 |
def resolve_dependencies(self, to_build, depender):
"""Add any required dependencies.
"""
shutit_global.shutit_global_object.yield_to_draw()
self.log('In resolve_dependencies',level=logging.DEBUG)
cfg = self.cfg
for dependee_id in depender.depends_on:
dependee = self.shutit_map.get(dependee_id)
# Don't care if module doesn't exist, we check this later
if (dependee and dependee not in to_build
and cfg[dependee_id]['shutit.core.module.build_ifneeded']):
to_build.append(dependee)
cfg[dependee_id]['shutit.core.module.build'] = True
return True | [
"def",
"resolve_dependencies",
"(",
"self",
",",
"to_build",
",",
"depender",
")",
":",
"shutit_global",
".",
"shutit_global_object",
".",
"yield_to_draw",
"(",
")",
"self",
".",
"log",
"(",
"'In resolve_dependencies'",
",",
"level",
"=",
"logging",
".",
"DEBUG"... | 40.714286 | 12.428571 |
def _expand_paths_itr(paths, marker='*'):
"""Iterator version of :func:`expand_paths`.
"""
for path in paths:
if is_path(path):
if marker in path: # glob path pattern
for ppath in sglob(path):
yield ppath
else:
yield path # a simple file path
elif is_path_obj(path):
if marker in path.as_posix():
for ppath in sglob(path.as_posix()):
yield normpath(ppath)
else:
yield normpath(path.as_posix())
elif is_ioinfo(path):
yield path.path
else: # A file or file-like object
yield path | [
"def",
"_expand_paths_itr",
"(",
"paths",
",",
"marker",
"=",
"'*'",
")",
":",
"for",
"path",
"in",
"paths",
":",
"if",
"is_path",
"(",
"path",
")",
":",
"if",
"marker",
"in",
"path",
":",
"# glob path pattern",
"for",
"ppath",
"in",
"sglob",
"(",
"pat... | 34.05 | 9.2 |
def get_topics(self):
'''
Returns the topics available on unbabel
'''
result = self.api_call('topic/')
topics_json = json.loads(result.content)
topics = [Topic(name=topic_json["topic"]["name"])
for topic_json in topics_json["objects"]]
return topics | [
"def",
"get_topics",
"(",
"self",
")",
":",
"result",
"=",
"self",
".",
"api_call",
"(",
"'topic/'",
")",
"topics_json",
"=",
"json",
".",
"loads",
"(",
"result",
".",
"content",
")",
"topics",
"=",
"[",
"Topic",
"(",
"name",
"=",
"topic_json",
"[",
... | 35.444444 | 16.777778 |
def body(self):
"""Yields the body of the buffered file."""
for fp, need_close in self.files:
try:
name = os.path.basename(fp.name)
except AttributeError:
name = ''
for chunk in self.gen_chunks(self.envelope.file_open(name)):
yield chunk
for chunk in self.file_chunks(fp):
yield chunk
for chunk in self.gen_chunks(self.envelope.file_close()):
yield chunk
if need_close:
fp.close()
for chunk in self.close():
yield chunk | [
"def",
"body",
"(",
"self",
")",
":",
"for",
"fp",
",",
"need_close",
"in",
"self",
".",
"files",
":",
"try",
":",
"name",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"fp",
".",
"name",
")",
"except",
"AttributeError",
":",
"name",
"=",
"''",
"... | 35.705882 | 13.882353 |
def validation_step(self, Xi, yi, **fit_params):
"""Perform a forward step using batched data and return the
resulting loss.
The module is set to be in evaluation mode (e.g. dropout is
not applied).
Parameters
----------
Xi : input data
A batch of the input data.
yi : target data
A batch of the target data.
**fit_params : dict
Additional parameters passed to the ``forward`` method of
the module and to the ``self.train_split`` call.
"""
self.module_.eval()
with torch.no_grad():
y_pred = self.infer(Xi, **fit_params)
loss = self.get_loss(y_pred, yi, X=Xi, training=False)
return {
'loss': loss,
'y_pred': y_pred,
} | [
"def",
"validation_step",
"(",
"self",
",",
"Xi",
",",
"yi",
",",
"*",
"*",
"fit_params",
")",
":",
"self",
".",
"module_",
".",
"eval",
"(",
")",
"with",
"torch",
".",
"no_grad",
"(",
")",
":",
"y_pred",
"=",
"self",
".",
"infer",
"(",
"Xi",
","... | 28.535714 | 19.607143 |
def register_logger(self, logger):
"""
Register a new logger.
"""
handler = CommandHandler(self)
handler.setFormatter(CommandFormatter())
logger.handlers = [handler]
logger.propagate = False
output = self.output
level = logging.WARNING
if output.is_debug():
level = logging.DEBUG
elif output.is_very_verbose() or output.is_verbose():
level = logging.INFO
logger.setLevel(level) | [
"def",
"register_logger",
"(",
"self",
",",
"logger",
")",
":",
"handler",
"=",
"CommandHandler",
"(",
"self",
")",
"handler",
".",
"setFormatter",
"(",
"CommandFormatter",
"(",
")",
")",
"logger",
".",
"handlers",
"=",
"[",
"handler",
"]",
"logger",
".",
... | 28.411765 | 11.588235 |
def analyze(qpi, r0, method="edge", model="projection", edgekw={}, imagekw={},
ret_center=False, ret_pha_offset=False, ret_qpi=False):
"""Determine refractive index and radius of a spherical object
Parameters
----------
qpi: qpimage.QPImage
Quantitative phase image data
r0: float
Approximate radius of the sphere [m]
method: str
The method used to determine the refractive index
can either be "edge" (determine the radius from the
edge detected in the phase image) or "image" (perform
a 2D phase image fit).
model: str
The light-scattering model used by `method`. If
`method` is "edge", only "projection" is allowed.
If `method` is "image", `model` can be one of
"mie", "projection", "rytov", or "rytov-sc".
edgekw: dict
Keyword arguments for tuning the edge detection algorithm,
see :func:`qpsphere.edgefit.contour_canny`.
imagekw: dict
Keyword arguments for tuning the image fitting algorithm,
see :func:`qpsphere.imagefit.alg.match_phase`
ret_center: bool
If True, return the center coordinate of the sphere.
ret_pha_offset: bool
If True, return the phase image background offset.
ret_qpi: bool
If True, return the modeled data as a :class:`qpimage.QPImage`.
Returns
-------
n: float
Computed refractive index
r: float
Computed radius [m]
c: tuple of floats
Only returned if `ret_center` is True;
Center position of the sphere [px]
pha_offset: float
Only returned if `ret_pha_offset` is True;
Phase image background offset
qpi_sim: qpimage.QPImage
Only returned if `ret_qpi` is True;
Modeled data
Notes
-----
If `method` is "image", then the "edge" method is used
as a first step to estimate initial parameters for radius,
refractive index, and position of the sphere using `edgekw`.
If this behavior is not desired, please make use of the
method :func:`qpsphere.imagefit.analyze`.
"""
if method == "edge":
if model != "projection":
raise ValueError("`method='edge'` requires `model='projection'`!")
n, r, c = edgefit.analyze(qpi=qpi,
r0=r0,
edgekw=edgekw,
ret_center=True,
ret_edge=False,
)
res = [n, r]
if ret_center:
res.append(c)
if ret_pha_offset:
res.append(0)
if ret_qpi:
qpi_sim = simulate(radius=r,
sphere_index=n,
medium_index=qpi["medium index"],
wavelength=qpi["wavelength"],
grid_size=qpi.shape,
model="projection",
pixel_size=qpi["pixel size"],
center=c)
res.append(qpi_sim)
elif method == "image":
n0, r0, c0 = edgefit.analyze(qpi=qpi,
r0=r0,
edgekw=edgekw,
ret_center=True,
ret_edge=False,
)
res = imagefit.analyze(qpi=qpi,
model=model,
n0=n0,
r0=r0,
c0=c0,
imagekw=imagekw,
ret_center=ret_center,
ret_pha_offset=ret_pha_offset,
ret_qpi=ret_qpi
)
else:
raise NotImplementedError("`method` must be 'edge' or 'image'!")
return res | [
"def",
"analyze",
"(",
"qpi",
",",
"r0",
",",
"method",
"=",
"\"edge\"",
",",
"model",
"=",
"\"projection\"",
",",
"edgekw",
"=",
"{",
"}",
",",
"imagekw",
"=",
"{",
"}",
",",
"ret_center",
"=",
"False",
",",
"ret_pha_offset",
"=",
"False",
",",
"ret... | 37.941176 | 16.107843 |
def read_matlab_features(array_paths, number_of_nodes, dimensionality):
"""
Returns a sparse feature matrix as calculated by a Matlab routine.
"""
# Read the data array
file_row_gen = get_file_row_generator(array_paths[0], "\t")
data = list()
append_data = data.append
for file_row in file_row_gen:
append_data(float(file_row[0]))
# Read the row array
file_row_gen = get_file_row_generator(array_paths[1], "\t")
row = list()
append_row = row.append
for file_row in file_row_gen:
append_row(int(float(file_row[0])))
# Read the data array
file_row_gen = get_file_row_generator(array_paths[2], "\t")
col = list()
append_col = col.append
for file_row in file_row_gen:
append_col(int(float(file_row[0])))
data = np.array(data).astype(np.float64)
row = np.array(row).astype(np.int64) - 1 # Due to Matlab numbering
col = np.array(col).astype(np.int64) - 1 # Due to Matlab numbering
print(np.max(row), np.min(row))
print(np.max(col), np.min(col))
# centroids_new = sparse.coo_matrix((data, (row, col)), shape=(number_of_nodes + 1, k))
features = spsp.coo_matrix((data, (row, col)), shape=(number_of_nodes, dimensionality))
return features | [
"def",
"read_matlab_features",
"(",
"array_paths",
",",
"number_of_nodes",
",",
"dimensionality",
")",
":",
"# Read the data array",
"file_row_gen",
"=",
"get_file_row_generator",
"(",
"array_paths",
"[",
"0",
"]",
",",
"\"\\t\"",
")",
"data",
"=",
"list",
"(",
")... | 34.194444 | 20.861111 |
def send(self, sender: PytgbotApiBot):
"""
Send the message via pytgbot.
:param sender: The bot instance to send with.
:type sender: pytgbot.bot.Bot
:rtype: PytgbotApiMessage
"""
return sender.send_sticker(
# receiver, self.media, disable_notification=self.disable_notification, reply_to_message_id=reply_id
sticker=self.sticker, chat_id=self.receiver, reply_to_message_id=self.reply_id, disable_notification=self.disable_notification, reply_markup=self.reply_markup
) | [
"def",
"send",
"(",
"self",
",",
"sender",
":",
"PytgbotApiBot",
")",
":",
"return",
"sender",
".",
"send_sticker",
"(",
"# receiver, self.media, disable_notification=self.disable_notification, reply_to_message_id=reply_id",
"sticker",
"=",
"self",
".",
"sticker",
",",
"c... | 42.076923 | 26.538462 |
def vgp_calc(dataframe, tilt_correction='yes', site_lon='site_lon', site_lat='site_lat', dec_is='dec_is', inc_is='inc_is', dec_tc='dec_tc', inc_tc='inc_tc'):
"""
This function calculates paleomagnetic poles using directional data and site
location data within a pandas.DataFrame. The function adds the columns
'paleolatitude', 'vgp_lat', 'vgp_lon', 'vgp_lat_rev', and 'vgp_lon_rev'
to the dataframe. The '_rev' columns allow for subsequent choice as to which
polarity will be used for the VGPs.
Parameters
-----------
dataframe : the name of the pandas.DataFrame containing the data
tilt-correction : 'yes' is the default and uses tilt-corrected data (dec_tc, inc_tc), 'no' uses data that is not tilt-corrected and is in geographic coordinates
dataframe['site_lat'] : the name of the Dataframe column containing the latitude of the site
dataframe['site_lon'] : the name of the Dataframe column containing the longitude of the site
dataframe['inc_tc'] : the name of the Dataframe column containing the tilt-corrected inclination (used by default tilt-correction='yes')
dataframe['dec_tc'] : the name of the Dataframe column containing the tilt-corrected declination (used by default tilt-correction='yes')
dataframe['inc_is'] : the name of the Dataframe column containing the insitu inclination (used when tilt-correction='no')
dataframe['dec_is'] : the name of the Dataframe column containing the insitu declination (used when tilt-correction='no')
Returns
-------
dataframe['paleolatitude']
dataframe['colatitude']
dataframe['vgp_lat']
dataframe['vgp_lon']
dataframe['vgp_lat_rev']
dataframe['vgp_lon_rev']
"""
dataframe.is_copy = False
if tilt_correction == 'yes':
# calculate the paleolatitude/colatitude
dataframe['paleolatitude'] = np.degrees(
np.arctan(0.5 * np.tan(np.radians(dataframe[inc_tc]))))
dataframe['colatitude'] = 90 - dataframe['paleolatitude']
# calculate the latitude of the pole
dataframe['vgp_lat'] = np.degrees(np.arcsin(np.sin(np.radians(dataframe[site_lat])) *
np.cos(np.radians(dataframe['colatitude'])) +
np.cos(np.radians(dataframe[site_lat])) *
np.sin(np.radians(dataframe['colatitude'])) *
np.cos(np.radians(dataframe[dec_tc]))))
# calculate the longitudinal difference between the pole and the site
# (beta)
dataframe['beta'] = np.degrees(np.arcsin(old_div((np.sin(np.radians(dataframe['colatitude'])) *
np.sin(np.radians(dataframe[dec_tc]))),
(np.cos(np.radians(dataframe['vgp_lat']))))))
# generate a boolean array (mask) to use to distinguish between the two possibilities for pole longitude
# and then calculate pole longitude using the site location and
# calculated beta
mask = np.cos(np.radians(dataframe['colatitude'])) > np.sin(
np.radians(dataframe[site_lat])) * np.sin(np.radians(dataframe['vgp_lat']))
dataframe['vgp_lon'] = np.where(mask, (dataframe[site_lon] + dataframe['beta']) %
360., (dataframe[site_lon] + 180 - dataframe['beta']) % 360.)
# calculate the antipode of the poles
dataframe['vgp_lat_rev'] = -dataframe['vgp_lat']
dataframe['vgp_lon_rev'] = (dataframe['vgp_lon'] - 180.) % 360.
# the 'colatitude' and 'beta' columns were created for the purposes of the pole calculations
# but aren't of further use and are deleted
del dataframe['colatitude']
del dataframe['beta']
if tilt_correction == 'no':
# calculate the paleolatitude/colatitude
dataframe['paleolatitude'] = np.degrees(
np.arctan(0.5 * np.tan(np.radians(dataframe[inc_is]))))
dataframe['colatitude'] = 90 - dataframe['paleolatitude']
# calculate the latitude of the pole
dataframe['vgp_lat'] = np.degrees(np.arcsin(np.sin(np.radians(dataframe[site_lat])) *
np.cos(np.radians(dataframe['colatitude'])) +
np.cos(np.radians(dataframe[site_lat])) *
np.sin(np.radians(dataframe['colatitude'])) *
np.cos(np.radians(dataframe[dec_is]))))
# calculate the longitudinal difference between the pole and the site
# (beta)
dataframe['beta'] = np.degrees(np.arcsin(old_div((np.sin(np.radians(dataframe['colatitude'])) *
np.sin(np.radians(dataframe[dec_is]))),
(np.cos(np.radians(dataframe['vgp_lat']))))))
# generate a boolean array (mask) to use to distinguish between the two possibilities for pole longitude
# and then calculate pole longitude using the site location and
# calculated beta
mask = np.cos(np.radians(dataframe['colatitude'])) > np.sin(
np.radians(dataframe[site_lat])) * np.sin(np.radians(dataframe['vgp_lat']))
dataframe['vgp_lon'] = np.where(mask, (dataframe[site_lon] + dataframe['beta']) %
360., (dataframe[site_lon] + 180 - dataframe['beta']) % 360.)
# calculate the antipode of the poles
dataframe['vgp_lat_rev'] = -dataframe['vgp_lat']
dataframe['vgp_lon_rev'] = (dataframe['vgp_lon'] - 180.) % 360.
# the 'colatitude' and 'beta' columns were created for the purposes of the pole calculations
# but aren't of further use and are deleted
del dataframe['colatitude']
del dataframe['beta']
return(dataframe) | [
"def",
"vgp_calc",
"(",
"dataframe",
",",
"tilt_correction",
"=",
"'yes'",
",",
"site_lon",
"=",
"'site_lon'",
",",
"site_lat",
"=",
"'site_lat'",
",",
"dec_is",
"=",
"'dec_is'",
",",
"inc_is",
"=",
"'inc_is'",
",",
"dec_tc",
"=",
"'dec_tc'",
",",
"inc_tc",
... | 66.166667 | 36.366667 |
def diff(config, files, metrics, changes_only=True, detail=True):
"""
Show the differences in metrics for each of the files.
:param config: The wily configuration
:type config: :namedtuple:`wily.config.WilyConfig`
:param files: The files to compare.
:type files: ``list`` of ``str``
:param metrics: The metrics to measure.
:type metrics: ``list`` of ``str``
:param changes_only: Only include changes files in output.
:type changes_only: ``bool``
:param detail: Show details (function-level)
:type detail: ``bool``
"""
config.targets = files
files = list(files)
state = State(config)
last_revision = state.index[state.default_archiver].revisions[0]
# Convert the list of metrics to a list of metric instances
operators = {resolve_operator(metric.split(".")[0]) for metric in metrics}
metrics = [(metric.split(".")[0], resolve_metric(metric)) for metric in metrics]
data = {}
results = []
# Build a set of operators
_operators = [operator.cls(config) for operator in operators]
cwd = os.getcwd()
os.chdir(config.path)
for operator in _operators:
logger.debug(f"Running {operator.name} operator")
data[operator.name] = operator.run(None, config)
os.chdir(cwd)
# Write a summary table..
extra = []
for operator, metric in metrics:
if detail and resolve_operator(operator).level == OperatorLevel.Object:
for file in files:
try:
extra.extend(
[
f"{file}:{k}"
for k in data[operator][file].keys()
if k != metric.name
and isinstance(data[operator][file][k], dict)
]
)
except KeyError:
logger.debug(f"File {file} not in cache")
logger.debug("Cache follows -- ")
logger.debug(data[operator])
files.extend(extra)
logger.debug(files)
for file in files:
metrics_data = []
has_changes = False
for operator, metric in metrics:
try:
current = last_revision.get(
config, state.default_archiver, operator, file, metric.name
)
except KeyError as e:
current = "-"
try:
new = get_metric(data, operator, file, metric.name)
except KeyError as e:
new = "-"
if new != current:
has_changes = True
if metric.type in (int, float) and new != "-" and current != "-":
if current > new:
metrics_data.append(
"{0:n} -> \u001b[{2}m{1:n}\u001b[0m".format(
current, new, BAD_COLORS[metric.measure]
)
)
elif current < new:
metrics_data.append(
"{0:n} -> \u001b[{2}m{1:n}\u001b[0m".format(
current, new, GOOD_COLORS[metric.measure]
)
)
else:
metrics_data.append("{0:n} -> {1:n}".format(current, new))
else:
if current == "-" and new == "-":
metrics_data.append("-")
else:
metrics_data.append("{0} -> {1}".format(current, new))
if has_changes or not changes_only:
results.append((file, *metrics_data))
else:
logger.debug(metrics_data)
descriptions = [metric.description for operator, metric in metrics]
headers = ("File", *descriptions)
if len(results) > 0:
print(
# But it still makes more sense to show the newest at the top, so reverse again
tabulate.tabulate(
headers=headers, tabular_data=results, tablefmt=DEFAULT_GRID_STYLE
)
) | [
"def",
"diff",
"(",
"config",
",",
"files",
",",
"metrics",
",",
"changes_only",
"=",
"True",
",",
"detail",
"=",
"True",
")",
":",
"config",
".",
"targets",
"=",
"files",
"files",
"=",
"list",
"(",
"files",
")",
"state",
"=",
"State",
"(",
"config",... | 36.190909 | 19.027273 |
def _decode_value(self, value):
""" Decodes the value by turning any binary data back into Python objects.
The method searches for ObjectId values, loads the associated binary data from
GridFS and returns the decoded Python object.
Args:
value (object): The value that should be decoded.
Raises:
DataStoreDecodingError: An ObjectId was found but the id is not a valid
GridFS id.
DataStoreDecodeUnknownType: The type of the specified value is unknown.
Returns:
object: The decoded value as a valid Python object.
"""
if isinstance(value, (int, float, str, bool, datetime)):
return value
elif isinstance(value, list):
return [self._decode_value(item) for item in value]
elif isinstance(value, dict):
result = {}
for key, item in value.items():
result[key] = self._decode_value(item)
return result
elif isinstance(value, ObjectId):
if self._gridfs.exists({"_id": value}):
return pickle.loads(self._gridfs.get(value).read())
else:
raise DataStoreGridfsIdInvalid()
else:
raise DataStoreDecodeUnknownType() | [
"def",
"_decode_value",
"(",
"self",
",",
"value",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"(",
"int",
",",
"float",
",",
"str",
",",
"bool",
",",
"datetime",
")",
")",
":",
"return",
"value",
"elif",
"isinstance",
"(",
"value",
",",
"list",... | 38.666667 | 20.272727 |
def _useful_basename(data):
"""Provide a useful file basename for outputs, referencing batch/sample and caller.
"""
names = dd.get_batches(data)
if not names:
names = [dd.get_sample_name(data)]
batch_name = names[0]
return "%s-%s" % (batch_name, data["sv"]["variantcaller"]) | [
"def",
"_useful_basename",
"(",
"data",
")",
":",
"names",
"=",
"dd",
".",
"get_batches",
"(",
"data",
")",
"if",
"not",
"names",
":",
"names",
"=",
"[",
"dd",
".",
"get_sample_name",
"(",
"data",
")",
"]",
"batch_name",
"=",
"names",
"[",
"0",
"]",
... | 37.375 | 10.375 |
def collect_variables(self, g_scope='gen', d_scope='discrim'):
"""
Assign `self.g_vars` to the parameters under scope `g_scope`,
and same with `self.d_vars`.
"""
self.g_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, g_scope)
assert self.g_vars
self.d_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, d_scope)
assert self.d_vars | [
"def",
"collect_variables",
"(",
"self",
",",
"g_scope",
"=",
"'gen'",
",",
"d_scope",
"=",
"'discrim'",
")",
":",
"self",
".",
"g_vars",
"=",
"tf",
".",
"get_collection",
"(",
"tf",
".",
"GraphKeys",
".",
"TRAINABLE_VARIABLES",
",",
"g_scope",
")",
"asser... | 45 | 18.555556 |
def get_host_node_state(self, state, problem_has_been_acknowledged, in_scheduled_downtime):
"""Get host node state, simplest case ::
* Handle not value (revert) for host and consider 1 as 2
:return: 0, 1 or 2
:rtype: int
"""
# Make DOWN look as CRITICAL (2 instead of 1)
if state == 1:
state = 2
# If our node is acknowledged or in downtime, state is ok/up
if problem_has_been_acknowledged or in_scheduled_downtime:
state = 0
# Maybe we are a NOT node, so manage this
if self.not_value:
return 0 if state else 2 # Keep the logic of return Down on NOT rules
return state | [
"def",
"get_host_node_state",
"(",
"self",
",",
"state",
",",
"problem_has_been_acknowledged",
",",
"in_scheduled_downtime",
")",
":",
"# Make DOWN look as CRITICAL (2 instead of 1)",
"if",
"state",
"==",
"1",
":",
"state",
"=",
"2",
"# If our node is acknowledged or in dow... | 34.35 | 23.9 |
def _get_regional_term(self, C, imt, vs30, rrup):
"""
Compute regional term for Japan. See page 1043
"""
f3 = interpolate.interp1d(
[150, 250, 350, 450, 600, 850, 1150, 2000],
[C['a36'], C['a37'], C['a38'], C['a39'], C['a40'], C['a41'],
C['a42'], C['a42']],
kind='linear')
return f3(vs30) + C['a29'] * rrup | [
"def",
"_get_regional_term",
"(",
"self",
",",
"C",
",",
"imt",
",",
"vs30",
",",
"rrup",
")",
":",
"f3",
"=",
"interpolate",
".",
"interp1d",
"(",
"[",
"150",
",",
"250",
",",
"350",
",",
"450",
",",
"600",
",",
"850",
",",
"1150",
",",
"2000",
... | 38.6 | 9.8 |
def get_token_issuer(token):
"""
Issuer of a token is the identifier used to recover the secret
Need to extract this from token to ensure we can proceed to the signature validation stage
Does not check validity of the token
:param token: signed JWT token
:return issuer: iss field of the JWT token
:raises TokenIssuerError: if iss field not present
:raises TokenDecodeError: if token does not conform to JWT spec
"""
try:
unverified = decode_token(token)
if 'iss' not in unverified:
raise TokenIssuerError
return unverified.get('iss')
except jwt.DecodeError:
raise TokenDecodeError | [
"def",
"get_token_issuer",
"(",
"token",
")",
":",
"try",
":",
"unverified",
"=",
"decode_token",
"(",
"token",
")",
"if",
"'iss'",
"not",
"in",
"unverified",
":",
"raise",
"TokenIssuerError",
"return",
"unverified",
".",
"get",
"(",
"'iss'",
")",
"except",
... | 34.368421 | 15.526316 |
def request_object(self, object_class, address, state, object_handler,
                   error_handler = None, timeout_handler = None,
                   backup_state = None, timeout = None,
                   freshness_period = None, expiration_period = None, purge_period = None):
    """Asynchronously request an object of `object_class` at `address` whose
    state is no worse than `state`.

    The object is served from the cache when possible and created/fetched
    otherwise. This method does not return the object; instead
    `object_handler` is invoked once the object is available (possibly
    before this call returns, possibly from another thread). On failure
    `error_handler` is called; on timeout `timeout_handler`.

    :Parameters:
        - `object_class`: class (type) of the requested object.
        - `address`: address of the requested object.
        - `state`: worst acceptable state. "new" forces a fresh
          fetch; "stale" accepts anything cached.
        - `object_handler`: called with (address, object, state) when the
          object is available.
        - `error_handler`: called with (address, error_data) on retrieval
          error. When absent, `object_handler` is called with object
          `None` and state "error".
        - `timeout_handler`: called with (address) on timeout. When
          absent, `error_handler` is used with error details `None`.
        - `backup_state`: fallback state to look up in the cache when the
          requested state is unavailable and retrieval failed.
        - `timeout`: interval after which retrieval is abandoned.
        - `freshness_period`: interval after which the item becomes 'old'.
        - `expiration_period`: interval after which the item becomes 'stale'.
        - `purge_period`: interval after which the item is removed from
          the cache.
    :Types:
        - `object_class`: `classobj`
        - `address`: any hashable
        - `state`: "new", "fresh", "old" or "stale"
        - `object_handler`: callable(address, value, state)
        - `error_handler`: callable(address, error_data)
        - `timeout_handler`: callable(address)
        - `backup_state`: "new", "fresh", "old" or "stale"
        - `timeout`: `timedelta`
        - `freshness_period`: `timedelta`
        - `expiration_period`: `timedelta`
        - `purge_period`: `timedelta`
    """
    self._lock.acquire()
    try:
        # EAFP: a missing per-class cache is reported as TypeError,
        # matching the documented contract.
        try:
            cache = self._caches[object_class]
        except KeyError:
            raise TypeError("No cache for %r" % (object_class,))
        cache.request_object(address, state, object_handler,
                error_handler, timeout_handler, backup_state, timeout,
                freshness_period, expiration_period, purge_period)
    finally:
        self._lock.release()
"def",
"request_object",
"(",
"self",
",",
"object_class",
",",
"address",
",",
"state",
",",
"object_handler",
",",
"error_handler",
"=",
"None",
",",
"timeout_handler",
"=",
"None",
",",
"backup_state",
"=",
"None",
",",
"timeout",
"=",
"None",
",",
"fresh... | 54.25 | 23.544118 |
def remove_permission(self, label, callback=None):
    """
    Delete the permission identified by ``label`` from this queue.

    :type label: str or unicode
    :param label: The unique label associated with the permission being removed.
    :rtype: bool
    :return: True if successful, False otherwise.
    """
    # Delegate straight to the owning connection, forwarding the callback.
    conn = self.connection
    return conn.remove_permission(self, label, callback=callback)
"def",
"remove_permission",
"(",
"self",
",",
"label",
",",
"callback",
"=",
"None",
")",
":",
"return",
"self",
".",
"connection",
".",
"remove_permission",
"(",
"self",
",",
"label",
",",
"callback",
"=",
"callback",
")"
] | 35 | 19.363636 |
def _all_default(d, default, seen=None):
    """
    ANY VALUE NOT SET WILL BE SET BY THE default
    THIS IS RECURSIVE

    :param d: dict (or object) to fill in, mutated in place
    :param default: dict (or Data) supplying default values; nested dicts
        are deep-copied, cycle-safely, via ``seen``
    :param seen: maps id(default_value) -> already-copied dict so shared
        or cyclic default structures are wired up instead of re-copied
    """
    if default is None:
        return
    if seen is None:
        # BUGFIX: with the documented default (seen=None) the calls to
        # seen.get()/seen[...] below raised AttributeError whenever
        # `default` contained a nested dict; start with an empty map.
        seen = {}
    if _get(default, CLASS) is Data:
        default = object.__getattribute__(default, SLOT)  # REACH IN AND GET THE dict
        # Log = _late_import()
        # Log.error("strictly dict (or object) allowed: got {{type}}", type=_get(default, CLASS).__name__)
    for k, default_value in default.items():
        default_value = unwrap(default_value)  # TWO DIFFERENT Dicts CAN SHARE id() BECAUSE THEY ARE SHORT LIVED
        existing_value = _get_attr(d, [k])
        if existing_value == None:
            if default_value != None:
                if _get(default_value, CLASS) in data_types:
                    # reuse the copy made earlier for this exact dict (cycle/shared-structure safety)
                    df = seen.get(id(default_value))
                    if df is not None:
                        _set_attr(d, [k], df)
                    else:
                        copy_dict = {}
                        seen[id(default_value)] = copy_dict
                        _set_attr(d, [k], copy_dict)
                        _all_default(copy_dict, default_value, seen)
                else:
                    # ASSUME PRIMITIVE (OR LIST, WHICH WE DO NOT COPY)
                    try:
                        _set_attr(d, [k], default_value)
                    except Exception as e:
                        if PATH_NOT_FOUND not in e:
                            get_logger().error("Can not set attribute {{name}}", name=k, cause=e)
        elif is_list(existing_value) or is_list(default_value):
            # lists are merged (existing first), not replaced
            _set_attr(d, [k], None)
            _set_attr(d, [k], listwrap(existing_value) + listwrap(default_value))
        elif (hasattr(existing_value, "__setattr__") or _get(existing_value, CLASS) in data_types) and _get(default_value, CLASS) in data_types:
            # both sides are dict-like: recurse, filling only missing keys
            df = seen.get(id(default_value))
            if df is not None:
                _set_attr(d, [k], df)
            else:
                seen[id(default_value)] = existing_value
                _all_default(existing_value, default_value, seen)
"def",
"_all_default",
"(",
"d",
",",
"default",
",",
"seen",
"=",
"None",
")",
":",
"if",
"default",
"is",
"None",
":",
"return",
"if",
"_get",
"(",
"default",
",",
"CLASS",
")",
"is",
"Data",
":",
"default",
"=",
"object",
".",
"__getattribute__",
... | 46.727273 | 20.045455 |
def set_color(self, color, alpha = 1):
    """Set the active drawing color.

    Accepts hex colors like "#aaa", normalized RGB triplets (components
    in 0..1), or triplets in range 0..65535. Consider skipping this and
    specifying the color on stroke/fill directly instead.
    """
    parsed = self.colors.parse(color)  # normalize whatever we got into an RGB(A) tuple
    if alpha is None and len(parsed) == 4:
        # no explicit alpha requested: take it from the parsed color
        alpha = parsed[3]
    red, green, blue = parsed[:3]
    self._add_instruction("set_color", red, green, blue, alpha)
"def",
"set_color",
"(",
"self",
",",
"color",
",",
"alpha",
"=",
"1",
")",
":",
"color",
"=",
"self",
".",
"colors",
".",
"parse",
"(",
"color",
")",
"# parse whatever we have there into a normalized triplet",
"if",
"len",
"(",
"color",
")",
"==",
"4",
"a... | 50.166667 | 18.333333 |
def ignore(self, argument_dest, **kwargs):
    """ Register an argument with type knack.arguments.ignore_type so it is
    hidden from help output and ignored during parsing.

    :param argument_dest: The destination argument to apply the ignore type to
    :type argument_dest: str
    """
    self._check_stale()
    if not self._applicable():
        return
    # a dunder-style option name keeps the hidden argument out of the way
    hidden_option = ['--__{}'.format(argument_dest.upper())]
    self.argument(argument_dest, options_list=hidden_option,
                  arg_type=ignore_type, **kwargs)
"def",
"ignore",
"(",
"self",
",",
"argument_dest",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"_check_stale",
"(",
")",
"if",
"not",
"self",
".",
"_applicable",
"(",
")",
":",
"return",
"dest_option",
"=",
"[",
"'--__{}'",
".",
"format",
"(",
"... | 40.75 | 20.75 |
def interstore(self, destination, *others):
    """ Intersect this set with @others and store the result at
    @destination (same as :meth:intersection, but persisted).
    @destination: #str keyname or :class:RedisSet
    @others: one or several #str keynames or :class:RedisSet objects
    -> #int number of members in resulting set
    """
    dest_key = self._typesafe(destination)
    other_keys = self._typesafe_others(others)
    return self._client.sinterstore(dest_key, self.key_prefix, *other_keys)
"def",
"interstore",
"(",
"self",
",",
"destination",
",",
"*",
"others",
")",
":",
"others",
"=",
"self",
".",
"_typesafe_others",
"(",
"others",
")",
"destination",
"=",
"self",
".",
"_typesafe",
"(",
"destination",
")",
"return",
"self",
".",
"_client",... | 42.5 | 18.25 |
def read(variable):
    """
    Read an element from LiFePO4wered.

    :param variable: the element to read.
    :type variable: Lifepo4weredEnum
    :return: the value of the element
    :rtype: int
    :raises ValueError: if parameter value is not a member of Lifepo4weredEnum
    :raises RuntimeError: if the element is write-only
    """
    if variable not in variablesEnum:
        raise ValueError('Use a lifepo4wered enum element as read parameter.')
    # guard clause: write-only elements cannot be read back
    if not canRead(variable):
        raise RuntimeError('You cannot read {0} value, just write it'.format(variable.name))
    return lifepo4weredSO.read_lifepo4wered(variable.value)
"def",
"read",
"(",
"variable",
")",
":",
"if",
"variable",
"not",
"in",
"variablesEnum",
":",
"raise",
"ValueError",
"(",
"'Use a lifepo4wered enum element as read parameter.'",
")",
"if",
"canRead",
"(",
"variable",
")",
":",
"return",
"lifepo4weredSO",
".",
"re... | 34.470588 | 19.529412 |
def _break_poorly_matched_fronts(fronts, threshold=0.1, threshold_overlap_samples=3):
    """
    For each onset front, for each frequency in that front, break the onset front if the signals
    between this frequency's onset and the next frequency's onset are not similar enough.
    Specifically:
    If we have the following two frequency channels, and the two O's are part of the same onset front,
    ::
        [ . O . . . . . . . . . . ]
        [ . . . . O . . . . . . . ]
    We compare the signals x and y:
    ::
        [ . x x x x . . . . . . . ]
        [ . y y y y . . . . . . . ]
    And if they are not sufficiently similar (via a DSP correlation algorithm), we break the onset
    front between these two channels.
    Once this is done, remove any onset fronts that are less than 3 channels wide.
    """
    assert threshold_overlap_samples > 0, "Number of samples of overlap must be greater than zero"
    # breaks_after maps front_id -> list of (frequency, sample) points after
    # which that front should be split.
    breaks_after = {}
    for front_id in _get_front_ids_one_at_a_time(fronts):
        front = _get_front_idxs_from_id(fronts, front_id)
        for i, (f, s) in enumerate(front):
            if i < len(front) - 1:
                # Get the signal from f, s to f, s+1 and the signal from f+1, s to f+1, s+1
                next_f, next_s = front[i + 1]
                low_s = min(s, next_s)
                high_s = max(s, next_s)
                sig_this_f = fronts[f, low_s:high_s]
                sig_next_f = fronts[next_f, low_s:high_s]
                assert len(sig_next_f) == len(sig_this_f)
                # Skip comparisons with too little overlap to be meaningful.
                if len(sig_next_f) > threshold_overlap_samples:
                    # If these two signals are not sufficiently close in form, this front should be broken up
                    correlation = signal.correlate(sig_this_f, sig_next_f, mode='same')
                    assert len(correlation) > 0
                    # Normalize; the 1E-9 guards against division by zero.
                    correlation = correlation / max(correlation + 1E-9)
                    similarity = np.sum(correlation) / len(correlation)
                    # TODO: the above stuff probably needs to be figured out
                    if similarity < threshold:
                        # NOTE(review): on the first dissimilar pair of a
                        # front, an EMPTY list is stored and (f, s) itself is
                        # not recorded — so each front's first break point is
                        # dropped. Confirm whether this is intentional.
                        if front_id in breaks_after:
                            breaks_after[front_id].append((f, s))
                        else:
                            breaks_after[front_id] = []
    # Now update the fronts matrix by breaking up any fronts at the points we just identified
    # and assign the newly created fronts new IDs
    taken_ids = sorted(np.unique(fronts))
    next_id = taken_ids[-1] + 1
    for id in breaks_after.keys():
        for f, s in breaks_after[id]:
            fidxs, sidxs = np.where(fronts == id)
            # Cells of this front in frequency channels beyond f are
            # reassigned to a freshly allocated front id.
            idxs_greater_than_f = [fidx for fidx in fidxs if fidx > f]
            start = len(sidxs) - len(idxs_greater_than_f)
            indexes = (idxs_greater_than_f, sidxs[start:])
            fronts[indexes] = next_id
            next_id += 1
    # Discard (in place) any front narrower than 3 frequency channels.
    _remove_fronts_that_are_too_small(fronts, 3)
"def",
"_break_poorly_matched_fronts",
"(",
"fronts",
",",
"threshold",
"=",
"0.1",
",",
"threshold_overlap_samples",
"=",
"3",
")",
":",
"assert",
"threshold_overlap_samples",
">",
"0",
",",
"\"Number of samples of overlap must be greater than zero\"",
"breaks_after",
"=",... | 43.848485 | 24.090909 |
def join (self, timeout=None):
    """Block until every item put on the Queue has been gotten and processed.

    The count of unfinished tasks rises with each item added and falls
    each time a consumer calls task_done(); join() returns once the
    count reaches zero. With a `timeout`, raise Timeout if the count has
    not reached zero within that many seconds; a negative `timeout`
    raises ValueError.
    """
    with self.all_tasks_done:
        if timeout is None:
            # wait indefinitely for the counter to drain
            while self.unfinished_tasks:
                self.all_tasks_done.wait()
            return
        if timeout < 0:
            raise ValueError("'timeout' must be a positive number")
        deadline = _time() + timeout
        while self.unfinished_tasks:
            remaining = deadline - _time()
            if remaining <= 0.0:
                raise Timeout()
            self.all_tasks_done.wait(remaining)
"def",
"join",
"(",
"self",
",",
"timeout",
"=",
"None",
")",
":",
"with",
"self",
".",
"all_tasks_done",
":",
"if",
"timeout",
"is",
"None",
":",
"while",
"self",
".",
"unfinished_tasks",
":",
"self",
".",
"all_tasks_done",
".",
"wait",
"(",
")",
"els... | 44.136364 | 16.363636 |
def scaled(self, scale):
    """
    Return a copy of the current scene, with meshes and scene
    transforms scaled to the requested factor.
    Parameters
    -----------
    scale : float
      Factor to scale meshes and transforms
    Returns
    -----------
    scaled : trimesh.Scene
      A copy of the current scene but scaled
    """
    scale = float(scale)
    # matrix for 2D scaling
    scale_2D = np.eye(3) * scale
    # matrix for 3D scaling
    scale_3D = np.eye(4) * scale
    # preallocate transforms and geometries
    nodes = self.graph.nodes_geometry
    transforms = np.zeros((len(nodes), 4, 4))
    geometries = [None] * len(nodes)
    # collect list of transforms
    for i, node in enumerate(nodes):
        transforms[i], geometries[i] = self.graph[node]
    # result is a copy
    result = self.copy()
    # remove all existing transforms
    result.graph.clear()
    # handle each geometry once, even when several nodes reference it
    for group in grouping.group(geometries):
        # hashable reference to self.geometry
        geometry = geometries[group[0]]
        # original transform from world to geometry
        original = transforms[group[0]]
        # transform for geometry: scale applied in the world frame of the
        # first node that uses this geometry
        new_geom = np.dot(scale_3D, original)
        if result.geometry[geometry].vertices.shape[1] == 2:
            # if our scene is 2D only scale in 2D
            result.geometry[geometry].apply_transform(scale_2D)
        else:
            # otherwise apply the full transform
            result.geometry[geometry].apply_transform(new_geom)
        for node, T in zip(self.graph.nodes_geometry[group],
                           transforms[group]):
            # generate the new transforms: compose the scale with the old
            # node transform, then undo the baked-in geometry transform
            transform = util.multi_dot(
                [scale_3D, T, np.linalg.inv(new_geom)])
            # apply scale to translation
            transform[:3, 3] *= scale
            # update scene with new transforms
            result.graph.update(frame_to=node,
                                matrix=transform,
                                geometry=geometry)
    return result
"def",
"scaled",
"(",
"self",
",",
"scale",
")",
":",
"scale",
"=",
"float",
"(",
"scale",
")",
"# matrix for 2D scaling",
"scale_2D",
"=",
"np",
".",
"eye",
"(",
"3",
")",
"*",
"scale",
"# matrix for 3D scaling",
"scale_3D",
"=",
"np",
".",
"eye",
"(",
... | 35.33871 | 14.983871 |
def wr_row_mergeall(self, worksheet, txtstr, fmt, row_idx):
    """Write ``txtstr`` into one cell spanning every header column of the row.

    Returns the index of the next row.
    """
    last_col = len(self.hdrs) - 1
    worksheet.merge_range(row_idx, 0, row_idx, last_col, txtstr, fmt)
    return row_idx + 1
"def",
"wr_row_mergeall",
"(",
"self",
",",
"worksheet",
",",
"txtstr",
",",
"fmt",
",",
"row_idx",
")",
":",
"hdridxval",
"=",
"len",
"(",
"self",
".",
"hdrs",
")",
"-",
"1",
"worksheet",
".",
"merge_range",
"(",
"row_idx",
",",
"0",
",",
"row_idx",
... | 53.4 | 13.8 |
def entry_line_to_text(self, entry):
    """
    Return the textual representation of an :class:`~taxi.timesheet.lines.Entry` instance. This method is a bit
    convoluted since we don't want to completely mess up the original formatting of the entry.
    """
    line = []
    # The entry is new, it didn't come from an existing line, so let's just return a simple text representation of
    # it
    if not entry._text:
        flags_text = self.flags_to_text(entry.flags)
        duration_text = self.duration_to_text(entry.duration)
        return ''.join(
            (flags_text, ' ' if flags_text else '', entry.alias, ' ', duration_text, ' ', entry.description)
        )
    # Otherwise rebuild the line field by field, regenerating only fields
    # whose backing attribute changed and keeping the rest verbatim.
    for i, text in enumerate(entry._text):
        # If this field is mapped to an attribute, check if it has changed
        # and, if so, regenerate its text. The only fields that are not
        # mapped to attributes are spacing fields
        if i in self.ENTRY_ATTRS_POSITION:
            if self.ENTRY_ATTRS_POSITION[i] in entry._changed_attrs:
                attr_name = self.ENTRY_ATTRS_POSITION[i]
                attr_value = getattr(entry, self.ENTRY_ATTRS_POSITION[i])
                # Some attributes need to be transformed to their textual representation, such as flags or duration
                if attr_name in self.ENTRY_ATTRS_TRANSFORMERS:
                    attr_value = getattr(self, self.ENTRY_ATTRS_TRANSFORMERS[attr_name])(attr_value)
            else:
                # unchanged attribute: keep the original text untouched
                attr_value = text
            line.append(attr_value)
        else:
            # Spacing field. If the length of the field has changed, do whatever we can to keep the current
            # formatting (ie. number of whitespaces)
            if len(line[i-1]) != len(entry._text[i-1]):
                # shrink/grow the whitespace run to absorb the size delta of
                # the preceding field, never below a single space
                text = ' ' * max(1, (len(text) - (len(line[i-1]) - len(entry._text[i-1]))))
            line.append(text)
    return ''.join(line).strip()
"def",
"entry_line_to_text",
"(",
"self",
",",
"entry",
")",
":",
"line",
"=",
"[",
"]",
"# The entry is new, it didn't come from an existing line, so let's just return a simple text representation of",
"# it",
"if",
"not",
"entry",
".",
"_text",
":",
"flags_text",
"=",
"... | 47.690476 | 30.595238 |
def fix_pix_borders(image2d, nreplace, sought_value, replacement_value):
    """Replace a few pixels at the borders of each spectrum.

    Set to 'replacement_value' 'nreplace' pixels at the beginning (at
    the end) of each spectrum just after (before) the spectrum value
    changes from (to) 'sought_value', as seen from the image borders.

    Parameters
    ----------
    image2d : numpy array
        Initial 2D image (modified in place).
    nreplace : int
        Number of pixels to be replaced in each border.
    sought_value : int, float, bool
        Pixel value that indicates missing data in the spectrum.
    replacement_value : int, float, bool
        Pixel value to be employed in the 'nreplace' pixels.

    Returns
    -------
    image2d : numpy array
        Final 2D image.
    """
    naxis2, naxis1 = image2d.shape
    for row in range(naxis2):
        # locate first/last useful pixel of this spectrum (sentinels -1 and
        # naxis1 indicate a spectrum entirely equal to sought_value)
        jmin, jmax = find_pix_borders(
            image2d[row, :],
            sought_value=sought_value
        )
        if jmin != -1:
            # left border: blank nreplace pixels starting at the first
            # useful pixel, clipped to the image width
            stop = min(jmin + nreplace, naxis1)
            image2d[row, jmin:stop] = replacement_value
        if jmax != naxis1:
            # right border: blank nreplace pixels ending at the last
            # useful pixel, clipped at zero
            stop = jmax + 1
            start = max(stop - nreplace, 0)
            image2d[row, start:stop] = replacement_value
    return image2d
"def",
"fix_pix_borders",
"(",
"image2d",
",",
"nreplace",
",",
"sought_value",
",",
"replacement_value",
")",
":",
"# input image size",
"naxis2",
",",
"naxis1",
"=",
"image2d",
".",
"shape",
"for",
"i",
"in",
"range",
"(",
"naxis2",
")",
":",
"# only spectra... | 29.744681 | 19.93617 |
def _roll_random(n):
    """Return a cryptographically random integer in [0, n-1]."""
    bits = util.bit_length(n - 1)
    byte_count = (bits + 7) // 8
    hbyte_mask = pow(2, bits % 8) - 1
    # Rejection sampling: fetch exactly enough random bits to represent
    # n-1, masking the surplus bits of the high byte, and retry whenever
    # the draw lands at or above n. In the worst case (n-1 a power of
    # two) each draw is accepted with probability slightly over 1/2, so
    # termination is overwhelmingly likely even though not guaranteed.
    while True:
        raw = os.urandom(byte_count)
        if hbyte_mask > 0:
            raw = byte_mask(raw[0], hbyte_mask) + raw[1:]
        candidate = util.inflate_long(raw, 1)
        if candidate < n:
            return candidate
"def",
"_roll_random",
"(",
"n",
")",
":",
"bits",
"=",
"util",
".",
"bit_length",
"(",
"n",
"-",
"1",
")",
"byte_count",
"=",
"(",
"bits",
"+",
"7",
")",
"//",
"8",
"hbyte_mask",
"=",
"pow",
"(",
"2",
",",
"bits",
"%",
"8",
")",
"-",
"1",
"#... | 37.6 | 18.6 |
def ParseFileObject(self, parser_mediator, file_object):
    """Parses a Firefox cache file-like object.

    Args:
      parser_mediator (ParserMediator): mediates interactions between parsers
          and other components, such as storage and dfvfs.
      file_object (dfvfs.FileIO): a file-like object.

    Raises:
      UnableToParseFile: when the file cannot be parsed.
    """
    # Bail out quickly on files whose name does not match the cache naming
    # scheme (_CACHE_00* or the cache1 filename pattern).
    filename = parser_mediator.GetFilename()
    if (not self._CACHE_FILENAME_RE.match(filename) and
        not filename.startswith('_CACHE_00')):
      raise errors.UnableToParseFile('Not a Firefox cache1 file.')
    display_name = parser_mediator.GetDisplayName()
    # Determine block size / first record offset for this cache file.
    firefox_config = self._GetFirefoxConfig(file_object, display_name)
    file_object.seek(firefox_config.first_record_offset)
    # Walk cache entries until end of file; invalid records are logged
    # and skipped rather than aborting the whole file.
    while file_object.get_offset() < file_object.get_size():
      try:
        self._ParseCacheEntry(
            parser_mediator, file_object, display_name,
            firefox_config.block_size)
      except IOError:
        file_offset = file_object.get_offset() - self._MINIMUM_BLOCK_SIZE
        logger.debug((
            '[{0:s}] Invalid cache record in file: {1:s} at offset: '
            '{2:d}.').format(self.NAME, display_name, file_offset))
"def",
"ParseFileObject",
"(",
"self",
",",
"parser_mediator",
",",
"file_object",
")",
":",
"filename",
"=",
"parser_mediator",
".",
"GetFilename",
"(",
")",
"if",
"(",
"not",
"self",
".",
"_CACHE_FILENAME_RE",
".",
"match",
"(",
"filename",
")",
"and",
"no... | 36.575758 | 22.757576 |
def get_matching_symbols_pairs(self, cursor, opening_symbol, closing_symbol, backward=False):
    """
    Returns the cursor for matching given symbols pairs.

    :param cursor: Cursor to match from.
    :type cursor: QTextCursor
    :param opening_symbol: Opening symbol.
    :type opening_symbol: unicode
    :param closing_symbol: Closing symbol to match.
    :type closing_symbol: unicode
    :param backward: Search toward the start of the document instead of the end.
    :type backward: bool
    :return: Matching cursor.
    :rtype: QTextCursor
    """
    # Anchor the search at the selection edge nearest the search direction,
    # or at the bare cursor position when nothing is selected.
    if cursor.hasSelection():
        start_position = cursor.selectionEnd() if backward else cursor.selectionStart()
    else:
        start_position = cursor.position()
    flags = QTextDocument.FindFlags()
    if backward:
        flags = flags | QTextDocument.FindBackward
    start_cursor = previous_start_cursor = cursor.document().find(opening_symbol, start_position, flags)
    end_cursor = previous_end_cursor = cursor.document().find(closing_symbol, start_position, flags)
    # Skip over nested pairs: every extra opening symbol found before the
    # current closing symbol pushes the closing-symbol search further out.
    if backward:
        while start_cursor > end_cursor:
            start_cursor = cursor.document().find(opening_symbol, start_cursor.selectionStart(), flags)
            if start_cursor > end_cursor:
                end_cursor = cursor.document().find(closing_symbol, end_cursor.selectionStart(), flags)
    else:
        while start_cursor < end_cursor:
            start_cursor = cursor.document().find(opening_symbol, start_cursor.selectionEnd(), flags)
            if start_cursor < end_cursor:
                end_cursor = cursor.document().find(closing_symbol, end_cursor.selectionEnd(), flags)
    # A position of -1 means "not found"; fall back to the first match.
    return end_cursor if end_cursor.position() != -1 else previous_end_cursor
"def",
"get_matching_symbols_pairs",
"(",
"self",
",",
"cursor",
",",
"opening_symbol",
",",
"closing_symbol",
",",
"backward",
"=",
"False",
")",
":",
"if",
"cursor",
".",
"hasSelection",
"(",
")",
":",
"start_position",
"=",
"cursor",
".",
"selectionEnd",
"(... | 46.297297 | 25.918919 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.