code
stringlengths 75
104k
| docstring
stringlengths 1
46.9k
|
|---|---|
def import_fasst(self, checked=False, test_fasst=None, test_annot=None):
    """Action: import from FASST .mat file.

    Asks the user for a FASST score file and a destination annotation
    file, converts the former into the latter, and loads the result
    into the notes panel.

    Parameters
    ----------
    checked : bool
        checked state passed by the Qt action signal; unused.
    test_fasst : str, optional
        path to the FASST .mat file; skips the open-file dialog
        (used by tests).
    test_annot : str, optional
        path of the annotation .xml to create; skips the save-file
        dialog (used by tests).
    """
    # Derive default file names from the currently loaded dataset, if any.
    if self.parent.info.filename is not None:
        fasst_file = splitext(self.parent.info.filename)[0] + '.mat'
        annot_file = splitext(self.parent.info.filename)[0] + '_scores.xml'
    else:
        fasst_file = annot_file = ''
    if test_fasst is None:
        fasst_file, _ = QFileDialog.getOpenFileName(self, 'Load FASST score file',
                                                    fasst_file,
                                                    'FASST score file (*.mat)')
    else:
        fasst_file = test_fasst
    # An empty string means the user cancelled the dialog.
    if fasst_file == '':
        return
    if test_annot is None:
        annot_file, _ = QFileDialog.getSaveFileName(self, 'Create annotation file',
                                                    annot_file,
                                                    'Annotation File (*.xml)')
    else:
        annot_file = test_annot
    if annot_file == '':
        return
    try:
        create_annotation(annot_file, from_fasst=fasst_file)
    # NOTE(review): BaseException is very broad; any conversion failure is
    # reported in the status bar instead of propagating.
    except BaseException as err:
        self.parent.statusBar().showMessage(str(err))
        lg.info(str(err))
        return
    try:
        self.update_notes(annot_file, False)
    except FileNotFoundError:
        msg = 'Annotation file not found'
        self.parent.statusBar().showMessage(msg)
        lg.info(msg)
|
Action: import from FASST .mat file
|
def assert_title(self, title, **kwargs):
    """Assert that the page title matches ``title``.

    Args:
        title (str | RegexObject): The string or regex that the title
            should match.
        **kwargs: Arbitrary keyword arguments for :class:`TitleQuery`.

    Returns:
        True

    Raises:
        ExpectationNotMet: If the assertion hasn't succeeded during the
            wait time.
    """
    query = TitleQuery(title, **kwargs)

    @self.synchronize(wait=query.wait)
    def check_title():
        if query.resolves_for(self):
            return True
        raise ExpectationNotMet(query.failure_message)

    return check_title()
|
Asserts that the page has the given title.
Args:
title (str | RegexObject): The string or regex that the title should match.
**kwargs: Arbitrary keyword arguments for :class:`TitleQuery`.
Returns:
True
Raises:
ExpectationNotMet: If the assertion hasn't succeeded during the wait time.
|
def create_module_page(mod, dest_path, force=False):
    """Create the documentation notebook for module `mod` in path `dest_path`.

    Builds an empty notebook, adds placeholder title/intro markdown cells,
    an import cell, markdown cells for global variable definitions, and a
    skeleton cell pair for every exported symbol, then writes and executes
    the notebook.

    Parameters:
        mod: the (imported) module to document.
        dest_path: directory in which the notebook is created.
        force: if True, overwrite an existing notebook ('w' mode);
            otherwise fail if one already exists ('x' mode).

    Returns the path of the written notebook.
    """
    nb = get_empty_notebook()
    mod_name = mod.__name__
    strip_name = strip_fastai(mod_name)
    init_cell = [get_md_cell(f'## Title for {strip_name} (use plain english, not module name!)'),
                 get_md_cell('Type an introduction of the package here.')]
    cells = [get_code_cell(f'from fastai.gen_doc.nbdoc import *\nfrom {mod_name} import * ', True)]
    gvar_map = get_global_vars(mod)
    if gvar_map: cells.append(get_md_cell('### Global Variable Definitions:'))
    for name in get_exports(mod):
        if name in gvar_map: cells.append(get_md_cell(gvar_map[name]))
    for ft_name in get_ft_names(mod, include_inner=True):
        if not hasattr(mod, ft_name):
            warnings.warn(f"Module {strip_name} doesn't have a function named {ft_name}.")
            continue
        cells += _symbol_skeleton(ft_name)
        # (removed: unused local `elt = getattr(mod, ft_name)`)
    nb['cells'] = init_cell + cells + [get_md_cell(UNDOC_HEADER)]
    doc_path = get_doc_path(mod, dest_path)
    write_nb(nb, doc_path, 'w' if force else 'x')
    execute_nb(doc_path)
    return doc_path
|
Create the documentation notebook for module `mod_name` in path `dest_path`
|
def save(self):
    """Persist this entry and return its id.

    If the entry does not have an :attr:`id`, a new id is allocated via
    ``self.objects.id(self.name)`` and stored on the entry.

    Hooks: :meth:`prepare_save` pre-processes the field dict before it
    is written; :meth:`post_save` runs after the write completes.
    """
    entry_id = self.id if self.id else self.objects.id(self.name)
    payload = self.prepare_save(dict(self))
    self.objects[entry_id] = payload
    self.id = entry_id
    self.post_save()
    return entry_id
|
Save this entry.
If the entry does not have an :attr:`id`, a new id will be assigned,
and the :attr:`id` attribute set accordingly.
Pre-save processing of the fields saved can be done by
overriding the :meth:`prepare_save` method.
Additional actions to be done after the save operation
has been completed can be added by defining the
:meth:`post_save` method.
|
def hgnc_genes(self, hgnc_symbol, build='37', search=False):
    """Fetch all hgnc genes that match a hgnc symbol.

    Checks both hgnc_symbol and aliases.

    Args:
        hgnc_symbol(str)
        build(str): The build in which to search
        search(bool): if partial searching should be used

    Returns:
        result()
    """
    LOG.debug("Fetching genes with symbol %s" % hgnc_symbol)
    if not search:
        return self.hgnc_collection.find({'build': build, 'aliases': hgnc_symbol})
    # Try an exact match first: either the alias string or, when the
    # symbol is numeric, the hgnc_id.
    numeric_id = int(hgnc_symbol) if hgnc_symbol.isdigit() else None
    exact = self.hgnc_collection.find({
        '$or': [
            {'aliases': hgnc_symbol},
            {'hgnc_id': numeric_id},
        ],
        'build': build,
    })
    if exact.count() != 0:
        return exact
    # Fall back to a case-insensitive partial match on the aliases.
    return self.hgnc_collection.find({
        'aliases': {'$regex': hgnc_symbol, '$options': 'i'},
        'build': build,
    })
|
Fetch all hgnc genes that match a hgnc symbol
Check both hgnc_symbol and aliases
Args:
hgnc_symbol(str)
build(str): The build in which to search
search(bool): if partial searching should be used
Returns:
result()
|
def stop(self, container, instances=None, map_name=None, **kwargs):
    """
    Stops instances for a container configuration.

    :param container: Container name.
    :type container: unicode | str
    :param instances: Instance names to stop. If not specified, will stop all instances as specified in the
      configuration (or just one default instance).
    :type instances: collections.Iterable[unicode | str | NoneType]
    :param map_name: Container map name. Optional - if not provided the default map is used.
    :type map_name: unicode | str
    :param raise_on_error: Errors on stop and removal may result from Docker volume problems, that do not further
      affect further actions. Such errors are always logged, but do not raise an exception unless this is set to
      ``True``. Please note that 404 errors (on non-existing containers) are always ignored on stop and removal.
    :type raise_on_error: bool
    :param kwargs: Additional kwargs. If multiple actions are resulting from this, they will only be applied to
      the main container stop.
    :return: Return values of stopped containers.
    :rtype: list[dockermap.map.runner.ActionOutput]
    """
    # Thin wrapper: instance/map resolution and error policy are all
    # handled by run_actions with the 'stop' action name.
    return self.run_actions('stop', container, instances=instances, map_name=map_name, **kwargs)
|
Stops instances for a container configuration.
:param container: Container name.
:type container: unicode | str
:param instances: Instance names to stop. If not specified, will stop all instances as specified in the
configuration (or just one default instance).
:type instances: collections.Iterable[unicode | str | NoneType]
:param map_name: Container map name. Optional - if not provided the default map is used.
:type map_name: unicode | str
:param raise_on_error: Errors on stop and removal may result from Docker volume problems, that do not further
affect further actions. Such errors are always logged, but do not raise an exception unless this is set to
``True``. Please note that 404 errors (on non-existing containers) are always ignored on stop and removal.
:type raise_on_error: bool
:param kwargs: Additional kwargs. If multiple actions are resulting from this, they will only be applied to
the main container stop.
:return: Return values of stopped containers.
:rtype: list[dockermap.map.runner.ActionOutput]
|
def standardizeMapName(mapName):
    """Pretty-fy a map name (or path) for pysc2 map lookup.

    Strips any directory components and file extension, anything after a
    '(' qualifier, a trailing 'LE'/'TE' edition tag, dashes and spaces,
    then applies a known name translation when one exists.

    mapName: the raw map name or file path.
    Returns the normalized (possibly translated) map name.
    """
    newName = os.path.basename(mapName)       # drop directory components
    newName = newName.split(".")[0]           # drop file extension
    newName = newName.split("(")[0]           # drop parenthesized qualifiers
    newName = re.sub("[LT]E+$", "", newName)  # drop 'LE'/'TE' edition suffix
    newName = re.sub("-", "", newName)
    newName = re.sub(' ', '', newName, flags=re.UNICODE)
    # Translate known foreign/alternate names to their canonical form.
    # (removed: dead commented-out debug prints and the redundant
    # `foreignName` alias of `newName`)
    if newName in c.mapNameTranslations:
        return c.mapNameTranslations[newName]
    return newName
|
pretty-fy the name for pysc2 map lookup
|
def _convert_from_thrift_binary_annotations(self, thrift_binary_annotations):
    """Convert thrift-decoded binary annotations to v1 form.

    Returns a (tags, local_endpoint, remote_endpoint) tuple: the 'sa'
    annotation becomes the remote endpoint; any other annotation's host
    becomes the local endpoint; BOOL and STRING values become tags.
    """
    tags = {}
    local_endpoint = None
    remote_endpoint = None
    for annotation in thrift_binary_annotations:
        if annotation.key == 'sa':
            remote_endpoint = self._convert_from_thrift_endpoint(
                thrift_endpoint=annotation.host,
            )
            continue
        if annotation.annotation_type == zipkin_core.AnnotationType.BOOL:
            tags[annotation.key] = "true" if annotation.value == 1 else "false"
        elif annotation.annotation_type == zipkin_core.AnnotationType.STRING:
            tags[annotation.key] = str(annotation.value)
        else:
            log.warning('Only STRING and BOOL binary annotations are '
                        'supported right now and can be properly decoded.')
        if annotation.host:
            local_endpoint = self._convert_from_thrift_endpoint(
                thrift_endpoint=annotation.host,
            )
    return tags, local_endpoint, remote_endpoint
|
Accepts a thrift decoded binary annotation and converts it
to a v1 binary annotation.
|
def network_interfaces_list_all(**kwargs):
    '''
    .. versionadded:: 2019.2.0

    List all network interfaces within a subscription.

    CLI Example:

    .. code-block:: bash

        salt-call azurearm_network.network_interfaces_list_all
    '''
    netconn = __utils__['azurearm.get_client']('network', **kwargs)
    try:
        nic_list = __utils__['azurearm.paged_object_to_list'](
            netconn.network_interfaces.list_all()
        )
        result = {nic['name']: nic for nic in nic_list}
    except CloudError as exc:
        # Log the Azure error and surface it in the returned dict.
        __utils__['azurearm.log_cloud_error']('network', str(exc), **kwargs)
        result = {'error': str(exc)}
    return result
|
.. versionadded:: 2019.2.0
List all network interfaces within a subscription.
CLI Example:
.. code-block:: bash
salt-call azurearm_network.network_interfaces_list_all
|
def dispatch_request(self, *args, **kwargs):
    """Dispatch the request.

    It's the actual ``view`` flask will use.

    POST/PUT requests go through :meth:`post` and may redirect;
    GET/HEAD requests go through :meth:`get`. The resulting context is
    rendered via :meth:`render_response`.
    """
    if request.method in ('POST', 'PUT'):
        return_url, context = self.post(*args, **kwargs)
        # A non-None return URL short-circuits rendering with a redirect.
        if return_url is not None:
            return redirect(return_url)
    elif request.method in ('GET', 'HEAD'):
        context = self.get(*args, **kwargs)
    # NOTE(review): for any other HTTP method `context` is unbound here and
    # this raises UnboundLocalError -- presumably the URL rule only allows
    # the four methods above; confirm against the route registration.
    return self.render_response(self.context(context))
|
Dispatch the request.
It's the actual ``view`` Flask will use.
|
def write_into(self, block, level=0):
    """Append this block's lines to `block`, carrying dependencies along.

    Each stored line keeps its relative indent, offset by `level`, and
    every recorded dependency is re-registered on the target block.
    """
    for text, indent in self._lines:
        block.write_line(text, level + indent)
    for dep_name, dep_obj in _compat.iteritems(self._deps):
        block.add_dependency(dep_name, dep_obj)
|
Append this block to another one, passing all dependencies
|
def create(self, template=None, flags=0, args=()):
    """
    Create a new rootfs for the container.

    "template" if passed must be a valid template name.
    "flags" (optional) is an integer representing the optional
    create flags to be passed.
    "args" (optional) is a tuple of arguments to pass to the
    template. It can also be provided as a dict.
    """
    # Normalize a dict of template arguments into the
    # ["--key", "value", ...] flag list the template expects.
    if isinstance(args, dict):
        template_args = []
        for key, value in args.items():
            template_args.append("--%s" % key)
            template_args.append("%s" % value)
    else:
        template_args = args
    if template:
        return _lxc.Container.create(self, template=template,
                                     flags=flags,
                                     args=tuple(template_args))
    return _lxc.Container.create(self, flags=flags,
                                 args=tuple(template_args))
|
Create a new rootfs for the container.
"template" if passed must be a valid template name.
"flags" (optional) is an integer representing the optional
create flags to be passed.
"args" (optional) is a tuple of arguments to pass to the
template. It can also be provided as a dict.
|
def get_hla_truthset(data):
    """Retrieve expected truth calls for annotating HLA called output.

    Reads the CSV configured at config/algorithm/hlavalidate (if any)
    into a nested {sample: {locus: [alleles]}} dictionary.
    """
    truth_csv = tz.get_in(["config", "algorithm", "hlavalidate"], data)
    truthset = {}
    if not (truth_csv and utils.file_exists(truth_csv)):
        return truthset
    with open(truth_csv) as in_handle:
        reader = csv.reader(in_handle)
        next(reader)  # skip the header row
        for row in reader:
            if not row:
                continue
            sample, locus, alleles = row
            allele_list = [a.strip() for a in alleles.split(";")]
            truthset = tz.update_in(truthset, [sample, locus], lambda _: allele_list)
    return truthset
|
Retrieve expected truth calls for annotating HLA called output.
|
def lookup_tf(self, h):
    '''Get stream IDs and term frequencies for a single hash.

    This yields pairs of strings that can be retrieved using
    :func:`streamcorpus_pipeline._kvlayer.get_kvlayer_stream_item`
    and the corresponding term frequency.

    ..see:: :meth:`lookup`
    '''
    # Scan only keys whose first component equals the hash.
    key_range = ((h,), (h,))
    for (_, part1, part2), tf in self.client.scan(HASH_TF_INDEX_TABLE, key_range):
        yield kvlayer_key_to_stream_id((part1, part2)), tf
|
Get stream IDs and term frequencies for a single hash.
This yields pairs of strings that can be retrieved using
:func:`streamcorpus_pipeline._kvlayer.get_kvlayer_stream_item`
and the corresponding term frequency.
..see:: :meth:`lookup`
|
def find_profile_dir_by_name(cls, ipython_dir, name=u'default', config=None):
    """Find an existing profile dir by profile name, return its ProfileDir.

    This searches through a sequence of paths for a profile dir. If it
    is not found, a :class:`ProfileDirError` exception will be raised.

    The search path algorithm is:
    1. ``os.getcwdu()``
    2. ``ipython_dir``

    Parameters
    ----------
    ipython_dir : unicode or str
        The IPython directory to use.
    name : unicode or str
        The name of the profile. The name of the profile directory
        will be "profile_<profile>".
    config : Config, optional
        Configuration passed through to the ProfileDir constructor.
    """
    # NOTE(review): os.getcwdu() exists only on Python 2; on Python 3 this
    # raises AttributeError -- confirm this module targets Python 2 only.
    dirname = u'profile_' + name
    paths = [os.getcwdu(), ipython_dir]
    for p in paths:
        profile_dir = os.path.join(p, dirname)
        if os.path.isdir(profile_dir):
            return cls(location=profile_dir, config=config)
    else:
        # for/else (no break in the loop): runs when no path matched.
        raise ProfileDirError('Profile directory not found in paths: %s' % dirname)
|
Find an existing profile dir by profile name, return its ProfileDir.
This searches through a sequence of paths for a profile dir. If it
is not found, a :class:`ProfileDirError` exception will be raised.
The search path algorithm is:
1. ``os.getcwdu()``
2. ``ipython_dir``
Parameters
----------
ipython_dir : unicode or str
The IPython directory to use.
name : unicode or str
The name of the profile. The name of the profile directory
will be "profile_<profile>".
|
def is_same_address(addr1, addr2):
    """
    Compare two addresses given in "host:port" form.

    Returns True if the hosts are the same according to is_same_host
    and the ports are equal.
    """
    parts1 = addr1.split(":")
    parts2 = addr2.split(":")
    if not is_same_host(parts1[0], parts2[0]):
        return False
    return parts1[1] == parts2[1]
|
Where the two addresses are in the host:port
Returns true if ports are equals and hosts are the same using is_same_host
|
def search_regexp(self):
    """
    Build the (lowercase) regexp used for the search.

    Three shapes depending on what is known:
    - no season/episode: match on the title prefix only;
    - season only: also require 's01'/'season 1'-style markers;
    - season and episode: require 's01...e02' or '1x02'-style markers.

    Returns:
        str: the regular expression pattern.
    """
    # Raw strings throughout: the original non-raw literals relied on
    # invalid escape sequences (\s, \_, ...) which emit warnings on
    # modern Python; the produced patterns are unchanged.
    if self.season == "" and self.episode == "":
        regexp = r'^%s.*' % self.title.lower()
    elif self.episode == "":
        regexp = r'^%s.*(s[0]*%s|season[\s\_\-\.]*%s).*' % (
            self.title.lower(), self.season, self.season)
    else:
        regexp = r'^%s.*((s[0]*%s.*e[0]*%s)|[0]*%sx[0]*%s).*' % (
            self.title.lower(), self.season, self.episode,
            self.season, self.episode)
    return regexp
|
Define the regexp used for the search
|
def clear_attributes(self):
    """
    Remove the record_dict attribute from the object, as SeqRecords are not
    JSON-serializable. Also remove the contig_lengths and longest_contig
    attributes, as they are large lists that make the .json file ugly.
    """
    for sample in self.metadata:
        # Delete each attribute independently: the original wrapped all
        # three delattr calls in one try block, so a missing first
        # attribute left the remaining (large) ones in place.
        for attribute in ('record_dict', 'contig_lengths', 'longest_contig'):
            try:
                delattr(sample[self.analysistype], attribute)
            except AttributeError:
                pass
|
Remove the record_dict attribute from the object, as SeqRecords are not JSON-serializable. Also remove
the contig_lengths and longest_contig attributes, as they are large lists that make the .json file ugly
|
def setup_sensors(self):
    """Setup some server sensors.

    Registers four example sensors, one per request the server answers
    (?add, ?time, ?eval, ?pick-fruit), each initialised to a placeholder
    value with a distinct status.
    """
    # Result of the last ?add request (bounded float).
    self._add_result = Sensor.float("add.result",
        "Last ?add result.", "", [-10000, 10000])
    self._add_result.set_value(0, Sensor.UNREACHABLE)
    # Result of the last ?time request.
    self._time_result = Sensor.timestamp("time.result",
        "Last ?time result.", "")
    self._time_result.set_value(0, Sensor.INACTIVE)
    # Result of the last ?eval request.
    self._eval_result = Sensor.string("eval.result",
        "Last ?eval result.", "")
    self._eval_result.set_value('', Sensor.UNKNOWN)
    # Result of the last ?pick-fruit request (one of self.FRUIT).
    self._fruit_result = Sensor.discrete("fruit.result",
        "Last ?pick-fruit result.", "", self.FRUIT)
    self._fruit_result.set_value('apple', Sensor.ERROR)
    self.add_sensor(self._add_result)
    self.add_sensor(self._time_result)
    self.add_sensor(self._eval_result)
    self.add_sensor(self._fruit_result)
|
Setup some server sensors.
|
def filter_entries(self, request_type=None, content_type=None,
                   status_code=None, http_version=None, regex=True):
    """
    Returns a ``list`` of entry objects based on the filter criteria.

    Every criterion that is not ``None`` must match for an entry to be
    included; ``None`` criteria are ignored.

    :param request_type: ``str`` of request type (i.e. - GET or POST)
    :param content_type: ``str`` of regex to use for finding content type
    :param status_code: ``int`` of the desired status code
    :param http_version: ``str`` of HTTP version of request
    :param regex: ``bool`` indicating whether to use regex or exact match.
    """
    # Pair each criterion with its matcher once, instead of the original
    # flag-juggling block (which also inconsistently mixed `p` and
    # `self.parser`).
    p = self.parser
    checks = [
        (request_type, p.match_request_type),
        (content_type, p.match_content_type),
        (status_code, p.match_status_code),
        (http_version, p.match_http_version),
    ]
    results = []
    for entry in self.entries:
        if all(criterion is None or matcher(entry, criterion, regex=regex)
               for criterion, matcher in checks):
            results.append(entry)
    return results
|
Returns a ``list`` of entry objects based on the filter criteria.
:param request_type: ``str`` of request type (i.e. - GET or POST)
:param content_type: ``str`` of regex to use for finding content type
:param status_code: ``int`` of the desired status code
:param http_version: ``str`` of HTTP version of request
:param regex: ``bool`` indicating whether to use regex or exact match.
|
def do(self, changes, task_handle=taskhandle.NullTaskHandle()):
    """Apply the changes in a `ChangeSet`

    Most of the time you call this function for committing the
    changes for a refactoring.

    :param changes: the `ChangeSet` to apply.
    :param task_handle: progress/cancellation handle; defaults to a
        no-op handle.
    """
    # NOTE(review): the default NullTaskHandle() is created once at
    # definition time and shared across calls -- presumably stateless;
    # confirm before relying on per-call state.
    self.history.do(changes, task_handle=task_handle)
|
Apply the changes in a `ChangeSet`
Most of the time you call this function for committing the
changes for a refactoring.
|
def spher2cart(rho, theta, phi):
    """Convert spherical coordinates to Cartesian.

    theta is the polar (inclination) angle and phi the azimuthal angle,
    both in radians.  Returns np.array([x, y, z]).
    """
    sin_theta, cos_theta = np.sin(theta), np.cos(theta)
    sin_phi, cos_phi = np.sin(phi), np.cos(phi)
    return np.array([
        rho * sin_theta * cos_phi,
        rho * sin_theta * sin_phi,
        rho * cos_theta,
    ])
|
Spherical to Cartesian coordinate conversion.
|
def _set_intf_type(self, v, load=False):
    """
    Setter method for intf_type, mapped from YANG variable /logical_interface_state/main_interface_physical/intf_type (intf-type)

    If this variable is read-only (config: false) in the
    source YANG file, then _set_intf_type is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_intf_type() directly.

    YANG Description: interface type
    """
    # Unwrap values that carry their own union-type constructor.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Validate/coerce the value against the generated YANG type.
        t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'ethernet': {'value': 1}, u'port-channel': {'value': 2}, u'ccep': {'value': 3}},), is_leaf=True, yang_name="intf-type", rest_name="intf-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='intf-type', is_config=False)
    except (TypeError, ValueError):
        # Re-raise with a structured error payload describing the
        # expected generated type.
        raise ValueError({
            'error-string': """intf_type must be of a type compatible with intf-type""",
            'defined-type': "brocade-nsm-operational:intf-type",
            'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'ethernet': {'value': 1}, u'port-channel': {'value': 2}, u'ccep': {'value': 3}},), is_leaf=True, yang_name="intf-type", rest_name="intf-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='intf-type', is_config=False)""",
        })
    self.__intf_type = t
    # Notify the parent container that a child value changed, if supported.
    if hasattr(self, '_set'):
        self._set()
|
Setter method for intf_type, mapped from YANG variable /logical_interface_state/main_interface_physical/intf_type (intf-type)
If this variable is read-only (config: false) in the
source YANG file, then _set_intf_type is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_intf_type() directly.
YANG Description: interface type
|
def collection(name=None):
    """Render the collection page.

    It renders it either with a collection specific template (aka
    collection_{collection_name}.html) or with the default collection
    template (collection.html).

    :param name: collection name; when None the root collection (id 1)
        is rendered.
    """
    if name is None:
        collection = Collection.query.get_or_404(1)
    else:
        collection = Collection.query.filter(
            Collection.name == name).first_or_404()
    # TODO add breadcrumbs
    # breadcrumbs = current_breadcrumbs + collection.breadcrumbs(ln=g.ln)[1:]
    # Bug fix: slugify the resolved collection's name rather than the raw
    # `name` argument, which is None for the root collection and would
    # make slugify() fail.
    return render_template([
        'invenio_collections/collection_{0}.html'.format(collection.id),
        'invenio_collections/collection_{0}.html'.format(
            slugify(collection.name, '_')),
        current_app.config['COLLECTIONS_DEFAULT_TEMPLATE']
    ], collection=collection)
|
Render the collection page.
It renders it either with a collection specific template (aka
collection_{collection_name}.html) or with the default collection
template (collection.html).
|
def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'name') and self.name is not None:
_dict['name'] = self.name
if hasattr(self, 'role') and self.role is not None:
_dict['role'] = self.role
return _dict
|
Return a json dictionary representing this model.
|
def selfSignCert(self, cert, pkey):
    '''
    Self-sign a certificate.

    Args:
        cert (OpenSSL.crypto.X509): The certificate to sign.
        pkey (OpenSSL.crypto.PKey): The PKey with which to sign the certificate.

    Examples:
        Sign a given certificate with a given private key:

            cdir.selfSignCert(mycert, myotherprivatekey)

    Returns:
        None
    '''
    # A self-signed certificate is its own issuer.
    cert.set_issuer(cert.get_subject())
    # Sign in place using the configured digest algorithm.
    cert.sign(pkey, self.signing_digest)
|
Self-sign a certificate.
Args:
cert (OpenSSL.crypto.X509): The certificate to sign.
pkey (OpenSSL.crypto.PKey): The PKey with which to sign the certificate.
Examples:
Sign a given certificate with a given private key:
cdir.selfSignCert(mycert, myotherprivatekey)
Returns:
None
|
def get_repository(self, repository_id=None):
    """Gets the ``Repository`` specified by its ``Id``.

    In plenary mode, the exact ``Id`` is found or a ``NotFound``
    results. Otherwise, the returned ``Repository`` may have a
    different ``Id`` than requested, such as the case where a
    duplicate ``Id`` was assigned to a ``Repository`` and retained
    for compatibility.

    arg: repository_id (osid.id.Id): ``Id`` of the ``Repository``
    return: (osid.repository.Repository) - the repository
    raise: NotFound - ``repository_id`` not found
    raise: NullArgument - ``repository_id`` is ``null``
    raise: OperationFailed - unable to complete request
    raise: PermissionDenied - authorization failure
    *compliance: mandatory -- This method is must be implemented.*
    """
    # Implemented from awsosid template for -
    # osid.resource.BinLookupSession.get_bin_template
    if not self._can('lookup'):
        raise PermissionDenied()
    return self._provider_session.get_repository(repository_id)
|
Gets the ``Repository`` specified by its ``Id``.
In plenary mode, the exact ``Id`` is found or a ``NotFound``
results. Otherwise, the returned ``Repository`` may have a
different ``Id`` than requested, such as the case where a
duplicate ``Id`` was assigned to a ``Repository`` and retained
for compatibility.
arg: repository_id (osid.id.Id): ``Id`` of the ``Repository``
return: (osid.repository.Repository) - the repository
raise: NotFound - ``repository_id`` not found
raise: NullArgument - ``repository_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method is must be implemented.*
|
def to_special_value(self, value):
    """
    Return proper spdx term or Literal
    """
    # Map the SPDX sentinel objects to their namespace terms; anything
    # else becomes a plain RDF Literal.
    if isinstance(value, utils.NoAssert):
        return self.spdx_namespace.noassertion
    if isinstance(value, utils.SPDXNone):
        return self.spdx_namespace.none
    return Literal(value)
|
Return proper spdx term or Literal
|
def save_project(self, project, filename=''):
    r"""
    Saves given Project to a 'pnm' file

    This will include all of associated objects, including algorithms.

    Parameters
    ----------
    project : OpenPNM Project
        The project to save.
    filename : string, optional
        If no filename is given, the given project name is used. See Notes
        for more information.

    See Also
    --------
    save_workspace

    Notes
    -----
    The filename can be a string such as 'saved_file.pnm'. The string can
    include absolute path such as 'C:\networks\saved_file.pnm', or can
    be a relative path such as '..\..\saved_file.pnm', which will look
    2 directories above the current working directory. Can also be a
    path object object such as that produced by ``pathlib`` or
    ``os.path`` in the Python standard library.
    """
    target = filename if filename != '' else project.name
    target = self._parse_filename(filename=target, ext='pnm')
    # Pickle a {name: project} dict so the project name survives the trip.
    with open(target, 'wb') as f:
        pickle.dump({project.name: project}, f)
|
r"""
Saves given Project to a 'pnm' file
This will include all of associated objects, including algorithms.
Parameters
----------
project : OpenPNM Project
The project to save.
filename : string, optional
If no filename is given, the given project name is used. See Notes
for more information.
See Also
--------
save_workspace
Notes
-----
The filename can be a string such as 'saved_file.pnm'. The string can
include absolute path such as 'C:\networks\saved_file.pnm', or can
be a relative path such as '..\..\saved_file.pnm', which will look
2 directories above the current working directory. Can also be a
path object object such as that produced by ``pathlib`` or
``os.path`` in the Python standard library.
|
def calculate_limits(array_dict, method='global', percentiles=None, limit=()):
    """
    Calculate limits for a group of arrays in a flexible manner.

    Returns a dictionary of calculated (vmin, vmax), with the same keys as
    `array_dict`.

    Useful for plotting heatmaps of multiple datasets, and the vmin/vmax
    values of the colormaps need to be matched across all (or a subset)
    of heatmaps.

    Parameters
    ----------
    array_dict : dict of np.arrays

    method : {'global', 'independent', callable}
        If method="global", then use the global min/max values across all
        arrays in array_dict. If method="independent", then each array
        will have its own min/max calculated. If a callable, then it will
        be used to group the keys of `array_dict`, and each group will
        have its own group-wise min/max calculated.

    percentiles : None or list
        If not None, a list of (lower, upper) percentiles in the range
        [0, 100].

    limit : unused
        Kept for backwards compatibility with callers passing it.
    """
    if percentiles is not None:
        for percentile in percentiles:
            if not 0 <= percentile <= 100:
                # Bug fix: the original raised with an unformatted "%s"
                # placeholder, never showing the offending value.
                raise ValueError(
                    "percentile (%s) not between [0, 100]" % percentile)

    def _bounds(arrays):
        """(vmin, vmax) of the given 1-D data, honoring `percentiles`."""
        if percentiles:
            return (mlab.prctile(arrays, percentiles[0]),
                    mlab.prctile(arrays, percentiles[1]))
        return arrays.min(), arrays.max()

    if method == 'global':
        # .values()/.items() instead of Python-2-only itervalues()/
        # iteritems() so the function also runs on Python 3.
        all_arrays = np.concatenate(
            [i.ravel() for i in array_dict.values()]
        )
        vmin, vmax = _bounds(all_arrays)
        d = dict([(i, (vmin, vmax)) for i in array_dict.keys()])
    elif method == 'independent':
        d = {}
        for k, v in array_dict.items():
            d[k] = (v.min(), v.max())
    elif hasattr(method, '__call__'):
        # Group keys via the callable; each group shares one (vmin, vmax).
        d = {}
        sorted_keys = sorted(array_dict.keys(), key=method)
        for group, keys in itertools.groupby(sorted_keys, method):
            keys = list(keys)
            all_arrays = np.concatenate([array_dict[i] for i in keys])
            vmin, vmax = _bounds(all_arrays)
            for key in keys:
                d[key] = (vmin, vmax)
    return d
|
Calculate limits for a group of arrays in a flexible manner.
Returns a dictionary of calculated (vmin, vmax), with the same keys as
`array_dict`.
Useful for plotting heatmaps of multiple datasets, and the vmin/vmax values
of the colormaps need to be matched across all (or a subset) of heatmaps.
Parameters
----------
array_dict : dict of np.arrays
method : {'global', 'independent', callable}
If method="global", then use the global min/max values across all
arrays in array_dict. If method="independent", then each array will
have its own min/max calculated. If a callable, then it will be used to
group the keys of `array_dict`, and each group will have its own
group-wise min/max calculated.
percentiles : None or list
If not None, a list of (lower, upper) percentiles in the range [0,100].
|
def create(cls, tx_signers, recipients, metadata=None, asset=None):
    """A simple way to generate a `CREATE` transaction.

    Note:
        This method currently supports the following Cryptoconditions
        use cases:
            - Ed25519
            - ThresholdSha256

        Additionally, it provides support for the following BigchainDB
        use cases:
            - Multiple inputs and outputs.

    Args:
        tx_signers (:obj:`list` of :obj:`str`): A list of keys that
            represent the signers of the CREATE Transaction.
        recipients (:obj:`list` of :obj:`tuple`): A list of
            ([keys],amount) that represent the recipients of this
            Transaction.
        metadata (dict): The metadata to be stored along with the
            Transaction.
        asset (dict): The metadata associated with the asset that will
            be created in this Transaction.

    Returns:
        :class:`~bigchaindb.common.transaction.Transaction`
    """
    # Validation also derives the transaction's inputs and outputs from
    # the signers and recipients.
    (inputs, outputs) = cls.validate_create(tx_signers, recipients, asset, metadata)
    return cls(cls.CREATE, {'data': asset}, inputs, outputs, metadata)
|
A simple way to generate a `CREATE` transaction.
Note:
This method currently supports the following Cryptoconditions
use cases:
- Ed25519
- ThresholdSha256
Additionally, it provides support for the following BigchainDB
use cases:
- Multiple inputs and outputs.
Args:
tx_signers (:obj:`list` of :obj:`str`): A list of keys that
represent the signers of the CREATE Transaction.
recipients (:obj:`list` of :obj:`tuple`): A list of
([keys],amount) that represent the recipients of this
Transaction.
metadata (dict): The metadata to be stored along with the
Transaction.
asset (dict): The metadata associated with the asset that will
be created in this Transaction.
Returns:
:class:`~bigchaindb.common.transaction.Transaction`
|
def read_igpar(self):
    """
    Parse IGPAR (Berry phase / LBERRY) data from the OUTCAR.

    Renders accessible:
        er_ev = e<r>_ev (dictionary with Spin.up/Spin.down as keys)
        er_bp = e<r>_bp (dictionary with Spin.up/Spin.down as keys)
        er_ev_tot = spin up + spin down summed
        er_bp_tot = spin up + spin down summed
        p_elc = spin up + spin down summed
        p_ion = spin up + spin down summed
    (See VASP section "LBERRY, IGPAR, NPPSTR, DIPOL tags" for info on
    what these are).

    Raises:
        Exception: if the OUTCAR cannot be parsed for IGPAR data.
    """
    # variables to be filled
    self.er_ev = {}  # will be dict (Spin.up/down) of array(3*float)
    self.er_bp = {}  # will be dict (Spin.up/down) of array(3*float)
    self.er_ev_tot = None  # will be array(3*float)
    self.er_bp_tot = None  # will be array(3*float)
    self.p_elec = None
    self.p_ion = None
    try:
        search = []

        # Nonspin cases
        def er_ev(results, match):
            # BUGFIX: the old code used np.array(map(float, ...)), which
            # wraps the map object itself on Python 3, and sliced
            # match.groups()[1:4], which dropped the first of the three
            # captured components.  Use all three groups explicitly.
            results.er_ev[Spin.up] = np.array(
                [float(match.group(i)) for i in range(1, 4)]) / 2
            results.er_ev[Spin.down] = results.er_ev[Spin.up]
            results.context = 2

        search.append([r"^ *e<r>_ev=\( *([-0-9.Ee+]*) *([-0-9.Ee+]*) "
                       r"*([-0-9.Ee+]*) *\)",
                       None, er_ev])

        def er_bp(results, match):
            results.er_bp[Spin.up] = np.array([float(match.group(i))
                                               for i in range(1, 4)]) / 2
            results.er_bp[Spin.down] = results.er_bp[Spin.up]

        search.append([r"^ *e<r>_bp=\( *([-0-9.Ee+]*) *([-0-9.Ee+]*) "
                       r"*([-0-9.Ee+]*) *\)",
                       lambda results, line: results.context == 2, er_bp])

        # Spin cases
        def er_ev_up(results, match):
            results.er_ev[Spin.up] = np.array([float(match.group(i))
                                               for i in range(1, 4)])
            results.context = Spin.up

        search.append([r"^.*Spin component 1 *e<r>_ev=\( *([-0-9.Ee+]*) "
                       r"*([-0-9.Ee+]*) *([-0-9.Ee+]*) *\)",
                       None, er_ev_up])

        def er_bp_up(results, match):
            results.er_bp[Spin.up] = np.array([float(match.group(1)),
                                               float(match.group(2)),
                                               float(match.group(3))])

        search.append([r"^ *e<r>_bp=\( *([-0-9.Ee+]*) *([-0-9.Ee+]*) "
                       r"*([-0-9.Ee+]*) *\)",
                       lambda results,
                       line: results.context == Spin.up, er_bp_up])

        def er_ev_dn(results, match):
            results.er_ev[Spin.down] = np.array([float(match.group(1)),
                                                 float(match.group(2)),
                                                 float(match.group(3))])
            results.context = Spin.down

        search.append([r"^.*Spin component 2 *e<r>_ev=\( *([-0-9.Ee+]*) "
                       r"*([-0-9.Ee+]*) *([-0-9.Ee+]*) *\)",
                       None, er_ev_dn])

        def er_bp_dn(results, match):
            results.er_bp[Spin.down] = np.array([float(match.group(i))
                                                 for i in range(1, 4)])

        search.append([r"^ *e<r>_bp=\( *([-0-9.Ee+]*) *([-0-9.Ee+]*) "
                       r"*([-0-9.Ee+]*) *\)",
                       lambda results,
                       line: results.context == Spin.down, er_bp_dn])

        # Always present spin/non-spin
        def p_elc(results, match):
            results.p_elc = np.array([float(match.group(i))
                                      for i in range(1, 4)])

        search.append([r"^.*Total electronic dipole moment: "
                       r"*p\[elc\]=\( *([-0-9.Ee+]*) *([-0-9.Ee+]*) "
                       r"*([-0-9.Ee+]*) *\)", None, p_elc])

        def p_ion(results, match):
            results.p_ion = np.array([float(match.group(i))
                                      for i in range(1, 4)])

        search.append([r"^.*ionic dipole moment: "
                       r"*p\[ion\]=\( *([-0-9.Ee+]*) *([-0-9.Ee+]*) "
                       r"*([-0-9.Ee+]*) *\)", None, p_ion])

        self.context = None
        self.er_ev = {Spin.up: None, Spin.down: None}
        self.er_bp = {Spin.up: None, Spin.down: None}
        micro_pyawk(self.filename, search, self)

        if self.er_ev[Spin.up] is not None and \
                self.er_ev[Spin.down] is not None:
            self.er_ev_tot = self.er_ev[Spin.up] + self.er_ev[Spin.down]

        if self.er_bp[Spin.up] is not None and \
                self.er_bp[Spin.down] is not None:
            self.er_bp_tot = self.er_bp[Spin.up] + self.er_bp[Spin.down]
    # Narrowed from a bare except so KeyboardInterrupt/SystemExit are
    # not swallowed; totals are reset so no stale values leak out.
    except Exception:
        self.er_ev_tot = None
        self.er_bp_tot = None
        raise Exception("IGPAR OUTCAR could not be parsed.")
|
Renders accessible:
er_ev = e<r>_ev (dictionary with Spin.up/Spin.down as keys)
er_bp = e<r>_bp (dictionary with Spin.up/Spin.down as keys)
er_ev_tot = spin up + spin down summed
er_bp_tot = spin up + spin down summed
p_elc = spin up + spin down summed
p_ion = spin up + spin down summed
(See VASP section "LBERRY, IGPAR, NPPSTR, DIPOL tags" for info on
what these are).
|
def num_pending(self, work_spec_name):
    '''Return the number of pending work units for *work_spec_name*.

    Pending work units are those some worker is currently working on
    (hopefully; the count may also include units assigned to workers
    that died and whose leases have not yet expired).
    '''
    pending_key = WORK_UNITS_ + work_spec_name
    return self.registry.len(pending_key, priority_min=time.time())
|
Get the number of pending work units for some work spec.
These are work units that some worker is currently working on
(hopefully; it could include work units assigned to workers that
died and that have not yet expired).
|
def _set_bfd_session_setup_delay(self, v, load=False):
  """
  Setter method for bfd_session_setup_delay, mapped from YANG variable /rbridge_id/bfd_session_setup_delay (container)
  If this variable is read-only (config: false) in the
  source YANG file, then _set_bfd_session_setup_delay is considered as a private
  method. Backends looking to populate this variable should
  do so via calling thisObj._set_bfd_session_setup_delay() directly.

  YANG Description: Configure BFD desired session setup delay in seconds.
  """
  # Values that carry their own YANG type converter are unwrapped first.
  if hasattr(v, "_utype"):
    v = v._utype(v)
  try:
    # YANGDynClass validates the value against the generated container
    # class and attaches the YANG/REST metadata for this node.
    t = YANGDynClass(v,base=bfd_session_setup_delay.bfd_session_setup_delay, is_container='container', presence=False, yang_name="bfd-session-setup-delay", rest_name="bfd-session-setup-delay", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure BFD desired session setup delay in seconds.', u'callpoint': u'bfd-session-delay-cpworker', u'cli-incomplete-command': None, u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-bfd', defining_module='brocade-bfd', yang_type='container', is_config=True)
  except (TypeError, ValueError):
    # Re-raise as a structured error describing the expected YANG type.
    raise ValueError({
      'error-string': """bfd_session_setup_delay must be of a type compatible with container""",
      'defined-type': "container",
      'generated-type': """YANGDynClass(base=bfd_session_setup_delay.bfd_session_setup_delay, is_container='container', presence=False, yang_name="bfd-session-setup-delay", rest_name="bfd-session-setup-delay", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure BFD desired session setup delay in seconds.', u'callpoint': u'bfd-session-delay-cpworker', u'cli-incomplete-command': None, u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-bfd', defining_module='brocade-bfd', yang_type='container', is_config=True)""",
    })

  self.__bfd_session_setup_delay = t
  # Notify the parent object (when present) that configuration changed.
  if hasattr(self, '_set'):
    self._set()
|
Setter method for bfd_session_setup_delay, mapped from YANG variable /rbridge_id/bfd_session_setup_delay (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_bfd_session_setup_delay is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_bfd_session_setup_delay() directly.
YANG Description: Configure BFD desired session setup delay in seconds.
|
def create(self):
    """
    Build the full project: virtualenv, project skeleton, uwsgi
    script, nginx configuration, and management scripts, in order.
    """
    build_steps = (
        self.create_virtualenv,
        self.create_project,
        self.create_uwsgi_script,
        self.create_nginx_config,
        self.create_manage_scripts,
    )
    for step in build_steps:
        step()
    logging.info('** Make sure to set proper permissions for the webserver user account on the var and log directories in the project root')
|
Creates the full project
|
def _set_isis_state(self, v, load=False):
  """
  Setter method for isis_state, mapped from YANG variable /isis_state (container)
  If this variable is read-only (config: false) in the
  source YANG file, then _set_isis_state is considered as a private
  method. Backends looking to populate this variable should
  do so via calling thisObj._set_isis_state() directly.

  YANG Description: ISIS Operational Information
  """
  # Values that carry their own YANG type converter are unwrapped first.
  if hasattr(v, "_utype"):
    v = v._utype(v)
  try:
    # YANGDynClass validates the value against the generated container
    # class and attaches the YANG/REST metadata for this node.
    t = YANGDynClass(v,base=isis_state.isis_state, is_container='container', presence=False, yang_name="isis-state", rest_name="isis-state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'isis-isis', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='container', is_config=True)
  except (TypeError, ValueError):
    # Re-raise as a structured error describing the expected YANG type.
    raise ValueError({
      'error-string': """isis_state must be of a type compatible with container""",
      'defined-type': "container",
      'generated-type': """YANGDynClass(base=isis_state.isis_state, is_container='container', presence=False, yang_name="isis-state", rest_name="isis-state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'isis-isis', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='container', is_config=True)""",
    })

  self.__isis_state = t
  # Notify the parent object (when present) that configuration changed.
  if hasattr(self, '_set'):
    self._set()
|
Setter method for isis_state, mapped from YANG variable /isis_state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_isis_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_isis_state() directly.
YANG Description: ISIS Operational Information
|
def _GetStat(self):
    """Retrieves information about the file entry.

    Returns:
      VFSStat: a stat object whose size is taken from the underlying
          LVM logical volume when one is available.
    """
    stat_object = super(LVMFileEntry, self)._GetStat()
    logical_volume = self._vslvm_logical_volume
    if logical_volume is not None:
        stat_object.size = logical_volume.size
    return stat_object
|
Retrieves information about the file entry.
Returns:
VFSStat: a stat object.
|
def _increment(self, what, host):
''' helper function to bump a statistic '''
self.processed[host] = 1
prev = (getattr(self, what)).get(host, 0)
getattr(self, what)[host] = prev+1
|
helper function to bump a statistic
|
def Boolean():
    """
    Create a validator that converts the given value to a boolean
    or raises an error. The following rules are used:
    ``None`` is converted to ``False``.
    ``int`` values are ``True`` except for ``0``.
    ``str`` values are matched case-insensitively against:
    * ``y, yes, t, true``
    * ``n, no, f, false``

    Any other value raises ``Error``.
    """
    @wraps(Boolean)
    def built(value):
        # Already a boolean?  (Checked before int: bool is an int subclass.)
        if isinstance(value, bool):
            return value
        # None -- identity check avoids surprises from custom __eq__.
        if value is None:
            return False
        # Integers: zero is False, everything else True
        if isinstance(value, int):
            return value != 0
        # Strings: case-insensitive keyword match (lowercase computed once)
        if isinstance(value, str):
            lowered = value.lower()
            if lowered in {'y', 'yes', 't', 'true'}:
                return True
            if lowered in {'n', 'no', 'f', 'false'}:
                return False
        # Nope
        raise Error("Not a boolean value.")
    return built
|
Creates a validator that attempts to convert the given value to a boolean
or raises an error. The following rules are used:
``None`` is converted to ``False``.
``int`` values are ``True`` except for ``0``.
``str`` values converted in lower- and uppercase:
* ``y, yes, t, true``
* ``n, no, f, false``
|
def asyncPipeFetch(context=None, _INPUT=None, conf=None, **kwargs):
    """A source that asynchronously fetches and parses one or more feeds to
    return the feed entries. Loopable.

    Parameters
    ----------
    context : pipe2py.Context object
    _INPUT : asyncPipe like object (twisted Deferred iterable of items)
    conf : {
        'URL': [
            {'type': 'url', 'value': <url1>},
            {'type': 'url', 'value': <url2>},
            {'type': 'url', 'value': <url3>},
        ]
    }

    Returns
    -------
    _OUTPUT : twisted.internet.defer.Deferred generator of items

    Note: written in Twisted inlineCallbacks style -- each ``yield``
    waits on a Deferred and ``returnValue`` delivers the result
    (presumably decorated upstream; confirm in the full module).
    """
    # Expand the URL sub-configuration for every incoming item.
    splits = yield asyncGetSplits(_INPUT, conf['URL'], **cdicts(opts, kwargs))
    # Fetch and parse each (item, url) split.
    items = yield asyncStarMap(asyncParseResult, splits)
    # Flatten the per-feed iterables into a single stream of entries.
    _OUTPUT = utils.multiplex(items)
    returnValue(_OUTPUT)
|
A source that asynchronously fetches and parses one or more feeds to
return the feed entries. Loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : asyncPipe like object (twisted Deferred iterable of items)
conf : {
'URL': [
{'type': 'url', 'value': <url1>},
{'type': 'url', 'value': <url2>},
{'type': 'url', 'value': <url3>},
]
}
Returns
-------
_OUTPUT : twisted.internet.defer.Deferred generator of items
|
def sample_from_proposal(self, A: pd.DataFrame) -> None:
    """ Sample a new transition matrix from the proposal distribution,
    given a current candidate transition matrix. In practice, this amounts
    to the in-place perturbation of an element of the transition matrix
    currently being used by the sampler.

    Side effects:
        Stores the chosen edge in ``self.source``, ``self.target`` and
        ``self.edge_dict``, and the pre-perturbation value in
        ``self.original_value`` so the move can be undone later.
    """
    # Choose the element of A to perturb (a random edge of the graph)
    self.source, self.target, self.edge_dict = random.choice(
        list(self.edges(data=True))
    )
    column = f"∂({self.source})/∂t"
    # Use .loc for both the read and the in-place update: chained
    # indexing (A[col][row] += ...) can silently operate on a copy.
    self.original_value = A.loc[self.target, column]
    A.loc[self.target, column] += np.random.normal(scale=0.001)
|
Sample a new transition matrix from the proposal distribution,
given a current candidate transition matrix. In practice, this amounts
to the in-place perturbation of an element of the transition matrix
currently being used by the sampler.
Args
|
def _apply_correction_on_genes(genes,
                               pathway_column_names,
                               pathway_definitions):
    """Build the gene-to-pathway membership matrix for `genes`, run
    crosstalk correction on it, and return the crosstalk-corrected
    pathway definitions.
    """
    gene_row_names = index_element_map(genes)
    membership = initialize_membership_matrix(genes, pathway_definitions)
    corrected_index_map = maximum_impact_estimation(membership)
    return _update_pathway_definitions(
        corrected_index_map, gene_row_names, pathway_column_names)
|
Helper function to create the gene-to-pathway
membership matrix and apply crosstalk correction on that
matrix. Returns the crosstalk-corrected pathway definitions
for the input `genes.`
|
def unhook_all():
    """
    Removes all keyboard hooks in use, including hotkeys, abbreviations,
    word listeners, `record`ers and `wait`s.
    """
    _listener.start_if_necessary()
    for key_set in (_listener.blocking_keys, _listener.nonblocking_keys):
        key_set.clear()
    # Empty the lists in place so existing references stay valid.
    _listener.blocking_hooks[:] = []
    _listener.handlers[:] = []
    unhook_all_hotkeys()
|
Removes all keyboard hooks in use, including hotkeys, abbreviations, word
listeners, `record`ers and `wait`s.
|
def delete_copy_field(self, collection, copy_dict):
    '''
    Deletes a copy field by sending a ``delete-copy-field`` command to
    the Solr schema API and returning the response body.

    copy_dict should look like ::
        {'source':'source_field_name','dest':'destination_field_name'}

    :param string collection: Name of the collection for the action
    :param dict copy_dict: Dictionary of field info

    NOTE(review): when `copy_dict` is not among the collection's
    existing copy fields this only logs at info level -- the delete
    request is still sent (see the TODO below).
    '''
    #Fix this later to check for field before sending a delete
    if self.devel:
        self.logger.debug("Deleting {}".format(str(copy_dict)))
    copyfields = self.get_schema_copyfields(collection)
    if copy_dict not in copyfields:
        self.logger.info("Fieldset not in Solr Copy Fields: {}".format(str(copy_dict)))
    # Build and POST the schema API command regardless of the check above.
    temp = {"delete-copy-field": dict(copy_dict)}
    res, con_info = self.solr.transport.send_request(method='POST',endpoint=self.schema_endpoint,collection=collection, data=json.dumps(temp))
    return res
|
Deletes a copy field.
copy_dict should look like ::
{'source':'source_field_name','dest':'destination_field_name'}
:param string collection: Name of the collection for the action
:param dict copy_field: Dictionary of field info
|
def _next_page(self, response):
"""
return url path to next page of paginated data
"""
for link in response.getheader("link", "").split(","):
try:
(url, rel) = link.split(";")
if "next" in rel:
return url.lstrip("<").rstrip(">")
except Exception:
return
|
return url path to next page of paginated data
|
def as_uni_form(form):
    """
    The original and still very useful way to generate a uni-form form/formset::
        {% load uni_form_tags %}
        <form class="uniForm" action="post">
            {% csrf_token %}
            {{ myform|as_uni_form }}
        </form>
    """
    if isinstance(form, BaseFormSet):
        # Reload the template on every call in DEBUG so edits show up.
        template = (get_template('uni_form/uni_formset.html')
                    if settings.DEBUG else uni_formset_template)
        context = Context({'formset': form})
    else:
        template = (get_template('uni_form/uni_form.html')
                    if settings.DEBUG else uni_form_template)
        context = Context({'form': form})
    return template.render(context)
|
The original and still very useful way to generate a uni-form form/formset::
{% load uni_form_tags %}
<form class="uniForm" action="post">
{% csrf_token %}
{{ myform|as_uni_form }}
</form>
|
def export_original_data(self):
    """
    Return ``__original_data__`` with each item exported: items that
    expose their own ``export_original_data`` are exported recursively,
    everything else is passed through unchanged.
    """
    exported = []
    for item in self.__original_data__:
        try:
            exported.append(item.export_original_data())
        except AttributeError:
            exported.append(item)
    return exported
|
Retrieves the original_data
|
def visit_Call(self, node):
    """
    Visit a function call.
    We expect every logging statement and string format to be a function call.
    """
    # CASE 1: We're in a logging statement
    if self.within_logging_statement():
        if self.within_logging_argument() and self.is_format_call(node):
            # str.format() inside a logging argument: flag it (formatting
            # should be deferred to the logging framework) and stop
            # descending into this call.
            self.violations.append((node, STRING_FORMAT_VIOLATION))
            super(LoggingVisitor, self).generic_visit(node)
            return
    logging_level = self.detect_logging_level(node)
    # Only record the level of the outermost logging call.
    if logging_level and self.current_logging_level is None:
        self.current_logging_level = logging_level
    # CASE 2: We're in some other statement
    if logging_level is None:
        super(LoggingVisitor, self).generic_visit(node)
        return
    # CASE 3: We're entering a new logging statement
    self.current_logging_call = node
    if logging_level == "warn":
        # `warn` is the deprecated alias of `warning`.
        self.violations.append((node, WARN_VIOLATION))
    self.check_exc_info(node)
    for index, child in enumerate(iter_child_nodes(node)):
        # Child 0 is the called function itself; child 1 is the first
        # positional argument, i.e. the log message.
        if index == 1:
            self.current_logging_argument = child
        if index >= 1:
            self.check_exception_arg(child)
        if index > 1 and isinstance(child, keyword) and child.arg == "extra":
            self.current_extra_keyword = child
        super(LoggingVisitor, self).visit(child)
    # Reset per-call state once the logging call has been fully visited.
    self.current_logging_argument = None
    self.current_extra_keyword = None
    self.current_logging_call = None
    self.current_logging_level = None
|
Visit a function call.
We expect every logging statement and string format to be a function call.
|
def get_provider_id(self):
    """Gets the ``Id`` of the provider.
    return: (osid.id.Id) - the provider ``Id``
    *compliance: mandatory -- This method must be implemented.*
    """
    provider_id = self.my_osid_object._my_map.get('providerId')
    if not provider_id:
        raise IllegalState('this sourceable object has no provider set')
    return Id(provider_id)
|
Gets the ``Id`` of the provider.
return: (osid.id.Id) - the provider ``Id``
*compliance: mandatory -- This method must be implemented.*
|
def send_contributor_email(self, contributor):
    """Build and send the monthly report email for *contributor*."""
    report = ContributorReport(
        contributor,
        month=self.month,
        year=self.year,
        deadline=self._deadline,
        start=self._start,
        end=self._end,
    )
    report.send()
|
Send an EmailMessage object for a given contributor.
|
def process(in_path, boundaries_id=msaf.config.default_bound_id,
            labels_id=msaf.config.default_label_id, annot_beats=False,
            framesync=False, feature="pcp", hier=False, save=False,
            out_file=None, n_jobs=4, annotator_id=0, config=None):
    """Main process to evaluate algorithms' results.

    Parameters
    ----------
    in_path : str
        Path to the dataset root folder, or to a single file
        (single-file mode).
    boundaries_id : str
        Boundaries algorithm identifier (e.g. siplca, cnmf)
    labels_id : str
        Labels algorithm identifier (e.g. siplca, cnmf)
    annot_beats : boolean
        Whether to use the annotated beats or not.
    framesync: str
        Whether to use framesync features or not (default: False -> beatsync)
    feature: str
        String representing the feature to be used (e.g. pcp, mfcc, tonnetz)
    hier : bool
        Whether to compute a hierarchical or flat segmentation.
    save: boolean
        Whether to save the results into the `out_file` csv file.
    out_file: str
        Path to the csv file to save the results (if `None` and `save = True`
        it will save the results in the default file name obtained by
        calling `get_results_file_name`).
    n_jobs: int
        Number of processes to run in parallel. Only available in collection
        mode.
    annotator_id : int
        Number identifying the annotator.
    config: dict
        Dictionary containing custom configuration parameters for the
        algorithms. If None, the default parameters are used.

    Returns
    -------
    results : pd.DataFrame
        DataFrame containing the evaluations for each file.
    """
    # Set up configuration based on algorithms parameters
    if config is None:
        config = io.get_configuration(feature, annot_beats, framesync,
                                      boundaries_id, labels_id)
    # Hierarchical segmentation
    config["hier"] = hier
    # Remove actual features
    config.pop("features", None)
    # Get out file in case we want to save results
    if out_file is None:
        out_file = get_results_file_name(boundaries_id, labels_id, config,
                                         annotator_id)
    # If out_file already exists, read and return them
    if os.path.exists(out_file):
        logging.warning("Results already exists, reading from file %s" %
                        out_file)
        results = pd.read_csv(out_file)
        print_results(results)
        return results
    # Perform actual evaluations
    if os.path.isfile(in_path):
        # Single File mode
        evals = [process_track(in_path, boundaries_id, labels_id, config,
                               annotator_id=annotator_id)]
    else:
        # Collection mode
        # Get files
        file_structs = io.get_dataset_files(in_path)
        # Evaluate in parallel
        logging.info("Evaluating %d tracks..." % len(file_structs))
        evals = Parallel(n_jobs=n_jobs)(delayed(process_track)(
            file_struct, boundaries_id, labels_id, config,
            annotator_id=annotator_id) for file_struct in file_structs[:])
    # Aggregate evaluations in pandas format
    # NOTE(review): DataFrame.append is deprecated/removed in pandas >= 2.0;
    # consider collecting the evals and using pd.concat instead.
    results = pd.DataFrame()
    for e in evals:
        if e != []:
            results = results.append(e, ignore_index=True)
    logging.info("%d tracks analyzed" % len(results))
    # Print results
    print_results(results)
    # Save all results
    if save:
        logging.info("Writing results in %s" % out_file)
        results.to_csv(out_file)
    return results
|
Main process to evaluate algorithms' results.
Parameters
----------
in_path : str
Path to the dataset root folder.
boundaries_id : str
Boundaries algorithm identifier (e.g. siplca, cnmf)
labels_id : str
Labels algorithm identifier (e.g. siplca, cnmf)
ds_name : str
Name of the dataset to be evaluated (e.g. SALAMI). * stands for all.
annot_beats : boolean
Whether to use the annotated beats or not.
framesync: str
Whether to use framesync features or not (default: False -> beatsync)
feature: str
String representing the feature to be used (e.g. pcp, mfcc, tonnetz)
hier : bool
Whether to compute a hierarchical or flat segmentation.
save: boolean
Whether to save the results into the `out_file` csv file.
out_file: str
Path to the csv file to save the results (if `None` and `save = True`
it will save the results in the default file name obtained by
calling `get_results_file_name`).
n_jobs: int
Number of processes to run in parallel. Only available in collection
mode.
annotator_id : int
    Number identifying the annotator.
config: dict
Dictionary containing custom configuration parameters for the
algorithms. If None, the default parameters are used.
Return
------
results : pd.DataFrame
DataFrame containing the evaluations for each file.
|
def getAllFeatureSets(self):
    """
    Yields every feature set across all datasets on the server.
    """
    for dataset in self.getAllDatasets():
        feature_sets = self._client.search_feature_sets(
            dataset_id=dataset.id)
        for feature_set in feature_sets:
            yield feature_set
|
Returns all feature sets on the server.
|
def get_exptime(self, img):
    """Return the exposure time for *img*.

    Looks up the header (via ``self.get_header``) for the EXPTIME
    keyword first, then EXPOSED; defaults to 1.0 when neither is
    present.
    """
    header = self.get_header(img)
    # Membership test directly on the mapping; no need for .keys().
    if 'EXPTIME' in header:
        return header['EXPTIME']
    if 'EXPOSED' in header:
        return header['EXPOSED']
    return 1.0
|
Obtain EXPTIME
|
def combat(adata: AnnData, key: str = 'batch', covariates: Optional[Collection[str]] = None, inplace: bool = True):
    """ComBat function for batch effect correction [Johnson07]_ [Leek12]_ [Pedersen12]_.

    Corrects for batch effects by fitting linear models, gains statistical power
    via an EB framework where information is borrowed across genes. This uses the
    implementation of `ComBat <https://github.com/brentp/combat.py>`__ [Pedersen12]_.

    Parameters
    ----------
    adata : :class:`~anndata.AnnData`
        Annotated data matrix
    key: `str`, optional (default: `"batch"`)
        Key to a categorical annotation from adata.obs that will be used for batch effect removal
    covariates
        Additional covariates such as adjustment variables or biological condition. Note that
        not including covariates may introduce bias or lead to the removal of biological signal
        in unbalanced designs.
    inplace: bool, optional (default: `True`)
        Whether to replace adata.X or to return the corrected data

    Returns
    -------
    Depending on the value of inplace, either returns an updated AnnData object
    or modifies the passed one.

    Raises
    ------
    ValueError
        If `key` or any covariate is missing from ``adata.obs``, if `key`
        overlaps the covariates, or if covariates are not unique.
    """
    # check the input
    if key not in adata.obs_keys():
        raise ValueError('Could not find the key {!r} in adata.obs'.format(key))
    if covariates is not None:
        cov_exist = np.isin(covariates, adata.obs_keys())
        if np.any(~cov_exist):
            missing_cov = np.array(covariates)[~cov_exist].tolist()
            raise ValueError('Could not find the covariate(s) {!r} in adata.obs'.format(missing_cov))
        if key in covariates:
            raise ValueError('Batch key and covariates cannot overlap')
        if len(covariates) != len(set(covariates)):
            raise ValueError('Covariates must be unique')
    # only works on dense matrices so far
    if issparse(adata.X):
        X = adata.X.A.T
    else:
        X = adata.X.T
    # genes as rows, cells as columns
    data = pd.DataFrame(
        data=X,
        index=adata.var_names,
        columns=adata.obs_names,
    )
    sanitize_anndata(adata)
    # construct a pandas series of the batch annotation
    model = adata.obs[[key] + (covariates if covariates else [])]
    batch_info = model.groupby(key).groups.values()
    n_batch = len(batch_info)
    n_batches = np.array([len(v) for v in batch_info])
    n_array = float(sum(n_batches))
    # standardize across genes using a pooled variance estimator
    logg.info("Standardizing Data across genes.\n")
    s_data, design, var_pooled, stand_mean = _standardize_data(model, data, key)
    # fitting the parameters on the standardized data
    logg.info("Fitting L/S model and finding priors\n")
    # the first n_batch design columns are the batch indicators
    batch_design = design[design.columns[:n_batch]]
    # first estimate of the additive batch effect
    gamma_hat = np.dot(np.dot(la.inv(np.dot(batch_design.T, batch_design)), batch_design.T), s_data.T)
    delta_hat = []
    # first estimate for the multiplicative batch effect
    for i, batch_idxs in enumerate(batch_info):
        delta_hat.append(s_data[batch_idxs].var(axis=1))
    # empirically fix the prior hyperparameters
    gamma_bar = gamma_hat.mean(axis=1)
    t2 = gamma_hat.var(axis=1)
    # a_prior and b_prior are the priors on lambda and theta from Johnson and Li (2006)
    a_prior = list(map(_aprior, delta_hat))
    b_prior = list(map(_bprior, delta_hat))
    logg.info("Finding parametric adjustments\n")
    # gamma star and delta star will be our empirical bayes (EB) estimators
    # for the additive and multiplicative batch effect per batch and cell
    gamma_star, delta_star = [], []
    for i, batch_idxs in enumerate(batch_info):
        # temp stores our estimates for the batch effect parameters.
        # temp[0] is the additive batch effect
        # temp[1] is the multiplicative batch effect
        gamma, delta = _it_sol(
            s_data[batch_idxs].values,
            gamma_hat[i],
            delta_hat[i].values,
            gamma_bar[i],
            t2[i],
            a_prior[i],
            b_prior[i],
        )
        gamma_star.append(gamma)
        delta_star.append(delta)
    logg.info("Adjusting data\n")
    bayesdata = s_data
    gamma_star = np.array(gamma_star)
    delta_star = np.array(delta_star)
    # we now apply the parametric adjustment to the standardized data from above
    # loop over all batches in the data
    for j, batch_idxs in enumerate(batch_info):
        # we basically subtract the additive batch effect, rescale by the ratio
        # of multiplicative batch effect to pooled variance and add the overall gene
        # wise mean
        dsq = np.sqrt(delta_star[j,:])
        dsq = dsq.reshape((len(dsq), 1))
        denom = np.dot(dsq, np.ones((1, n_batches[j])))
        numer = np.array(bayesdata[batch_idxs] - np.dot(batch_design.loc[batch_idxs], gamma_star).T)
        bayesdata[batch_idxs] = numer / denom
    # undo the standardization: rescale by the pooled std and re-add the mean
    vpsq = np.sqrt(var_pooled).reshape((len(var_pooled), 1))
    bayesdata = bayesdata * np.dot(vpsq, np.ones((1, int(n_array)))) + stand_mean
    # put back into the adata object or return
    if inplace:
        adata.X = bayesdata.values.transpose()
    else:
        return bayesdata.values.transpose()
|
ComBat function for batch effect correction [Johnson07]_ [Leek12]_ [Pedersen12]_.
Corrects for batch effects by fitting linear models, gains statistical power
via an EB framework where information is borrowed across genes. This uses the
implementation of `ComBat <https://github.com/brentp/combat.py>`__ [Pedersen12]_.
Parameters
----------
adata : :class:`~anndata.AnnData`
Annotated data matrix
key: `str`, optional (default: `"batch"`)
Key to a categorical annotation from adata.obs that will be used for batch effect removal
covariates
Additional covariates such as adjustment variables or biological condition. Note that
not including covariates may introduce bias or lead to the removal of biological signal
in unbalanced designs.
inplace: bool, optional (default: `True`)
    Whether to replace adata.X or to return the corrected data
Returns
-------
Depending on the value of inplace, either returns an updated AnnData object
or modifies the passed one.
|
def _fit(self, Z, parameter_iterable):
    """Actual fitting, performing the search over parameters.

    Cross-validates every candidate setting from `parameter_iterable`,
    records per-setting scores in ``grid_scores_``, exposes the winner
    via ``best_params_``/``best_score_``, and (when ``self.refit`` is
    set) refits ``best_estimator_`` on the full data.
    """
    self.scorer_ = check_scoring(self.estimator, scoring=self.scoring)
    cv = self.cv
    cv = _check_cv(cv, Z)
    if self.verbose > 0:
        if isinstance(parameter_iterable, Sized):
            n_candidates = len(parameter_iterable)
            print("Fitting {0} folds for each of {1} candidates, totalling"
                  " {2} fits".format(len(cv), n_candidates,
                                     n_candidates * len(cv)))
    base_estimator = clone(self.estimator)
    pre_dispatch = self.pre_dispatch
    # One (fit, score) job per (parameter setting, CV fold) pair.
    out = Parallel(
        n_jobs=self.n_jobs, verbose=self.verbose,
        pre_dispatch=pre_dispatch, backend="threading"
    )(
        delayed(_fit_and_score)(clone(base_estimator), Z, self.scorer_,
                                train, test, self.verbose, parameters,
                                self.fit_params, return_parameters=True,
                                error_score=self.error_score)
        for parameters in parameter_iterable
        for train, test in cv)
    # Out is a list of triplet: score, estimator, n_test_samples
    n_fits = len(out)
    n_folds = len(cv)
    scores = list()
    grid_scores = list()
    # Results are fold-major per candidate: each consecutive run of
    # n_folds entries belongs to one parameter setting.
    for grid_start in range(0, n_fits, n_folds):
        n_test_samples = 0
        score = 0
        all_scores = []
        for this_score, this_n_test_samples, _, parameters in \
                out[grid_start:grid_start + n_folds]:
            all_scores.append(this_score)
            if self.iid:
                # iid: weight each fold by its number of test samples.
                this_score *= this_n_test_samples
                n_test_samples += this_n_test_samples
            score += this_score
        if self.iid:
            score /= float(n_test_samples)
        else:
            score /= float(n_folds)
        scores.append((score, parameters))
        # TODO: shall we also store the test_fold_sizes?
        grid_scores.append(_CVScoreTuple(
            parameters,
            score,
            np.array(all_scores)))
    # Store the computed scores
    self.grid_scores_ = grid_scores
    # Find the best parameters by comparing on the mean validation score:
    # note that `sorted` is deterministic in the way it breaks ties
    best = sorted(grid_scores, key=lambda x: x.mean_validation_score,
                  reverse=True)[0]
    self.best_params_ = best.parameters
    self.best_score_ = best.mean_validation_score
    if self.refit:
        # fit the best estimator using the entire dataset
        # clone first to work around broken estimators
        best_estimator = clone(base_estimator).set_params(
            **best.parameters)
        best_estimator.fit(Z, **self.fit_params)
        self.best_estimator_ = best_estimator
    return self
|
Actual fitting, performing the search over parameters.
|
def stdout_to_results(s):
    """Parse the multi-line output of a benchmark process into a list of
    BenchmarkResult instances, one per line of whitespace-separated
    fields."""
    parsed = []
    for line in s.strip().split('\n'):
        parsed.append(BenchmarkResult(*line.split()))
    return parsed
|
Turns the multi-line output of a benchmark process into
a sequence of BenchmarkResult instances.
|
def output_randomized_kronecker_to_pickle(
    left_matrix, right_matrix,
    train_indices_out_path, test_indices_out_path,
    train_metadata_out_path=None, test_metadata_out_path=None,
    remove_empty_rows=True):
  """Compute randomized Kronecker product and dump it on the fly.
  A standard Kronecker product between matrices A and B produces
  [[a_11 B, ..., a_1n B],
  ...
  [a_m1 B, ..., a_mn B]]
  (if A's size is (m, n) and B's size is (p, q) then A Kronecker B has size
  (m p, n q)).
  Here we modify the standard Kronecker product expanding matrices in
  https://cs.stanford.edu/~jure/pubs/kronecker-jmlr10.pdf
  and randomize each block-wise operation a_ij B in the Kronecker product as
  in https://arxiv.org/pdf/1901.08910.pdf section III.4.
  The matrix we produce is
  [[F(a_11, B, w_11), ..., F(a_1n, B, w_1n)],
  ...
  [F(a_m1, B, w_m1), ... , F(a_mn, B, w_mn)]]
  where (w_ij) is a sequence of pseudo random numbers and F is randomized
  operator which will:
  1) Shuffle rows and columns of B independently at random;
  2) Dropout elements of B with a rate 1 - a_ij to compute
  F(a_ij, B, w_ij).
  (It is noteworthy that there is an abuse of notation above when writing
  F(a_ij, B, w_ij) as each block-wise operation will in fact consume
  multiple elements of the sequence (w_ij)).
  Args:
    left_matrix: sparse SciPy csr matrix with values in [0, 1].
    right_matrix: sparse SciPy coo signed binary matrix. +1 values correspond
      to train set and -1 values correspond to test set.
    train_indices_out_path: path to output train file. The non zero indices of
      the resulting sparse matrix are dumped as a series of pickled records.
      As many shards will be created as there are rows in left matrix. The shard
      corresponding to row i in the left matrix has the suffix _i appended to
      its file name. Each shard contains a pickled list of list each of which
      corresponds to a users.
    test_indices_out_path: path to output test file. The non zero indices of
      the resulting sparse matrix are dumped as a series of pickled records.
      As many shards will be created as there are rows in left matrix. The shard
      corresponding to row i in the left matrix has the suffix _i appended to
      its file name. Each shard contains a pickled list of list each of which
      corresponds to a users.
    train_metadata_out_path: path to optional complementary output file
      containing the number of train rows (r), columns (c) and non zeros (nnz)
      in a pickled SparseMatrixMetadata named tuple.
    test_metadata_out_path: path to optional complementary output file
      containing the number of test rows (r), columns (c) and non zeros (nnz)
      in a pickled SparseMatrixMetadata named tuple.
    remove_empty_rows: whether to remove rows from the synthetic train and
      test matrices which are not present in the train or the test matrix.
  Returns:
    (metadata, train_metadata, test_metadata) triplet of SparseMatrixMetadata
    corresponding to the overall data set, train data set and test data set.
  Raises:
    ValueError: if right_matrix contains values other than -1 and +1.
  """
  logging.info("Writing item sequences to pickle files %s and %s.",
               train_indices_out_path, test_indices_out_path)
  num_rows = 0
  num_removed_rows = 0
  num_cols = left_matrix.shape[1] * right_matrix.shape[1]
  num_interactions = 0
  num_train_interactions = 0
  num_test_interactions = 0
  # The sign of each right-matrix entry encodes train (+1) vs test (-1);
  # any other value is a malformed input.
  if not set(right_matrix.data).issubset({-1, 1}):
    raise ValueError(
        "Values of sparse matrix should be -1 or 1 but are:",
        set(right_matrix.data))
  # One shard is written per row of the left matrix; aggregate the per-shard
  # statistics as we go.
  for i in xrange(left_matrix.shape[0]):
    (shard_num_removed_rows, shard_metadata, shard_train_metadata,
     shard_test_metadata) = _compute_and_write_row_block(
         i, left_matrix, right_matrix, train_indices_out_path,
         test_indices_out_path, remove_empty_rows)
    num_rows += shard_metadata.num_rows
    num_removed_rows += shard_num_removed_rows
    num_interactions += shard_metadata.num_interactions
    num_train_interactions += shard_train_metadata.num_interactions
    num_test_interactions += shard_test_metadata.num_interactions
  logging.info("%d total interactions written.", num_interactions)
  logging.info("%d total rows removed.", num_removed_rows)
  logging.info("%d total train interactions written.", num_train_interactions)
  logging.info("%d total test interactions written.", num_test_interactions)
  logging.info("Done writing.")
  metadata = SparseMatrixMetadata(
      num_interactions=num_interactions,
      num_rows=num_rows, num_cols=num_cols)
  train_metadata = SparseMatrixMetadata(
      num_interactions=num_train_interactions,
      num_rows=num_rows, num_cols=num_cols)
  test_metadata = SparseMatrixMetadata(
      num_interactions=num_test_interactions,
      num_rows=num_rows, num_cols=num_cols)
  if train_metadata_out_path is not None:
    util.write_metadata_to_file(
        train_metadata, train_metadata_out_path, tag="train")
  if test_metadata_out_path is not None:
    util.write_metadata_to_file(
        test_metadata, test_metadata_out_path, tag="test")
  return metadata, train_metadata, test_metadata
|
Compute randomized Kronecker product and dump it on the fly.
A standard Kronecker product between matrices A and B produces
[[a_11 B, ..., a_1n B],
...
[a_m1 B, ..., a_mn B]]
(if A's size is (m, n) and B's size is (p, q) then A Kronecker B has size
(m p, n q)).
Here we modify the standard Kronecker product expanding matrices in
https://cs.stanford.edu/~jure/pubs/kronecker-jmlr10.pdf
and randomize each block-wise operation a_ij B in the Kronecker product as
in https://arxiv.org/pdf/1901.08910.pdf section III.4.
The matrix we produce is
[[F(a_11, B, w_11), ..., F(a_1n, B, w_1n)],
...
[F(a_m1, B, w_m1), ... , F(a_mn, B, w_mn)]]
where (w_ij) is a sequence of pseudo random numbers and F is randomized
operator which will:
1) Shuffle rows and columns of B independently at random;
2) Dropout elements of B with a rate 1 - a_ij to compute
F(a_ij, B, w_ij).
(It is noteworthy that there is an abuse of notation above when writing
F(a_ij, B, w_ij) as each block-wise operation will in fact consume
multiple elements of the sequence (w_ij)).
Args:
left_matrix: sparse SciPy csr matrix with values in [0, 1].
right_matrix: sparse SciPy coo signed binary matrix. +1 values correspond
to train set and -1 values correspond to test set.
train_indices_out_path: path to output train file. The non zero indices of
the resulting sparse matrix are dumped as a series of pickled records.
As many shard will be created as there are rows in left matrix. The shard
corresponding to row i in the left matrix has the suffix _i appended to
its file name. Each shard contains a pickled list of list each of which
corresponds to a users.
    test_indices_out_path: path to output test file. The non zero indices of
      the resulting sparse matrix are dumped as a series of pickled records.
      As many shards will be created as there are rows in left matrix. The shard
corresponding to row i in the left matrix has the suffix _i appended to
its file name. Each shard contains a pickled list of list each of which
corresponds to a users.
train_metadata_out_path: path to optional complementary output file
containing the number of train rows (r), columns (c) and non zeros (nnz)
in a pickled SparseMatrixMetadata named tuple.
test_metadata_out_path: path to optional complementary output file
containing the number of test rows (r), columns (c) and non zeros (nnz)
in a pickled SparseMatrixMetadata named tuple.
remove_empty_rows: whether to remove rows from the synthetic train and
test matrices which are not present in the train or the test matrix.
Returns:
(metadata, train_metadata, test_metadata) triplet of SparseMatrixMetadata
corresponding to the overall data set, train data set and test data set.
|
def yield_event(self, act):
    """
    Handle completion for a request and return an (op, coro) pair to be
    passed to the scheduler on the last completion loop of a proactor.
    Returns None when the act is unknown or produced no operation.
    """
    if act not in self.tokens:
        return None
    coroutine = act.coro
    operation = self.try_run_act(act, self.tokens[act])
    if not operation:
        return None
    del self.tokens[act]
    return operation, coroutine
|
Handle completion for a request and return an (op, coro) pair to be
passed to the scheduler on the last completion loop of a proactor.
|
def gen_headers() -> Dict[str, str]:
    """Generate an HTTP header mapping with a randomly chosen User-Agent.

    :return: headers dict containing a single ``User-Agent`` entry
    """
    ua_list: List[str] = ['Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.117 Safari/537.36']
    # random.choice is the idiomatic replacement for randint-based indexing.
    headers: Dict[str, str] = {'User-Agent': random.choice(ua_list)}
    return headers
|
Generate a header pairing.
|
def _delete_nxos_db(self, unused, vlan_id, device_id, host_id, vni,
                    is_provider_vlan):
    """Remove the nexus port-binding rows for this VLAN/device pair.

    Called during the delete precommit port event; silently returns when
    no binding exists.
    """
    try:
        bindings = nxos_db.get_nexusvm_bindings(vlan_id, device_id)
        for binding in bindings:
            nxos_db.remove_nexusport_binding(
                binding.port_id, binding.vlan_id, binding.vni,
                binding.switch_ip, binding.instance_id)
    except excep.NexusPortBindingNotFound:
        return
|
Delete the nexus database entry.
Called during delete precommit port event.
|
def daily_bounds(network, snapshots):
    """Add a daily cyclic constraint on storage state of charge.

    For every first hour of each clustered day (every 24th snapshot) the
    state of charge is constrained to equal the state of charge 23 hours
    later, i.e. a day's starting level must match its ending level.

    NOTE(review): the original summary claimed the level is bound to
    0.5 * max_level, which the constraint below does not express — confirm
    the intended behavior.
    """
    sus = network.storage_units
    # take every first hour of the clustered days
    network.model.period_starts = network.snapshot_weightings.index[0::24]
    network.model.storages = sus.index
    def day_rule(m, s, p):
        """
        Sets the soc of every first hour equal to the soc of the last
        hour of the same day (i.e. + 23 hours).
        """
        return (
            m.state_of_charge[s, p] ==
            m.state_of_charge[s, p + pd.Timedelta(hours=23)])
    network.model.period_bound = po.Constraint(
        network.model.storages, network.model.period_starts, rule=day_rule)
|
This will bound the storage level to 0.5 max_level every 24th hour.
|
def get_image_tags(self):
    """
    Fetches image labels (repository / tags) from Docker.
    :return: A dictionary, with image name and tags as the key and the image id as value.
    :rtype: dict
    """
    mapping = {}
    for image in self.images():
        for repo_tag in image['RepoTags']:
            mapping[repo_tag] = image['Id']
    return mapping
|
Fetches image labels (repository / tags) from Docker.
:return: A dictionary, with image name and tags as the key and the image id as value.
:rtype: dict
|
def connect(self, fn):
    """Open the SQLite database at *fn* and apply connection pragmas."""
    self.conn = sqlite3.connect(fn)
    cur = self.get_cursor()
    # Same four pragmas as before, applied in the same order.
    for pragma in ('PRAGMA page_size=4096',
                   'PRAGMA FOREIGN_KEYS=ON',
                   'PRAGMA cache_size=10000',
                   'PRAGMA journal_mode=MEMORY'):
        cur.execute(pragma)
|
SQLite connect method initialize db
|
def update_settings(self, service_id, version_number, settings=None):
    """Update the settings for a particular service and version.

    :param service_id: id of the service to update
    :param version_number: version of the service to update
    :param settings: mapping of setting names to values (optional)
    :return: a FastlySettings built from the API response
    """
    # A mutable default argument ({}) would be shared across calls;
    # use None as the default and create a fresh dict per call.
    if settings is None:
        settings = {}
    body = urllib.urlencode(settings)
    content = self._fetch("/service/%s/version/%d/settings" % (service_id, version_number), method="PUT", body=body)
    return FastlySettings(self, content)
|
Update the settings for a particular service and version.
|
def event_log_filter_between_date(start, end, utc):
    """Build the betweenDate query filter accepted by SoftLayer_EventLog.

    :param string start: lower bound date in mm/dd/yyyy format
    :param string end: upper bound date in mm/dd/yyyy format
    :param string utc: utc offset. Defaults to '+0000'
    """
    start_option = {'name': 'startDate',
                    'value': [format_event_log_date(start, utc)]}
    end_option = {'name': 'endDate',
                  'value': [format_event_log_date(end, utc)]}
    return {'operation': 'betweenDate',
            'options': [start_option, end_option]}
|
betweenDate Query filter that SoftLayer_EventLog likes
:param string start: lower bound date in mm/dd/yyyy format
:param string end: upper bound date in mm/dd/yyyy format
:param string utc: utc offset. Defaults to '+0000'
|
def init_rotate(cls, radians):
    """Create a :class:`Matrix` representing a rotation by :obj:`radians`.

    :type radians: float
    :param radians:
        Rotation angle in radians. Positive angles rotate from the
        positive X axis toward the positive Y axis; with cairo's default
        axis orientation that is a clockwise rotation.
    """
    matrix = cls()
    cairo.cairo_matrix_init_rotate(matrix._pointer, radians)
    return matrix
|
Return a new :class:`Matrix` for a transformation
that rotates by :obj:`radians`.
:type radians: float
:param radians:
Angle of rotation, in radians.
The direction of rotation is defined such that
positive angles rotate in the direction
from the positive X axis toward the positive Y axis.
With the default axis orientation of cairo,
positive angles rotate in a clockwise direction.
|
def get_request_body_chunk(self, content: bytes, closed: bool,
                           more_content: bool) -> Dict[str, Any]:
    '''
    Build a Request Body Chunk message as described in
    http://channels.readthedocs.io/en/stable/asgi/www.html#request-body-chunk
    '''
    chunk: Dict[str, Any] = {'content': content}
    chunk['closed'] = closed
    chunk['more_content'] = more_content
    return chunk
|
http://channels.readthedocs.io/en/stable/asgi/www.html#request-body-chunk
|
def get_specific_subnodes(self, node, name, recursive=0):
    """Return child `ELEMENT_NODEs` of *node* whose `tagName` equals *name*.

    When *recursive* is positive, also search the children's subtrees for
    up to *recursive* additional levels; recursive matches are appended
    after the direct matches.
    """
    element_children = [child for child in node.childNodes
                        if child.nodeType == child.ELEMENT_NODE]
    matches = []
    for child in element_children:
        if child.tagName == name:
            matches.append(child)
    if recursive > 0:
        for child in element_children:
            matches.extend(
                self.get_specific_subnodes(child, name, recursive - 1))
    return matches
|
Given a node and a name, return a list of child `ELEMENT_NODEs`, that
have a `tagName` matching the `name`. Search recursively for `recursive`
levels.
|
def public_url(self):
    """The public URL for this blob.
    Use :meth:`make_public` to enable anonymous access via the returned
    URL.
    :rtype: `string`
    :returns: The public URL for this blob.
    """
    quoted_name = quote(self.name.encode("utf-8"))
    return "/".join([_API_ACCESS_ENDPOINT, self.bucket.name, quoted_name])
|
The public URL for this blob.
Use :meth:`make_public` to enable anonymous access via the returned
URL.
:rtype: `string`
:returns: The public URL for this blob.
|
def from_callback(cls, cb, nx=None, nparams=None, **kwargs):
    """ Generate a SymbolicSys instance from a callback.
    Parameters
    ----------
    cb : callable
        Should have the signature ``cb(x, p, backend) -> list of exprs``.
    nx : int
        Number of unknowns, when not given it is deduced from ``kwargs['names']``.
    nparams : int
        Number of parameters, when not given it is deduced from ``kwargs['param_names']``.
    \\*\\*kwargs :
        Keyword arguments passed on to :class:`SymbolicSys`. See also :class:`pyneqsys.NeqSys`.
    Examples
    --------
    >>> symbolicsys = SymbolicSys.from_callback(lambda x, p, be: [
    ...     x[0]*x[1] - p[0],
    ...     be.exp(-x[0]) + be.exp(-x[1]) - p[0]**-2
    ... ], 2, 1)
    ...
    """
    # Deduce ``nx`` from ``names`` when unknowns are addressed by name,
    # and cross-check if both were supplied.
    if kwargs.get('x_by_name', False):
        if 'names' not in kwargs:
            raise ValueError("Need ``names`` in kwargs.")
        if nx is None:
            nx = len(kwargs['names'])
        elif nx != len(kwargs['names']):
            raise ValueError("Inconsistency between nx and length of ``names``.")
    # Same deduction/consistency check for ``nparams`` vs ``param_names``.
    if kwargs.get('par_by_name', False):
        if 'param_names' not in kwargs:
            raise ValueError("Need ``param_names`` in kwargs.")
        if nparams is None:
            nparams = len(kwargs['param_names'])
        elif nparams != len(kwargs['param_names']):
            raise ValueError("Inconsistency between ``nparam`` and length of ``param_names``.")
    if nparams is None:
        nparams = 0  # no free parameters by default
    if nx is None:
        raise ValueError("Need ``nx`` of ``names`` together with ``x_by_name==True``.")
    # Create symbolic arrays for the unknowns (x) and the parameters (p).
    be = Backend(kwargs.pop('backend', None))
    x = be.real_symarray('x', nx)
    p = be.real_symarray('p', nparams)
    # Present the symbols to the callback as dicts when the by-name flags
    # are set, otherwise as plain arrays.
    _x = dict(zip(kwargs['names'], x)) if kwargs.get('x_by_name', False) else x
    _p = dict(zip(kwargs['param_names'], p)) if kwargs.get('par_by_name', False) else p
    try:
        exprs = cb(_x, _p, be)
    except TypeError:
        # The callback may not accept the backend argument; retry with a
        # wrapper that pads the call signature to three arguments.
        exprs = _ensure_3args(cb)(_x, _p, be)
    return cls(x, exprs, p, backend=be, **kwargs)
|
Generate a SymbolicSys instance from a callback.
Parameters
----------
cb : callable
Should have the signature ``cb(x, p, backend) -> list of exprs``.
nx : int
Number of unknowns, when not given it is deduced from ``kwargs['names']``.
nparams : int
Number of parameters, when not given it is deduced from ``kwargs['param_names']``.
\\*\\*kwargs :
Keyword arguments passed on to :class:`SymbolicSys`. See also :class:`pyneqsys.NeqSys`.
Examples
--------
>>> symbolicsys = SymbolicSys.from_callback(lambda x, p, be: [
... x[0]*x[1] - p[0],
... be.exp(-x[0]) + be.exp(-x[1]) - p[0]**-2
... ], 2, 1)
...
|
def _playsoundOSX(sound, block = True):
    '''
    Utilizes AppKit.NSSound. Tested and known to work with MP3 and WAVE on
    OS X 10.11 with Python 2.7. Probably works with anything QuickTime supports.
    Probably works on OS X 10.5 and newer. Probably works with all versions of
    Python.
    Inspired by (but not copied from) Aaron's Stack Overflow answer here:
    http://stackoverflow.com/a/34568298/901641
    I never would have tried using AppKit.NSSound without seeing his code.
    '''
    from AppKit import NSSound
    from Foundation import NSURL
    from time import sleep
    # Turn a bare path into a file:// URL; relative paths are resolved
    # against the current working directory.
    if '://' not in sound:
        if not sound.startswith('/'):
            from os import getcwd
            sound = getcwd() + '/' + sound
        sound = 'file://' + sound
    url = NSURL.URLWithString_(sound)
    nssound = NSSound.alloc().initWithContentsOfURL_byReference_(url, True)
    if not nssound:
        raise IOError('Unable to load sound named: ' + sound)
    nssound.play()
    # play() returns immediately; optionally sleep for the clip duration
    # to emulate blocking playback.
    if block:
        sleep(nssound.duration())
|
Utilizes AppKit.NSSound. Tested and known to work with MP3 and WAVE on
OS X 10.11 with Python 2.7. Probably works with anything QuickTime supports.
Probably works on OS X 10.5 and newer. Probably works with all versions of
Python.
Inspired by (but not copied from) Aaron's Stack Overflow answer here:
http://stackoverflow.com/a/34568298/901641
I never would have tried using AppKit.NSSound without seeing his code.
|
def _euristic_h_function(self, suffix, index):
    """
    Compute the h-heuristic from Hulden, 2009 for the current trie node.

    Parameters
    ----------
    suffix : string
        the still-unread suffix of the input word
    index : int
        index of the current node in the dictionary trie

    Returns
    -------
    cost : float
        a lower bound on the cost of a correction that yields an input
        word with suffix ``suffix``, given that the already-read prefix
        of the typo-free word led to node ``index``
    """
    if self.euristics > 0:
        suffix = suffix[:self.euristics]
    # memoized results, cached per (node, suffix)
    index_temporary_euristics = self._temporary_euristics[index]
    cost = index_temporary_euristics.get(suffix, None)
    if cost is not None:
        return cost
    # fetch the per-node data from the precomputed arrays
    absense_costs = self._absense_costs_by_node[index]
    data = self.dictionary.data[index]
    costs = np.zeros(dtype=np.float64, shape=(self.euristics,))
    # costs[j] --- penalty estimate when looking ahead j symbols
    for i, a in enumerate(suffix):
        costs[i:] += absense_costs[a][i:]
    cost = max(costs)
    index_temporary_euristics[suffix] = cost
    return cost
|
Вычисление h-эвристики из работы Hulden,2009 для текущей вершины словаря
Аргументы:
----------
suffix : string
непрочитанный суффикс входного слова
index : int
индекс текущего узла в словаре
Возвращает:
-----------
cost : float
оценка снизу для стоимости замены,
приводящей к входному слову с суффиксом suffix,
если прочитанный префикс слова без опечатки
привёл в вершину с номером index
|
def create_api_pool(self):
    """Get an instance of Api Pool services facade."""
    return ApiPool(self.networkapi_url, self.user, self.password,
                   self.user_ldap)
|
Get an instance of Api Pool services facade.
|
def send_template_email(recipients, title_template, body_template, context, language):
    """Sends e-mail using templating system.

    :param recipients: list of recipient e-mail addresses
    :param title_template: template path rendered as the message subject
    :param body_template: template path rendered as the message body
    :param context: template context; site name/domain are added before rendering
    :param language: language code activated while rendering, or None
    :raises ImproperlyConfigured: when DEFAULT_FROM_EMAIL is not set

    No-op when ``settings.SEND_PLANS_EMAILS`` is False.
    """
    # Allow disabling all plan e-mails via settings.
    send_emails = getattr(settings, 'SEND_PLANS_EMAILS', True)
    if not send_emails:
        return
    site_name = getattr(settings, 'SITE_NAME', 'Please define settings.SITE_NAME')
    domain = getattr(settings, 'SITE_URL', None)
    if domain is None:
        # Fall back to django.contrib.sites when SITE_URL is not configured.
        try:
            Site = apps.get_model('sites', 'Site')
            current_site = Site.objects.get_current()
            site_name = current_site.name
            domain = current_site.domain
        except LookupError:
            # Sites framework not installed; keep the defaults.
            pass
    context.update({'site_name': site_name, 'site_domain': domain})
    # Render the subject and body in the recipient's language.
    if language is not None:
        translation.activate(language)
    mail_title_template = loader.get_template(title_template)
    mail_body_template = loader.get_template(body_template)
    title = mail_title_template.render(context)
    body = mail_body_template.render(context)
    try:
        email_from = getattr(settings, 'DEFAULT_FROM_EMAIL')
    except AttributeError:
        raise ImproperlyConfigured('DEFAULT_FROM_EMAIL setting needed for sending e-mails')
    mail.send_mail(title, body, email_from, recipients)
    if language is not None:
        translation.deactivate()
    email_logger.info(u"Email (%s) sent to %s\nTitle: %s\n%s\n\n" % (language, recipients, title, body))
|
Sends e-mail using templating system
|
def add_record(self, record):
    """Add a record to the OAISet.

    :param record: Record to be added.
    :type record: `invenio_records.api.Record` or derivative.
    """
    # setdefault returns the (possibly freshly created) nested list.
    sets = record.setdefault('_oai', {}).setdefault('sets', [])
    assert not self.has_record(record)
    sets.append(self.spec)
|
Add a record to the OAISet.
:param record: Record to be added.
:type record: `invenio_records.api.Record` or derivative.
|
def _executor_script(self):
    """Write the shell script in charge of executing the benchmark and
    return its path.
    """
    handle, script_path = tempfile.mkstemp(suffix='.sh', dir=os.getcwd())
    os.close(handle)
    with open(script_path, 'w') as ostr:
        self._write_executor_script(ostr)
    # Make the script executable by the owner and readable by owner/group.
    current_mode = os.stat(script_path).st_mode
    os.chmod(script_path,
             current_mode | stat.S_IEXEC | stat.S_IRGRP | stat.S_IRUSR)
    return script_path
|
Create shell-script in charge of executing the benchmark
and return its path.
|
def best_four_point_to_buy(self):
    """ Determine whether any of the four major buy points are matched.

    :rtype: str or False — a comma-separated string of the matched
        checks' docstrings, or False when none match.
    """
    if not self.check_mins_bias_ratio():
        return False
    checks = (self.best_buy_1, self.best_buy_2,
              self.best_buy_3, self.best_buy_4)
    # Evaluate each check exactly once (the original evaluated each one
    # up to twice: once in the guard condition and once per append).
    matched = [check.__doc__.strip().decode('utf-8')
               for check in checks if check()]
    if not matched:
        return False
    return ', '.join(matched)
|
判斷是否為四大買點
:rtype: str or False
|
def project_stored_info_type_path(cls, project, stored_info_type):
    """Return a fully-qualified project_stored_info_type string."""
    template = "projects/{project}/storedInfoTypes/{stored_info_type}"
    return google.api_core.path_template.expand(
        template, project=project, stored_info_type=stored_info_type)
|
Return a fully-qualified project_stored_info_type string.
|
def send_email(self, source, subject, body, to_addresses, cc_addresses=None,
               bcc_addresses=None, format='text', reply_addresses=None,
               return_path=None, text_body=None, html_body=None):
    """Composes an email message based on input data, and then immediately
    queues the message for sending.
    :type source: string
    :param source: The sender's email address.
    :type subject: string
    :param subject: The subject of the message: A short summary of the
                    content, which will appear in the recipient's inbox.
    :type body: string
    :param body: The message body.
    :type to_addresses: list of strings or string
    :param to_addresses: The To: field(s) of the message.
    :type cc_addresses: list of strings or string
    :param cc_addresses: The CC: field(s) of the message.
    :type bcc_addresses: list of strings or string
    :param bcc_addresses: The BCC: field(s) of the message.
    :type format: string
    :param format: The format of the message's body, must be either "text"
                   or "html".
    :type reply_addresses: list of strings or string
    :param reply_addresses: The reply-to email address(es) for the
                            message. If the recipient replies to the
                            message, each reply-to address will
                            receive the reply.
    :type return_path: string
    :param return_path: The email address to which bounce notifications are
                        to be forwarded. If the message cannot be delivered
                        to the recipient, then an error message will be
                        returned from the recipient's ISP; this message will
                        then be forwarded to the email address specified by
                        the ReturnPath parameter.
    :type text_body: string
    :param text_body: The text body to send with this email.
    :type html_body: string
    :param html_body: The html body to send with this email.
    """
    format = format.lower().strip()
    # Validate the format before it is used to route `body`, so a bad
    # value fails fast instead of after the body has been processed.
    if format not in ("text", "html"):
        raise ValueError("'format' argument must be 'text' or 'html'")
    if body is not None:
        if format == "text":
            if text_body is not None:
                raise Warning("You've passed in both a body and a text_body; please choose one or the other.")
            text_body = body
        else:
            if html_body is not None:
                raise Warning("You've passed in both a body and an html_body; please choose one or the other.")
            html_body = body
    if not (html_body or text_body):
        raise ValueError("No text or html body found for mail")
    params = {
        'Source': source,
        'Message.Subject.Data': subject,
    }
    if return_path:
        params['ReturnPath'] = return_path
    if html_body is not None:
        params['Message.Body.Html.Data'] = html_body
    if text_body is not None:
        params['Message.Body.Text.Data'] = text_body
    self._build_list_params(params, to_addresses,
                            'Destination.ToAddresses.member')
    if cc_addresses:
        self._build_list_params(params, cc_addresses,
                                'Destination.CcAddresses.member')
    if bcc_addresses:
        self._build_list_params(params, bcc_addresses,
                                'Destination.BccAddresses.member')
    if reply_addresses:
        self._build_list_params(params, reply_addresses,
                                'ReplyToAddresses.member')
    return self._make_request('SendEmail', params)
|
Composes an email message based on input data, and then immediately
queues the message for sending.
:type source: string
:param source: The sender's email address.
:type subject: string
:param subject: The subject of the message: A short summary of the
content, which will appear in the recipient's inbox.
:type body: string
:param body: The message body.
:type to_addresses: list of strings or string
:param to_addresses: The To: field(s) of the message.
:type cc_addresses: list of strings or string
:param cc_addresses: The CC: field(s) of the message.
:type bcc_addresses: list of strings or string
:param bcc_addresses: The BCC: field(s) of the message.
:type format: string
:param format: The format of the message's body, must be either "text"
or "html".
:type reply_addresses: list of strings or string
:param reply_addresses: The reply-to email address(es) for the
message. If the recipient replies to the
message, each reply-to address will
receive the reply.
:type return_path: string
:param return_path: The email address to which bounce notifications are
to be forwarded. If the message cannot be delivered
to the recipient, then an error message will be
returned from the recipient's ISP; this message will
then be forwarded to the email address specified by
the ReturnPath parameter.
:type text_body: string
:param text_body: The text body to send with this email.
:type html_body: string
:param html_body: The html body to send with this email.
|
def _set_font(self, font):
    """ Sets the base font for the ConsoleWidget to the specified QFont.
    """
    metrics = QtGui.QFontMetrics(font)
    self._control.setTabStopWidth(self.tab_width * metrics.width(' '))
    self._completion_widget.setFont(font)
    self._control.document().setDefaultFont(font)
    page_control = self._page_control
    if page_control:
        page_control.document().setDefaultFont(font)
    self.font_changed.emit(font)
|
Sets the base font for the ConsoleWidget to the specified QFont.
|
def _decdeg_distance(pt1, pt2):
"""
Earth surface distance (in km) between decimal latlong points using
Haversine approximation.
http://stackoverflow.com/questions/15736995/
how-can-i-quickly-estimate-the-distance-between-two-latitude-longitude-
points
"""
lat1, lon1 = pt1
lat2, lon2 = pt2
# Convert decimal degrees to radians
lon1, lat1, lon2, lat2 = map(np.radians, [lon1, lat1, lon2, lat2])
# haversine formula
dlon = lon2 - lon1
dlat = lat2 - lat1
a = np.sin(dlat/2)**2 + np.cos(lat1) * np.cos(lat2) * np.sin(dlon/2)**2
c = 2 * np.arcsin(np.sqrt(a))
km = 6367 * c
return km
|
Earth surface distance (in km) between decimal latlong points using
Haversine approximation.
http://stackoverflow.com/questions/15736995/
how-can-i-quickly-estimate-the-distance-between-two-latitude-longitude-
points
|
def check_valid_rx_can_msg(result):
    """
    Tell whether :meth:`UcanServer.read_can_msg` returned a valid CAN message.

    :param ReturnCode result: Error code of the function.
    :return: True if a valid CAN messages was received, otherwise False.
    :rtype: bool
    """
    value = result.value
    if value == ReturnCode.SUCCESSFUL:
        return True
    return value > ReturnCode.WARNING
|
Checks if function :meth:`UcanServer.read_can_msg` returns a valid CAN message.
:param ReturnCode result: Error code of the function.
:return: True if a valid CAN messages was received, otherwise False.
:rtype: bool
|
def wallet_destroy(self, wallet):
    """
    Destroys **wallet** and all contained accounts
    .. enable_control required
    :param wallet: Wallet to destroy
    :type wallet: str
    :raises: :py:exc:`nano.rpc.RPCException`
    >>> rpc.wallet_destroy(
    ...     wallet="000D1BAEC8EC208142C99059B393051BAC8380F9B5A2E6B2489A277D81789F3F"
    ... )
    True
    """
    payload = {"wallet": self._process_value(wallet, 'wallet')}
    return self.call('wallet_destroy', payload) == {}
|
Destroys **wallet** and all contained accounts
.. enable_control required
:param wallet: Wallet to destroy
:type wallet: str
:raises: :py:exc:`nano.rpc.RPCException`
>>> rpc.wallet_destroy(
... wallet="000D1BAEC8EC208142C99059B393051BAC8380F9B5A2E6B2489A277D81789F3F"
... )
True
|
def is_flapping(self, alert, window=1800, count=2):
    """
    Return true if alert severity has changed more than X times in Y seconds

    :param alert: alert whose environment/resource/event/customer are matched
    :param window: look-back window in seconds (default 1800)
    :param count: number of severity changes above which the alert flaps
    """
    # NOTE: ``window`` (an int) and the fixed customer clause are
    # interpolated via str.format; the remaining values come from
    # vars(alert) and are bound as query parameters.
    select = """
        SELECT COUNT(*)
          FROM alerts, unnest(history) h
         WHERE environment=%(environment)s
           AND resource=%(resource)s
           AND h.event=%(event)s
           AND h.update_time > (NOW() at time zone 'utc' - INTERVAL '{window} seconds')
           AND h.type='severity'
           AND {customer}
    """.format(window=window, customer='customer=%(customer)s' if alert.customer else 'customer IS NULL')
    return self._fetchone(select, vars(alert)).count > count
|
Return true if alert severity has changed more than X times in Y seconds
|
def delete_member(self, user):
    """Attempt to remove a member from the mailing list and return the
    HTTP response.
    """
    if not self.email_enabled:
        raise EmailNotEnabledError("See settings.EMAIL_ENABLED")
    url = f"{self.api_url}/{self.address}/members/{user.email}"
    return requests.delete(url, auth=("api", self.api_key))
|
Returns a response after attempting to remove
a member from the list.
|
def vflip(img):
    """Vertically flip the given PIL Image.
    Args:
        img (PIL Image): Image to be flipped.
    Returns:
        PIL Image: Vertically flipped image.
    """
    if _is_pil_image(img):
        return img.transpose(Image.FLIP_TOP_BOTTOM)
    raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
|
Vertically flip the given PIL Image.
Args:
img (PIL Image): Image to be flipped.
Returns:
PIL Image: Vertically flipped image.
|
def insert_rows(self, row, no_rows=1):
    """Adds no_rows rows before row, appends if row > maxrows
    and marks grid as changed
    """
    # Mark content as changed
    post_command_event(self.main_window, self.ContentChangedMsg)
    current_tab = self.grid.current_table
    self.code_array.insert(row, no_rows, axis=0, tab=current_tab)
|
Adds no_rows rows before row, appends if row > maxrows
and marks grid as changed
|
def handle(self, cycle_delay=0.1):
    """
    Spend roughly ``cycle_delay`` seconds processing requests in the
    pcaspy server. Under load (e.g. running ``caget`` at a high
    frequency) the actual time spent may be much shorter; this effect
    is not corrected for.
    :param cycle_delay: Approximate time to be spent processing requests in pcaspy server.
    """
    if self._server is None:
        return
    self._server.process(cycle_delay)
    self._driver.process_pv_updates()
|
Call this method to spend about ``cycle_delay`` seconds processing
requests in the pcaspy server. Under load, for example when running ``caget`` at a
high frequency, the actual time spent in the method may be much shorter. This effect
is not corrected for.
:param cycle_delay: Approximate time to be spent processing requests in pcaspy server.
|
async def send_file(
        self, entity, file, *, caption=None, force_document=False,
        progress_callback=None, reply_to=None, attributes=None,
        thumb=None, allow_cache=True, parse_mode=(),
        voice_note=False, video_note=False, buttons=None, silent=None,
        supports_streaming=False, **kwargs):
    """
    Sends a file to the specified entity.
    Args:
        entity (`entity`):
            Who will receive the file.
        file (`str` | `bytes` | `file` | `media`):
            The file to send, which can be one of:
            * A local file path to an in-disk file. The file name
              will be the path's base name.
            * A `bytes` byte array with the file's data to send
              (for example, by using ``text.encode('utf-8')``).
              A default file name will be used.
            * A bytes `io.IOBase` stream over the file to send
              (for example, by using ``open(file, 'rb')``).
              Its ``.name`` property will be used for the file name,
              or a default if it doesn't have one.
            * An external URL to a file over the internet. This will
              send the file as "external" media, and Telegram is the
              one that will fetch the media and send it.
            * A Bot API-like ``file_id``. You can convert previously
              sent media to file IDs for later reusing with
              `telethon.utils.pack_bot_file_id`.
            * A handle to an existing file (for example, if you sent a
              message with media before, you can use its ``message.media``
              as a file here).
            * A handle to an uploaded file (from `upload_file`).
            To send an album, you should provide a list in this parameter.
            If a list or similar is provided, the files in it will be
            sent as an album in the order in which they appear, sliced
            in chunks of 10 if more than 10 are given.
        caption (`str`, optional):
            Optional caption for the sent media message. When sending an
            album, the caption may be a list of strings, which will be
            assigned to the files pairwise.
        force_document (`bool`, optional):
            If left to ``False`` and the file is a path that ends with
            the extension of an image file or a video file, it will be
            sent as such. Otherwise always as a document.
        progress_callback (`callable`, optional):
            A callback function accepting two parameters:
            ``(sent bytes, total)``.
        reply_to (`int` | `Message <telethon.tl.custom.message.Message>`):
            Same as `reply_to` from `send_message`.
        attributes (`list`, optional):
            Optional attributes that override the inferred ones, like
            :tl:`DocumentAttributeFilename` and so on.
        thumb (`str` | `bytes` | `file`, optional):
            Optional JPEG thumbnail (for documents). **Telegram will
            ignore this parameter** unless you pass a ``.jpg`` file!
            The file must also be small in dimensions and in-disk size.
            Successful thumbnails were files below 20kb and 200x200px.
            Width/height and dimensions/size ratios may be important.
        allow_cache (`bool`, optional):
            Whether to allow using the cached version stored in the
            database or not. Defaults to ``True`` to avoid re-uploads.
            Must be ``False`` if you wish to use different attributes
            or thumb than those that were used when the file was cached.
        parse_mode (`object`, optional):
            See the `TelegramClient.parse_mode
            <telethon.client.messageparse.MessageParseMethods.parse_mode>`
            property for allowed values. Markdown parsing will be used by
            default.
        voice_note (`bool`, optional):
            If ``True`` the audio will be sent as a voice note.
            Set `allow_cache` to ``False`` if you sent the same file
            without this setting before for it to work.
        video_note (`bool`, optional):
            If ``True`` the video will be sent as a video note,
            also known as a round video message.
            Set `allow_cache` to ``False`` if you sent the same file
            without this setting before for it to work.
        buttons (`list`, `custom.Button <telethon.tl.custom.button.Button>`, :tl:`KeyboardButton`):
            The matrix (list of lists), row list or button to be shown
            after sending the message. This parameter will only work if
            you have signed in as a bot. You can also pass your own
            :tl:`ReplyMarkup` here.
        silent (`bool`, optional):
            Whether the message should notify people in a broadcast
            channel or not. Defaults to ``False``, which means it will
            notify them. Set it to ``True`` to alter this behaviour.
        supports_streaming (`bool`, optional):
            Whether the sent video supports streaming or not. Note that
            Telegram only recognizes as streamable some formats like MP4,
            and others like AVI or MKV will not work. You should convert
            these to MP4 before sending if you want them to be streamable.
            Unsupported formats will result in ``VideoContentTypeError``.
    Notes:
        If the ``hachoir3`` package (``hachoir`` module) is installed,
        it will be used to determine metadata from audio and video files.
        If the `pillow` package is installed and you are sending a photo,
        it will be resized to fit within the maximum dimensions allowed
        by Telegram to avoid ``errors.PhotoInvalidDimensionsError``. This
        cannot be done if you are sending :tl:`InputFile`, however.
    Returns:
        The `telethon.tl.custom.message.Message` (or messages) containing
        the sent file, or messages if a list of them was passed.
    """
    # i.e. ``None`` was used
    if not file:
        raise TypeError('Cannot use {!r} as file'.format(file))
    if not caption:
        caption = ''
    # First check if the user passed an iterable, in which case
    # we may want to send as an album if all are photo files.
    if utils.is_list_like(file):
        # TODO Fix progress_callback
        images = []
        if force_document:
            documents = file
        else:
            # Split the iterable into photos (albums) and everything else.
            documents = []
            for x in file:
                if utils.is_image(x):
                    images.append(x)
                else:
                    documents.append(x)
        result = []
        # Albums hold at most 10 photos; send them in slices of 10.
        while images:
            result += await self._send_album(
                entity, images[:10], caption=caption,
                progress_callback=progress_callback, reply_to=reply_to,
                parse_mode=parse_mode, silent=silent
            )
            images = images[10:]
        # Remaining non-photo items are sent one by one via recursion.
        for x in documents:
            result.append(await self.send_file(
                entity, x, allow_cache=allow_cache,
                caption=caption, force_document=force_document,
                progress_callback=progress_callback, reply_to=reply_to,
                attributes=attributes, thumb=thumb, voice_note=voice_note,
                video_note=video_note, buttons=buttons, silent=silent,
                supports_streaming=supports_streaming,
                **kwargs
            ))
        return result
    # Single-file path: resolve the peer and the reply-to message id.
    entity = await self.get_input_entity(entity)
    reply_to = utils.get_message_id(reply_to)
    # Not document since it's subject to change.
    # Needed when a Message is passed to send_message and it has media.
    if 'entities' in kwargs:
        msg_entities = kwargs['entities']
    else:
        caption, msg_entities =\
            await self._parse_message_text(caption, parse_mode)
    file_handle, media, image = await self._file_to_media(
        file, force_document=force_document,
        progress_callback=progress_callback,
        attributes=attributes, allow_cache=allow_cache, thumb=thumb,
        voice_note=voice_note, video_note=video_note,
        supports_streaming=supports_streaming
    )
    # e.g. invalid cast from :tl:`MessageMediaWebPage`
    if not media:
        raise TypeError('Cannot use {!r} as file'.format(file))
    markup = self.build_reply_markup(buttons)
    request = functions.messages.SendMediaRequest(
        entity, media, reply_to_msg_id=reply_to, message=caption,
        entities=msg_entities, reply_markup=markup, silent=silent
    )
    # Build the returned Message from the request/response pair, then
    # remember the uploaded file so later sends can reuse it.
    msg = self._get_response_message(request, await self(request), entity)
    await self._cache_media(msg, file, file_handle, image=image)
    return msg
|
Sends a file to the specified entity.
Args:
entity (`entity`):
Who will receive the file.
file (`str` | `bytes` | `file` | `media`):
The file to send, which can be one of:
* A local file path to an in-disk file. The file name
will be the path's base name.
* A `bytes` byte array with the file's data to send
(for example, by using ``text.encode('utf-8')``).
A default file name will be used.
* A bytes `io.IOBase` stream over the file to send
(for example, by using ``open(file, 'rb')``).
Its ``.name`` property will be used for the file name,
or a default if it doesn't have one.
* An external URL to a file over the internet. This will
send the file as "external" media, and Telegram is the
one that will fetch the media and send it.
* A Bot API-like ``file_id``. You can convert previously
sent media to file IDs for later reusing with
`telethon.utils.pack_bot_file_id`.
* A handle to an existing file (for example, if you sent a
message with media before, you can use its ``message.media``
as a file here).
* A handle to an uploaded file (from `upload_file`).
To send an album, you should provide a list in this parameter.
If a list or similar is provided, the files in it will be
sent as an album in the order in which they appear, sliced
in chunks of 10 if more than 10 are given.
caption (`str`, optional):
Optional caption for the sent media message. When sending an
album, the caption may be a list of strings, which will be
assigned to the files pairwise.
force_document (`bool`, optional):
If left to ``False`` and the file is a path that ends with
the extension of an image file or a video file, it will be
sent as such. Otherwise always as a document.
progress_callback (`callable`, optional):
A callback function accepting two parameters:
``(sent bytes, total)``.
reply_to (`int` | `Message <telethon.tl.custom.message.Message>`):
Same as `reply_to` from `send_message`.
attributes (`list`, optional):
Optional attributes that override the inferred ones, like
:tl:`DocumentAttributeFilename` and so on.
thumb (`str` | `bytes` | `file`, optional):
Optional JPEG thumbnail (for documents). **Telegram will
ignore this parameter** unless you pass a ``.jpg`` file!
The file must also be small in dimensions and in-disk size.
Successful thumbnails were files below 20kb and 200x200px.
Width/height and dimensions/size ratios may be important.
allow_cache (`bool`, optional):
Whether to allow using the cached version stored in the
database or not. Defaults to ``True`` to avoid re-uploads.
Must be ``False`` if you wish to use different attributes
or thumb than those that were used when the file was cached.
parse_mode (`object`, optional):
See the `TelegramClient.parse_mode
<telethon.client.messageparse.MessageParseMethods.parse_mode>`
property for allowed values. Markdown parsing will be used by
default.
voice_note (`bool`, optional):
If ``True`` the audio will be sent as a voice note.
Set `allow_cache` to ``False`` if you sent the same file
without this setting before for it to work.
video_note (`bool`, optional):
If ``True`` the video will be sent as a video note,
also known as a round video message.
Set `allow_cache` to ``False`` if you sent the same file
without this setting before for it to work.
buttons (`list`, `custom.Button <telethon.tl.custom.button.Button>`, :tl:`KeyboardButton`):
The matrix (list of lists), row list or button to be shown
after sending the message. This parameter will only work if
you have signed in as a bot. You can also pass your own
:tl:`ReplyMarkup` here.
silent (`bool`, optional):
Whether the message should notify people in a broadcast
channel or not. Defaults to ``False``, which means it will
notify them. Set it to ``True`` to alter this behaviour.
supports_streaming (`bool`, optional):
Whether the sent video supports streaming or not. Note that
Telegram only recognizes as streamable some formats like MP4,
and others like AVI or MKV will not work. You should convert
these to MP4 before sending if you want them to be streamable.
Unsupported formats will result in ``VideoContentTypeError``.
Notes:
If the ``hachoir3`` package (``hachoir`` module) is installed,
it will be used to determine metadata from audio and video files.
If the `pillow` package is installed and you are sending a photo,
it will be resized to fit within the maximum dimensions allowed
by Telegram to avoid ``errors.PhotoInvalidDimensionsError``. This
cannot be done if you are sending :tl:`InputFile`, however.
Returns:
The `telethon.tl.custom.message.Message` (or messages) containing
the sent file, or messages if a list of them was passed.
|
def from_path_by_ext(dir_path, ext):
    """Create a new FileCollection, and select all files whose extension
    matches ``ext``::

        dir_path = "your/path"
        fc = FileCollection.from_path_by_ext(dir_path, ext=[".jpg", ".png"])

    :param dir_path: directory to scan.
    :param ext: a single extension string (e.g. ``".jpg"``) or a
        collection (list/set/dict) of extension strings.
    :return: a :class:`FileCollection` of the matching files.
    """
    # The original inner function was named ``filter``, shadowing the
    # builtin; renamed and reduced to a single boolean expression.
    if isinstance(ext, (list, set, dict)):  # collection of extensions
        def criterion(winfile):
            return winfile.ext in ext
    else:  # a single extension string
        def criterion(winfile):
            return winfile.ext == ext
    return FileCollection.from_path_by_criterion(
        dir_path, criterion, keepboth=False)
|
Create a new FileCollection, and select all files whose extension
matches ``ext``::
dir_path = "your/path"
fc = FileCollection.from_path_by_ext(dir_path, ext=[".jpg", ".png"])
|
def _make_valid_bounds(self, test_bounds):
"""
Private method: process input bounds into a form acceptable by scipy.optimize,
and check the validity of said bounds.
:param test_bounds: minimum and maximum weight of an asset
:type test_bounds: tuple
:raises ValueError: if ``test_bounds`` is not a tuple of length two.
:raises ValueError: if the lower bound is too high
:return: a tuple of bounds, e.g ((0, 1), (0, 1), (0, 1) ...)
:rtype: tuple of tuples
"""
if len(test_bounds) != 2 or not isinstance(test_bounds, tuple):
raise ValueError(
"test_bounds must be a tuple of (lower bound, upper bound)"
)
if test_bounds[0] is not None:
if test_bounds[0] * self.n_assets > 1:
raise ValueError("Lower bound is too high")
return (test_bounds,) * self.n_assets
|
Private method: process input bounds into a form acceptable by scipy.optimize,
and check the validity of said bounds.
:param test_bounds: minimum and maximum weight of an asset
:type test_bounds: tuple
:raises ValueError: if ``test_bounds`` is not a tuple of length two.
:raises ValueError: if the lower bound is too high
:return: a tuple of bounds, e.g ((0, 1), (0, 1), (0, 1) ...)
:rtype: tuple of tuples
|
def history_report(history, config=None, html=True):
    """
    Test a model and save a history report.

    Parameters
    ----------
    history : memote.HistoryManager
        The manager grants access to previous results.
    config : dict, optional
        The final test report configuration.
    html : bool, optional
        Whether to render the report as full HTML or JSON (default True).
    """
    # Fall back to the default configuration when none was supplied.
    configuration = ReportConfiguration.load() if config is None else config
    report = HistoryReport(history=history, configuration=configuration)
    return report.render_html() if html else report.render_json()
|
Test a model and save a history report.
Parameters
----------
history : memote.HistoryManager
The manager grants access to previous results.
config : dict, optional
The final test report configuration.
html : bool, optional
Whether to render the report as full HTML or JSON (default True).
|
def bound_pseudo(arnoldifyer, Wt,
                 g_norm=0.,
                 G_norm=0.,
                 GW_norm=0.,
                 WGW_norm=0.,
                 tol=1e-6,
                 pseudo_type='auto',
                 pseudo_kwargs=None,
                 delta_n=20,
                 terminate_factor=1.
                 ):
    r'''Bound residual norms of next deflated system.
    :param arnoldifyer: an instance of
      :py:class:`~krypy.deflation.Arnoldifyer`.
    :param Wt: coefficients :math:`\tilde{W}\in\mathbb{C}^{n+d,k}` of the
      considered deflation vectors :math:`W` for the basis :math:`[V,U]`
      where ``V=last_solver.V`` and ``U=last_P.U``, i.e.,
      :math:`W=[V,U]\tilde{W}` and
      :math:`\mathcal{W}=\operatorname{colspan}(W)`. Must fulfill
      :math:`\tilde{W}^*\tilde{W}=I_k`.
    :param g_norm: norm :math:`\|g\|` of difference :math:`g=c-b` of
      right hand sides. Has to fulfill :math:`\|g\|<\|b\|`.
    :param G_norm: norm :math:`\|G\|` of difference
      :math:`G=B-A` of operators.
    :param GW_norm: Norm :math:`\|G|_{\mathcal{W}}\|` of difference
      :math:`G=B-A` of operators restricted to :math:`\mathcal{W}`.
    :param WGW_norm: Norm :math:`\|\langle W,GW\rangle\|_2`.
    :param pseudo_type: One of
      * ``'auto'``: determines if :math:`\hat{H}` is non-normal, normal or
        Hermitian and uses the corresponding mode (see other options below).
      * ``'nonnormal'``: the pseudospectrum of the Hessenberg matrix
        :math:`\hat{H}` is used (involves one computation of a pseudospectrum)
      * ``'normal'``: the pseudospectrum of :math:`\hat{H}` is computed
        efficiently by the union of circles around the eigenvalues.
      * ``'hermitian'``: the pseudospectrum of :math:`\hat{H}` is computed
        efficiently by the union of intervals around the eigenvalues.
      * ``'contain'``: the pseudospectrum of the extended Hessenberg matrix
        :math:`\begin{bmatrix}\hat{H}\\S_i\end{bmatrix}` is used
        (pseudospectrum has to be re computed for each iteration).
      * ``'omit'``: do not compute the pseudospectrum at all and just use the
        residual bounds from the approximate Krylov subspace.
    :param pseudo_kwargs: (optional) arguments that are passed to the method
      that computes the pseudospectrum.
    :param terminate_factor: (optional) terminate the computation if the ratio
      of two subsequent residual norms is larger than the provided factor.
      Defaults to 1.
    '''
    if pseudo_kwargs is None:
        pseudo_kwargs = {}
    # Arnoldify!
    Hh, Rh, q_norm, vdiff_norm, PWAW_norm = arnoldifyer.get(Wt)
    # get original linear system
    ls_orig = arnoldifyer._deflated_solver.linear_system
    # k = number of deflation vectors
    k = Wt.shape[1]
    if k > 0:
        # smallest singular value of W^*AW
        WAW = Wt.T.conj().dot(arnoldifyer.J.dot(arnoldifyer.L.dot(Wt)))
        sigma_min = numpy.min(scipy.linalg.svdvals(WAW))
        if sigma_min <= WGW_norm:
            raise utils.AssumptionError(
                'sigma_min(W^*AW) > ||W^*GW|| not satisfied.')
        eta = GW_norm/(sigma_min - WGW_norm)
    else:
        # no deflation: perturbation factor vanishes
        eta = 0.
    b_norm = ls_orig.MMlb_norm
    beta = PWAW_norm*(eta*(b_norm + g_norm) + g_norm) + vdiff_norm
    # check assumption on g_norm and b_norm
    if g_norm >= b_norm:
        raise utils.AssumptionError(
            '||g_norm|| < ||b_norm|| not satisfied')
    # compute residual norms of Hh*z=e_1*b_norm
    ls_small = linsys.LinearSystem(Hh,
                                   numpy.eye(Hh.shape[0], 1) * q_norm,
                                   normal=ls_orig.normal,
                                   self_adjoint=ls_orig.self_adjoint,
                                   positive_definite=ls_orig.positive_definite
                                   )
    Solver = type(arnoldifyer._deflated_solver)
    if issubclass(Solver, linsys.Minres) or issubclass(Solver, linsys.Gmres):
        aresnorms = utils.get_residual_norms(Hh,
                                             self_adjoint=ls_orig.self_adjoint)
    else:
        # TODO: compute residuals more efficiently for CG
        try:
            solver = Solver(ls_small, tol=tol, maxiter=Hh.shape[0])
        except utils.ConvergenceError as e:
            # use all residuals that have been computed
            # (useful for short recurrences)
            solver = e.solver
        aresnorms = numpy.array(solver.resnorms)
    # absolute residual norm
    aresnorms = aresnorms * q_norm
    if pseudo_type == 'omit':
        # skip the pseudospectrum machinery entirely
        return aresnorms / (b_norm - g_norm)
    # spectrum of Hh
    evals, evecs = scipy.linalg.eig(Hh)
    if ls_small.self_adjoint:
        evals = numpy.real(evals)
    # norm of Hh
    Hh_norm = numpy.linalg.norm(Hh, 2)
    def _auto():
        '''determine pseudo automatically'''
        # is Hh Hermitian?
        if numpy.linalg.norm(Hh-Hh.T.conj(), 2) < 1e-14*Hh_norm:
            return 'hermitian'
        # is Hh normal?
        if numpy.linalg.cond(evecs, 2) < 1+1e-14:
            return 'normal'
        return 'nonnormal'
    if pseudo_type == 'auto':
        pseudo_type = _auto()
    # for delta >= min(|\lambda|), the pseudospectrum will contain zero and
    # thus polymax > 1. Nevertheless, the bound may provide useful
    # information in early iterations with large values of delta.
    # Therefore, the maximal perturbation is chosen as the maximal
    # eigenvalue of Hh.
    delta_max = 1e2*numpy.max(numpy.abs(evals))
    # minimal delta is defined via Rh
    # HACK until numpy.linal.svd (and thus numpy.linalg.norm) is fixed
    from scipy.linalg import svd
    _, Rhsvd, _ = svd(Rh[:, :1])
    delta_min = PWAW_norm*(eta*(Hh_norm + G_norm) + G_norm) + numpy.max(Rhsvd)
    if delta_min == 0:
        delta_min = 1e-16
    import pseudopy
    if not ls_small.normal:
        # construct pseudospectrum for the expected range
        pseudo = pseudopy.NonnormalAuto(Hh, delta_min*0.99, delta_max*1.01,
                                        **pseudo_kwargs)
    elif not ls_small.self_adjoint:
        pseudo = pseudopy.NormalEvals(evals)
    else:
        # Hermitian case: intervals are built per-delta inside the loop.
        pseudo = None
    bounds = [aresnorms[0]]
    for i in range(1, len(aresnorms)):
        # compute roots of polynomial
        if issubclass(Solver, linsys.Cg):
            roots = scipy.linalg.eigvalsh(Hh[:i, :i])
        else:
            # TODO: more stable way of computing the roots of the MINRES
            #       poly with exploitation of symmetry?
            HhQ, HhR = scipy.linalg.qr(Hh[:i+1, :i], mode='economic')
            roots_inv = scipy.linalg.eigvals(HhQ[:i, :].T.conj(), HhR)
            roots = 1./roots_inv[numpy.abs(roots_inv) > 1e-14]
        if ls_small.self_adjoint:
            roots = numpy.real(roots)
        # compute polynomial
        p = utils.NormalizedRootsPolynomial(roots)
        if ls_small.self_adjoint:
            p_minmax_candidates = p.minmax_candidates()
        # absolute residual
        aresnorm = aresnorms[i]
        # perturbation
        # HACK until numpy.linal.svd (and thus numpy.linalg.norm) is fixed
        from scipy.linalg import svd
        _, Rhsvd, _ = svd(Rh[:, :i])
        Rhnrm = numpy.max(Rhsvd)
        epsilon = PWAW_norm*(eta*(Hh_norm + G_norm) + G_norm) \
            + Rhnrm
        #+ numpy.linalg.norm(Rh[:, :i], 2)
        if epsilon == 0:
            epsilon = 1e-16
        if pseudo_type == 'contain':
            raise NotImplementedError('contain not yet implemented')
        # exit if epsilon >= delta_max
        if epsilon >= delta_max:
            break
        delta_log_range = numpy.linspace(numpy.log10(1.01*epsilon),
                                         numpy.log10(delta_max),
                                         delta_n+2
                                         )[0:-1]
        def compute_pseudo(delta_log):
            # evaluate the bound for perturbation radius delta = 10**delta_log
            delta = 10**delta_log
            if ls_small.self_adjoint:
                # pseudospectrum are intervals
                pseudo_intervals = utils.Intervals(
                    [utils.Interval(ev-delta, ev+delta) for ev in evals])
                # add roots of first derivative of p
                candidates = []
                for candidate in p_minmax_candidates:
                    if pseudo_intervals.contains(candidate):
                        candidates.append(candidate)
                all_candidates = numpy.r_[pseudo_intervals.get_endpoints(),
                                          numpy.array(candidates)]
                # evaluate polynomial
                polymax = numpy.max(numpy.abs(p(all_candidates)))
                pseudolen = 2 * delta
            else:
                # get pseudospectrum paths
                pseudo_path = pseudo.contour_paths(delta)
                # length of boundary
                pseudolen = pseudo_path.length()
                # evaluate polynomial on points of path
                if pseudolen > 0:
                    polymax = numpy.max(numpy.abs(p(pseudo_path.vertices())))
                else:
                    polymax = numpy.Inf
            # compute THE bound
            return pseudolen/(2*numpy.pi*delta) \
                * (epsilon/(delta-epsilon)*(q_norm + beta) + beta) \
                * polymax
        # minimization
        from scipy.optimize import minimize_scalar
        opt_res = minimize_scalar(compute_pseudo,
                                  bounds=(delta_log_range[0],
                                          delta_log_range[-1]),
                                  method='bounded',
                                  options={'maxiter': delta_n}
                                  )
        # the delta with minimal value is min_delta = 10**opt_res.x
        min_val = opt_res.fun
        # minimal bound value
        boundval = aresnorm + min_val
        # if not increasing: append to bounds
        if i > 1 and boundval/bounds[-1] > terminate_factor:
            break
        else:
            bounds.append(numpy.min([boundval, bounds[-1]]))
    return numpy.array(bounds) / (b_norm - g_norm)
|
r'''Bound residual norms of next deflated system.
:param arnoldifyer: an instance of
:py:class:`~krypy.deflation.Arnoldifyer`.
:param Wt: coefficients :math:`\tilde{W}\in\mathbb{C}^{n+d,k}` of the
considered deflation vectors :math:`W` for the basis :math:`[V,U]`
where ``V=last_solver.V`` and ``U=last_P.U``, i.e.,
:math:`W=[V,U]\tilde{W}` and
:math:`\mathcal{W}=\operatorname{colspan}(W)`. Must fulfill
:math:`\tilde{W}^*\tilde{W}=I_k`.
:param g_norm: norm :math:`\|g\|` of difference :math:`g=c-b` of
right hand sides. Has to fulfill :math:`\|g\|<\|b\|`.
:param G_norm: norm :math:`\|G\|` of difference
:math:`G=B-A` of operators.
:param GW_norm: Norm :math:`\|G|_{\mathcal{W}}\|` of difference
:math:`G=B-A` of operators restricted to :math:`\mathcal{W}`.
:param WGW_norm: Norm :math:`\|\langle W,GW\rangle\|_2`.
:param pseudo_type: One of
* ``'auto'``: determines if :math:`\hat{H}` is non-normal, normal or
Hermitian and uses the corresponding mode (see other options below).
* ``'nonnormal'``: the pseudospectrum of the Hessenberg matrix
:math:`\hat{H}` is used (involves one computation of a pseudospectrum)
* ``'normal'``: the pseudospectrum of :math:`\hat{H}` is computed
efficiently by the union of circles around the eigenvalues.
* ``'hermitian'``: the pseudospectrum of :math:`\hat{H}` is computed
efficiently by the union of intervals around the eigenvalues.
* ``'contain'``: the pseudospectrum of the extended Hessenberg matrix
:math:`\begin{bmatrix}\hat{H}\\S_i\end{bmatrix}` is used
(pseudospectrum has to be re computed for each iteration).
* ``'omit'``: do not compute the pseudospectrum at all and just use the
residual bounds from the approximate Krylov subspace.
:param pseudo_kwargs: (optional) arguments that are passed to the method
that computes the pseudospectrum.
:param terminate_factor: (optional) terminate the computation if the ratio
of two subsequent residual norms is larger than the provided factor.
Defaults to 1.
|
def push_data(self, data):
    """Push data broadcasted from gateway to device."""
    if not _validate_data(data):
        return False
    # Protocol 1.x delivers a JSON string under 'data'; other protocol
    # versions deliver a parameter list under 'params'.
    if int(self.proto[0:1]) == 1:
        payload = json.loads(data['data'])
    else:
        payload = _list2map(data['params'])
    if payload is None:
        return False
    for callback in self.callbacks[data['sid']]:
        callback(payload, data)
    return True
|
Push data broadcasted from gateway to device
|
def max(self, key=None):
    """
    Find the maximum item in this RDD.

    :param key: A function used to generate key for comparing

    >>> rdd = sc.parallelize([1.0, 5.0, 43.0, 10.0])
    >>> rdd.max()
    43.0
    >>> rdd.max(key=str)
    5.0
    """
    # Pick the pairwise reducer first, then fold the RDD with it.
    reducer = max if key is None else (lambda a, b: max(a, b, key=key))
    return self.reduce(reducer)
|
Find the maximum item in this RDD.
:param key: A function used to generate key for comparing
>>> rdd = sc.parallelize([1.0, 5.0, 43.0, 10.0])
>>> rdd.max()
43.0
>>> rdd.max(key=str)
5.0
|
def ports(self):
    """
    :return: dict
        {
            # container -> host
            "1234": "2345"
        }
    """
    # Return the cached mapping when it has already been computed.
    if self._ports is not None:
        return self._ports
    mapping = {}
    port_section = self.net_settings["Ports"]
    if port_section:
        for name, host_info in port_section.items():
            mapping[name.split("/")[0]] = graceful_chain_get(host_info, 0, "HostPort")
    # in case of --net=host, there's nothing in network settings, let's get it from "Config"
    exposed_ports_section = graceful_chain_get(self.inspect_data, "Config", "ExposedPorts")
    if exposed_ports_section:
        for name in exposed_ports_section:
            mapping[name.split("/")[0]] = None  # extremely docker specific
    self._ports = mapping
    return self._ports
|
:return: dict
{
# container -> host
"1234": "2345"
}
|
def stage_import_from_file(self, fd, filename='upload.gz'):
    """Stage an import from a file upload.

    :param fd: File-like object to upload.
    :param filename: (optional) Filename to use for import as string.
    :return: :class:`imports.Import <imports.Import>` object
    """
    upload = {'file': (filename, fd)}
    response = self.service.post(self.base, files=upload)
    return self.service.decode(ImportSchema(), response)
|
Stage an import from a file upload.
:param fd: File-like object to upload.
:param filename: (optional) Filename to use for import as string.
:return: :class:`imports.Import <imports.Import>` object
|
def reading_dates(reading):
    """
    Given a Reading, with start and end dates and granularities[1] it returns
    an HTML string representing that period. eg:
    * '1–6 Feb 2017'
    * '1 Feb to 3 Mar 2017'
    * 'Feb 2017 to Mar 2018'
    * '2017–2018'
    etc.
    [1] https://www.flickr.com/services/api/misc.dates.html
    """
    # Granularity values used below (Flickr-style): 3 = exact day,
    # 4 = month only, 6 = year only.
    # 3 September 2017
    full_format = '<time datetime="%Y-%m-%d">{}</time>'.format('%-d %B %Y')
    # September 2017
    month_year_format = '<time datetime="%Y-%m">{}</time>'.format('%B %Y')
    # 2017
    year_format = '<time datetime="%Y">{}</time>'.format('%Y')
    # 3
    day_format = '<time datetime="%Y-%m-%d">{}</time>'.format('%-d')
    # 3 September
    day_month_format = '<time datetime="%Y-%m-%d">{}</time>'.format('%-d %B')
    # September
    month_format = '<time datetime="%Y-%m">{}</time>'.format('%B')
    period_format_short = '{}–{}'
    period_format_long = '{} to {}'
    # For brevity:
    start_date = reading.start_date
    end_date = reading.end_date
    start_gran = reading.start_granularity
    end_gran = reading.end_granularity
    # Are start and end in the same day, year or month?
    same_day = False
    same_month = False
    same_year = False
    if start_date and end_date:
        if start_date.strftime('%Y') == end_date.strftime('%Y'):
            same_year = True
        if start_date.strftime('%m%Y') == end_date.strftime('%m%Y'):
            same_month = True
        if start_date.strftime('%d%m%Y') == end_date.strftime('%d%m%Y'):
            same_day = True
    start_str = ''
    end_str = ''
    output = ''
    # Make some basic start and end strings, which we might use...
    if start_date:
        if start_gran == 3:
            start_str = start_date.strftime(full_format)
        elif start_gran == 4:
            start_str = start_date.strftime(month_year_format)
        else:
            start_str = start_date.strftime(year_format)
    if end_date:
        if end_gran == 3:
            end_str = end_date.strftime(full_format)
        elif end_gran == 4:
            end_str = end_date.strftime(month_year_format)
        else:
            end_str = end_date.strftime(year_format)
    # Now make the final strings we'll return:
    if start_date and end_date:
        # A default which will be overridden in many cases. This covers:
        # 1 February 2017 to 3 March 2018
        # 1 February 2017 to March 2018
        # 1 February 2017 to 2018
        # February 2017 to 3 March 2018
        # February 2017 to March 2018
        # February 2017 to 2018
        # 2017 to 3 March 2018
        # 2017 to March 2018
        # 2017 to 2018
        output = period_format_long.format(start_str, end_str)
        if (start_gran == 4 or end_gran == 4) and same_month:
            # Only have enough to output 'February 2017'.
            output = start_str
        elif (start_gran == 6 or end_gran == 6) and same_year:
            # Only have enough to output '2017'.
            output = start_str
        elif start_gran == 3:
            if end_gran == 3:
                if same_day:
                    # 1 February 2017
                    output = start_str
                elif same_month:
                    # 1–6 February 2017
                    output = period_format_short.format(
                        start_date.strftime(day_format),
                        end_str)
                elif same_year:
                    # 1 February to 3 March 2017
                    output = period_format_long.format(
                        start_date.strftime(day_month_format),
                        end_str)
            elif end_gran == 4:
                if same_year:
                    # 1 February to March 2017
                    output = period_format_long.format(
                        start_date.strftime(day_month_format),
                        end_str)
        elif start_gran == 4:
            if end_gran == 3:
                if same_year:
                    # February to 3 March 2017
                    output = period_format_long.format(
                        start_date.strftime(month_format),
                        end_str)
            elif end_gran == 4:
                if same_year:
                    # February to March 2017
                    output = period_format_long.format(
                        start_date.strftime(month_format),
                        end_str)
    elif end_date:
        # Only an end_date.
        if end_gran == 3:
            # Finished on 1 February 2017
            output = "Finished on {}".format(end_str)
        else:
            # Finished in February 2017
            # Finished in 2017
            output = "Finished in {}".format(end_str)
    else:
        # No end_date: the reading has started, but not ended.
        if start_gran == 3:
            # Started on 1 February 2017
            output = "Started on {}".format(start_str)
        else:
            # Started in February 2017
            # Started in 2017
            output = "Started in {}".format(start_str)
    # NOTE(review): ``output`` is passed to format_html() as the template
    # itself; every piece comes from strftime/fixed strings here, but
    # confirm no user-controlled braces can ever reach it.
    return format_html(output)
|
Given a Reading, with start and end dates and granularities[1] it returns
an HTML string representing that period. eg:
* '1–6 Feb 2017'
* '1 Feb to 3 Mar 2017'
* 'Feb 2017 to Mar 2018'
* '2017–2018'
etc.
[1] https://www.flickr.com/services/api/misc.dates.html
|
def register_laser_hooks(self, hook_type: str, hook: Callable):
    """Register a hook with this Laser VM.

    :param hook_type: one of "add_world_state", "execute_state",
        "start_sym_exec", "stop_sym_exec", "start_sym_trans" or
        "stop_sym_trans".
    :param hook: callable invoked when the corresponding event fires.
    :raises ValueError: if ``hook_type`` is not a recognised hook type.
    """
    # Dispatch table replaces the if/elif chain.
    hook_lists = {
        "add_world_state": self._add_world_state_hooks,
        "execute_state": self._execute_state_hooks,
        "start_sym_exec": self._start_sym_exec_hooks,
        "stop_sym_exec": self._stop_sym_exec_hooks,
        "start_sym_trans": self._start_sym_trans_hooks,
        "stop_sym_trans": self._stop_sym_trans_hooks,
    }
    if hook_type not in hook_lists:
        # Bug fix: the original passed the message and hook_type as two
        # separate ValueError args, so the %s was never interpolated.
        raise ValueError(
            "Invalid hook type %s. Must be one of %s"
            % (hook_type, ", ".join(sorted(hook_lists)))
        )
    hook_lists[hook_type].append(hook)
|
registers the hook with this Laser VM
|
def pre_release(version):
    """Generate new docs and release announcements, and commit the changes.

    :param version: the release version string, e.g. ``"5.1.0"``.
    """
    announce(version)
    regen()
    changelog(version, write_out=True)
    fix_formatting()
    msg = "Preparing release version {}".format(version)
    check_call(["git", "commit", "-a", "-m", msg])
    print()
    print(f"{Fore.CYAN}[generate.pre_release] {Fore.GREEN}All done!")
    print()
    # Plain string: no placeholders, so the f-prefix was superfluous.
    print("Please push your branch and open a PR.")
|
Generates new docs, release announcements and creates a local tag.
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.