code
stringlengths 75
104k
| docstring
stringlengths 1
46.9k
|
|---|---|
def execute_pubsub(self, command, *channels):
    """Execute a Redis (p)subscribe/(p)unsubscribe command.

    ConnectionsPool reserves a dedicated connection for pub/sub and
    keeps using it until it is explicitly closed or disconnected
    (unsubscribing from every channel/pattern leaves the connection
    locked for pub/sub use).  There is no auto-reconnect for this
    PUB/SUB connection.

    Returns an asyncio.gather coroutine waiting for all
    channels/patterns to receive answers.
    """
    connection, address = self.get_connection(command)
    if connection is None:
        # No pub/sub connection reserved yet -- acquire one and execute.
        return self._wait_execute_pubsub(address, command, channels, {})
    return connection.execute_pubsub(command, *channels)
|
Executes Redis (p)subscribe/(p)unsubscribe commands.
ConnectionsPool picks separate connection for pub/sub
and uses it until explicitly closed or disconnected
(unsubscribing from all channels/patterns will leave connection
locked for pub/sub use).
There is no auto-reconnect for this PUB/SUB connection.
Returns asyncio.gather coroutine waiting for all channels/patterns
to receive answers.
|
def projects_from_cli(args):
    """Take arguments through the CLI and create a list of specified projects."""
    parser = argparse.ArgumentParser(
        description=('Determine if a set of project dependencies will work '
                     'with Python 3'))
    parser.add_argument(
        '--requirements', '-r', nargs='+', default=(),
        help='path(s) to a pip requirements file (e.g. requirements.txt)')
    parser.add_argument(
        '--metadata', '-m', nargs='+', default=(),
        help='path(s) to a PEP 426 metadata file (e.g. PKG-INFO, pydist.json)')
    parser.add_argument(
        '--projects', '-p', nargs='+', default=(),
        help='name(s) of projects to test for Python 3 support')
    parser.add_argument(
        '--verbose', '-v', action='store_true',
        help='verbose output (e.g. list compatibility overrides)')
    parsed = parser.parse_args(args)
    if not (parsed.requirements or parsed.metadata or parsed.projects):
        parser.error("Missing 'requirements', 'metadata', or 'projects'")
    if parsed.verbose:
        logging.getLogger('ciu').setLevel(logging.INFO)
    # Collect projects from each of the three possible sources, in order.
    projects = list(projects_.projects_from_requirements(parsed.requirements))
    metadata = []
    for metadata_path in parsed.metadata:
        with io.open(metadata_path) as file:
            metadata.append(file.read())
    projects.extend(projects_.projects_from_metadata(metadata))
    projects.extend(map(packaging.utils.canonicalize_name, parsed.projects))
    return projects
|
Take arguments through the CLI and create a list of specified projects.
|
def mod_aggregate(low, chunks, running):
    '''
    Look up all iptables rules in the available low chunks and merge them
    into a single ``rules`` ref in the present low data.
    '''
    # Aggregation only makes sense for rule-adding functions.
    if low.get('fun') not in ('append', 'insert'):
        return low
    gathered = []
    for chunk in chunks:
        if __utils__['state.gen_tag'](chunk) in running:
            # Already ran the iptables state, skip aggregation
            continue
        if chunk.get('state') != 'iptables':
            continue
        if '__agg__' in chunk:
            continue
        # Only aggregate chunks calling the same function as ``low``.
        if chunk.get('fun') != low.get('fun'):
            continue
        if chunk not in gathered:
            gathered.append(chunk)
            chunk['__agg__'] = True
    if gathered:
        if 'rules' in low:
            low['rules'].extend(gathered)
        else:
            low['rules'] = gathered
    return low
|
The mod_aggregate function which looks up all rules in the available
low chunks and merges them into a single rules ref in the present low data
|
def archive(cwd,
            output,
            rev='HEAD',
            prefix=None,
            git_opts='',
            user=None,
            password=None,
            ignore_retcode=False,
            output_encoding=None,
            **kwargs):
    '''
    .. versionchanged:: 2015.8.0
        Returns ``True`` if successful, raises an error if not.
    Interface to `git-archive(1)`_, exports a tarball/zip file of the
    repository
    cwd
        The path to be archived
        .. note::
            ``git archive`` permits a partial archive to be created. Thus, this
            path does not need to be the root of the git repository. Only the
            files within the directory specified by ``cwd`` (and its
            subdirectories) will be in the resulting archive. For example, if
            there is a git checkout at ``/tmp/foo``, then passing
            ``/tmp/foo/bar`` as the ``cwd`` will result in just the files
            underneath ``/tmp/foo/bar`` to be exported as an archive.
    output
        The path of the archive to be created
    overwrite : False
        Unless set to ``True``, Salt will not overwrite an existing archive at
        the path specified by the ``output`` argument.
        .. versionadded:: 2015.8.0
    rev : HEAD
        The revision from which to create the archive
    format
        Manually specify the file format of the resulting archive. This
        argument can be omitted, and ``git archive`` will attempt to guess the
        archive type (and compression) from the filename. ``zip``, ``tar``,
        ``tar.gz``, and ``tgz`` are extensions that are recognized
        automatically, and git can be configured to support other archive types
        with the addition of git configuration keys.
        See the `git-archive(1)`_ manpage explanation of the
        ``--format`` argument (as well as the ``CONFIGURATION`` section of the
        manpage) for further information.
        .. versionadded:: 2015.8.0
    prefix
        Prepend ``<prefix>`` to every filename in the archive. If unspecified,
        the name of the directory at the top level of the repository will be
        used as the prefix (e.g. if ``cwd`` is set to ``/foo/bar/baz``, the
        prefix will be ``baz``, and the resulting archive will contain a
        top-level directory by that name).
        .. note::
            The default behavior if the ``--prefix`` option for ``git archive``
            is not specified is to not prepend a prefix, so Salt's behavior
            differs slightly from ``git archive`` in this respect. Use
            ``prefix=''`` to create an archive with no prefix.
        .. versionchanged:: 2015.8.0
            The behavior of this argument has been changed slightly. As of
            this version, it is necessary to include the trailing slash when
            specifying a prefix, if the prefix is intended to create a
            top-level directory.
    git_opts
        Any additional options to add to git command itself (not the
        ``archive`` subcommand), in a single string. This is useful for passing
        ``-c`` to run git with temporary changes to the git configuration.
        .. versionadded:: 2017.7.0
        .. note::
            This is only supported in git 1.7.2 and newer.
    user
        User under which to run the git command. By default, the command is run
        by the user under which the minion is running.
    password
        Windows only. Required when specifying ``user``. This parameter will be
        ignored on non-Windows platforms.
        .. versionadded:: 2016.3.4
    ignore_retcode : False
        If ``True``, do not log an error to the minion log if the git command
        returns a nonzero exit status.
        .. versionadded:: 2015.8.0
    output_encoding
        Use this option to specify which encoding to use to decode the output
        from any git commands which are run. This should not be needed in most
        cases.
        .. note::
            This should only be needed if the files in the repository were
            created with filenames using an encoding other than UTF-8 to handle
            Unicode characters.
        .. versionadded:: 2018.3.1
    .. _`git-archive(1)`: http://git-scm.com/docs/git-archive
    CLI Example:
    .. code-block:: bash
        salt myminion git.archive /path/to/repo /path/to/archive.tar
    '''
    cwd = _expand_path(cwd, user)
    output = _expand_path(output, user)
    # Sanitize kwargs and make sure that no invalid ones were passed. This
    # allows us to accept 'format' as an argument to this function without
    # shadowing the format() global, while also not allowing unwanted arguments
    # to be passed.
    # NOTE(review): the docstring documents an ``overwrite`` argument, but
    # invalid_kwargs() below would reject it -- presumably overwrite is
    # handled by a wrapper/state module before reaching here; verify.
    kwargs = salt.utils.args.clean_kwargs(**kwargs)
    format_ = kwargs.pop('format', None)
    if kwargs:
        salt.utils.args.invalid_kwargs(kwargs)
    command = ['git'] + _format_git_opts(git_opts)
    command.append('archive')
    # If prefix was set to '' then we skip adding the --prefix option, but if
    # it was not passed (i.e. None) we use the cwd.
    if prefix != '':
        if not prefix:
            prefix = os.path.basename(cwd) + os.sep
        command.extend(['--prefix', prefix])
    if format_:
        command.extend(['--format', format_])
    command.extend(['--output', output, rev])
    _git_run(command,
             cwd=cwd,
             user=user,
             password=password,
             ignore_retcode=ignore_retcode,
             output_encoding=output_encoding)
    # No output (unless --verbose is used, and we don't want all files listed
    # in the output in case there are thousands), so just return True. If there
    # was an error in the git command, it will have already raised an exception
    # and we will never get to this return statement.
    return True
|
.. versionchanged:: 2015.8.0
Returns ``True`` if successful, raises an error if not.
Interface to `git-archive(1)`_, exports a tarball/zip file of the
repository
cwd
The path to be archived
.. note::
``git archive`` permits a partial archive to be created. Thus, this
path does not need to be the root of the git repository. Only the
files within the directory specified by ``cwd`` (and its
subdirectories) will be in the resulting archive. For example, if
there is a git checkout at ``/tmp/foo``, then passing
``/tmp/foo/bar`` as the ``cwd`` will result in just the files
underneath ``/tmp/foo/bar`` to be exported as an archive.
output
The path of the archive to be created
overwrite : False
Unless set to ``True``, Salt will not overwrite an existing archive at
the path specified by the ``output`` argument.
.. versionadded:: 2015.8.0
rev : HEAD
The revision from which to create the archive
format
Manually specify the file format of the resulting archive. This
argument can be omitted, and ``git archive`` will attempt to guess the
archive type (and compression) from the filename. ``zip``, ``tar``,
``tar.gz``, and ``tgz`` are extensions that are recognized
automatically, and git can be configured to support other archive types
with the addition of git configuration keys.
See the `git-archive(1)`_ manpage explanation of the
``--format`` argument (as well as the ``CONFIGURATION`` section of the
manpage) for further information.
.. versionadded:: 2015.8.0
prefix
Prepend ``<prefix>`` to every filename in the archive. If unspecified,
the name of the directory at the top level of the repository will be
used as the prefix (e.g. if ``cwd`` is set to ``/foo/bar/baz``, the
prefix will be ``baz``, and the resulting archive will contain a
top-level directory by that name).
.. note::
The default behavior if the ``--prefix`` option for ``git archive``
is not specified is to not prepend a prefix, so Salt's behavior
differs slightly from ``git archive`` in this respect. Use
``prefix=''`` to create an archive with no prefix.
.. versionchanged:: 2015.8.0
The behavior of this argument has been changed slightly. As of
this version, it is necessary to include the trailing slash when
specifying a prefix, if the prefix is intended to create a
top-level directory.
git_opts
Any additional options to add to git command itself (not the
``archive`` subcommand), in a single string. This is useful for passing
``-c`` to run git with temporary changes to the git configuration.
.. versionadded:: 2017.7.0
.. note::
This is only supported in git 1.7.2 and newer.
user
User under which to run the git command. By default, the command is run
by the user under which the minion is running.
password
Windows only. Required when specifying ``user``. This parameter will be
ignored on non-Windows platforms.
.. versionadded:: 2016.3.4
ignore_retcode : False
If ``True``, do not log an error to the minion log if the git command
returns a nonzero exit status.
.. versionadded:: 2015.8.0
output_encoding
Use this option to specify which encoding to use to decode the output
from any git commands which are run. This should not be needed in most
cases.
.. note::
This should only be needed if the files in the repository were
created with filenames using an encoding other than UTF-8 to handle
Unicode characters.
.. versionadded:: 2018.3.1
.. _`git-archive(1)`: http://git-scm.com/docs/git-archive
CLI Example:
.. code-block:: bash
salt myminion git.archive /path/to/repo /path/to/archive.tar
|
def fso_rmtree(self, path, ignore_errors=False, onerror=None):
    """Overlay of shutil.rmtree(): recursively delete *path*.

    :param path: directory to remove; symlinks are rejected, matching
        shutil.rmtree semantics.
    :param ignore_errors: when True, all errors are silently ignored.
    :param onerror: callable invoked as ``onerror(func, path, exc_info)``
        on each failure; defaults to re-raising the active exception.
    """
    if ignore_errors:
        def onerror(*args):
            pass
    elif onerror is None:
        def onerror(*args):
            raise
    try:
        if self.fso_islink(path):
            # symlinks to directories are forbidden, see shutil bug #1669
            raise OSError('Cannot call rmtree on a symbolic link')
    except OSError:
        onerror(os.path.islink, path, sys.exc_info())
        # can't continue even if onerror hook returns
        return
    names = []
    try:
        names = self.fso_listdir(path)
    except OSError:
        # BUG FIX: was Python 2-only ``except os.error, err:`` syntax,
        # which is a SyntaxError on Python 3 (``err`` was unused anyway).
        onerror(os.listdir, path, sys.exc_info())
    for name in names:
        fullname = os.path.join(path, name)
        try:
            mode = self.fso_lstat(fullname).st_mode
        except OSError:
            mode = 0
        if stat.S_ISDIR(mode):
            self.fso_rmtree(fullname, ignore_errors, onerror)
        else:
            try:
                self.fso_remove(fullname)
            except OSError:
                onerror(os.remove, fullname, sys.exc_info())
    try:
        self.fso_rmdir(path)
    except OSError:
        onerror(os.rmdir, path, sys.exc_info())
|
overlays shutil.rmtree()
|
def destroy_vm_vdis(name=None, session=None, call=None):
    '''
    Destroy the (non-ISO) virtual disk images attached to a VM.
    .. code-block:: bash
        salt-cloud -a destroy_vm_vdis xenvm01
    '''
    if session is None:
        session = _get_session()
    ret = {}
    vms = session.xenapi.VM.get_by_name_label(name)
    # Only proceed when the name resolves to exactly one VM.
    if len(vms) != 1:
        return ret
    vbds = session.xenapi.VM.get_VBDs(vms[0])
    if vbds is None:
        return ret
    index = 0
    for vbd in vbds:
        vbd_record = session.xenapi.VBD.get_record(vbd)
        vdi_ref = vbd_record['VDI']
        if vdi_ref == 'OpaqueRef:NULL':
            continue
        vdi_record = session.xenapi.VDI.get_record(vdi_ref)
        # Never destroy ISO images, only disk VDIs.
        if 'iso' in vdi_record['name_label']:
            continue
        session.xenapi.VDI.destroy(vdi_ref)
        ret['vdi-{}'.format(index)] = vdi_record['name_label']
        index += 1
    return ret
|
Get virtual block devices on VM
.. code-block:: bash
salt-cloud -a destroy_vm_vdis xenvm01
|
def process_exception(self, request, exception):
    """Recover from an expired CAS ticket.

    A CasTicketException usually means the ticket timed out, so log the
    user out and redirect back to the same page, which triggers a fresh
    login.  Any other exception is left for other handlers (None).
    """
    if not isinstance(exception, CasTicketException):
        return None
    do_logout(request)
    # This assumes that request.path requires authentication.
    return HttpResponseRedirect(request.path)
|
When we get a CasTicketException, that is probably caused by the ticket timing out.
So logout/login and get the same page again.
|
def find_uncommitted_filefields(sender, instance, **kwargs):
    """
    A pre_save signal handler which attaches an attribute to the model instance
    containing all uncommitted ``FileField``s, which can then be used by the
    :func:`signal_committed_filefields` post_save handler.
    """
    pending = []
    instance._uncommitted_filefields = pending
    candidate_fields = sender._meta.fields
    update_fields = kwargs.get('update_fields', None)
    if update_fields:
        # Restrict the scan to the fields actually being saved.
        candidate_fields = set(update_fields).intersection(candidate_fields)
    for field in candidate_fields:
        if not isinstance(field, FileField):
            continue
        fieldfile = getattr(instance, field.name)
        if fieldfile and not fieldfile._committed:
            pending.append(field.name)
|
A pre_save signal handler which attaches an attribute to the model instance
containing all uncommitted ``FileField``s, which can then be used by the
:func:`signal_committed_filefields` post_save handler.
|
def numpy_binning(data, bins=10, range=None, *args, **kwargs) -> NumpyBinning:
    """Construct binning schema compatible with numpy.histogram

    Parameters
    ----------
    data: array_like, optional
        This is optional if both bins and range are set
    bins: int or array_like
    range: Optional[tuple]
        (min, max)
    includes_right_edge: Optional[bool]
        default: True

    See Also
    --------
    numpy.histogram
    """
    if isinstance(bins, int):
        # Evenly spaced edges over the requested (or observed) data range.
        low, high = range if range else (data.min(), data.max())
        edges = np.linspace(low, high, bins + 1)
    elif np.iterable(bins):
        edges = np.asarray(bins)
    else:
        # Some numpy edge case
        _, edges = np.histogram(data, bins, **kwargs)
    return NumpyBinning(edges)
|
Construct binning schema compatible with numpy.histogram
Parameters
----------
data: array_like, optional
This is optional if both bins and range are set
bins: int or array_like
range: Optional[tuple]
(min, max)
includes_right_edge: Optional[bool]
default: True
See Also
--------
numpy.histogram
|
def dead_letter(self, description=None):
    """Move the message to the Dead Letter queue.

    The Dead Letter queue is a sub-queue for messages that failed to
    process correctly or otherwise need further inspection or processing;
    the queue can also be configured to send expired messages there.
    To receive dead-lettered messages, use
    `QueueClient.get_deadletter_receiver()` or
    `SubscriptionClient.get_deadletter_receiver()`.

    :param description: The reason for dead-lettering the message.
    :type description: str
    :raises: ~azure.servicebus.common.errors.MessageAlreadySettled if the message has been settled.
    :raises: ~azure.servicebus.common.errors.MessageLockExpired if message lock has already expired.
    :raises: ~azure.servicebus.common.errors.SessionLockExpired if session lock has already expired.
    :raises: ~azure.servicebus.common.errors.MessageSettleFailed if message settle operation fails.
    """
    self._is_live('dead-letter')
    reason = str(description) if description else ""
    self._receiver._settle_deferred(  # pylint: disable=protected-access
        'suspended',
        [self.lock_token],
        dead_letter_details={
            'deadletter-reason': reason,
            'deadletter-description': reason,
        })
    self._settled = True
|
Move the message to the Dead Letter queue.
The Dead Letter queue is a sub-queue that can be
used to store messages that failed to process correctly, or otherwise require further inspection
or processing. The queue can also be configured to send expired messages to the Dead Letter queue.
To receive dead-lettered messages, use `QueueClient.get_deadletter_receiver()` or
`SubscriptionClient.get_deadletter_receiver()`.
:param description: The reason for dead-lettering the message.
:type description: str
:raises: ~azure.servicebus.common.errors.MessageAlreadySettled if the message has been settled.
:raises: ~azure.servicebus.common.errors.MessageLockExpired if message lock has already expired.
:raises: ~azure.servicebus.common.errors.SessionLockExpired if session lock has already expired.
:raises: ~azure.servicebus.common.errors.MessageSettleFailed if message settle operation fails.
|
def add_record(self, is_sslv2=None, is_tls13=None):
    """
    Append a new TLS, SSLv2 or TLS 1.3 record to the packets buffered out.
    """
    if is_sslv2 is None and is_tls13 is None:
        # Infer the record flavour from the (advertised) session version.
        version = (self.cur_session.tls_version or
                   self.cur_session.advertised_tls_version)
        if version in (0x0200, 0x0002):
            is_sslv2 = True
        elif version >= 0x0304:
            is_tls13 = True
    if is_sslv2:
        record_cls = SSLv2
    elif is_tls13:
        record_cls = TLS13
    else:
        record_cls = TLS
    self.buffer_out.append(record_cls(tls_session=self.cur_session))
|
Add a new TLS or SSLv2 or TLS 1.3 record to the packets buffered out.
|
def factorset_divide(factorset1, factorset2):
    r"""
    Base method for dividing two factor sets.

    Division of two factor sets :math:`\frac{\vec\phi_1}{\vec\phi_2}` is the
    union of all the factors present in :math:`\vec\phi_1` with
    :math:`\frac{1}{\phi_i}` for all the factors present in :math:`\vec\phi_2`.

    Parameters
    ----------
    factorset1: FactorSet
        The dividend
    factorset2: FactorSet
        The divisor

    Returns
    -------
    The division of factorset1 and factorset2

    Examples
    --------
    >>> from pgmpy.factors import FactorSet
    >>> from pgmpy.factors.discrete import DiscreteFactor
    >>> from pgmpy.factors import factorset_divide
    >>> phi1 = DiscreteFactor(['x1', 'x2', 'x3'], [2, 3, 2], range(12))
    >>> phi2 = DiscreteFactor(['x3', 'x4', 'x1'], [2, 2, 2], range(8))
    >>> factor_set1 = FactorSet(phi1, phi2)
    >>> phi3 = DiscreteFactor(['x5', 'x6', 'x7'], [2, 2, 2], range(8))
    >>> phi4 = DiscreteFactor(['x5', 'x7', 'x8'], [2, 2, 2], range(8))
    >>> factor_set2 = FactorSet(phi3, phi4)
    >>> factor_set3 = factorset_divide(factor_set2, factor_set1)
    >>> print(factor_set3)
    set([<DiscreteFactor representing phi(x3:2, x4:2, x1:2) at 0x7f119ad78f90>,
    <DiscreteFactor representing phi(x5:2, x6:2, x7:2) at 0x7f119ad78e50>,
    <DiscreteFactor representing phi(x1:2, x2:3, x3:2) at 0x7f119ad78ed0>,
    <DiscreteFactor representing phi(x5:2, x7:2, x8:2) at 0x7f119ad78e90>])
    """
    if not (isinstance(factorset1, FactorSet) and isinstance(factorset2, FactorSet)):
        raise TypeError("factorset1 and factorset2 must be FactorSet instances")
    return factorset1.divide(factorset2, inplace=False)
|
r"""
Base method for dividing two factor sets.
Division of two factor sets :math:`\frac{\vec\phi_1}{\vec\phi_2}` basically translates to union of all the factors
present in :math:`\vec\phi_1` and :math:`\frac{1}{\phi_i}` of all the factors present in :math:`\vec\phi_2`.
Parameters
----------
factorset1: FactorSet
The dividend
factorset2: FactorSet
The divisor
Returns
-------
The division of factorset1 and factorset2
Examples
--------
>>> from pgmpy.factors import FactorSet
>>> from pgmpy.factors.discrete import DiscreteFactor
>>> from pgmpy.factors import factorset_divide
>>> phi1 = DiscreteFactor(['x1', 'x2', 'x3'], [2, 3, 2], range(12))
>>> phi2 = DiscreteFactor(['x3', 'x4', 'x1'], [2, 2, 2], range(8))
>>> factor_set1 = FactorSet(phi1, phi2)
>>> phi3 = DiscreteFactor(['x5', 'x6', 'x7'], [2, 2, 2], range(8))
>>> phi4 = DiscreteFactor(['x5', 'x7', 'x8'], [2, 2, 2], range(8))
>>> factor_set2 = FactorSet(phi3, phi4)
>>> factor_set3 = factorset_divide(factor_set2, factor_set1)
>>> print(factor_set3)
set([<DiscreteFactor representing phi(x3:2, x4:2, x1:2) at 0x7f119ad78f90>,
<DiscreteFactor representing phi(x5:2, x6:2, x7:2) at 0x7f119ad78e50>,
<DiscreteFactor representing phi(x1:2, x2:3, x3:2) at 0x7f119ad78ed0>,
<DiscreteFactor representing phi(x5:2, x7:2, x8:2) at 0x7f119ad78e90>])
|
def encode(self, x):
    """
    Given an input array `x` it returns its associated encoding `y(x)`.
    Please cf. the paper for more details.
    Note that NO learning takes place.
    """
    size = self._outputSize
    gain = self._lambda

    def activation(p):
        # Sigmoid response of the recurrent layer for candidate state p.
        return expit(gain * (np.dot(self._Q, x) + np.dot(self._W, p) - self._t))

    encoding = np.zeros(size)
    try:
        candidate = np.random.sample(size)
        candidate = fixed_point(activation, candidate,
                                maxiter=2000, method='del2')
    except RuntimeError:
        # Non-convergence: keep the last candidate state as-is.
        pass
    winners = np.where(candidate > 0.5)[0]
    encoding[winners] = 1.
    return encoding
|
Given an input array `x` it returns its associated encoding `y(x)`.
Please cf. the paper for more details.
Note that NO learning takes place.
|
def query_disease():
    """
    Returns list of diseases by query parameters
    ---
    tags:
    - Query functions
    parameters:
    - name: identifier
      in: query
      type: string
      required: false
      description: Disease identifier
      default: DI-03832
    - name: ref_id
      in: query
      type: string
      required: false
      description: reference identifier
      default: 104300
    - name: ref_type
      in: query
      type: string
      required: false
      description: Reference type
      default: MIM
    - name: name
      in: query
      type: string
      required: false
      description: Disease name
      default: Alzheimer disease
    - name: acronym
      in: query
      type: string
      required: false
      description: Disease acronym
      default: AD
    - name: description
      in: query
      type: string
      required: false
      description: Description of disease
      default: '%neurodegenerative disorder%'
    - name: limit
      in: query
      type: integer
      required: false
      description: limit of results numbers
      default: 10
    """
    # NOTE: the docstring above appears to be a Swagger/flasgger spec that is
    # parsed at runtime -- keep its YAML structure intact when editing.
    # Whitelist of string query parameters forwarded to the query layer.
    # NOTE(review): 'limit' is documented above but absent from
    # allowed_str_args -- presumably integer args are handled elsewhere
    # inside get_args; verify against its implementation.
    allowed_str_args = ['identifier', 'ref_id', 'ref_type', 'name', 'acronym', 'description']
    args = get_args(
        request_args=request.args,
        allowed_str_args=allowed_str_args
    )
    return jsonify(query.disease(**args))
|
Returns list of diseases by query parameters
---
tags:
- Query functions
parameters:
- name: identifier
in: query
type: string
required: false
description: Disease identifier
default: DI-03832
- name: ref_id
in: query
type: string
required: false
description: reference identifier
default: 104300
- name: ref_type
in: query
type: string
required: false
description: Reference type
default: MIM
- name: name
in: query
type: string
required: false
description: Disease name
default: Alzheimer disease
- name: acronym
in: query
type: string
required: false
description: Disease acronym
default: AD
- name: description
in: query
type: string
required: false
description: Description of disease
default: '%neurodegenerative disorder%'
- name: limit
in: query
type: integer
required: false
description: limit of results numbers
default: 10
|
def hash_from_stream(n, hash_stream):
    """
    Not standard hashing algorithm!
    Install NumPy for better hashing service.
    >>> from Redy.Tools._py_hash import hash_from_stream
    >>> s = iter((1, 2, 3))
    >>> assert hash_from_stream(3, map(hash, s)) == hash((1, 2, 3))
    """
    _to_int64 = to_int64
    acc = 0x345678
    mult = _to_int64(1000003)
    # Fold the n hashes into the accumulator, counting down so the
    # multiplier update depends on how many items remain.
    for remaining in range(n - 1, -1, -1):
        item_hash = next(hash_stream)
        acc = _to_int64((acc ^ item_hash) * mult)
        mult = _to_int64(mult + _to_int64(82520 + _to_int64(2 * remaining)))
    acc = _to_int64(acc + 97531)
    # -1 is reserved as an error sentinel by CPython's hash protocol.
    return -2 if acc == -1 else acc
|
Not standard hashing algorithm!
Install NumPy for better hashing service.
>>> from Redy.Tools._py_hash import hash_from_stream
>>> s = iter((1, 2, 3))
>>> assert hash_from_stream(3, map(hash, s)) == hash((1, 2, 3))
|
def join(self, *args):
    """Returns the arguments in the list joined by STR.
    FIRST,JOIN_BY,ARG_1,...,ARG_N
    %{JOIN: ,A,...,F} -> 'A B C ... F'
    """
    joiner, *items = args
    # The remaining arguments are shuffled, so the joined order is random.
    self.random.shuffle(items)
    return joiner.join(items)
|
Returns the arguments in the list joined by STR.
FIRST,JOIN_BY,ARG_1,...,ARG_N
%{JOIN: ,A,...,F} -> 'A B C ... F'
|
def get_strategy_types():
    """Get a list of all :class:`Strategy` subclasses.

    Subclasses are collected recursively, depth-first, and each subclass
    appears exactly once.  (The previous implementation extended the list
    it was iterating over, so the continued iteration re-traversed
    subtrees and duplicated any subclass three or more levels below
    :class:`Strategy`.)
    """
    def collect(type_):
        # Direct subclasses first, then each one's own subtree.
        found = []
        for subtype in type_.__subclasses__():
            found.append(subtype)
            found.extend(collect(subtype))
        return found
    return collect(Strategy)
|
Get a list of all :class:`Strategy` subclasses.
|
def get_deployments(self, project, definition_id=None, definition_environment_id=None, created_by=None, min_modified_time=None, max_modified_time=None, deployment_status=None, operation_status=None, latest_attempts_only=None, query_order=None, top=None, continuation_token=None, created_for=None, min_started_time=None, max_started_time=None, source_branch=None):
    """GetDeployments.
    :param str project: Project ID or project name
    :param int definition_id:
    :param int definition_environment_id:
    :param str created_by:
    :param datetime min_modified_time:
    :param datetime max_modified_time:
    :param str deployment_status:
    :param str operation_status:
    :param bool latest_attempts_only:
    :param str query_order:
    :param int top:
    :param int continuation_token:
    :param str created_for:
    :param datetime min_started_time:
    :param datetime max_started_time:
    :param str source_branch:
    :rtype: [Deployment]
    """
    route_values = {}
    if project is not None:
        route_values['project'] = self._serialize.url('project', project, 'str')
    # (wire name, python name, value, serialization type) -- order matters
    # only cosmetically; None values are skipped entirely.
    param_specs = [
        ('definitionId', 'definition_id', definition_id, 'int'),
        ('definitionEnvironmentId', 'definition_environment_id', definition_environment_id, 'int'),
        ('createdBy', 'created_by', created_by, 'str'),
        ('minModifiedTime', 'min_modified_time', min_modified_time, 'iso-8601'),
        ('maxModifiedTime', 'max_modified_time', max_modified_time, 'iso-8601'),
        ('deploymentStatus', 'deployment_status', deployment_status, 'str'),
        ('operationStatus', 'operation_status', operation_status, 'str'),
        ('latestAttemptsOnly', 'latest_attempts_only', latest_attempts_only, 'bool'),
        ('queryOrder', 'query_order', query_order, 'str'),
        ('$top', 'top', top, 'int'),
        ('continuationToken', 'continuation_token', continuation_token, 'int'),
        ('createdFor', 'created_for', created_for, 'str'),
        ('minStartedTime', 'min_started_time', min_started_time, 'iso-8601'),
        ('maxStartedTime', 'max_started_time', max_started_time, 'iso-8601'),
        ('sourceBranch', 'source_branch', source_branch, 'str'),
    ]
    query_parameters = {
        wire_name: self._serialize.query(py_name, value, type_name)
        for wire_name, py_name, value, type_name in param_specs
        if value is not None
    }
    response = self._send(http_method='GET',
                          location_id='b005ef73-cddc-448e-9ba2-5193bf36b19f',
                          version='5.0',
                          route_values=route_values,
                          query_parameters=query_parameters)
    return self._deserialize('[Deployment]', self._unwrap_collection(response))
|
GetDeployments.
:param str project: Project ID or project name
:param int definition_id:
:param int definition_environment_id:
:param str created_by:
:param datetime min_modified_time:
:param datetime max_modified_time:
:param str deployment_status:
:param str operation_status:
:param bool latest_attempts_only:
:param str query_order:
:param int top:
:param int continuation_token:
:param str created_for:
:param datetime min_started_time:
:param datetime max_started_time:
:param str source_branch:
:rtype: [Deployment]
|
def reload_plugin(self, name, *args):
    """
    Reload a plugin: disable it, drop its instance and module, re-read
    its manifest, then load and enable it again.

    :param name: The name of the plugin
    :param args: The args to pass to the plugin
    """
    debug = self._logger.debug
    debug("Reloading {}.".format(name))
    debug("Disabling {}.".format(name))
    self.get_plugin(name).disable()
    debug("Removing plugin instance.")
    self._plugins.pop(name)
    debug("Unloading module.")
    self._modules.pop(name)
    debug("Reloading manifest.")
    stale_manifest = self.get_manifest(name)
    self._manifests.remove(stale_manifest)
    self.load_manifest(stale_manifest["path"])
    debug("Loading {}.".format(name))
    self.load_plugin(self.get_manifest(name), *args)
    debug("Enabling {}.".format(name))
    self.get_plugin(name).enable()
    debug("Plugin {} reloaded.".format(name))
|
Reloads a given plugin
:param name: The name of the plugin
:param args: The args to pass to the plugin
|
def set_org_disclaimer(self):
    """Auto-connect slot activated when org disclaimer checkbox is toggled.
    """
    use_custom = self.custom_org_disclaimer_checkbox.isChecked()
    if use_custom:
        # Restore the previously saved organisation disclaimer.
        org_disclaimer = setting(
            'reportDisclaimer',
            default=disclaimer(),
            expected_type=str,
            qsettings=self.settings)
    else:
        # Fall back to the default organisation disclaimer.
        org_disclaimer = disclaimer()
    self.txtDisclaimer.setPlainText(org_disclaimer)
    self.txtDisclaimer.setEnabled(use_custom)
|
Auto-connect slot activated when org disclaimer checkbox is toggled.
|
def ingest(self, text, logMessage=None):
    """Ingest a new object into Fedora. Returns the pid of the new object on success.
    On success the response has status 201 Created and its content is the
    newly created pid.
    Wrapper function for `Fedora REST API ingest <http://fedora-commons.org/confluence/display/FCR30/REST+API#RESTAPI-ingest>`_
    :param text: full text content of the object to be ingested
    :param logMessage: optional log message
    :rtype: :class:`requests.models.Response`
    """
    # NOTE: ingest supports additional options (label/format/namespace/
    # ownerId, etc.) but we generally set those in the foxml passed in.
    params = {'logMessage': logMessage} if logMessage else {}
    # Unicode must be encoded before sending as bytes; otherwise we get
    # ascii encode errors in httplib/ssl.
    if isinstance(text, six.text_type):
        text = bytes(text.encode('utf-8'))
    return self.post('objects/new', data=text, params=params,
                     headers={'Content-Type': 'text/xml'})
|
Ingest a new object into Fedora. Returns the pid of the new object on success.
Return response should have a status of 201 Created on success, and
the content of the response will be the newly created pid.
Wrapper function for `Fedora REST API ingest <http://fedora-commons.org/confluence/display/FCR30/REST+API#RESTAPI-ingest>`_
:param text: full text content of the object to be ingested
:param logMessage: optional log message
:rtype: :class:`requests.models.Response`
|
def learn_transportation_mode(track, clf):
    """Feed a track's labeled transportation modes into a classifier.

    Args:
        track (:obj:`Track`)
        clf (:obj:`Classifier`)
    """
    for segment in track.segments:
        feature_set = []
        label_set = []
        for mode in segment.transportation_modes:
            chunk = segment.points[mode['from']:mode['to']]
            # Skip empty spans; they carry no trainable signal.
            if chunk:
                feature_set.append(extract_features_2(chunk))
                label_set.append(mode['label'])
        clf.learn(feature_set, label_set)
|
Inserts transportation modes of a track into a classifier
Args:
track (:obj:`Track`)
clf (:obj:`Classifier`)
|
def parse_instance_count(instance_count, speaker_total_count):
    """Convert a per-speaker instance-count spec into absolute counts.

    Values in ``instance_count`` that are floats in [0.0, 1.0] are
    interpreted as a fraction of that speaker's total; any other number
    is used as an absolute count. Speakers not mentioned in
    ``instance_count`` keep their total count.

    :param instance_count: mapping of speaker id -> count or fraction
    :param speaker_total_count: mapping of speaker id (str) -> total count
    :return: mapping of speaker id (str) -> absolute instance count
    """
    # Use all the instances of a speaker unless specified below.
    result = copy.copy(speaker_total_count)
    for speaker_id, count in instance_count.items():
        speaker_id = str(speaker_id)
        speaker_total = speaker_total_count.get(speaker_id, 0)
        # isinstance() replaces the unidiomatic `type(count) == float`;
        # bools do not match float, so behavior for ints is unchanged.
        if isinstance(count, float) and 0.0 <= count <= 1.0:
            result[speaker_id] = int(speaker_total * count)
        else:
            result[speaker_id] = int(count)
    return result
|
This parses the instance count dictionary
(that may contain floats from 0.0 to 1.0 representing a percentage)
and converts it to actual instance count.
|
def getScreenshotPropertyType(self, screenshotHandle):
    """Query the type of a requested screenshot.

    Call this (with the sibling property accessors) after receiving a
    VREvent_RequestScreenshot event to inspect the request's details.
    Returns a (type, error) pair where `error` is an EVRScreenshotError.
    """
    error = EVRScreenshotError()
    get_type = self.function_table.getScreenshotPropertyType
    screenshot_type = get_type(screenshotHandle, byref(error))
    return screenshot_type, error
|
When your application receives a
VREvent_RequestScreenshot event, call these functions to get
the details of the screenshot request.
|
def ndiff(a, b, linejunk=None, charjunk=IS_CHARACTER_JUNK):
    r"""
    Compare `a` and `b` (lists of strings); return a `Differ`-style delta.
    Optional keyword parameters `linejunk` and `charjunk` are for filter
    functions (or None):
    - linejunk: A function that should accept a single string argument, and
      return true iff the string is junk. The default is None, and is
      recommended; as of Python 2.3, an adaptive notion of "noise" lines is
      used that does a good job on its own.
    - charjunk: A function that should accept a string of length 1. The
      default is module-level function IS_CHARACTER_JUNK, which filters out
      whitespace characters (a blank or tab; note: bad idea to include newline
      in this!).
    Tools/scripts/ndiff.py is a command-line front-end to this function.
    Example:
    >>> diff = ndiff('one\ntwo\nthree\n'.splitlines(1),
    ...              'ore\ntree\nemu\n'.splitlines(1))
    >>> print ''.join(diff),
    - one
    ?  ^
    + ore
    ?  ^
    - two
    - three
    ?  -
    + tree
    + emu
    """
    # Delegate entirely to Differ; this function only provides the
    # conventional module-level entry point.
    differ = Differ(linejunk, charjunk)
    return differ.compare(a, b)
|
r"""
Compare `a` and `b` (lists of strings); return a `Differ`-style delta.
Optional keyword parameters `linejunk` and `charjunk` are for filter
functions (or None):
- linejunk: A function that should accept a single string argument, and
return true iff the string is junk. The default is None, and is
recommended; as of Python 2.3, an adaptive notion of "noise" lines is
used that does a good job on its own.
- charjunk: A function that should accept a string of length 1. The
default is module-level function IS_CHARACTER_JUNK, which filters out
whitespace characters (a blank or tab; note: bad idea to include newline
in this!).
Tools/scripts/ndiff.py is a command-line front-end to this function.
Example:
>>> diff = ndiff('one\ntwo\nthree\n'.splitlines(1),
... 'ore\ntree\nemu\n'.splitlines(1))
>>> print ''.join(diff),
- one
? ^
+ ore
? ^
- two
- three
? -
+ tree
+ emu
|
def nonwhitelisted_allowed_principals(self, whitelist=None):
    """Return the Allow statements whose principals fall outside the whitelist.

    An empty/missing whitelist yields an empty list.
    """
    if not whitelist:
        return []
    return [
        statement for statement in self.statements
        if statement.non_whitelisted_principals(whitelist)
        and statement.effect == "Allow"
    ]
|
Find non whitelisted allowed principals.
|
def network_from_df(self, df):
    """
    Defines a network from a pandas DataFrame edge list.
    Parameters
    ----------
    df : pandas.DataFrame
        Should have columns: \'i\', \'j\', \'t\' where i and j are node indicies and t is the temporal index.
        If weighted, should also include \'weight\'. Each row is an edge.
    """
    # Validates the dataframe layout; raises if it is not a proper edge list.
    teneto.utils.check_TemporalNetwork_input(df, 'df')
    self.network = df
    # Refresh any representations derived from self.network.
    self._update_network()
|
Defines a network from an array.
Parameters
----------
array : array
Pandas dataframe. Should have columns: \'i\', \'j\', \'t\' where i and j are node indicies and t is the temporal index.
If weighted, should also include \'weight\'. Each row is an edge.
|
def get_action_side_effects(self):
    """Collect the side effects of every batch target of the underlying Action.

    Returns a UniqueList, so duplicates across targets are dropped.
    """
    side_effects = SCons.Util.UniqueList([])
    for batch_target in self.get_action_targets():
        side_effects.extend(batch_target.side_effects)
    return side_effects
|
Returns all side effects for all batches of this
Executor used by the underlying Action.
|
def query(conn_type, option, post_data=None):
    '''
    Execute the HTTP request to the API

    :param conn_type: HTTP verb to use: 'get', 'post', 'put' or 'delete'
    :param option: path appended to the api2/json base URL
    :param post_data: request body for post/put/delete requests
    :return: the decoded ``data`` member of the JSON response, or None
        when the response body could not be parsed as JSON
    :raises SaltCloudExecutionFailure: when the JSON response has no
        ``data`` member
    '''
    if ticket is None or csrf is None or url is None:
        log.debug('Not authenticated yet, doing that now..')
        _authenticate()
    full_url = 'https://{0}:{1}/api2/json/{2}'.format(url, port, option)
    log.debug('%s: %s (%s)', conn_type, full_url, post_data)
    httpheaders = {'Accept': 'application/json',
                   'Content-Type': 'application/x-www-form-urlencoded',
                   'User-Agent': 'salt-cloud-proxmox'}
    # Mutating verbs need the CSRF prevention token.
    if conn_type == 'post':
        httpheaders['CSRFPreventionToken'] = csrf
        response = requests.post(full_url, verify=verify_ssl,
                                 data=post_data,
                                 cookies=ticket,
                                 headers=httpheaders)
    elif conn_type == 'put':
        httpheaders['CSRFPreventionToken'] = csrf
        response = requests.put(full_url, verify=verify_ssl,
                                data=post_data,
                                cookies=ticket,
                                headers=httpheaders)
    elif conn_type == 'delete':
        httpheaders['CSRFPreventionToken'] = csrf
        response = requests.delete(full_url, verify=verify_ssl,
                                   data=post_data,
                                   cookies=ticket,
                                   headers=httpheaders)
    elif conn_type == 'get':
        response = requests.get(full_url, verify=verify_ssl,
                                cookies=ticket)
    response.raise_for_status()
    try:
        returned_data = response.json()
    except ValueError:
        # Body was not valid JSON; preserve the old best-effort behavior
        # of logging and returning None for this case only.
        log.error('Error in trying to process JSON')
        log.error(response)
        return None
    # BUG FIX: this raise previously sat inside the try block, so the broad
    # `except Exception` swallowed it and the caller silently got None.
    if 'data' not in returned_data:
        raise SaltCloudExecutionFailure
    return returned_data['data']
|
Execute the HTTP request to the API
|
def list_items(cls, repo, *args, **kwargs):
    """Return every item of this type as an :class:`IterableList`.

    Subclasses may interpret ``args``/``kwargs`` as they see fit; given no
    arguments they are obliged to return all items.

    :note: Favor the iter_items method as it will
    :return: list(Item,...) list of item instances
    """
    items = IterableList(cls._id_attribute_)
    items.extend(cls.iter_items(repo, *args, **kwargs))
    return items
|
Find all items of this type - subclasses can specify args and kwargs differently.
If no args are given, subclasses are obliged to return all items if no additional
arguments arg given.
:note: Favor the iter_items method as it will
:return:list(Item,...) list of item instances
|
def mem_extend(self, start: int, size: int) -> None:
    """Grow this machine state's memory to cover [start, start + size).

    Charges the corresponding memory-expansion gas (and verifies the gas
    budget) before actually extending.

    :param start: Start of memory extension
    :param size: Size of memory extension
    """
    extension = self.calculate_extension_size(start, size)
    if not extension:
        return
    gas_cost = self.calculate_memory_gas(start, size)
    self.min_gas_used += gas_cost
    self.max_gas_used += gas_cost
    # May raise if the extension would exceed the available gas.
    self.check_gas()
    self.memory.extend(extension)
|
Extends the memory of this machine state.
:param start: Start of memory extension
:param size: Size of memory extension
|
def cmyk(self):
    """CMYK components, each bounded to the range 0.0 - 1.0."""
    c, m, y = self.cmy
    k = min(c, m, y)
    if k == 1:
        # Pure black: avoid dividing by (1 - k) == 0.
        c = m = y = 1
    else:
        scale = 1 - k
        c = (c - k) / scale
        m = (m - k) / scale
        y = (y - k) / scale
    return tuple(self._apply_float_bounds(component)
                 for component in (c, m, y, k))
|
CMYK: all returned in range 0.0 - 1.0
|
def indices(this, that, axis=semantics.axis_default, missing='raise'):
    """Find indices such that this[indices] == that
    If multiple indices satisfy this condition, the first index found is returned
    Parameters
    ----------
    this : indexable object
        items to search in
    that : indexable object
        items to search for
    axis : int, optional
        axis to operate on
    missing : {'raise', 'ignore', 'mask' or int}
        if `missing` is 'raise', a KeyError is raised if not all elements of `that` are present in `this`
        if `missing` is 'mask', a masked array is returned,
        where items of `that` not present in `this` are masked out
        if `missing` is 'ignore', all elements of `that` are assumed to be present in `this`,
        and output is undefined otherwise
        if missing is an integer, this is used as a fill-value
    Returns
    -------
    indices : ndarray, [that.size], int
        indices such that this[indices] == that
    Notes
    -----
    May be regarded as a vectorized numpy equivalent of list.index
    """
    this = as_index(this, axis=axis, lex_as_struct=True)
    # use this for getting this.keys and that.keys organized the same way;
    # sorting is superfluous though. make sorting a cached property?
    # should we be working with cached properties generally?
    # or we should use sorted values, if searchsorted can exploit this knowledge?
    that = as_index(that, axis=axis, base=True, lex_as_struct=True)
    # use raw private keys here, rather than public unpacked keys
    insertion = np.searchsorted(this._keys, that._keys, sorter=this.sorter, side='left')
    # mode='clip' keeps insertion points past the end of `this` in range;
    # such positions produce non-matching keys and are flagged below.
    indices = np.take(this.sorter, insertion, mode='clip')
    if missing != 'ignore':
        # invalid marks elements of `that` whose nearest candidate in `this`
        # is not an exact match, i.e. keys absent from `this`.
        invalid = this._keys[indices] != that._keys
        if missing == 'raise':
            if np.any(invalid):
                raise KeyError('Not all keys in `that` are present in `this`')
        elif missing == 'mask':
            indices = np.ma.masked_array(indices, invalid)
        else:
            # `missing` is an integer fill value for absent keys.
            indices[invalid] = missing
    return indices
|
Find indices such that this[indices] == that
If multiple indices satisfy this condition, the first index found is returned
Parameters
----------
this : indexable object
items to search in
that : indexable object
items to search for
axis : int, optional
axis to operate on
missing : {'raise', 'ignore', 'mask' or int}
if `missing` is 'raise', a KeyError is raised if not all elements of `that` are present in `this`
if `missing` is 'mask', a masked array is returned,
where items of `that` not present in `this` are masked out
if `missing` is 'ignore', all elements of `that` are assumed to be present in `this`,
and output is undefined otherwise
if missing is an integer, this is used as a fill-value
Returns
-------
indices : ndarray, [that.size], int
indices such that this[indices] == that
Notes
-----
May be regarded as a vectorized numpy equivalent of list.index
|
def get(self, entry):
    """Return the cached entry equal to ``entry``, or None if absent.

    Lookup is by ``entry.key`` first, then by equality within that
    key's bucket; the stored (possibly distinct) object is returned.
    """
    try:
        entries = self.cache[entry.key]
        return entries[entries.index(entry)]
    except (KeyError, ValueError):
        # KeyError: no bucket for this key.  ValueError: bucket exists but
        # holds no equal entry.  The previous bare `except:` also hid
        # unrelated errors (AttributeError, KeyboardInterrupt, ...).
        return None
|
Gets an entry by key. Will return None if there is no
matching entry.
|
def to_unicode(s):
    """Return the object as unicode (only matters for Python 2.x).

    If s is already Unicode it is returned as-is; otherwise s is assumed
    to be UTF-8 encoded and is decoded.

    :param (basestring) s: a str, unicode or other basestring object
    :return (unicode): the object as unicode
    """
    if not isinstance(s, six.string_types):
        raise ValueError("{} must be str or unicode.".format(s))
    if isinstance(s, six.text_type):
        return s
    return six.text_type(s, 'utf-8')
|
Return the object as unicode (only matters for Python 2.x).
If s is already Unicode, return s as is.
Otherwise, assume that s is UTF-8 encoded, and convert to Unicode.
:param (basestring) s: a str, unicode or other basestring object
:return (unicode): the object as unicode
|
def enable_caching(self):
    """Turn on caching for this object and all of its child cachers."""
    self.caching_enabled = True
    for child in self.values():
        child.enable_cacher()
|
Enable the cache of this object.
|
def handle_profile_delete(self, sender, instance, **kwargs):
    """Custom handler for user profile delete.

    Re-saves the related user so dependent state stays consistent;
    a profile that no longer exists is silently ignored.
    """
    try:
        user = instance.user
        self.handle_save(user.__class__, user)
    except get_profile_model().DoesNotExist:
        pass
|
Custom handler for user profile delete
|
def add(repo_path, dest_path):
    '''
    Registers a git repository with homely so that it will run its `HOMELY.py`
    script on each invocation of `homely update`. `homely add` also immediately
    executes a `homely update` so that the dotfiles are installed straight
    away. If the git repository is hosted online, a local clone will be created
    first.
    REPO_PATH
        A path to a local git repository, or the URL for a git repository
        hosted online. If REPO_PATH is a URL, then it should be in a format
        accepted by `git clone`. If REPO_PATH is a URL, you may also specify
        DEST_PATH.
    DEST_PATH
        If REPO_PATH is a URL, then the local clone will be created at
        DEST_PATH. If DEST_PATH is omitted then the path to the local clone
        will be automatically derived from REPO_PATH.
    '''
    # Ensure the homely config directory exists before anything else.
    mkcfgdir()
    try:
        repo = getrepohandler(repo_path)
    except NotARepo as err:
        echo("ERROR: {}: {}".format(ERR_NOT_A_REPO, err.repo_path))
        sys.exit(1)
    # if the repo isn't on disk yet, we'll need to make a local clone of it
    if repo.isremote:
        localrepo, needpull = addfromremote(repo, dest_path)
    elif dest_path:
        raise UsageError("DEST_PATH is only for repos hosted online")
    else:
        try:
            repoid = repo.getrepoid()
        except RepoHasNoCommitsError as err:
            # NOTE(review): `err` is unused; the message deliberately shows
            # only the generic ERR_NO_COMMITS text.
            echo("ERROR: {}".format(ERR_NO_COMMITS))
            sys.exit(1)
        localrepo = RepoInfo(repo, repoid, None)
        needpull = False
    # if we don't have a local repo, then there is nothing more to do
    # (addfromremote may return a falsy localrepo)
    if not localrepo:
        return
    # remember this new local repo
    with saveconfig(RepoListConfig()) as cfg:
        cfg.add_repo(localrepo)
    # Run the initial `homely update`; a failure is reported via exit code.
    success = run_update([localrepo], pullfirst=needpull, cancleanup=True)
    if not success:
        sys.exit(1)
|
Registers a git repository with homely so that it will run its `HOMELY.py`
script on each invocation of `homely update`. `homely add` also immediately
executes a `homely update` so that the dotfiles are installed straight
away. If the git repository is hosted online, a local clone will be created
first.
REPO_PATH
A path to a local git repository, or the URL for a git repository
hosted online. If REPO_PATH is a URL, then it should be in a format
accepted by `git clone`. If REPO_PATH is a URL, you may also specify
DEST_PATH.
DEST_PATH
If REPO_PATH is a URL, then the local clone will be created at
DEST_PATH. If DEST_PATH is omitted then the path to the local clone
will be automatically derived from REPO_PATH.
|
def _generate_autoscaling_metadata(self, cls, args):
    """Provides special handling for the autoscaling.Metadata object.

    Builds the cloudformation Init (and optional Authentication) objects
    from the raw template mapping and instantiates `cls` with them.
    """
    assert isinstance(args, Mapping)
    config_args = args['AWS::CloudFormation::Init']['config']
    init_config = self._create_instance(cloudformation.InitConfig, config_args)
    init = self._create_instance(cloudformation.Init, {'config': init_config})
    auth = None
    if 'AWS::CloudFormation::Authentication' in args:
        auth_args = args['AWS::CloudFormation::Authentication']
        auth_blocks = {
            name: self._create_instance(
                cloudformation.AuthenticationBlock, auth_args[name], name)
            for name in auth_args
        }
        auth = self._create_instance(cloudformation.Authentication, auth_blocks)
    return cls(init, auth)
|
Provides special handling for the autoscaling.Metadata object
|
def steal_docstring_from(obj):
    """Decorator that copies ``obj``'s docstring onto the decorated function.

    Example
    -------
    ::
        @steal_docstring_from(superclass.meth)
        def meth(self, arg):
            "Extra subclass documentation"
            pass

    The new function's docstring is taken from ``obj``; if the function also
    defines its own docstring, it is appended after the stolen one with a
    blank line in between.
    """
    def deco(fn):
        # Filter out missing docstrings so join() never sees None
        # (previously a TypeError when `obj` had no docstring).
        docs = [doc for doc in (obj.__doc__, fn.__doc__) if doc]
        fn.__doc__ = '\n\n'.join(docs)
        return fn
    return deco
|
Decorator that lets you steal a docstring from another object
Example
-------
::
@steal_docstring_from(superclass.meth)
def meth(self, arg):
"Extra subclass documentation"
pass
In this case the docstring of the new 'meth' will be copied from superclass.meth, and
if an additional dosctring was defined for meth it will be appended to the superclass
docstring with a two newlines inbetween.
|
def add_entity(self, rdf_type, superclass, label, definition=None):
    ''' Adds entity as long as it doesn't exist and has a usable
        superclass ILX ID and rdf:type

    :param rdf_type: entity type; one of owl:Class/term, cde, annotation,
        relationship, fde (case-insensitive)
    :param superclass: ILX ID of the superclass entity (must already exist)
    :param label: human-readable label for the new entity
    :param definition: optional definition text stored with the entity
    '''
    # Normalize and validate the requested type.
    # NOTE(review): .lower() runs before the 'owl:Class' replace, so a
    # literal 'owl:Class' input becomes 'owl:class' and is NOT replaced
    # by 'term' — confirm whether that is intended.
    rdf_type = rdf_type.lower().strip().replace('owl:Class', 'term')
    accepted_types = ['owl:Class', 'term', 'cde', 'annotation', 'relationship', 'fde']
    if rdf_type not in accepted_types:
        error = 'rdf_type must be one of the following: {accepted_types}'
        return self.test_check(error.format(accepted_types=accepted_types))
    # Pull the superclass record and bail out if it does not exist.
    superclass_data, success = self.get_data_from_ilx(ilx_id=superclass)
    superclass_data = superclass_data['data']
    if not success:
        error = '{superclass} is does not exist and cannot be used as a superclass.'
        return self.test_check(error.format(superclass=superclass))
    # Search the database for similar labels, then keep only exact matches.
    search_results = self.search_by_label(label)['data']
    search_results = [sr for sr in search_results
                      if self.is_equal(sr['label'], label_bug_fix(label))]
    # If search_results is not empty, check whether type and superclass also
    # match (in which case the entity already exists). HOWEVER: the creator
    # of an entity may only have ONE entity with this label, regardless of
    # type or superclass.
    if search_results:
        search_hits = 0
        for entity in search_results:  # guaranteed to only have one match if any
            entity, success = self.get_data_from_ilx(ilx_id = entity['ilx'])  # full metadata
            entity = entity['data']
            user_url = 'https://scicrunch.org/api/1/user/info?key={api_key}'
            user_data = self.get(user_url.format(api_key=self.APIKEY))
            user_data = user_data['data']
            if str(entity['uid']) == str(user_data['id']):  # creator check
                bp = 'Entity {label} already created by you with ILX ID {ilx_id} and of type {rdf_type}'
                return self.test_check(bp.format(label = label,
                                                 ilx_id = entity['ilx'],
                                                 rdf_type = entity['type']))
            types_equal = self.is_equal(entity['type'], rdf_type)  # type check
            if 'superclasses' in entity and entity['superclasses']:
                entity_super_ilx = entity['superclasses'][0]['ilx']
            else:
                entity_super_ilx = ''
            supers_equal = self.is_equal(entity_super_ilx, superclass_data['ilx'])
            if types_equal and supers_equal:
                bp = 'Entity {label} already exisits with ILX ID {ilx_id} and of type {rdf_type}'
                return self.test_check(bp.format(label = label,
                                                 ilx_id = self.fix_ilx(entity['ilx']),
                                                 rdf_type = entity['type']))
    # First API call: generate an ILX ID (with server-side validation).
    url = self.base_path + 'ilx/add'
    data = {'term': label,
            'superclasses': [{
                'id': superclass_data['id'],
                'ilx': superclass_data['ilx']}],
            'type': rdf_type,}
    data = superclasses_bug_fix(data)
    output = self.post(url, data)['data']
    if output.get('ilx'):
        ilx_id = output['ilx']
    else:
        ilx_id = output['fragment']  # archetype of beta
    # Second API call: use the generated ILX ID to create the formal record.
    url = self.base_path + 'term/add'
    data = {'label': label.replace(''', "'").replace('"', '"'),
            'ilx': ilx_id,
            'superclasses': [{
                'id': superclass_data['id'],
                'ilx': superclass_data['ilx']}],
            'type': rdf_type}
    data = superclasses_bug_fix(data)
    if definition:
        data.update({'definition':definition})
    return self.post(url, data)
|
Adds entity as long as it doesn't exist and has a usable
superclass ILX ID and rdf:type
|
def ensure_directory(directory):
    """
    Create the directories along the provided directory path that do not exist.

    ``~`` is expanded to the user's home directory. An already-existing
    path is not an error (the call is idempotent); any other OSError
    propagates.
    """
    directory = os.path.expanduser(directory)
    try:
        os.makedirs(directory)
    except OSError as e:
        if e.errno != errno.EEXIST:
            # Bare `raise` re-raises with the original traceback intact
            # (unlike the previous `raise e`).
            raise
|
Create the directories along the provided directory path that do not exist.
|
def write_options_to_JSON(self, filename):
    """Writes the options in JSON format to a file.

    :param str filename: Target file to write the options.
    """
    # Context manager guarantees the handle is closed even if
    # serialization fails (the old open/close pair leaked on error).
    with open(filename, "w") as fd:
        fd.write(json.dumps(_options_to_dict(self.gc), indent=2,
                            separators=(',', ': ')))
|
Writes the options in JSON format to a file.
:param str filename: Target file to write the options.
|
def size(self, destination):
    """
    Size of the queue for specified destination.

    @param destination: The queue destination (e.g. /queue/foo)
    @type destination: C{str}
    @return: The number of frames in specified queue.
    @rtype: C{int}
    """
    # `destination not in` replaces the unidiomatic `not destination in`;
    # unknown destinations have zero frames.
    if destination not in self.queue_metadata:
        return 0
    return len(self.queue_metadata[destination]['frames'])
|
Size of the queue for specified destination.
@param destination: The queue destination (e.g. /queue/foo)
@type destination: C{str}
@return: The number of frames in specified queue.
@rtype: C{int}
|
def json_serializer(pid, data, *args):
    """Build a JSON Flask response using the given data.

    :param pid: The `invenio_pidstore.models.PersistentIdentifier` of the
        record.
    :param data: The record metadata.
    :returns: A Flask response with JSON data.
    :rtype: :py:class:`flask.Response`.
    """
    # No metadata -> empty JSON response body.
    if data is None:
        return Response(mimetype='application/json')
    return Response(json.dumps(data.dumps()), mimetype='application/json')
|
Build a JSON Flask response using the given data.
:param pid: The `invenio_pidstore.models.PersistentIdentifier` of the
record.
:param data: The record metadata.
:returns: A Flask response with JSON data.
:rtype: :py:class:`flask.Response`.
|
def as_proto(self):
    """Returns this shape as a `TensorShapeProto`."""
    # Unknown rank is encoded explicitly; otherwise each unknown
    # dimension is serialized as size -1.
    if self._dims is None:
        return tensor_shape_pb2.TensorShapeProto(unknown_rank=True)
    proto_dims = [
        tensor_shape_pb2.TensorShapeProto.Dim(
            size=-1 if dim.value is None else dim.value
        )
        for dim in self._dims
    ]
    return tensor_shape_pb2.TensorShapeProto(dim=proto_dims)
|
Returns this shape as a `TensorShapeProto`.
|
def normalize(x:TensorImage, mean:FloatTensor,std:FloatTensor)->TensorImage:
    "Normalize `x` with `mean` and `std`, broadcasting over the trailing two dims."
    centered = x - mean[..., None, None]
    return centered / std[..., None, None]
|
Normalize `x` with `mean` and `std`.
|
def sim(self, args):
    """
    Simulate I/O points by setting the Out_Of_Service property, then doing a
    WriteProperty to the point's Present_Value.
    :param args: String with <addr> <type> <inst> <prop> <value> [ <indx> ] [ <priority> ]
    :raises ApplicationNotStarted: if the BACnet stack is not running
    :raises OutOfServiceNotSet: if outOfService could not be enabled on the point
    """
    if not self._started:
        raise ApplicationNotStarted("BACnet stack not running - use startApp()")
    # with self.this_application._lock: if use lock...won't be able to call read...
    args = args.split()
    # Only the first five tokens are used; optional indx/priority are ignored here.
    addr, obj_type, obj_inst, prop_id, value = args[:5]
    if self.read("{} {} {} outOfService".format(addr, obj_type, obj_inst)):
        # Point already out of service: write the simulated value directly.
        self.write(
            "{} {} {} {} {}".format(addr, obj_type, obj_inst, prop_id, value)
        )
    else:
        # Try to put the point out of service first, then verify before writing.
        try:
            self.write(
                "{} {} {} outOfService True".format(addr, obj_type, obj_inst)
            )
        except NoResponseFromController:
            # Best effort: the verification read below decides what happens next.
            pass
        try:
            if self.read("{} {} {} outOfService".format(addr, obj_type, obj_inst)):
                self.write(
                    "{} {} {} {} {}".format(
                        addr, obj_type, obj_inst, prop_id, value
                    )
                )
            else:
                raise OutOfServiceNotSet()
        except NoResponseFromController:
            pass
|
Simulate I/O points by setting the Out_Of_Service property, then doing a
WriteProperty to the point's Present_Value.
:param args: String with <addr> <type> <inst> <prop> <value> [ <indx> ] [ <priority> ]
|
def Hk(self, k, m_pred, P_pred):  # returns state iteration matrix
    """
    function (k, m, P) return Jacobian of measurement function, it is
    passed into p_h.
    k (iteration number), starts at 0
    m: point where Jacobian is evaluated
    P: parameter for Jacobian, usually covariance matrix.
    """
    # Look up which H slice applies at iteration k, then return it.
    time_slot = int(self.index[self.H_time_var_index, k])
    return self.H[:, :, time_slot]
|
function (k, m, P) return Jacobian of measurement function, it is
passed into p_h.
k (iteration number), starts at 0
m: point where Jacobian is evaluated
P: parameter for Jacobian, usually covariance matrix.
|
def wald_wolfowitz(sequence):
    """
    implements the wald-wolfowitz runs test:
    http://en.wikipedia.org/wiki/Wald-Wolfowitz_runs_test
    http://support.sas.com/kb/33/092.html
    :param sequence: any iterable with at most 2 values. e.g.
        '1001001'
        [1, 0, 1, 0, 1]
        'abaaabbba'
    :rtype: a dict with keys of
        `n_runs`: the number of runs in the sequence
        `p`: the support to reject the null-hypothesis that the number of runs
        supports a random sequence
        `z`: the z-score, used to calculate the p-value
        `sd`, `mean`: the expected standard deviation, mean the number of runs,
        given the ratio of numbers of 1's/0's in the sequence
    >>> r = wald_wolfowitz('1000001')
    >>> r['n_runs'] # should be 3, because 1, 0, 1
    3
    >>> r['p'] < 0.05 # not < 0.05 evidence to reject Ho of random sequence
    False
    # this should show significance for non-randomness
    >>> li = [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]
    >>> wald_wolfowitz(li)['p'] < 0.05
    True
    """
    # Count maximal runs of identical symbols.
    R = sum(1 for _ in groupby(sequence, lambda a: a))
    first = sequence[0]
    n = float(sum(1 for s in sequence if s == first))
    m = float(sum(1 for s in sequence if s != first))
    # Expected mean and variance of the number of runs under randomness.
    ER = ((2 * n * m) / (n + m)) + 1
    VR = (2 * n * m * (2 * n * m - n - m)) / ((n + m) ** 2 * (n + m - 1))
    O = (ER - 1) * (ER - 2) / (n + m - 1.)
    assert VR - O < 0.001, (VR, O)
    SD = math.sqrt(VR)
    Z = (R - ER) / SD
    return {'z': Z, 'mean': ER, 'sd': SD, 'p': zprob(Z), 'n_runs': R}
|
implements the wald-wolfowitz runs test:
http://en.wikipedia.org/wiki/Wald-Wolfowitz_runs_test
http://support.sas.com/kb/33/092.html
:param sequence: any iterable with at most 2 values. e.g.
'1001001'
[1, 0, 1, 0, 1]
'abaaabbba'
:rtype: a dict with keys of
`n_runs`: the number of runs in the sequence
`p`: the support to reject the null-hypothesis that the number of runs
supports a random sequence
`z`: the z-score, used to calculate the p-value
`sd`, `mean`: the expected standard deviation, mean the number of runs,
given the ratio of numbers of 1's/0's in the sequence
>>> r = wald_wolfowitz('1000001')
>>> r['n_runs'] # should be 3, because 1, 0, 1
3
>>> r['p'] < 0.05 # not < 0.05 evidence to reject Ho of random sequence
False
# this should show significance for non-randomness
>>> li = [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]
>>> wald_wolfowitz(li)['p'] < 0.05
True
|
def getSizeFromPage(rh, page):
    """
    Convert a size value in pages to a number with a magnitude appended.

    Input:
       Request Handle
       Size in pages (4096 bytes each)
    Output:
       Converted value with a magnitude
    """
    rh.printSysLog("Enter generalUtils.getSizeFromPage")
    byteSize = float(page) * 4096
    magSize = cvtToMag(rh, byteSize)
    rh.printSysLog("Exit generalUtils.getSizeFromPage, magSize: " + magSize)
    return magSize
|
Convert a size value from page to a number with a magnitude appended.
Input:
Request Handle
Size in page
Output:
Converted value with a magnitude
|
def key56_to_key64(key):
    """
    This takes in an a bytes string of 7 bytes and converts it to a bytes
    string of 8 bytes with the odd parity bit being set to every 8 bits,
    For example
    b"\x01\x02\x03\x04\x05\x06\x07"
    00000001 00000010 00000011 00000100 00000101 00000110 00000111
    is converted to
    b"\x01\x80\x80\x61\x40\x29\x19\x0E"
    00000001 10000000 10000000 01100001 01000000 00101001 00011001 00001110
    https://crypto.stackexchange.com/questions/15799/des-with-actual-7-byte-key
    :param key: 7-byte string sized key
    :return: 8-byte string with the parity bits sets from the 7-byte string
    """
    if len(key) != 7:
        raise ValueError("DES 7-byte key is not 7 bytes in length, "
                         "actual: %d" % len(key))
    new_key = b""
    for i in range(0, 8):
        # Each output byte takes 7 key bits (shifted to make room for the
        # parity bit) and the 8th output byte reuses the tail of byte 6.
        if i == 0:
            # First output byte is the first input byte unshifted
            # (its low bit becomes the parity bit below).
            new_value = struct.unpack("B", key[i:i+1])[0]
        elif i == 7:
            # Last output byte: remaining bits of input byte 6, shifted left.
            new_value = struct.unpack("B", key[6:7])[0]
            new_value = (new_value << 1) & 0xFF
        else:
            # Middle bytes: high bits from the previous input byte combined
            # with the top bits of the current one.
            new_value = struct.unpack("B", key[i - 1:i])[0]
            next_value = struct.unpack("B", key[i:i + 1])[0]
            new_value = ((new_value << (8 - i)) & 0xFF) | next_value >> i
        # clear the last bit so the count isn't off
        new_value = new_value & ~(1 << 0)
        # set the last bit if the number of set bits are even
        # (odd parity: total set bits per byte must be odd)
        new_value = new_value | int(not DES.bit_count(new_value) & 0x1)
        new_key += struct.pack("B", new_value)
    return new_key
|
This takes in an a bytes string of 7 bytes and converts it to a bytes
string of 8 bytes with the odd parity bit being set to every 8 bits,
For example
b"\x01\x02\x03\x04\x05\x06\x07"
00000001 00000010 00000011 00000100 00000101 00000110 00000111
is converted to
b"\x01\x80\x80\x61\x40\x29\x19\x0E"
00000001 10000000 10000000 01100001 01000000 00101001 00011001 00001110
https://crypto.stackexchange.com/questions/15799/des-with-actual-7-byte-key
:param key: 7-byte string sized key
:return: 8-byte string with the parity bits sets from the 7-byte string
|
def getContactUIDForUser(self):
    """Get the UID of the Contact associated with the authenticated user.

    Returns None when there is not exactly one matching Contact.
    """
    membership_tool = api.get_tool("portal_membership")
    username = membership_tool.getAuthenticatedMember().getUserName()
    brains = self.portal_catalog(
        portal_type="Contact",
        getUsername=username
    )
    if len(brains) == 1:
        return brains[0].UID
|
Get the UID of the user associated with the authenticated user
|
def get_value_tuple(self):
    """
    Returns a tuple of the color's values (in order). For example,
    an LabColor object will return (lab_l, lab_a, lab_b), where each
    member of the tuple is the float value for said variable.
    """
    # Single-pass tuple construction instead of quadratic tuple
    # concatenation in a loop.
    return tuple(getattr(self, val) for val in self.VALUES)
|
Returns a tuple of the color's values (in order). For example,
an LabColor object will return (lab_l, lab_a, lab_b), where each
member of the tuple is the float value for said variable.
|
def blk_1d(blk, shape):
    """Yield the slices that cover a line in blocks of size ``blk``.

    Used by :func:`blk_nd` as its 1d base case. The trailing slice is
    yielded even when it is shorter than ``blk``.

    :param blk: the size of the block
    :param shape: the size of the array
    :return: a generator that yields the slices
    """
    covered, remainder = blk_coverage_1d(blk, shape)
    for start in range(0, covered, blk):
        yield slice(start, start + blk)
    if remainder:
        yield slice(covered, shape)
|
Iterate through the slices that recover a line.
This function is used by :func:`blk_nd` as a base 1d case.
The last slice is returned even if is lesser than blk.
:param blk: the size of the block
:param shape: the size of the array
:return: a generator that yields the slices
|
def unpack(self, buff, offset=0):
    """Unpack a binary struct into this object's attributes.
    Return the values instead of the lib's basic types.
    Args:
        buff (bytes): Binary buffer.
        offset (int): Where to begin unpacking.
    Raises:
        :exc:`~.exceptions.UnpackException`: If unpack fails.
    """
    # Base-class unpack fills the raw field objects; the assignments below
    # replace them with plain Python values.
    super().unpack(buff, offset)
    # Split the packed version/IHL byte into its two 4-bit fields.
    self.version = self._version_ihl.value >> 4
    self.ihl = self._version_ihl.value & 15
    # DSCP is the top 6 bits, ECN the bottom 2 bits of the same byte.
    self.dscp = self._dscp_ecn.value >> 2
    self.ecn = self._dscp_ecn.value & 3
    self.length = self.length.value
    self.identification = self.identification.value
    # Flags are the top 3 bits; fragment offset the remaining 13 bits.
    self.flags = self._flags_offset.value >> 13
    self.offset = self._flags_offset.value & 8191
    self.ttl = self.ttl.value
    self.protocol = self.protocol.value
    self.checksum = self.checksum.value
    self.source = self.source.value
    self.destination = self.destination.value
    if self.ihl > 5:
        # IHL > 5 means the header carries options: IHL counts 32-bit
        # words, so the options occupy (ihl - 5) * 4 bytes; the rest of
        # the raw field is payload data.
        options_size = (self.ihl - 5) * 4
        self.data = self.options.value[options_size:]
        self.options = self.options.value[:options_size]
    else:
        # No options present: everything after the fixed header is data.
        self.data = self.options.value
        self.options = b''
|
Unpack a binary struct into this object's attributes.
Return the values instead of the lib's basic types.
Args:
buff (bytes): Binary buffer.
offset (int): Where to begin unpacking.
Raises:
:exc:`~.exceptions.UnpackException`: If unpack fails.
|
def time2slurm(timeval, unit="s"):
    """
    Convert a number representing a time value in the given unit (Default: seconds)
    to a string following the slurm convention: "days-hours:minutes:seconds".
    >>> assert time2slurm(61) == '0-0:1:1' and time2slurm(60*60+1) == '0-1:0:1'
    >>> assert time2slurm(0.5, unit="h") == '0-0:30:0'
    """
    secs_per_day, secs_per_hour, secs_per_min = 24 * 3600, 3600, 60
    total_secs = Time(timeval, unit).to("s")
    # Peel off each unit in turn; the remainder cascades downwards.
    days, rest = divmod(total_secs, secs_per_day)
    hours, rest = divmod(rest, secs_per_hour)
    minutes, secs = divmod(rest, secs_per_min)
    return "%d-%d:%d:%d" % (days, hours, minutes, secs)
|
Convert a number representing a time value in the given unit (Default: seconds)
to a string following the slurm convention: "days-hours:minutes:seconds".
>>> assert time2slurm(61) == '0-0:1:1' and time2slurm(60*60+1) == '0-1:0:1'
>>> assert time2slurm(0.5, unit="h") == '0-0:30:0'
|
def visit_RETURN(self, node):
    """ Visits only children[1], since children[0] points to
    the current function being returned from (if any), and
    might cause infinite recursion.
    """
    # Only a RETURN node with two children carries an explicit return
    # expression (children[1]); yield it wrapped in ToVisit and store the
    # (possibly rewritten) node sent back by the driving visitor loop.
    if len(node.children) == 2:
        node.children[1] = (yield ToVisit(node.children[1]))
    # Finally hand the node itself back to the traversal driver.
    yield node
|
Visits only children[1], since children[0] points to
the current function being returned from (if any), and
might cause infinite recursion.
|
def add(self, opener):
    """Register *opener* and map each of its names to its slot.

    :param opener: Opener object
    :type opener: Opener inherited object
    """
    # Slots are handed out sequentially: the next free slot is simply the
    # current number of registered openers.
    slot = len(self.openers)
    self.openers[slot] = opener
    for alias in opener.names:
        self.registry[alias] = slot
|
Adds an opener to the registry
:param opener: Opener object
:type opener: Opener inherited object
|
def copy(self):
    """Return a copy of this _TimeAnchor.

    All five fields (reading_id, uptime, utc, is_break, exact) are passed
    positionally to the _TimeAnchor constructor; the copy is shallow.
    """
    return _TimeAnchor(self.reading_id, self.uptime, self.utc, self.is_break, self.exact)
|
Return a copy of this _TimeAnchor.
|
def mask_unphysical(self, data):
    """Mask entries of *data* that fall outside the physically valid range.

    When ``self.valid_range`` is unset (falsy), *data* is returned
    untouched.  Otherwise a masked array is returned in which values
    below ``min(valid_range)`` or above ``max(valid_range)`` are masked.
    """
    valid = self.valid_range
    if not valid:
        return data
    lo, hi = np.min(valid), np.max(valid)
    return np.ma.masked_outside(data, lo, hi)
|
Mask data array where values are outside physically valid range.
|
def make_defaults_and_annotations(make_function_instr, builders):
    """
    Get the AST expressions corresponding to the defaults, kwonly defaults, and
    annotations for a function created by `make_function_instr`.
    """
    # Integer counts.
    n_defaults, n_kwonlydefaults, n_annotations = unpack_make_function_arg(
        make_function_instr.arg
    )
    if n_annotations:
        # TOS should be a tuple of annotation names.
        load_annotation_names = builders.pop()
        # n_annotations counts the names tuple itself, so there are
        # (n_annotations - 1) annotation value expressions on the stack, in
        # reverse order relative to the names tuple.
        annotations = dict(zip(
            reversed(load_annotation_names.arg),
            (make_expr(builders) for _ in range(n_annotations - 1))
        ))
    else:
        annotations = {}
    kwonlys = {}
    # Keyword-only defaults sit on the stack as (name const, value expr) pairs.
    while n_kwonlydefaults:
        default_expr = make_expr(builders)
        key_instr = builders.pop()
        if not isinstance(key_instr, instrs.LOAD_CONST):
            raise DecompilationError(
                "kwonlydefault key is not a LOAD_CONST: %s" % key_instr
            )
        if not isinstance(key_instr.arg, str):
            raise DecompilationError(
                "kwonlydefault key builder is not a "
                "'LOAD_CONST of a string: %s" % key_instr
            )
        kwonlys[key_instr.arg] = default_expr
        n_kwonlydefaults -= 1
    # Positional defaults are the remaining n_defaults expressions.
    defaults = make_exprs(builders, n_defaults)
    return defaults, kwonlys, annotations
|
Get the AST expressions corresponding to the defaults, kwonly defaults, and
annotations for a function created by `make_function_instr`.
|
def show_top(queue=False, **kwargs):
    '''
    Return the top data that the minion will use for a highstate

    CLI Example:

    .. code-block:: bash

        salt '*' state.show_top
    '''
    if 'env' in kwargs:
        # "env" is not supported; Use "saltenv".
        kwargs.pop('env')
    # Refuse to run when another state run is in progress (unless queued).
    conflict = _check_queue(queue, kwargs)
    if conflict is not None:
        return conflict
    opts = salt.utils.state.get_sls_opts(__opts__, **kwargs)
    try:
        st_ = salt.state.HighState(opts,
                                   proxy=__proxy__,
                                   initial_pillar=_get_initial_pillar(opts))
    except NameError:
        # __proxy__ is only defined on proxy minions.
        st_ = salt.state.HighState(opts, initial_pillar=_get_initial_pillar(opts))
    errors = _get_pillar_errors(kwargs, pillar=st_.opts['pillar'])
    if errors:
        __context__['retcode'] = salt.defaults.exitcodes.EX_PILLAR_FAILURE
        raise CommandExecutionError('Pillar failed to render', info=errors)
    errors = []
    top_ = st_.get_top()
    errors += st_.verify_tops(top_)
    if errors:
        # A broken top file is a compiler error, not an exception.
        __context__['retcode'] = salt.defaults.exitcodes.EX_STATE_COMPILER_ERROR
        return errors
    matches = st_.top_matches(top_)
    return matches
|
Return the top data that the minion will use for a highstate
CLI Example:
.. code-block:: bash
salt '*' state.show_top
|
def getHTML(self):
    '''
        getHTML - Get the full HTML as contained within this tree.
          If parsed from a document, this will contain the original whitespacing.

            @returns - <str> of html

            @raises ValueError - if nothing has been parsed yet.

            @see getFormattedHTML
            @see getMiniHTML
    '''
    # Fetch the root once and reuse it (previously getRoot() was called twice).
    root = self.getRoot()
    if root is None:
        raise ValueError('Did not parse anything. Use parseFile or parseStr')

    if self.doctype:
        doctypeStr = '<!%s>\n' %(self.doctype)
    else:
        doctypeStr = ''

    # 6.6.0: If we have a real root tag, print the outerHTML. If we have a fake root tag
    #   (for multiple root condition), then print the innerHTML (skipping the outer root
    #   tag). Otherwise, we will miss untagged text (between the multiple root nodes).
    if root.tagName == INVISIBLE_ROOT_TAG:
        return doctypeStr + root.innerHTML
    return doctypeStr + root.outerHTML
|
getHTML - Get the full HTML as contained within this tree.
If parsed from a document, this will contain the original whitespacing.
@returns - <str> of html
@see getFormattedHTML
@see getMiniHTML
|
def getMaximinScores(profile):
    """
    Returns a dictionary that associates integer representations of each candidate with their
    maximin score: the minimum, over all pairwise contests, of the number of votes the
    candidate receives against an opponent.

    :ivar Profile profile: A Profile object that represents an election profile.
    """
    # Currently, we expect the profile to contain complete ordering over candidates. Ties are
    # allowed however.
    elecType = profile.getElecType()
    if elecType != "soc" and elecType != "toc":
        print("ERROR: unsupported election type")
        exit()
    wmgMap = profile.getWmg()
    # Initialize each maximin score as infinity so the first pairwise result lowers it.
    maximinscores = {}
    for cand in wmgMap.keys():
        maximinscores[cand] = float("inf")
    # For each pair of candidates, keep the minimum support either receives against the other.
    for cand1, cand2 in itertools.combinations(wmgMap.keys(), 2):
        if cand2 in wmgMap[cand1].keys():
            maximinscores[cand1] = min(maximinscores[cand1], wmgMap[cand1][cand2])
            maximinscores[cand2] = min(maximinscores[cand2], wmgMap[cand2][cand1])
    return maximinscores
|
Returns a dictionary that associates the integer representation of each candidate with their
maximin score (the minimum pairwise support the candidate receives against any opponent).
:ivar Profile profile: A Profile object that represents an election profile.
|
def postprocess(x, n_bits_x=8):
  """Map x from [-0.5, 0.5] to uint8 pixel values in [0, 255].

  Args:
    x: 3-D or 4-D Tensor normalized between [-0.5, 0.5]
    n_bits_x: Number of bits representing each pixel of the output.
      Defaults to 8, to default to 256 possible values.
  Returns:
    x: 3-D or 4-D uint8 Tensor representing images or videos.
  """
  # Replace any NaN/Inf entries with 1.0 before clipping.
  finite = tf.where(tf.is_finite(x), x, tf.ones_like(x))
  clipped = tf.clip_by_value(finite, -0.5, 0.5)
  # Shift to [0, 1] and scale up to the requested bit depth.
  scaled = (clipped + 0.5) * (2 ** n_bits_x)
  return tf.cast(tf.clip_by_value(scaled, 0, 255), dtype=tf.uint8)
|
Converts x from [-0.5, 0.5], to [0, 255].
Args:
x: 3-D or 4-D Tensor normalized between [-0.5, 0.5]
n_bits_x: Number of bits representing each pixel of the output.
Defaults to 8, to default to 256 possible values.
Returns:
x: 3-D or 4-D Tensor representing images or videos.
|
def seek(self, offset, whence=os.SEEK_SET):
    """Seeks to an offset within the file-like object.

    Args:
      offset (int): offset to seek.
      whence (Optional[int]): value that indicates whether offset is an
          absolute or relative position within the file.

    Raises:
      IOError: if the seek failed.
      OSError: if the seek failed.
    """
    if not self._is_open:
        raise IOError('Not opened.')
    if self._current_offset < 0:
        raise IOError(
            'Invalid current offset: {0:d} value less than zero.'.format(
                self._current_offset))
    # Translate a relative offset into an absolute one.
    if whence == os.SEEK_CUR:
        offset += self._current_offset
    elif whence == os.SEEK_END:
        # SEEK_END needs the total size; determine it lazily on first use.
        if self._decrypted_stream_size is None:
            self._decrypted_stream_size = self._GetDecryptedStreamSize()
            if self._decrypted_stream_size is None:
                raise IOError('Invalid decrypted stream size.')
        offset += self._decrypted_stream_size
    elif whence != os.SEEK_SET:
        raise IOError('Unsupported whence.')
    if offset < 0:
        raise IOError('Invalid offset value less than zero.')
    # Only flag a realignment when the position actually moves.
    if offset != self._current_offset:
        self._current_offset = offset
        self._realign_offset = True
|
Seeks to an offset within the file-like object.
Args:
offset (int): offset to seek.
whence (Optional[int]): value that indicates whether offset is an
absolute or relative position within the file.
Raises:
IOError: if the seek failed.
OSError: if the seek failed.
|
def commit_analyzer(commits, label_pattern, label_position="footer"):
    """Analyzes a list of :class:`~braulio.git.Commit` objects searching for
    messages that match a given message convention and extract metadata from
    them.

    A message convention is determined by ``label_pattern``, which is not a
    regular expression pattern. Instead it must be a string literal with
    placeholders that indicate metadata information in a given position of
    the commit message. The possible placeholders are ``{type}``, ``{scope}``
    and ``{subject}``.

    The ``label_position`` argument dictates where (header|footer) to look in
    the commit message for the pattern passed in ``label_pattern``.

    ``{subject}`` must be included in ``label_pattern`` only if the metadata is
    in the header, otherwise it must be omitted.

    Examples.

    If ``label_position`` is equal to **header**, in order to match the next
    commit message::

        fix(cli): Ensure --help option doesn't hang

    The pattern must be ``{type}({scope}): {subject}``, where the metadata
    information extracted will be::

        {
            'type': 'fix',
            'scope': 'cli',
            'subject': 'Ensure --help option doesn't hang'
        }
    """
    # Internally, a real regular expression pattern is used
    pattern_string = re.escape(label_pattern)
    # Capturing group patterns
    action_cgp = r"(?P<type>\w+)"
    scope_cgp = r"(?P<scope>\w*)"
    subject_cgp = r"(?P<subject>.+)"
    # Swap the (escaped) placeholders for real capturing groups.
    pattern_string = pattern_string.replace(r"\{type\}", action_cgp).replace(
        r"\{scope\}", scope_cgp
    )
    if label_position == "header":
        # {subject} is only meaningful when the metadata lives in the header.
        pattern_string = pattern_string.replace(r"\{subject\}", subject_cgp)
    regexp_pattern = re.compile(pattern_string)
    semantic_commits = []
    for commit in commits:
        # Search the configured part of the commit message.
        text = commit.header if label_position == "header" else commit.footer
        match = regexp_pattern.search(text)
        if not match:
            continue
        metadata = match.groupdict()
        if label_position == "footer":
            # Footer conventions carry no {subject}; use the header line.
            metadata["subject"] = commit.header
        sc = SemanticCommit(
            subject=metadata["subject"].strip(),
            type=metadata["type"],
            scope=metadata.get("scope") or None,
            message=commit.message,
        )
        semantic_commits.append(sc)
    return semantic_commits
|
Analyzes a list of :class:`~braulio.git.Commit` objects searching for
messages that match a given message convention and extract metadata from
them.
A message convention is determined by ``label_pattern``, which is not a
regular expression pattern. Instead it must be a string literals with
placeholders that indicates metadata information in a given position of
the commit message. The possible placeholders are ``{type}``, ``{scope}``
and ``{subject}``.
The ``label_position`` argument dictates where (header|footer) to look in
the commit message for the pattern passed in ```label_pattern``.
``{subject}`` must be included in ``label_pattern`` just if the metadata is
in the header, otherwise must be omitted.
Examples.
If ``label_position`` is equal to **header**, in order to match the next
commit message::
fix(cli): Ensure --help option doesn't hang
The pattern must be ``{type}({scope}): {subject}``, where the metadata
information extracted will be::
{
'type': 'fix',
'scope': 'cli',
'subject': 'Ensure --help option doesn't hang'
}
|
def create_database(self, name):
    """Create a new database named *name* (latin1 / latin1_swedish_ci)."""
    sql = ("CREATE DATABASE %s DEFAULT CHARACTER SET latin1 "
           "COLLATE latin1_swedish_ci" % wrap(name))
    return self.execute(sql)
|
Create a new database.
|
def update_task(self, differences):
    """
    Mark translation tasks as done for fields that now have a value in
    this alternative language.

    :param differences: iterable of field names whose translated value
        changed on ``self.instance``
    :return: None
    """
    self.log('differences UPDATING: {}'.format(differences))
    object_name = '{} - {}'.format(self.app_label, self.instance.master._meta.verbose_name)
    lang = self.instance.language_code
    object_pk = self.instance.master.pk
    for field in differences:
        value = getattr(self.instance, field)
        # Skip fields that still have no translated value.
        if value is None or value == '':
            continue
        # BUGFIX: QuerySet.update() never raises DoesNotExist -- it returns
        # the number of rows matched -- so the previous try/except
        # TransTask.DoesNotExist was dead code and the error branch could
        # never run. Check the row count instead.
        updated = TransTask.objects.filter(
            language__code=lang, object_field=field, object_name=object_name, object_pk=object_pk
        ).update(done=True, date_modification=datetime.now(), object_field_value_translation=value)
        if updated:
            self.log('MARKED TASK AS DONE')
        else:
            self.log('error MARKING TASK AS DONE: {} - {} - {} - {}'.format(lang, field, object_name, object_pk))
|
Updates a task as done if we have a new value for this alternative language
:param differences:
:return:
|
def add_default_plugins(self, except_global=None, except_local=None):
    """
    Add the ginga-distributed default set of plugins to the
    reference viewer.

    :param except_global: module names of global plugins to skip (default: none)
    :param except_local: module names of local plugins to skip (default: none)
    """
    # Avoid mutable default arguments; None means "exclude nothing".
    if except_global is None:
        except_global = []
    if except_local is None:
        except_local = []
    # add default global and local plugins
    for spec in plugins:
        ptype = spec.get('ptype', 'local')
        # ptype is a single value, so the two branches are mutually exclusive.
        if ptype == 'global' and spec.module not in except_global:
            self.add_plugin_spec(spec)
        elif ptype == 'local' and spec.module not in except_local:
            self.add_plugin_spec(spec)
|
Add the ginga-distributed default set of plugins to the
reference viewer.
|
def locked_get(self):
    """Fetch the credentials stored under this store's key.

    Returns:
        An instance of :class:`oauth2client.client.Credentials`, with this
        store attached, or `None` when nothing is stored.
    """
    found = self._backend.locked_get(self._key)
    if found is None:
        return None
    # Attach this store so the credential can refresh itself back into it.
    found.set_store(self)
    return found
|
Retrieves the current credentials from the store.
Returns:
An instance of :class:`oauth2client.client.Credentials` or `None`.
|
def get_value_for_expr(self, expr, target):
    """Resolve the right-hand value of a rule expression for *target*.

    Returns ``None`` when *expr* is one of the logical operators (these
    carry no value of their own), or when the value is HISTORICAL but not
    enough history has accumulated yet for *target*.

    NOTE(review): ``expr['mod']`` is applied to the resolved value before
    returning -- presumably a user-supplied modifier callable; confirm.
    """
    if expr in LOGICAL_OPERATORS.values():
        return None
    rvalue = expr['value']
    if rvalue == HISTORICAL:
        # Substitute the mean of the recorded history for the literal value.
        history = self.history[target]
        if len(history) < self.history_size:
            return None
        rvalue = sum(history) / float(len(history))
    rvalue = expr['mod'](rvalue)
    return rvalue
|
Resolve the right-hand value of a rule expression for the given target, returning ``None`` for logical operators or when insufficient history has accumulated.
|
def get_message_handler(self, message_handlers):
    """
    Look up the MessageHandler for the configured Encoder.

    :param message_handlers: a dictionary of MessageHandler keyed by encoder
    :return: a MessageHandler
    :raises NotImplementedError: when no handler exists for the encoder
    """
    encoder = self.options.encoder
    if encoder in message_handlers:
        return message_handlers[encoder]
    raise NotImplementedError('No RequestHandler defined for given encoder (%s).' % encoder)
|
Create a MessageHandler for the configured Encoder
:param message_handlers: a dictionary of MessageHandler keyed by encoder
:return: a MessageHandler
|
def jam_pack(jam, **kwargs):
    '''Pack data into a jams sandbox.

    If not already present, this creates a `muda` field within `jam.sandbox`,
    along with `history`, `state`, and version arrays which are populated by
    deformation objects.

    Any additional fields can be added to the `muda` sandbox by supplying
    keyword arguments.

    Parameters
    ----------
    jam : jams.JAMS
        A JAMS object

    Returns
    -------
    jam : jams.JAMS
        The updated JAMS object

    Examples
    --------
    >>> jam = jams.JAMS()
    >>> muda.jam_pack(jam, my_data=dict(foo=5, bar=None))
    >>> jam.sandbox
    <Sandbox: muda>
    >>> jam.sandbox.muda
    <Sandbox: state, version, my_data, history>
    >>> jam.sandbox.muda.my_data
    {'foo': 5, 'bar': None}
    '''
    if not hasattr(jam.sandbox, 'muda'):
        # If there's no mudabox, create one.
        # Record the versions of muda and its dependencies (psf is
        # pysoundfile) so deformations are reproducible.
        jam.sandbox.muda = jams.Sandbox(history=[],
                                        state=[],
                                        version=dict(muda=version,
                                                     librosa=librosa.__version__,
                                                     jams=jams.__version__,
                                                     pysoundfile=psf.__version__))
    elif not isinstance(jam.sandbox.muda, jams.Sandbox):
        # If there is a muda entry, but it's not a sandbox, coerce it
        jam.sandbox.muda = jams.Sandbox(**jam.sandbox.muda)
    # Any extra keyword arguments land directly in the muda sandbox.
    jam.sandbox.muda.update(**kwargs)
    return jam
|
Pack data into a jams sandbox.
If not already present, this creates a `muda` field within `jam.sandbox`,
along with `history`, `state`, and version arrays which are populated by
deformation objects.
Any additional fields can be added to the `muda` sandbox by supplying
keyword arguments.
Parameters
----------
jam : jams.JAMS
A JAMS object
Returns
-------
jam : jams.JAMS
The updated JAMS object
Examples
--------
>>> jam = jams.JAMS()
>>> muda.jam_pack(jam, my_data=dict(foo=5, bar=None))
>>> jam.sandbox
<Sandbox: muda>
>>> jam.sandbox.muda
<Sandbox: state, version, my_data, history>
>>> jam.sandbox.muda.my_data
{'foo': 5, 'bar': None}
|
def get_observation_fields(search_query: str="", page: int=1) -> List[Dict[str, Any]]:
    """
    Search the globally available observation fields.

    :param search_query: text used to filter the observation fields
    :param page: pagination page number
    :return: decoded JSON payload returned by the iNaturalist API
    """
    params = {
        'q': search_query,
        'page': page,
    }  # type: Dict[str, Union[int, str]]
    url = INAT_BASE_URL + "/observation_fields.json"
    return requests.get(url, params=params).json()
|
Search the globally available observation fields.
:param search_query:
:param page:
:return:
|
def _learn_init_params(self, n_calib_beats=8):
    """
    Find a number of consecutive beats and use them to initialize:
    - recent qrs amplitude
    - recent noise amplitude
    - recent rr interval
    - qrs detection threshold

    The learning works as follows:
    - Find all local maxima (largest sample within `qrs_radius`
      samples) of the filtered signal.
    - Inspect the local maxima until `n_calib_beats` beats are
      found:
        - Calculate the cross-correlation between a ricker wavelet of
          length `qrs_width`, and the filtered signal segment centered
          around the local maximum.
        - If the cross-correlation exceeds 0.6, classify it as a beat.
    - Use the beats to initialize the previously described
      parameters.
    - If the system fails to find enough beats, the default
      parameters will be used instead. See the docstring of
      `XQRS._set_default_init_params` for details.

    Parameters
    ----------
    n_calib_beats : int, optional
        Number of calibration beats to detect for learning
    """
    if self.verbose:
        print('Learning initial signal parameters...')
    last_qrs_ind = -self.rr_max
    qrs_inds = []
    qrs_amps = []
    noise_amps = []
    # Reference wavelet used to score how qrs-like each candidate peak is.
    ricker_wavelet = signal.ricker(self.qrs_radius * 2, 4).reshape(-1,1)
    # Find the local peaks of the signal.
    peak_inds_f = find_local_peaks(self.sig_f, self.qrs_radius)
    # Peak numbers at least qrs_width away from signal boundaries
    peak_nums_r = np.where(peak_inds_f > self.qrs_width)[0]
    peak_nums_l = np.where(peak_inds_f <= self.sig_len - self.qrs_width)[0]
    # Skip if no peaks in range
    if (not peak_inds_f.size or not peak_nums_r.size
            or not peak_nums_l.size):
        if self.verbose:
            print('Failed to find %d beats during learning.'
                  % n_calib_beats)
        self._set_default_init_params()
        return
    # Go through the peaks and find qrs peaks and noise peaks.
    # only inspect peaks with at least qrs_radius around either side
    for peak_num in range(peak_nums_r[0], peak_nums_l[-1]):
        i = peak_inds_f[peak_num]
        # Calculate cross-correlation between the filtered signal
        # segment and a ricker wavelet
        # Question: should the signal be squared? Case for inverse qrs
        # complexes
        sig_segment = normalize((self.sig_f[i - self.qrs_radius:
                                            i + self.qrs_radius]).reshape(-1, 1), axis=0)
        xcorr = np.correlate(sig_segment[:, 0], ricker_wavelet[:,0])
        # Classify as qrs if xcorr is large enough; also enforce a minimum
        # spacing of rr_min from the previously accepted beat.
        if xcorr > 0.6 and i-last_qrs_ind > self.rr_min:
            last_qrs_ind = i
            qrs_inds.append(i)
            qrs_amps.append(self.sig_i[i])
        else:
            noise_amps.append(self.sig_i[i])
        if len(qrs_inds) == n_calib_beats:
            break
    # Found enough calibration beats to initialize parameters
    if len(qrs_inds) == n_calib_beats:
        if self.verbose:
            print('Found %d beats during learning.' % n_calib_beats
                  + ' Initializing using learned parameters')
        # QRS amplitude is most important.
        qrs_amp = np.mean(qrs_amps)
        # Set noise amplitude if found
        if noise_amps:
            noise_amp = np.mean(noise_amps)
        else:
            # Set default of 1/10 of qrs amplitude
            noise_amp = qrs_amp / 10
        # Get rr intervals of consecutive beats, if any.
        rr_intervals = np.diff(qrs_inds)
        rr_intervals = rr_intervals[rr_intervals < self.rr_max]
        if rr_intervals.any():
            rr_recent = np.mean(rr_intervals)
        else:
            rr_recent = self.rr_init
        # If an early qrs was detected, set last_qrs_ind so that it can be
        # picked up.
        last_qrs_ind = min(0, qrs_inds[0] - self.rr_min - 1)
        self._set_init_params(qrs_amp_recent=qrs_amp,
                              noise_amp_recent=noise_amp,
                              rr_recent=rr_recent,
                              last_qrs_ind=last_qrs_ind)
        self.learned_init_params = True
    # Failed to find enough calibration beats. Use default values.
    else:
        if self.verbose:
            print('Failed to find %d beats during learning.'
                  % n_calib_beats)
        self._set_default_init_params()
|
Find a number of consecutive beats and use them to initialize:
- recent qrs amplitude
- recent noise amplitude
- recent rr interval
- qrs detection threshold
The learning works as follows:
- Find all local maxima (largest sample within `qrs_radius`
samples) of the filtered signal.
- Inspect the local maxima until `n_calib_beats` beats are
found:
- Calculate the cross-correlation between a ricker wavelet of
length `qrs_width`, and the filtered signal segment centered
around the local maximum.
- If the cross-correlation exceeds 0.6, classify it as a beat.
- Use the beats to initialize the previously described
parameters.
- If the system fails to find enough beats, the default
parameters will be used instead. See the docstring of
`XQRS._set_default_init_params` for details.
Parameters
----------
n_calib_beats : int, optional
Number of calibration beats to detect for learning
|
def _convert_default_value(self, default):
    """Convert the passed default value to binary.

    The default value (if passed) may be specified as either a `bytes`
    object or a python int or list of ints.  If an int or list of ints is
    passed, it is packed little-endian using ``self.base_type`` as the
    struct format character.  Otherwise, the raw binary data is used.

    A unicode string is only accepted when ``self.special_type`` is
    ``'string'``; it is encoded as utf-8 and null terminated.  Likewise, a
    `bytes` default for a string-typed variable gets an extra null
    terminator appended automatically, so do not null terminate it
    yourself.

    NOTE(review): a `bytearray` default is returned unchanged even for
    string types (no terminator appended) -- confirm whether intended.
    """
    if default is None:
        return None
    if isinstance(default, str):
        if self.special_type == 'string':
            return default.encode('utf-8') + b'\0'
        raise DataError("You can only pass a unicode string if you are declaring a string type config variable", default=default)
    if isinstance(default, (bytes, bytearray)):
        if self.special_type == 'string' and isinstance(default, bytes):
            default += b'\0'
        return default
    if isinstance(default, int):
        default = [default]
    # Pack the list of ints little-endian using the variable's base type.
    format_string = "<" + (self.base_type*len(default))
    return struct.pack(format_string, *default)
|
Convert the passed default value to binary.
The default value (if passed) may be specified as either a `bytes`
object or a python int or list of ints. If an int or list of ints is
passed, it is converted to binary. Otherwise, the raw binary data is
used.
If you pass a bytes object with python_type as True, do not null terminate
it, an additional null termination will be added.
Passing a unicode string is only allowed if as_string is True and it
will be encoded as utf-8 and null terminated for use as a default value.
|
def panels(self):
    """
    Add 2 panels to the figure, top for signal and bottom for gene models
    """
    top_ax = self.fig.add_subplot(211)
    bottom_ax = self.fig.add_subplot(212, sharex=top_ax)
    # Bottom panel renders gene models; top panel renders the signal.
    return (bottom_ax, self.gene_panel), (top_ax, self.signal_panel)
|
Add 2 panels to the figure, top for signal and bottom for gene models
|
def add_document(self, question, answer):
    """Add question answer set to DB.

    Does nothing when an identical (question, answer) pair is already
    stored.

    :param question: A question to an answer
    :type question: :class:`str`
    :param answer: An answer to a question
    :type answer: :class:`str`
    """
    question = question.strip()
    answer = answer.strip()
    session = self.Session()
    # Skip exact duplicates already present in the database.
    if session.query(Document) \
              .filter_by(text=question, answer=answer).count():
        logger.info('Already here: {0} -> {1}'.format(question, answer))
        return
    logger.info('add document: {0} -> {1}'.format(question, answer))
    # Tokenize the question into grams, creating any missing ones.
    grams = self._get_grams(session, question, make=True)
    doc = Document(question, answer)
    doc.grams = list(grams)
    # A new document changes gram frequencies, so refresh their idf values.
    self._recalc_idfs(session, grams)
    session.add(doc)
    session.commit()
|
Add question answer set to DB.
:param question: A question to an answer
:type question: :class:`str`
:param answer: An answer to a question
:type answer: :class:`str`
|
def should_execute(self, workload):
    """
    Decide whether *workload* should run right now.

    When not suspended by i3bar everything runs.  While suspended, only
    workloads whose ``keep_alive`` attribute is truthy are executed; see
    the docs on the suspend_signal_handler method of the io module for
    more information.
    """
    if not self._suspended.is_set():
        return True
    # While suspended, only keep-alive workloads may run.
    return getattr(unwrap_workload(workload), 'keep_alive', False)
|
If we have been suspended by i3bar, only execute those modules that set the keep_alive flag to a truthy
value. See the docs on the suspend_signal_handler method of the io module for more information.
|
def _GetMemberDataTypeMaps(self, data_type_definition, data_type_map_cache):
  """Retrieves the member data type maps.

  Args:
    data_type_definition (DataTypeDefinition): data type definition.
    data_type_map_cache (dict[str, DataTypeMap]): cached data type maps.

  Returns:
    list[DataTypeMap]: member data type maps.

  Raises:
    FormatError: if the data type maps cannot be determined from the data
        type definition.
  """
  if not data_type_definition:
    raise errors.FormatError('Missing data type definition')
  members = getattr(data_type_definition, 'members', None)
  if not members:
    raise errors.FormatError('Invalid data type definition missing members')
  data_type_maps = []
  # Running byte total of the members processed so far; becomes None once a
  # member of unknown size is encountered.
  members_data_size = 0
  for member_definition in members:
    if isinstance(member_definition, data_types.MemberDataTypeDefinition):
      member_definition = member_definition.member_data_type_definition
    if (data_type_definition.byte_order != definitions.BYTE_ORDER_NATIVE and
        member_definition.byte_order == definitions.BYTE_ORDER_NATIVE):
      # Make a copy of the data type definition where byte-order can be
      # safely changed.
      member_definition = copy.copy(member_definition)
      member_definition.name = '_{0:s}_{1:s}'.format(
          data_type_definition.name, member_definition.name)
      member_definition.byte_order = data_type_definition.byte_order
    if member_definition.name not in data_type_map_cache:
      data_type_map = DataTypeMapFactory.CreateDataTypeMapByType(
          member_definition)
      data_type_map_cache[member_definition.name] = data_type_map
    data_type_map = data_type_map_cache[member_definition.name]
    if members_data_size is not None:
      if not isinstance(member_definition, data_types.PaddingDefinition):
        byte_size = member_definition.GetByteSize()
      else:
        # Padding size is whatever is needed to bring the running total up
        # to the next alignment boundary.
        _, byte_size = divmod(
            members_data_size, member_definition.alignment_size)
        if byte_size > 0:
          byte_size = member_definition.alignment_size - byte_size
        data_type_map.byte_size = byte_size
      if byte_size is None:
        members_data_size = None
      else:
        members_data_size += byte_size
    data_type_maps.append(data_type_map)
  return data_type_maps
|
Retrieves the member data type maps.
Args:
data_type_definition (DataTypeDefinition): data type definition.
data_type_map_cache (dict[str, DataTypeMap]): cached data type maps.
Returns:
list[DataTypeMap]: member data type maps.
Raises:
FormatError: if the data type maps cannot be determined from the data
type definition.
|
def make_rw(obj: Any):
    """
    Recursively convert a read-only structure into plain Python dicts and
    lists; any other object passes through unchanged.

    WARNING there is no protection against recursion.
    """
    if isinstance(obj, RoDict):
        return {key: make_rw(value) for key, value in obj.items()}
    if isinstance(obj, RoList):
        return [make_rw(item) for item in obj]
    return obj
|
Copy a RO object into a RW structure made with standard Python classes.
WARNING there is no protection against recursion.
|
def get_policy_config(platform,
                      filters=None,
                      prepend=True,
                      pillar_key='acl',
                      pillarenv=None,
                      saltenv=None,
                      merge_pillar=True,
                      only_lower_merge=False,
                      revision_id=None,
                      revision_no=None,
                      revision_date=True,
                      revision_date_format='%Y/%m/%d'):
    '''
    Return the configuration of the whole policy.

    platform
        The name of the Capirca platform.

    filters
        List of filters for this policy.
        If not specified or empty, will try to load the configuration from the pillar,
        unless ``merge_pillar`` is set as ``False``.

    prepend: ``True``
        When ``merge_pillar`` is set as ``True``, the final list of filters generated by merging
        the filters from ``filters`` with those defined in the pillar (if any): new filters are prepended
        at the beginning, while existing ones will preserve the position. To add the new filters
        at the end of the list, set this argument to ``False``.

    pillar_key: ``acl``
        The key in the pillar containing the default attributes values. Default: ``acl``.

    pillarenv
        Query the master to generate fresh pillar data on the fly,
        specifically from the requested pillar environment.

    saltenv
        Included only for compatibility with
        :conf_minion:`pillarenv_from_saltenv`, and is otherwise ignored.

    merge_pillar: ``True``
        Merge the CLI variables with the pillar. Default: ``True``.

    only_lower_merge: ``False``
        Specify if it should merge only the filters and terms fields. Otherwise it will try
        to merge everything at the policy level. Default: ``False``.

    revision_id
        Add a comment in the policy config having the description for the changes applied.

    revision_no
        The revision count.

    revision_date: ``True``
        Boolean flag: display the date when the policy configuration was generated. Default: ``True``.

    revision_date_format: ``%Y/%m/%d``
        The date format to be used when generating the perforce data. Default: ``%Y/%m/%d`` (<year>/<month>/<day>).

    CLI Example:

    .. code-block:: bash

        salt '*' capirca.get_policy_config juniper pillar_key=netacl

    Output Example:

    .. code-block:: text

        firewall {
            family inet {
                replace:
                /*
                ** $Id:$
                ** $Date:$
                ** $Revision:$
                **
                */
                filter my-filter {
                    term my-term {
                        from {
                            source-port [ 1234 1235 ];
                        }
                        then {
                            reject;
                        }
                    }
                    term my-other-term {
                        from {
                            protocol tcp;
                            source-port 5678-5680;
                        }
                        then accept;
                    }
                }
            }
        }
        firewall {
            family inet {
                replace:
                /*
                ** $Id:$
                ** $Date:$
                ** $Revision:$
                **
                */
                filter my-other-filter {
                    interface-specific;
                    term dummy-term {
                        from {
                            protocol [ tcp udp ];
                        }
                        then {
                            reject;
                        }
                    }
                }
            }
        }

    The policy configuration has been loaded from the pillar, having the following structure:

    .. code-block:: yaml

        netacl:
          - my-filter:
              options:
                - not-interface-specific
              terms:
                - my-term:
                    source_port: [1234, 1235]
                    action: reject
                - my-other-term:
                    source_port:
                      - [5678, 5680]
                    protocol: tcp
                    action: accept
          - my-other-filter:
              terms:
                - dummy-term:
                    protocol:
                      - tcp
                      - udp
                    action: reject
    '''
    if not filters:
        filters = []
    if merge_pillar and not only_lower_merge:
        # the pillar key for the policy config is the `pillar_key` itself
        policy_pillar_cfg = _get_pillar_cfg(pillar_key,
                                            saltenv=saltenv,
                                            pillarenv=pillarenv)
        # now, let's merge everything with the pillar data
        # again, this will not remove any extra filters/terms
        # but it will merge with the pillar data
        # if this behaviour is not wanted, the user can set `merge_pillar` as `False`
        filters = _merge_list_of_dict(filters, policy_pillar_cfg, prepend=prepend)
    policy_object = _get_policy_object(platform,
                                       filters=filters,
                                       pillar_key=pillar_key,
                                       pillarenv=pillarenv,
                                       saltenv=saltenv,
                                       merge_pillar=merge_pillar)
    # Render the Capirca policy object to text, then stamp it with the
    # requested revision metadata.
    policy_text = six.text_type(policy_object)
    return _revision_tag(policy_text,
                         revision_id=revision_id,
                         revision_no=revision_no,
                         revision_date=revision_date,
                         revision_date_format=revision_date_format)
|
Return the configuration of the whole policy.
platform
The name of the Capirca platform.
filters
List of filters for this policy.
If not specified or empty, will try to load the configuration from the pillar,
unless ``merge_pillar`` is set as ``False``.
prepend: ``True``
When ``merge_pillar`` is set as ``True``, the final list of filters generated by merging
the filters from ``filters`` with those defined in the pillar (if any): new filters are prepended
at the beginning, while existing ones will preserve the position. To add the new filters
at the end of the list, set this argument to ``False``.
pillar_key: ``acl``
The key in the pillar containing the default attributes values. Default: ``acl``.
pillarenv
Query the master to generate fresh pillar data on the fly,
specifically from the requested pillar environment.
saltenv
Included only for compatibility with
:conf_minion:`pillarenv_from_saltenv`, and is otherwise ignored.
merge_pillar: ``True``
Merge the CLI variables with the pillar. Default: ``True``.
only_lower_merge: ``False``
Specify if it should merge only the filters and terms fields. Otherwise it will try
to merge everything at the policy level. Default: ``False``.
revision_id
Add a comment in the policy config having the description for the changes applied.
revision_no
The revision count.
revision_date: ``True``
Boolean flag: display the date when the policy configuration was generated. Default: ``True``.
revision_date_format: ``%Y/%m/%d``
The date format to be used when generating the perforce data. Default: ``%Y/%m/%d`` (<year>/<month>/<day>).
CLI Example:
.. code-block:: bash
salt '*' capirca.get_policy_config juniper pillar_key=netacl
Output Example:
.. code-block:: text
firewall {
family inet {
replace:
/*
** $Id:$
** $Date:$
** $Revision:$
**
*/
filter my-filter {
term my-term {
from {
source-port [ 1234 1235 ];
}
then {
reject;
}
}
term my-other-term {
from {
protocol tcp;
source-port 5678-5680;
}
then accept;
}
}
}
}
firewall {
family inet {
replace:
/*
** $Id:$
** $Date:$
** $Revision:$
**
*/
filter my-other-filter {
interface-specific;
term dummy-term {
from {
protocol [ tcp udp ];
}
then {
reject;
}
}
}
}
}
The policy configuration has been loaded from the pillar, having the following structure:
.. code-block:: yaml
netacl:
- my-filter:
options:
- not-interface-specific
terms:
- my-term:
source_port: [1234, 1235]
action: reject
- my-other-term:
source_port:
- [5678, 5680]
protocol: tcp
action: accept
- my-other-filter:
terms:
- dummy-term:
protocol:
- tcp
- udp
action: reject
|
def td_sp(points, speed_threshold):
    """ Top-Down Speed-Based Trajectory Compression Algorithm

    Recursively keeps the point whose speed change relative to its
    neighbours most exceeds ``speed_threshold``, splits the trajectory
    there, and compresses each half; otherwise a segment collapses to
    its two endpoints.

    Detailed in https://www.itc.nl/library/Papers_2003/peer_ref_conf/meratnia_new.pdf

    Args:
        points (:obj:`list` of :obj:`Point`): trajectory or part of it
        speed_threshold (float): max speed error, in km/h
    Returns:
        :obj:`list` of :obj:`Point`, compressed trajectory
    """
    if len(points) <= 2:
        return points

    worst_delta = 0
    split_at = 0
    for idx in range(1, len(points) - 1):
        # `or epsilon` guards against a zero time delta (division by zero).
        dt_in = time_dist(points[idx], points[idx - 1]) or 0.000000001
        speed_in = loc_dist(points[idx], points[idx - 1]) / dt_in
        dt_out = time_dist(points[idx + 1], points[idx]) or 0.000000001
        speed_out = loc_dist(points[idx + 1], points[idx]) / dt_out
        delta = abs(speed_out - speed_in)
        if delta > worst_delta:
            worst_delta = delta
            split_at = idx

    if worst_delta > speed_threshold:
        head = td_sp(points[:split_at], speed_threshold)
        head.extend(td_sp(points[split_at:], speed_threshold))
        return head
    return [points[0], points[-1]]
|
Top-Down Speed-Based Trajectory Compression Algorithm
Detailed in https://www.itc.nl/library/Papers_2003/peer_ref_conf/meratnia_new.pdf
Args:
points (:obj:`list` of :obj:`Point`): trajectory or part of it
speed_threshold (float): max speed error, in km/h
Returns:
:obj:`list` of :obj:`Point`, compressed trajectory
|
def delete_bucket():
    """
    Delete S3 Bucket.

    Parses the module-level ``parser`` for AWS credentials and the bucket
    name, then deletes the named bucket.
    """
    # Bug fix: ``parser.parse_args`` was referenced without calling it, so
    # ``args`` was the bound method itself and the attribute accesses below
    # raised AttributeError.
    args = parser.parse_args()
    s3_bucket(args.aws_access_key_id, args.aws_secret_access_key, args.bucket_name)().delete()
|
Delete S3 Bucket
|
def construct(cls, name: str, declared_fields: typing.List[tuple]):
    """
    Utility method packaged along with the factory to be able to construct Request Object
    classes on the fly.

    Example:

    .. code-block:: python

        UserShowRequestObject = Factory.create_request_object(
            'CreateRequestObject',
            [('identifier', int, {'required': True}),
             ('name', str, {'required': True}),
             ('desc', str, {'default': 'Blah'})])

    And then create a request object like so:

    .. code-block:: python

        request_object = UserShowRequestObject.from_dict(
            {'identifier': 112,
             'name': 'Jane',
             'desc': "Doer is not Doe"})

    The third tuple element is a `dict` of the form: {'required': True, 'default': 'John'}

    * ``required`` is False by default, so ``{required: False, default: 'John'}`` and \
      ``{default: 'John'}`` evaluate to the same field definition
    * ``default`` is a *concrete* value of the correct type
    """
    # FIXME Refactor this method to make it simpler
    @classmethod
    def from_dict(cls, adict):
        """Validate and initialize a Request Object from a plain dict.

        Returns an ``InvalidRequestObject`` carrying per-field errors when
        validation fails; otherwise an initialized instance of ``cls``.
        """
        # Local import so the generated class works without touching the
        # module's top-level imports.
        from dataclasses import MISSING

        invalid_req = InvalidRequestObject()
        values = {}
        for item in fields(cls):
            value = None
            if item.metadata and item.metadata.get('required'):
                if item.name not in adict or adict.get(item.name) is None:
                    invalid_req.add_error(item.name, 'is required')
                else:
                    value = adict[item.name]
            elif item.name in adict:
                value = adict[item.name]
            # Bug fix: the original tested ``elif item.default:`` which
            # (a) skipped legitimate falsy defaults such as 0, False or ''
            # and (b) treated the truthy ``dataclasses.MISSING`` sentinel as
            # a real default, leaking the sentinel into the values.
            elif item.default is not MISSING:
                value = item.default
            try:
                if item.type not in [typing.Any, 'typing.Any'] and value is not None:
                    if item.type in [int, float, str, bool, list, dict, tuple,
                                     datetime.date, datetime.datetime]:
                        # Coerce builtin/primitive values to the declared type.
                        value = item.type(value)
                    else:
                        if not (isinstance(value, item.type) or issubclass(value, item.type)):
                            invalid_req.add_error(
                                item.name,
                                '{} should be of type {}'.format(item.name, item.type))
            except Exception:
                # Coercion or the issubclass() check blew up: record as invalid.
                invalid_req.add_error(
                    item.name,
                    'Value {} for {} is invalid'.format(value, item.name))
            values[item.name] = value
        # Return errors, if any, instead of a request object
        if invalid_req.has_errors:
            return invalid_req
        # Return the initialized Request Object instance
        return cls(**values)

    formatted_fields = cls._format_fields(declared_fields)
    dc = make_dataclass(name, formatted_fields,
                        namespace={'from_dict': from_dict, 'is_valid': True})
    return dc
|
Utility method packaged along with the factory to be able to construct Request Object
classes on the fly.
Example:
.. code-block:: python
UserShowRequestObject = Factory.create_request_object(
'CreateRequestObject',
[('identifier', int, {'required': True}),
('name', str, {'required': True}),
('desc', str, {'default': 'Blah'})])
And then create a request object like so:
.. code-block:: python
request_object = UserShowRequestObject.from_dict(
{'identifier': 112,
'name': 'Jane',
'desc': "Doer is not Doe"})
The third tuple element is a `dict` of the form: {'required': True, 'default': 'John'}
* ``required`` is False by default, so ``{required: False, default: 'John'}`` and \
``{default: 'John'}`` evaluate to the same field definition
* ``default`` is a *concrete* value of the correct type
|
def get_type_properties(self, property_obj, name, additional_prop=False):
    """
    Extend the parent's 'Get internal properties of property' method.

    After delegating to the parent, if the resolved type names a schema in
    ``self.storage`` and the property declares ``additionalProperties``,
    register the mapped value type on that schema: primitive value types
    become the schema's ``type_format``, non-primitive ones are wrapped as
    map schemas and recorded as nested schemas.
    """
    prop_type, prop_format, prop_dict = super(Schema, self).get_type_properties(
        property_obj, name, additional_prop=additional_prop)
    target_schema = self.storage.get(prop_type)
    if target_schema and ('additionalProperties' in property_obj):
        mapped_type, _mapped_format, _mapped_dict = super(Schema, self).get_type_properties(
            property_obj['additionalProperties'], '{}-mapped'.format(name),
            additional_prop=True)
        if mapped_type in PRIMITIVE_TYPES:
            target_schema.type_format = mapped_type
        else:
            SchemaMapWrapper.wrap(self.storage.get(mapped_type))
            target_schema.nested_schemas.add(mapped_type)
    return prop_type, prop_format, prop_dict
|
Extend parents 'Get internal properties of property'-method
|
def schemaValidateOneElement(self, elem):
    """Validate a branch of a tree, starting with the given @elem. """
    # Unwrap the Python proxy to the underlying C object (None stays None).
    elem__o = None if elem is None else elem._o
    return libxml2mod.xmlSchemaValidateOneElement(self._o, elem__o)
|
Validate a branch of a tree, starting with the given @elem.
|
def _create_emitter(self, event):
"""Create a method that emits an event of the same name."""
if not hasattr(self, event):
setattr(self, event,
lambda *args, **kwargs: self.emit(event, *args, **kwargs))
|
Create a method that emits an event of the same name.
|
def call(self, op, args):
    """
    Calls operation `op` on args `args` with this backend.

    :return: A backend object representing the result.
    """
    # Convert the arguments into this backend's representation first.
    return self._call(op, self.convert_list(args))
|
Calls operation `op` on args `args` with this backend.
:return: A backend object representing the result.
|
def vinet_v(p, v0, k0, k0p, min_strain=0.01):
    """
    find volume at given pressure

    :param p: pressure in GPa
    :param v0: unit-cell volume in A^3 at 1 bar
    :param k0: bulk modulus at reference conditions
    :param k0p: pressure derivative of bulk modulus at reference conditions
    :param min_strain: defining minimum v/v0 value to search volume for
    :return: unit cell volume at high pressure in A^3
    :note: wrapper function vectorizing vinet_v_single
    """
    scalar_solver = vinet_v_single
    if isuncertainties([p, v0, k0, k0p]):
        # Propagate uncertainties through the scalar solver.
        scalar_solver = uct.wrap(scalar_solver)
    vectorized = np.vectorize(scalar_solver, excluded=[1, 2, 3, 4])
    return vectorized(p, v0, k0, k0p, min_strain=min_strain)
|
find volume at given pressure
:param p: pressure in GPa
:param v0: unit-cell volume in A^3 at 1 bar
:param k0: bulk modulus at reference conditions
:param k0p: pressure derivative of bulk modulus at reference conditions
:param min_strain: defining minimum v/v0 value to search volume for
:return: unit cell volume at high pressure in A^3
:note: wrapper function vectorizing vinet_v_single
|
def popup(self, title, callfn, initialdir=None, filename=None):
    """Let user select and load file."""
    # Remember the callback to invoke once the user picks a file.
    self.cb = callfn
    dialog = self.filew
    dialog.set_title(title)
    if initialdir:
        dialog.set_current_folder(initialdir)
    if filename:
        # set_current_name() pre-fills the name field in the dialog.
        dialog.set_current_name(filename)
    dialog.show()
|
Let user select and load file.
|
def web_services_from_str(
    list_splitter_fn=ujson.loads,
):
    """Build a configman from-string converter for a list of web services.

    parameters:
        list_splitter_fn - a function that will take the json compatible string
            representing a list of mappings.
    """
    # -------------------------------------------------------------------------
    def class_list_converter(collector_services_str):
        """This function becomes the actual converter used by configman to
        take a string and convert it into the nested sequence of Namespaces,
        one for each class in the list. It does this by creating a proxy
        class stuffed with its own 'required_config' that's dynamically
        generated."""
        # NOTE(review): ``basestring`` makes this Python-2-only code.
        if isinstance(collector_services_str, basestring):
            all_collector_services = list_splitter_fn(collector_services_str)
        else:
            raise TypeError('must be derivative of a basestring')
        # =====================================================================
        class InnerClassList(RequiredConfig):
            """This nested class is a proxy list for the classes. It collects
            all the config requirements for the listed classes and places them
            each into their own Namespace.
            """
            # we're dynamically creating a class here. The following block of
            # code is actually adding class level attributes to this new class
            # 1st requirement for configman
            required_config = Namespace()
            # to help the programmer know what Namespaces we added
            subordinate_namespace_names = []
            # for display
            original_input = collector_services_str.replace('\n', '\\n')
            # for each class in the class list
            service_list = []
            # NOTE: this loop runs at class-definition time, so each entry
            # mutates the class attributes defined above.
            for namespace_index, collector_service_element in enumerate(
                all_collector_services
            ):
                service_name = collector_service_element['name']
                service_uri = collector_service_element['uri']
                service_implementation_class = class_converter(
                    collector_service_element['service_implementation_class']
                )
                service_list.append(
                    (
                        service_name,
                        service_uri,
                        service_implementation_class,
                    )
                )
                subordinate_namespace_names.append(service_name)
                # create the new Namespace
                required_config.namespace(service_name)
                a_class_namespace = required_config[service_name]
                # NOTE(review): the doc string below is missing a space between
                # "implements" and "the" and misspells "associated"; it is a
                # runtime string, so it is left unchanged here.
                a_class_namespace.add_option(
                    "service_implementation_class",
                    doc='fully qualified classname for a class that implements'
                    'the action associtated with the URI',
                    default=service_implementation_class,
                    from_string_converter=class_converter,
                    likely_to_be_changed=True,
                )
                a_class_namespace.add_option(
                    "uri",
                    doc='uri for this service',
                    default=service_uri,
                    likely_to_be_changed=True,
                )
            @classmethod
            def to_str(cls):
                """this method takes this inner class object and turns it back
                into the original string of classnames. This is used
                primarily as for the output of the 'help' option"""
                return "'%s'" % cls.original_input
        return InnerClassList  # result of class_list_converter
    return class_list_converter
|
parameters:
list_splitter_fn - a function that will take the JSON-compatible string
representing a list of mappings.
|
def args_ok(self, options, args):
    """Check for conflicts and problems in the options.

    Returns True if everything is ok, or False if not.
    """
    # Erase/execute cannot be combined with any of the reporting actions.
    conflict = next(
        ((i, j)
         for i in ['erase', 'execute'] if i in options.actions
         for j in ['annotate', 'html', 'report', 'combine'] if j in options.actions),
        None,
    )
    if conflict is not None:
        self.help_fn("You can't specify the '%s' and '%s' "
                     "options at the same time." % conflict)
        return False
    if not options.actions:
        self.help_fn(
            "You must specify at least one of -e, -x, -c, -r, -a, or -b."
        )
        return False
    # Only these actions accept positional arguments.
    takes_args = any(
        action in options.actions
        for action in ('execute', 'annotate', 'html', 'debug', 'report', 'xml')
    )
    if args and not takes_args:
        self.help_fn("Unexpected arguments: %s" % " ".join(args))
        return False
    if not args and 'execute' in options.actions:
        self.help_fn("Nothing to do.")
        return False
    return True
|
Check for conflicts and problems in the options.
Returns True if everything is ok, or False if not.
|
def from_config(self, k, v):
    """
    Hook method that allows converting values from the dictionary.

    :param k: the key in the dictionary
    :type k: str
    :param v: the value
    :type v: object
    :return: the potentially parsed value
    :rtype: object
    """
    if k != "setup":
        return super(DataGenerator, self).from_config(k, v)
    # "setup" holds a command line describing a data generator instance.
    return from_commandline(v, classname=to_commandline(datagen.DataGenerator()))
|
Hook method that allows converting values from the dictionary.
:param k: the key in the dictionary
:type k: str
:param v: the value
:type v: object
:return: the potentially parsed value
:rtype: object
|
def run_check200(_):
    '''
    Crawl every configured post URL and report the ones that do not
    return HTTP 200, writing an HTML report (``xx200_<date>.html``)
    with the failing URL, status code and an edit link.
    '''
    # Collect report rows in a list and join once at the end
    # (the original built the report with quadratic string +=).
    rows = []
    idx = 1
    for kind in config.router_post.keys():
        posts = MPost.query_all(kind=kind, limit=20000)
        for post in posts:
            the_url0 = '{site_url}/{kind_url}/{uid}'.format(
                site_url=config.SITE_CFG['site_url'],
                kind_url=config.router_post[post.kind],
                uid=post.uid)
            the_url = '{site_url}/{kind_url}/_edit/{uid}'.format(
                site_url=config.SITE_CFG['site_url'],
                kind_url=config.router_post[post.kind],
                uid=post.uid)
            req = requests.get(the_url0)
            # Direct test replaces the original `if ... == 200: pass / else:`
            # inverted-condition anti-pattern; only failures are reported.
            if req.status_code != 200:
                print(the_url0)
                rows.append(DT_STR.format(idx=str(idx).zfill(2), url0=the_url0,
                                          code=req.status_code, edit_link=the_url))
                idx = idx + 1
    time_local = time.localtime(timestamp())
    with open('xx200_{d}.html'.format(d=str(time.strftime("%Y_%m_%d", time_local))), 'w') as fileo:
        fileo.write(HTML_TMPL.format(cnt=''.join(rows)))
    print('Checking 200 finished.')
|
Running the script.
|
def pool_full(self, session):
    """
    Returns a boolean as to whether the slot pool has room for this
    task to run
    """
    pool_name = self.task.pool
    # Tasks without a pool can always run.
    if not pool_name:
        return False
    pool = session.query(Pool).filter(Pool.pool == pool_name).first()
    if not pool:
        return False
    return pool.open_slots(session=session) <= 0
|
Returns a boolean as to whether the slot pool has room for this
task to run
|
def _instantiate_layers(self):
    """Instantiates all the convolutional modules used in the network."""
    # We enter the module's variable scope only so submodules get named
    # correctly (no variables are created), which is why the same-graph
    # check is skipped: the module may be constructed in one graph (e.g.
    # with `defun` in some default graph) and connected in another.
    with self._enter_variable_scope(check_same_graph=False):
        layers = []
        for i in xrange(self._num_layers):
            layers.append(conv.Conv2D(
                name="conv_2d_{}".format(i),
                output_channels=self._output_channels[i],
                kernel_shape=self._kernel_shapes[i],
                stride=self._strides[i],
                rate=self._rates[i],
                padding=self._paddings[i],
                use_bias=self._use_bias[i],
                initializers=self._initializers,
                partitioners=self._partitioners,
                regularizers=self._regularizers,
                data_format=self._data_format))
        self._layers = tuple(layers)
|
Instantiates all the convolutional modules used in the network.
|
def find_global(self, pattern):
    """
    Searches for the pattern in the whole process memory space and returns the first occurrence.
    This is exhaustive!
    """
    matches = self.reader.search(pattern)
    # -1 signals "not found", mirroring str.find() semantics.
    return matches[0] if matches else -1
|
Searches for the pattern in the whole process memory space and returns the first occurrence.
This is exhaustive!
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.