code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def _htpasswd(username, password, **kwargs):
'''
Provide authentication via Apache-style htpasswd files
'''
from passlib.apache import HtpasswdFile
pwfile = HtpasswdFile(kwargs['filename'])
# passlib below version 1.6 uses 'verify' function instead of 'check_password'
if salt.utils.versions.version_cmp(kwargs['passlib_version'], '1.6') < 0:
return pwfile.verify(username, password)
else:
return pwfile.check_password(username, password) | Provide authentication via Apache-style htpasswd files | Below is the instruction that describes the task:
### Input:
Provide authentication via Apache-style htpasswd files
### Response:
def _htpasswd(username, password, **kwargs):
    '''
    Provide authentication via Apache-style htpasswd files
    '''
    from passlib.apache import HtpasswdFile

    htpasswd = HtpasswdFile(kwargs['filename'])
    # passlib renamed 'verify' to 'check_password' in release 1.6, so pick
    # the method that matches the installed passlib version.
    old_passlib = salt.utils.versions.version_cmp(
        kwargs['passlib_version'], '1.6') < 0
    checker = htpasswd.verify if old_passlib else htpasswd.check_password
    return checker(username, password)
def to_png_file(self, fname: str):
"""
write a '.png' file.
"""
cmd = pipes.Template()
cmd.append('dot -Tpng > %s' % fname, '-.')
with cmd.open('pipefile', 'w') as f:
f.write(self.to_dot()) | write a '.png' file. | Below is the instruction that describes the task:
### Input:
write a '.png' file.
### Response:
def to_png_file(self, fname: str):
    """
    Write a '.png' file by rendering this object's dot representation
    with the Graphviz ``dot`` tool.

    :param fname: path of the PNG file to create (overwritten if present).
    :raises subprocess.CalledProcessError: if ``dot`` exits non-zero.
    """
    # The original implementation used the ``pipes`` module (deprecated and
    # removed in Python 3.13) and interpolated ``fname`` into a shell
    # command, which allowed shell injection through the file name.
    # ``subprocess.run`` with an argv list avoids both problems.
    import subprocess
    with open(fname, 'wb') as out:
        subprocess.run(
            ['dot', '-Tpng'],
            input=self.to_dot().encode('utf-8'),
            stdout=out,
            check=True,
        )
def load(self, coll):
"""Load and receive the metadata associated with a collection.
If the metadata for the collection is not cached yet its metadata file is read in and stored.
If the cache has seen the collection before the mtime of the metadata file is checked and if it is more recent
than the cached time, the cache is updated and returned otherwise the cached version is returned.
:param str coll: Name of a collection
:return: The cached metadata for a collection
:rtype: dict
"""
path = self.template_str.format(coll=coll)
try:
mtime = os.path.getmtime(path)
obj = self.cache.get(path)
except:
return {}
if not obj:
return self.store_new(coll, path, mtime)
cached_mtime, data = obj
if mtime == cached_mtime == mtime:
return obj
return self.store_new(coll, path, mtime) | Load and receive the metadata associated with a collection.
If the metadata for the collection is not cached yet its metadata file is read in and stored.
If the cache has seen the collection before the mtime of the metadata file is checked and if it is more recent
than the cached time, the cache is updated and returned otherwise the cached version is returned.
:param str coll: Name of a collection
:return: The cached metadata for a collection
:rtype: dict | Below is the instruction that describes the task:
### Input:
Load and receive the metadata associated with a collection.
If the metadata for the collection is not cached yet its metadata file is read in and stored.
If the cache has seen the collection before the mtime of the metadata file is checked and if it is more recent
than the cached time, the cache is updated and returned otherwise the cached version is returned.
:param str coll: Name of a collection
:return: The cached metadata for a collection
:rtype: dict
### Response:
def load(self, coll):
    """Load and return the metadata associated with a collection.

    If the metadata for the collection is not cached yet its metadata file
    is read in and stored. If the cache has seen the collection before, the
    mtime of the metadata file is checked: when it differs from the cached
    time the cache is refreshed, otherwise the cached version is returned.
    If the metadata file cannot be accessed at all, an empty dict is
    returned.

    :param str coll: Name of a collection
    :return: The cached metadata for a collection
    :rtype: dict
    """
    path = self.template_str.format(coll=coll)
    try:
        mtime = os.path.getmtime(path)
        obj = self.cache.get(path)
    except OSError:
        # Metadata file is missing or unreadable -- narrowed from a bare
        # ``except:`` which also swallowed programming errors.
        return {}
    if not obj:
        return self.store_new(coll, path, mtime)
    cached_mtime, data = obj
    if mtime == cached_mtime:
        # Cache hit: return the metadata dict itself, not the internal
        # (mtime, data) cache tuple (the docstring promises a dict).
        return data
    return self.store_new(coll, path, mtime)
def search_in_rubric(self, **kwargs):
"""Firms search in rubric
http://api.2gis.ru/doc/firms/searches/searchinrubric/
"""
point = kwargs.pop('point', False)
if point:
kwargs['point'] = '%s,%s' % point
bound = kwargs.pop('bound', False)
if bound:
kwargs['bound[point1]'] = bound[0]
kwargs['bound[point2]'] = bound[1]
filters = kwargs.pop('filters', False)
if filters:
for k, v in filters.items():
kwargs['filters[%s]' % k] = v
return self._search_in_rubric(**kwargs) | Firms search in rubric
http://api.2gis.ru/doc/firms/searches/searchinrubric/ | Below is the instruction that describes the task:
### Input:
Firms search in rubric
http://api.2gis.ru/doc/firms/searches/searchinrubric/
### Response:
def search_in_rubric(self, **kwargs):
    """Firms search in rubric
    http://api.2gis.ru/doc/firms/searches/searchinrubric/
    """
    # Pull the convenience arguments out first, then flatten them into the
    # wire-format keys the underlying query endpoint expects.
    point = kwargs.pop('point', False)
    bound = kwargs.pop('bound', False)
    filters = kwargs.pop('filters', False)
    if point:
        kwargs['point'] = '%s,%s' % point
    if bound:
        kwargs['bound[point1]'] = bound[0]
        kwargs['bound[point2]'] = bound[1]
    if filters:
        kwargs.update(('filters[%s]' % key, value)
                      for key, value in filters.items())
    return self._search_in_rubric(**kwargs)
def hash_dir(path):
"""Write directory at path to Git index, return its SHA1 as a string."""
dir_hash = {}
for root, dirs, files in os.walk(path, topdown=False):
f_hash = ((f, hash_file(join(root, f))) for f in files)
d_hash = ((d, dir_hash[join(root, d)]) for d in dirs)
# split+join normalizes paths on Windows (note the imports)
dir_hash[join(*split(root))] = _mktree(f_hash, d_hash)
return dir_hash[path] | Write directory at path to Git index, return its SHA1 as a string. | Below is the instruction that describes the task:
### Input:
Write directory at path to Git index, return its SHA1 as a string.
### Response:
def hash_dir(path):
    """Write directory at path to Git index, return its SHA1 as a string."""
    dir_hash = {}
    for root, dirs, files in os.walk(path, topdown=False):
        f_hash = ((f, hash_file(join(root, f))) for f in files)
        d_hash = ((d, dir_hash[join(root, d)]) for d in dirs)
        # split+join normalizes paths on Windows (note the imports)
        dir_hash[join(*split(root))] = _mktree(f_hash, d_hash)
    # Look the top-level tree up under the same normalized key it was stored
    # under; indexing with the raw ``path`` raised KeyError whenever the
    # split+join normalization changed the spelling of the path.
    return dir_hash[join(*split(path))]
def namespacereveal_sanity_check( namespace_id, version, lifetime, coeff, base, bucket_exponents, nonalpha_discount, no_vowel_discount ):
"""
Verify the validity of a namespace reveal.
Return True if valid
Raise an Exception if not valid.
"""
# sanity check
if not is_b40( namespace_id ) or "+" in namespace_id or namespace_id.count(".") > 0:
raise Exception("Namespace ID '%s' has non-base-38 characters" % namespace_id)
if len(namespace_id) > LENGTHS['blockchain_id_namespace_id']:
raise Exception("Invalid namespace ID length for '%s' (expected length between 1 and %s)" % (namespace_id, LENGTHS['blockchain_id_namespace_id']))
if version not in [NAMESPACE_VERSION_PAY_TO_BURN, NAMESPACE_VERSION_PAY_TO_CREATOR, NAMESPACE_VERSION_PAY_WITH_STACKS]:
raise Exception("Invalid namespace version bits {:x}".format(version))
if lifetime < 0 or lifetime > (2**32 - 1):
lifetime = NAMESPACE_LIFE_INFINITE
if coeff < 0 or coeff > 255:
raise Exception("Invalid cost multiplier %s: must be in range [0, 256)" % coeff)
if base < 0 or base > 255:
raise Exception("Invalid base price %s: must be in range [0, 256)" % base)
if type(bucket_exponents) != list:
raise Exception("Bucket exponents must be a list")
if len(bucket_exponents) != 16:
raise Exception("Exactly 16 buckets required")
for i in xrange(0, len(bucket_exponents)):
if bucket_exponents[i] < 0 or bucket_exponents[i] > 15:
raise Exception("Invalid bucket exponent %s (must be in range [0, 16)" % bucket_exponents[i])
if nonalpha_discount <= 0 or nonalpha_discount > 15:
raise Exception("Invalid non-alpha discount %s: must be in range [0, 16)" % nonalpha_discount)
if no_vowel_discount <= 0 or no_vowel_discount > 15:
raise Exception("Invalid no-vowel discount %s: must be in range [0, 16)" % no_vowel_discount)
return True | Verify the validity of a namespace reveal.
Return True if valid
Raise an Exception if not valid. | Below is the instruction that describes the task:
### Input:
Verify the validity of a namespace reveal.
Return True if valid
Raise an Exception if not valid.
### Response:
def namespacereveal_sanity_check(namespace_id, version, lifetime, coeff, base, bucket_exponents, nonalpha_discount, no_vowel_discount):
    """
    Verify the validity of a namespace reveal.

    Return True if valid.
    Raise an Exception describing the first invalid field otherwise.
    """
    # The namespace ID must use the base-40 alphabet (per is_b40) and may
    # not contain '+' or '.'.  Message fixed: it previously said "base-38".
    if not is_b40(namespace_id) or "+" in namespace_id or namespace_id.count(".") > 0:
        raise Exception("Namespace ID '%s' has non-base-40 characters" % namespace_id)
    if len(namespace_id) > LENGTHS['blockchain_id_namespace_id']:
        raise Exception("Invalid namespace ID length for '%s' (expected length between 1 and %s)" % (namespace_id, LENGTHS['blockchain_id_namespace_id']))
    if version not in [NAMESPACE_VERSION_PAY_TO_BURN, NAMESPACE_VERSION_PAY_TO_CREATOR, NAMESPACE_VERSION_PAY_WITH_STACKS]:
        raise Exception("Invalid namespace version bits {:x}".format(version))
    # Out-of-range lifetimes are deliberately coerced to "never expires"
    # rather than rejected.  NOTE(review): this rebinds the local only;
    # callers never see the coerced value -- confirm that is intended.
    if lifetime < 0 or lifetime > (2**32 - 1):
        lifetime = NAMESPACE_LIFE_INFINITE
    if coeff < 0 or coeff > 255:
        raise Exception("Invalid cost multiplier %s: must be in range [0, 256)" % coeff)
    if base < 0 or base > 255:
        raise Exception("Invalid base price %s: must be in range [0, 256)" % base)
    # isinstance() instead of ``type(x) != list``; same accepted inputs.
    if not isinstance(bucket_exponents, list):
        raise Exception("Bucket exponents must be a list")
    if len(bucket_exponents) != 16:
        raise Exception("Exactly 16 buckets required")
    # range() replaces Python-2-only xrange(), which raises NameError on
    # Python 3.
    for i in range(0, len(bucket_exponents)):
        if bucket_exponents[i] < 0 or bucket_exponents[i] > 15:
            raise Exception("Invalid bucket exponent %s (must be in range [0, 16)" % bucket_exponents[i])
    if nonalpha_discount <= 0 or nonalpha_discount > 15:
        raise Exception("Invalid non-alpha discount %s: must be in range [0, 16)" % nonalpha_discount)
    if no_vowel_discount <= 0 or no_vowel_discount > 15:
        raise Exception("Invalid no-vowel discount %s: must be in range [0, 16)" % no_vowel_discount)
    return True
def run_local(
context: cli.CommandContext,
project: projects.Project,
project_steps: typing.List[projects.ProjectStep],
force: bool,
continue_after: bool,
single_step: bool,
limit: int,
print_status: bool,
skip_library_reload: bool = False
) -> environ.Response:
"""
Execute the run command locally within this cauldron environment
:param context:
:param project:
:param project_steps:
:param force:
:param continue_after:
:param single_step:
:param limit:
:param print_status:
:param skip_library_reload:
Whether or not to skip reloading all project libraries prior to
execution of the project. By default this is False in which case
the project libraries are reloaded prior to execution.
:return:
"""
skip_reload = (
skip_library_reload
or environ.modes.has(environ.modes.TESTING)
)
if not skip_reload:
runner.reload_libraries()
environ.log_header('RUNNING', 5)
steps_run = []
if single_step:
# If the user specifies the single step flag, only run one step. Force
# the step to be run if they specified it explicitly
ps = project_steps[0] if len(project_steps) > 0 else None
force = force or (single_step and bool(ps is not None))
steps_run = runner.section(
response=context.response,
project=project,
starting=ps,
limit=1,
force=force
)
elif continue_after or len(project_steps) == 0:
# If the continue after flag is set, start with the specified step
# and run the rest of the project after that. Or, if no steps were
# specified, run the entire project with the specified flags.
ps = project_steps[0] if len(project_steps) > 0 else None
steps_run = runner.complete(
context.response,
project,
ps,
force=force,
limit=limit
)
else:
for ps in project_steps:
steps_run += runner.section(
response=context.response,
project=project,
starting=ps,
limit=max(1, limit),
force=force or (limit < 1 and len(project_steps) < 2),
skips=steps_run + []
)
project.write()
environ.log_blanks()
step_changes = []
for ps in steps_run:
step_changes.append(dict(
name=ps.definition.name,
action='updated',
step=writing.step_writer.serialize(ps)._asdict()
))
context.response.update(step_changes=step_changes)
if print_status or context.response.failed:
context.response.update(project=project.kernel_serialize())
return context.response | Execute the run command locally within this cauldron environment
:param context:
:param project:
:param project_steps:
:param force:
:param continue_after:
:param single_step:
:param limit:
:param print_status:
:param skip_library_reload:
Whether or not to skip reloading all project libraries prior to
execution of the project. By default this is False in which case
the project libraries are reloaded prior to execution.
:return: | Below is the instruction that describes the task:
### Input:
Execute the run command locally within this cauldron environment
:param context:
:param project:
:param project_steps:
:param force:
:param continue_after:
:param single_step:
:param limit:
:param print_status:
:param skip_library_reload:
Whether or not to skip reloading all project libraries prior to
execution of the project. By default this is False in which case
the project libraries are reloaded prior to execution.
:return:
### Response:
def run_local(
        context: cli.CommandContext,
        project: projects.Project,
        project_steps: typing.List[projects.ProjectStep],
        force: bool,
        continue_after: bool,
        single_step: bool,
        limit: int,
        print_status: bool,
        skip_library_reload: bool = False
) -> environ.Response:
    """
    Execute the run command locally within this cauldron environment

    :param context:
        CLI command context; its ``response`` object accumulates the results.
    :param project:
        Project whose steps are run; it is written back via ``project.write()``.
    :param project_steps:
        The explicitly selected steps (may be empty, meaning the whole project).
    :param force:
        Passed through to the runner to force step execution.
    :param continue_after:
        Run the remainder of the project starting from the first selected step.
    :param single_step:
        Run only the first selected step.
    :param limit:
        Step-count limit passed through to the runner.
    :param print_status:
        When True, include the serialized project in the response.
    :param skip_library_reload:
        Whether or not to skip reloading all project libraries prior to
        execution of the project. By default this is False in which case
        the project libraries are reloaded prior to execution.
    :return:
    """
    # Library reloading is skipped either on explicit request or when running
    # under the test environment.
    skip_reload = (
        skip_library_reload
        or environ.modes.has(environ.modes.TESTING)
    )

    if not skip_reload:
        runner.reload_libraries()

    environ.log_header('RUNNING', 5)

    steps_run = []

    if single_step:
        # If the user specifies the single step flag, only run one step. Force
        # the step to be run if they specified it explicitly
        ps = project_steps[0] if len(project_steps) > 0 else None
        force = force or (single_step and bool(ps is not None))
        steps_run = runner.section(
            response=context.response,
            project=project,
            starting=ps,
            limit=1,
            force=force
        )
    elif continue_after or len(project_steps) == 0:
        # If the continue after flag is set, start with the specified step
        # and run the rest of the project after that. Or, if no steps were
        # specified, run the entire project with the specified flags.
        ps = project_steps[0] if len(project_steps) > 0 else None
        steps_run = runner.complete(
            context.response,
            project,
            ps,
            force=force,
            limit=limit
        )
    else:
        # Otherwise run each selected step as its own section, telling each
        # section to skip steps already executed by an earlier one.
        for ps in project_steps:
            steps_run += runner.section(
                response=context.response,
                project=project,
                starting=ps,
                limit=max(1, limit),
                force=force or (limit < 1 and len(project_steps) < 2),
                skips=steps_run + []
            )

    # Persist the project state and report every executed step back to the
    # caller as an 'updated' change entry.
    project.write()
    environ.log_blanks()

    step_changes = []
    for ps in steps_run:
        step_changes.append(dict(
            name=ps.definition.name,
            action='updated',
            step=writing.step_writer.serialize(ps)._asdict()
        ))

    context.response.update(step_changes=step_changes)

    # Always attach the serialized project on failure so errors are visible.
    if print_status or context.response.failed:
        context.response.update(project=project.kernel_serialize())

    return context.response
def read_iiasa(name, meta=False, **kwargs):
"""
Query an IIASA database. See Connection.query() for more documentation
Parameters
----------
name : str
A valid IIASA database name, see pyam.iiasa.valid_connection_names()
meta : bool or list of strings
If not False, also include metadata indicators (or subset if provided).
kwargs :
Arguments for pyam.iiasa.Connection.query()
"""
conn = Connection(name)
# data
df = conn.query(**kwargs)
df = IamDataFrame(df)
# metadata
if meta:
mdf = conn.metadata()
# only data for models/scenarios in df
mdf = mdf[mdf.model.isin(df['model'].unique()) &
mdf.scenario.isin(df['scenario'].unique())]
# get subset of data if meta is a list
if islistable(meta):
mdf = mdf[['model', 'scenario'] + meta]
mdf = mdf.set_index(['model', 'scenario'])
# we have to loop here because `set_meta()` can only take series
for col in mdf:
df.set_meta(mdf[col])
return df | Query an IIASA database. See Connection.query() for more documentation
Parameters
----------
name : str
A valid IIASA database name, see pyam.iiasa.valid_connection_names()
meta : bool or list of strings
If not False, also include metadata indicators (or subset if provided).
kwargs :
Arguments for pyam.iiasa.Connection.query() | Below is the instruction that describes the task:
### Input:
Query an IIASA database. See Connection.query() for more documentation
Parameters
----------
name : str
A valid IIASA database name, see pyam.iiasa.valid_connection_names()
meta : bool or list of strings
If not False, also include metadata indicators (or subset if provided).
kwargs :
Arguments for pyam.iiasa.Connection.query()
### Response:
def read_iiasa(name, meta=False, **kwargs):
    """
    Query an IIASA database. See Connection.query() for more documentation

    Parameters
    ----------
    name : str
        A valid IIASA database name, see pyam.iiasa.valid_connection_names()
    meta : bool or list of strings
        If not False, also include metadata indicators (or subset if provided).
    kwargs :
        Arguments for pyam.iiasa.Connection.query()
    """
    conn = Connection(name)
    df = IamDataFrame(conn.query(**kwargs))
    if not meta:
        return df
    # Restrict the metadata to the model/scenario combinations that actually
    # occur in the queried data.
    mdf = conn.metadata()
    models = df['model'].unique()
    scenarios = df['scenario'].unique()
    mdf = mdf[mdf.model.isin(models) & mdf.scenario.isin(scenarios)]
    if islistable(meta):
        # Keep only the requested indicator columns.
        mdf = mdf[['model', 'scenario'] + meta]
    mdf = mdf.set_index(['model', 'scenario'])
    # `set_meta()` accepts a single series, so assign column by column.
    for column in mdf:
        df.set_meta(mdf[column])
    return df
def select(self, *features):
"""
selects the features given as string
e.g
passing 'hello' and 'world' will result in imports of
'hello' and 'world'. Then, if possible 'hello.feature'
and 'world.feature' are imported and select is called
in each feature module.
"""
for feature_name in features:
feature_module = importlib.import_module(feature_name)
# if available, import feature.py and select the feature
try:
feature_spec_module = importlib.import_module(
feature_name + '.feature'
)
if not hasattr(feature_spec_module, 'select'):
raise CompositionError(
'Function %s.feature.select not found!\n '
'Feature modules need to specify a function'
' select(composer).' % (
feature_name
)
)
args, varargs, keywords, defaults = inspect.getargspec(
feature_spec_module.select
)
if varargs or keywords or defaults or len(args) != 1:
raise CompositionError(
'invalid signature: %s.feature.select must '
'have the signature select(composer)' % (
feature_name
)
)
# call the feature`s select function
feature_spec_module.select(self)
except ImportError:
# Unfortunately, python makes it really hard
# to distinguish missing modules from modules
# that contain errors.
# Hacks like parsing the exception message will
# not work reliably due to import hooks and such.
# Conclusion: features must contain a feature.py for now
# re-raise
raise | selects the features given as string
e.g
passing 'hello' and 'world' will result in imports of
'hello' and 'world'. Then, if possible 'hello.feature'
and 'world.feature' are imported and select is called
in each feature module. | Below is the instruction that describes the task:
### Input:
selects the features given as string
e.g
passing 'hello' and 'world' will result in imports of
'hello' and 'world'. Then, if possible 'hello.feature'
and 'world.feature' are imported and select is called
in each feature module.
### Response:
def select(self, *features):
    """
    Selects the features given as strings.

    E.g. passing 'hello' and 'world' will result in imports of 'hello' and
    'world'. Then 'hello.feature' and 'world.feature' are imported and
    ``select(composer)`` is called in each feature module.

    :raises CompositionError: if a feature module lacks a ``select``
        function or its signature is not ``select(composer)``.
    :raises ImportError: if the feature package or its ``.feature`` module
        cannot be imported.
    """
    for feature_name in features:
        # Import the feature package itself (for its side effects); the
        # module object is not otherwise needed here.
        importlib.import_module(feature_name)

        # Unfortunately, python makes it really hard to distinguish missing
        # modules from modules that contain errors, so features must contain
        # a feature.py module; its ImportError propagates unchanged.
        feature_spec_module = importlib.import_module(
            feature_name + '.feature'
        )

        if not hasattr(feature_spec_module, 'select'):
            raise CompositionError(
                'Function %s.feature.select not found!\n '
                'Feature modules need to specify a function'
                ' select(composer).' % (
                    feature_name
                )
            )

        # inspect.getargspec() was removed in Python 3.11;
        # getfullargspec() is the drop-in replacement.
        spec = inspect.getfullargspec(feature_spec_module.select)
        if (spec.varargs or spec.varkw or spec.defaults
                or spec.kwonlyargs or len(spec.args) != 1):
            raise CompositionError(
                'invalid signature: %s.feature.select must '
                'have the signature select(composer)' % (
                    feature_name
                )
            )

        # call the feature`s select function
        feature_spec_module.select(self)
def list_pkgs(versions_as_list=False, root=None, includes=None, **kwargs):
'''
List the packages currently installed as a dict. By default, the dict
contains versions as a comma separated string::
{'<package_name>': '<version>[,<version>...]'}
versions_as_list:
If set to true, the versions are provided as a list
{'<package_name>': ['<version>', '<version>']}
root:
operate on a different root directory.
includes:
List of types of packages to include (package, patch, pattern, product)
By default packages are always included
attr:
If a list of package attributes is specified, returned value will
contain them in addition to version, eg.::
{'<package_name>': [{'version' : 'version', 'arch' : 'arch'}]}
Valid attributes are: ``epoch``, ``version``, ``release``, ``arch``,
``install_date``, ``install_date_time_t``.
If ``all`` is specified, all valid attributes will be returned.
.. versionadded:: 2018.3.0
removed:
not supported
purge_desired:
not supported
CLI Example:
.. code-block:: bash
salt '*' pkg.list_pkgs
salt '*' pkg.list_pkgs attr=version,arch
salt '*' pkg.list_pkgs attr='["version", "arch"]'
'''
versions_as_list = salt.utils.data.is_true(versions_as_list)
# not yet implemented or not applicable
if any([salt.utils.data.is_true(kwargs.get(x))
for x in ('removed', 'purge_desired')]):
return {}
attr = kwargs.get('attr')
if attr is not None:
attr = salt.utils.args.split_input(attr)
includes = includes if includes else []
contextkey = 'pkg.list_pkgs'
# TODO(aplanas): this cached value depends on the parameters
if contextkey not in __context__:
ret = {}
cmd = ['rpm']
if root:
cmd.extend(['--root', root])
cmd.extend(['-qa', '--queryformat',
salt.utils.pkg.rpm.QUERYFORMAT.replace('%{REPOID}', '(none)') + '\n'])
output = __salt__['cmd.run'](cmd,
python_shell=False,
output_loglevel='trace')
for line in output.splitlines():
pkginfo = salt.utils.pkg.rpm.parse_pkginfo(
line,
osarch=__grains__['osarch']
)
if pkginfo:
# see rpm version string rules available at https://goo.gl/UGKPNd
pkgver = pkginfo.version
epoch = ''
release = ''
if ':' in pkgver:
epoch, pkgver = pkgver.split(":", 1)
if '-' in pkgver:
pkgver, release = pkgver.split("-", 1)
all_attr = {
'epoch': epoch,
'version': pkgver,
'release': release,
'arch': pkginfo.arch,
'install_date': pkginfo.install_date,
'install_date_time_t': pkginfo.install_date_time_t
}
__salt__['pkg_resource.add_pkg'](ret, pkginfo.name, all_attr)
_ret = {}
for pkgname in ret:
# Filter out GPG public keys packages
if pkgname.startswith('gpg-pubkey'):
continue
_ret[pkgname] = sorted(ret[pkgname], key=lambda d: d['version'])
for include in includes:
if include in ('pattern', 'patch'):
if include == 'pattern':
pkgs = list_installed_patterns(root=root)
elif include == 'patch':
pkgs = list_installed_patches(root=root)
else:
pkgs = []
for pkg in pkgs:
pkg_extended_name = '{}:{}'.format(include, pkg)
info = info_available(pkg_extended_name,
refresh=False,
root=root)
_ret[pkg_extended_name] = [{
'epoch': None,
'version': info[pkg]['version'],
'release': None,
'arch': info[pkg]['arch'],
'install_date': None,
'install_date_time_t': None,
}]
__context__[contextkey] = _ret
return __salt__['pkg_resource.format_pkg_list'](
__context__[contextkey],
versions_as_list,
attr) | List the packages currently installed as a dict. By default, the dict
contains versions as a comma separated string::
{'<package_name>': '<version>[,<version>...]'}
versions_as_list:
If set to true, the versions are provided as a list
{'<package_name>': ['<version>', '<version>']}
root:
operate on a different root directory.
includes:
List of types of packages to include (package, patch, pattern, product)
By default packages are always included
attr:
If a list of package attributes is specified, returned value will
contain them in addition to version, eg.::
{'<package_name>': [{'version' : 'version', 'arch' : 'arch'}]}
Valid attributes are: ``epoch``, ``version``, ``release``, ``arch``,
``install_date``, ``install_date_time_t``.
If ``all`` is specified, all valid attributes will be returned.
.. versionadded:: 2018.3.0
removed:
not supported
purge_desired:
not supported
CLI Example:
.. code-block:: bash
salt '*' pkg.list_pkgs
salt '*' pkg.list_pkgs attr=version,arch
salt '*' pkg.list_pkgs attr='["version", "arch"]' | Below is the instruction that describes the task:
### Input:
List the packages currently installed as a dict. By default, the dict
contains versions as a comma separated string::
{'<package_name>': '<version>[,<version>...]'}
versions_as_list:
If set to true, the versions are provided as a list
{'<package_name>': ['<version>', '<version>']}
root:
operate on a different root directory.
includes:
List of types of packages to include (package, patch, pattern, product)
By default packages are always included
attr:
If a list of package attributes is specified, returned value will
contain them in addition to version, eg.::
{'<package_name>': [{'version' : 'version', 'arch' : 'arch'}]}
Valid attributes are: ``epoch``, ``version``, ``release``, ``arch``,
``install_date``, ``install_date_time_t``.
If ``all`` is specified, all valid attributes will be returned.
.. versionadded:: 2018.3.0
removed:
not supported
purge_desired:
not supported
CLI Example:
.. code-block:: bash
salt '*' pkg.list_pkgs
salt '*' pkg.list_pkgs attr=version,arch
salt '*' pkg.list_pkgs attr='["version", "arch"]'
### Response:
def list_pkgs(versions_as_list=False, root=None, includes=None, **kwargs):
    '''
    List the packages currently installed as a dict. By default, the dict
    contains versions as a comma separated string::

        {'<package_name>': '<version>[,<version>...]'}

    versions_as_list:
        If set to true, the versions are provided as a list

        {'<package_name>': ['<version>', '<version>']}

    root:
        operate on a different root directory.

    includes:
        List of types of packages to include (package, patch, pattern, product)
        By default packages are always included

    attr:
        If a list of package attributes is specified, returned value will
        contain them in addition to version, eg.::

            {'<package_name>': [{'version' : 'version', 'arch' : 'arch'}]}

        Valid attributes are: ``epoch``, ``version``, ``release``, ``arch``,
        ``install_date``, ``install_date_time_t``.

        If ``all`` is specified, all valid attributes will be returned.

        .. versionadded:: 2018.3.0

    removed:
        not supported

    purge_desired:
        not supported

    CLI Example:

    .. code-block:: bash

        salt '*' pkg.list_pkgs
        salt '*' pkg.list_pkgs attr=version,arch
        salt '*' pkg.list_pkgs attr='["version", "arch"]'
    '''
    versions_as_list = salt.utils.data.is_true(versions_as_list)
    # not yet implemented or not applicable
    if any([salt.utils.data.is_true(kwargs.get(x))
            for x in ('removed', 'purge_desired')]):
        return {}

    attr = kwargs.get('attr')
    if attr is not None:
        attr = salt.utils.args.split_input(attr)

    includes = includes if includes else []

    contextkey = 'pkg.list_pkgs'

    # TODO(aplanas): this cached value depends on the parameters
    if contextkey not in __context__:
        ret = {}
        cmd = ['rpm']
        if root:
            cmd.extend(['--root', root])
        # Query every installed package; '%{REPOID}' has no rpm equivalent,
        # so it is replaced with the literal '(none)' in the query format.
        cmd.extend(['-qa', '--queryformat',
                    salt.utils.pkg.rpm.QUERYFORMAT.replace('%{REPOID}', '(none)') + '\n'])
        output = __salt__['cmd.run'](cmd,
                                     python_shell=False,
                                     output_loglevel='trace')
        for line in output.splitlines():
            pkginfo = salt.utils.pkg.rpm.parse_pkginfo(
                line,
                osarch=__grains__['osarch']
            )
            if pkginfo:
                # see rpm version string rules available at https://goo.gl/UGKPNd
                # Split '<epoch>:<version>-<release>' into its components.
                pkgver = pkginfo.version
                epoch = ''
                release = ''
                if ':' in pkgver:
                    epoch, pkgver = pkgver.split(":", 1)
                if '-' in pkgver:
                    pkgver, release = pkgver.split("-", 1)
                all_attr = {
                    'epoch': epoch,
                    'version': pkgver,
                    'release': release,
                    'arch': pkginfo.arch,
                    'install_date': pkginfo.install_date,
                    'install_date_time_t': pkginfo.install_date_time_t
                }
                __salt__['pkg_resource.add_pkg'](ret, pkginfo.name, all_attr)

        _ret = {}
        for pkgname in ret:
            # Filter out GPG public keys packages
            if pkgname.startswith('gpg-pubkey'):
                continue
            _ret[pkgname] = sorted(ret[pkgname], key=lambda d: d['version'])

        # Optionally fold installed patterns/patches into the result under
        # '<type>:<name>' keys so they can be addressed like packages.
        for include in includes:
            if include in ('pattern', 'patch'):
                if include == 'pattern':
                    pkgs = list_installed_patterns(root=root)
                elif include == 'patch':
                    pkgs = list_installed_patches(root=root)
                else:
                    pkgs = []
                for pkg in pkgs:
                    pkg_extended_name = '{}:{}'.format(include, pkg)
                    info = info_available(pkg_extended_name,
                                          refresh=False,
                                          root=root)
                    # Patterns/patches carry no rpm metadata beyond
                    # version/arch, so the remaining fields are None.
                    _ret[pkg_extended_name] = [{
                        'epoch': None,
                        'version': info[pkg]['version'],
                        'release': None,
                        'arch': info[pkg]['arch'],
                        'install_date': None,
                        'install_date_time_t': None,
                    }]

        __context__[contextkey] = _ret

    return __salt__['pkg_resource.format_pkg_list'](
        __context__[contextkey],
        versions_as_list,
        attr)
def SetProtocol(self, protocol):
"""Sets the protocol that will be used to query Viper.
Args:
protocol (str): protocol to use to query Viper. Either 'http' or 'https'.
Raises:
ValueError: If an invalid protocol is selected.
"""
protocol = protocol.lower().strip()
if protocol not in ['http', 'https']:
raise ValueError('Invalid protocol specified for Viper lookup')
self._analyzer.SetProtocol(protocol) | Sets the protocol that will be used to query Viper.
Args:
protocol (str): protocol to use to query Viper. Either 'http' or 'https'.
Raises:
ValueError: If an invalid protocol is selected. | Below is the instruction that describes the task:
### Input:
Sets the protocol that will be used to query Viper.
Args:
protocol (str): protocol to use to query Viper. Either 'http' or 'https'.
Raises:
ValueError: If an invalid protocol is selected.
### Response:
def SetProtocol(self, protocol):
    """Sets the protocol that will be used to query Viper.

    Args:
      protocol (str): protocol to use to query Viper. Either 'http' or 'https'.

    Raises:
      ValueError: If an invalid protocol is selected.
    """
    # Normalize before validating so values like ' HTTPS ' are accepted.
    normalized = protocol.strip().lower()
    if normalized in ('http', 'https'):
        self._analyzer.SetProtocol(normalized)
    else:
        raise ValueError('Invalid protocol specified for Viper lookup')
def merge_groundings(stmts_in):
    """Gather and merge original grounding information from evidences.

    Each Statement's evidences are traversed to find original grounding
    information. These groundings are then merged into an overall consensus
    grounding dict with as much detail as possible.

    The current implementation is only applicable to Statements whose
    concept/agent roles are fixed. Complexes, Associations and Conversions
    cannot be handled correctly.

    Parameters
    ----------
    stmts_in : list[indra.statements.Statement]
        A list of INDRA Statements whose groundings should be merged. These
        Statements are meant to have been preassembled and potentially have
        multiple pieces of evidence.

    Returns
    -------
    stmts_out : list[indra.statements.Statement]
        The list of Statements now with groundings merged at the Statement
        level.
    """
    def surface_grounding(stmt):
        # Find the "best" grounding for a given concept and its evidences
        # and surface that
        for idx, concept in enumerate(stmt.agent_list()):
            if concept is None:
                continue
            # Pool every raw grounding recorded per-evidence for this agent
            # position, keyed by namespace.
            aggregate_groundings = {}
            for ev in stmt.evidence:
                if 'agents' in ev.annotations:
                    groundings = ev.annotations['agents']['raw_grounding'][idx]
                    for ns, value in groundings.items():
                        if ns not in aggregate_groundings:
                            aggregate_groundings[ns] = []
                        if isinstance(value, list):
                            aggregate_groundings[ns] += value
                        else:
                            aggregate_groundings[ns].append(value)
            best_groundings = get_best_groundings(aggregate_groundings)
            # Overwrite the concept's grounding with the merged consensus.
            concept.db_refs = best_groundings

    def get_best_groundings(aggregate_groundings):
        # Reduce the pooled groundings to one consensus entry (or a
        # score-sorted list of entries) per namespace.
        best_groundings = {}
        for ns, values in aggregate_groundings.items():
            # There are 3 possibilities here
            # 1. All the entries in the list are scored in which case we
            # get unique entries and sort them by score
            if all([isinstance(v, (tuple, list)) for v in values]):
                best_groundings[ns] = []
                for unique_value in {v[0] for v in values}:
                    scores = [v[1] for v in values if v[0] == unique_value]
                    best_groundings[ns].append((unique_value, max(scores)))
                best_groundings[ns] = \
                    sorted(best_groundings[ns], key=lambda x: x[1],
                           reverse=True)
            # 2. All the entries in the list are unscored in which case we
            # get the highest frequency entry
            elif all([not isinstance(v, (tuple, list)) for v in values]):
                best_groundings[ns] = max(set(values), key=values.count)
            # 3. There is a mixture, which can happen when some entries were
            # mapped with scores and others had no scores to begin with.
            # In this case, we again pick the highest frequency non-scored
            # entry assuming that the unmapped version is more reliable.
            else:
                unscored_vals = [v for v in values
                                 if not isinstance(v, (tuple, list))]
                best_groundings[ns] = max(set(unscored_vals),
                                          key=unscored_vals.count)
        return best_groundings

    stmts_out = []
    for stmt in stmts_in:
        # Complex and Conversion Statements have variable agent roles, so
        # positional merging does not apply and they are passed through.
        if not isinstance(stmt, (Complex, Conversion)):
            surface_grounding(stmt)
        stmts_out.append(stmt)
    return stmts_out | Gather and merge original grounding information from evidences.
Each Statement's evidences are traversed to find original grounding
information. These groundings are then merged into an overall consensus
grounding dict with as much detail as possible.
The current implementation is only applicable to Statements whose
concept/agent roles are fixed. Complexes, Associations and Conversions
cannot be handled correctly.
Parameters
----------
stmts_in : list[indra.statements.Statement]
A list of INDRA Statements whose groundings should be merged. These
Statements are meant to have been preassembled and potentially have
multiple pieces of evidence.
Returns
-------
stmts_out : list[indra.statements.Statement]
The list of Statements now with groundings merged at the Statement
level. | Below is the instruction that describes the task:
### Input:
Gather and merge original grounding information from evidences.
Each Statement's evidences are traversed to find original grounding
information. These groundings are then merged into an overall consensus
grounding dict with as much detail as possible.
The current implementation is only applicable to Statements whose
concept/agent roles are fixed. Complexes, Associations and Conversions
cannot be handled correctly.
Parameters
----------
stmts_in : list[indra.statements.Statement]
A list of INDRA Statements whose groundings should be merged. These
Statements are meant to have been preassembled and potentially have
multiple pieces of evidence.
Returns
-------
stmts_out : list[indra.statements.Statement]
The list of Statements now with groundings merged at the Statement
level.
### Response:
def merge_groundings(stmts_in):
"""Gather and merge original grounding information from evidences.
Each Statement's evidences are traversed to find original grounding
information. These groundings are then merged into an overall consensus
grounding dict with as much detail as possible.
The current implementation is only applicable to Statements whose
concept/agent roles are fixed. Complexes, Associations and Conversions
cannot be handled correctly.
Parameters
----------
stmts_in : list[indra.statements.Statement]
A list of INDRA Statements whose groundings should be merged. These
Statements are meant to have been preassembled and potentially have
multiple pieces of evidence.
Returns
-------
stmts_out : list[indra.statements.Statement]
The list of Statements now with groundings merged at the Statement
level.
"""
def surface_grounding(stmt):
# Find the "best" grounding for a given concept and its evidences
# and surface that
for idx, concept in enumerate(stmt.agent_list()):
if concept is None:
continue
aggregate_groundings = {}
for ev in stmt.evidence:
if 'agents' in ev.annotations:
groundings = ev.annotations['agents']['raw_grounding'][idx]
for ns, value in groundings.items():
if ns not in aggregate_groundings:
aggregate_groundings[ns] = []
if isinstance(value, list):
aggregate_groundings[ns] += value
else:
aggregate_groundings[ns].append(value)
best_groundings = get_best_groundings(aggregate_groundings)
concept.db_refs = best_groundings
def get_best_groundings(aggregate_groundings):
best_groundings = {}
for ns, values in aggregate_groundings.items():
# There are 3 possibilities here
# 1. All the entries in the list are scored in which case we
# get unique entries and sort them by score
if all([isinstance(v, (tuple, list)) for v in values]):
best_groundings[ns] = []
for unique_value in {v[0] for v in values}:
scores = [v[1] for v in values if v[0] == unique_value]
best_groundings[ns].append((unique_value, max(scores)))
best_groundings[ns] = \
sorted(best_groundings[ns], key=lambda x: x[1],
reverse=True)
# 2. All the entries in the list are unscored in which case we
# get the highest frequency entry
elif all([not isinstance(v, (tuple, list)) for v in values]):
best_groundings[ns] = max(set(values), key=values.count)
# 3. There is a mixture, which can happen when some entries were
# mapped with scores and others had no scores to begin with.
# In this case, we again pick the highest frequency non-scored
# entry assuming that the unmapped version is more reliable.
else:
unscored_vals = [v for v in values
if not isinstance(v, (tuple, list))]
best_groundings[ns] = max(set(unscored_vals),
key=unscored_vals.count)
return best_groundings
stmts_out = []
for stmt in stmts_in:
if not isinstance(stmt, (Complex, Conversion)):
surface_grounding(stmt)
stmts_out.append(stmt)
return stmts_out |
def ext_xsect(scatterer, h_pol=True):
"""Extinction cross section for the current setup, with polarization.
Args:
scatterer: a Scatterer instance.
h_pol: If True (default), use horizontal polarization.
If False, use vertical polarization.
Returns:
The extinction cross section.
"""
if scatterer.psd_integrator is not None:
try:
return scatterer.psd_integrator.get_angular_integrated(
scatterer.psd, scatterer.get_geometry(), "ext_xsect")
except AttributeError:
# Fall back to the usual method of computing this from S
pass
old_geom = scatterer.get_geometry()
(thet0, thet, phi0, phi, alpha, beta) = old_geom
try:
scatterer.set_geometry((thet0, thet0, phi0, phi0, alpha, beta))
S = scatterer.get_S()
finally:
scatterer.set_geometry(old_geom)
if h_pol:
return 2 * scatterer.wavelength * S[1,1].imag
else:
return 2 * scatterer.wavelength * S[0,0].imag | Extinction cross section for the current setup, with polarization.
Args:
scatterer: a Scatterer instance.
h_pol: If True (default), use horizontal polarization.
If False, use vertical polarization.
Returns:
The extinction cross section. | Below is the instruction that describes the task:
### Input:
Extinction cross section for the current setup, with polarization.
Args:
scatterer: a Scatterer instance.
h_pol: If True (default), use horizontal polarization.
If False, use vertical polarization.
Returns:
The extinction cross section.
### Response:
def ext_xsect(scatterer, h_pol=True):
"""Extinction cross section for the current setup, with polarization.
Args:
scatterer: a Scatterer instance.
h_pol: If True (default), use horizontal polarization.
If False, use vertical polarization.
Returns:
The extinction cross section.
"""
if scatterer.psd_integrator is not None:
try:
return scatterer.psd_integrator.get_angular_integrated(
scatterer.psd, scatterer.get_geometry(), "ext_xsect")
except AttributeError:
# Fall back to the usual method of computing this from S
pass
old_geom = scatterer.get_geometry()
(thet0, thet, phi0, phi, alpha, beta) = old_geom
try:
scatterer.set_geometry((thet0, thet0, phi0, phi0, alpha, beta))
S = scatterer.get_S()
finally:
scatterer.set_geometry(old_geom)
if h_pol:
return 2 * scatterer.wavelength * S[1,1].imag
else:
return 2 * scatterer.wavelength * S[0,0].imag |
def FileEntryExistsByPathSpec(self, path_spec):
"""Determines if a file entry for a path specification exists.
Args:
path_spec (PathSpec): path specification of the file entry.
Returns:
bool: True if the file entry exists.
"""
location = getattr(path_spec, 'location', None)
if (location is None or
not location.startswith(self.LOCATION_ROOT)):
return False
if len(location) == 1:
return True
try:
self._zip_file.getinfo(location[1:])
return True
except KeyError:
pass
# Check if location could be a virtual directory.
for name in iter(self._zip_file.namelist()):
# The ZIP info name does not have the leading path separator as
# the location string does.
if name.startswith(location[1:]):
return True
return False | Determines if a file entry for a path specification exists.
Args:
path_spec (PathSpec): path specification of the file entry.
Returns:
bool: True if the file entry exists. | Below is the instruction that describes the task:
### Input:
Determines if a file entry for a path specification exists.
Args:
path_spec (PathSpec): path specification of the file entry.
Returns:
bool: True if the file entry exists.
### Response:
def FileEntryExistsByPathSpec(self, path_spec):
"""Determines if a file entry for a path specification exists.
Args:
path_spec (PathSpec): path specification of the file entry.
Returns:
bool: True if the file entry exists.
"""
location = getattr(path_spec, 'location', None)
if (location is None or
not location.startswith(self.LOCATION_ROOT)):
return False
if len(location) == 1:
return True
try:
self._zip_file.getinfo(location[1:])
return True
except KeyError:
pass
# Check if location could be a virtual directory.
for name in iter(self._zip_file.namelist()):
# The ZIP info name does not have the leading path separator as
# the location string does.
if name.startswith(location[1:]):
return True
return False |
def describe_alias(FunctionName, Name, region=None, key=None,
keyid=None, profile=None):
'''
Given a function name and alias name describe the properties of the alias.
Returns a dictionary of interesting properties.
CLI Example:
.. code-block:: bash
salt myminion boto_lambda.describe_alias myalias
'''
try:
alias = _find_alias(FunctionName, Name,
region=region, key=key, keyid=keyid, profile=profile)
if alias:
keys = ('AliasArn', 'Name', 'FunctionVersion', 'Description')
return {'alias': dict([(k, alias.get(k)) for k in keys])}
else:
return {'alias': None}
except ClientError as e:
return {'error': __utils__['boto3.get_error'](e)} | Given a function name and alias name describe the properties of the alias.
Returns a dictionary of interesting properties.
CLI Example:
.. code-block:: bash
salt myminion boto_lambda.describe_alias myalias | Below is the instruction that describes the task:
### Input:
Given a function name and alias name describe the properties of the alias.
Returns a dictionary of interesting properties.
CLI Example:
.. code-block:: bash
salt myminion boto_lambda.describe_alias myalias
### Response:
def describe_alias(FunctionName, Name, region=None, key=None,
keyid=None, profile=None):
'''
Given a function name and alias name describe the properties of the alias.
Returns a dictionary of interesting properties.
CLI Example:
.. code-block:: bash
salt myminion boto_lambda.describe_alias myalias
'''
try:
alias = _find_alias(FunctionName, Name,
region=region, key=key, keyid=keyid, profile=profile)
if alias:
keys = ('AliasArn', 'Name', 'FunctionVersion', 'Description')
return {'alias': dict([(k, alias.get(k)) for k in keys])}
else:
return {'alias': None}
except ClientError as e:
return {'error': __utils__['boto3.get_error'](e)} |
def _get_seo_content_types(seo_models):
"""Returns a list of content types from the models defined in settings."""
try:
return [ContentType.objects.get_for_model(m).id for m in seo_models]
except Exception: # previously caught DatabaseError
# Return an empty list if this is called too early
return [] | Returns a list of content types from the models defined in settings. | Below is the instruction that describes the task:
### Input:
Returns a list of content types from the models defined in settings.
### Response:
def _get_seo_content_types(seo_models):
"""Returns a list of content types from the models defined in settings."""
try:
return [ContentType.objects.get_for_model(m).id for m in seo_models]
except Exception: # previously caught DatabaseError
# Return an empty list if this is called too early
return [] |
def set_poll_func(self, func, func_err_handler=None):
'''Can be used to integrate pulse client into existing eventloop.
Function will be passed a list of pollfd structs and timeout value (seconds, float),
which it is responsible to use and modify (set poll flags) accordingly,
returning int value >= 0 with number of fds that had any new events within timeout.
func_err_handler defaults to traceback.print_exception(),
and will be called on any exceptions from callback (to e.g. log these),
returning poll error code (-1) to libpulse after that.'''
if not func_err_handler: func_err_handler = traceback.print_exception
self._pa_poll_cb = c.PA_POLL_FUNC_T(ft.partial(self._pulse_poll_cb, func, func_err_handler))
c.pa.mainloop_set_poll_func(self._loop, self._pa_poll_cb, None) | Can be used to integrate pulse client into existing eventloop.
Function will be passed a list of pollfd structs and timeout value (seconds, float),
which it is responsible to use and modify (set poll flags) accordingly,
returning int value >= 0 with number of fds that had any new events within timeout.
func_err_handler defaults to traceback.print_exception(),
and will be called on any exceptions from callback (to e.g. log these),
returning poll error code (-1) to libpulse after that. | Below is the instruction that describes the task:
### Input:
Can be used to integrate pulse client into existing eventloop.
Function will be passed a list of pollfd structs and timeout value (seconds, float),
which it is responsible to use and modify (set poll flags) accordingly,
returning int value >= 0 with number of fds that had any new events within timeout.
func_err_handler defaults to traceback.print_exception(),
and will be called on any exceptions from callback (to e.g. log these),
returning poll error code (-1) to libpulse after that.
### Response:
def set_poll_func(self, func, func_err_handler=None):
'''Can be used to integrate pulse client into existing eventloop.
Function will be passed a list of pollfd structs and timeout value (seconds, float),
which it is responsible to use and modify (set poll flags) accordingly,
returning int value >= 0 with number of fds that had any new events within timeout.
func_err_handler defaults to traceback.print_exception(),
and will be called on any exceptions from callback (to e.g. log these),
returning poll error code (-1) to libpulse after that.'''
if not func_err_handler: func_err_handler = traceback.print_exception
self._pa_poll_cb = c.PA_POLL_FUNC_T(ft.partial(self._pulse_poll_cb, func, func_err_handler))
c.pa.mainloop_set_poll_func(self._loop, self._pa_poll_cb, None) |
def create_networking_resource_from_context(shell_name, supported_os, context):
"""
Creates an instance of Networking Resource by given context
:param shell_name: Shell Name
:type shell_name: str
:param supported_os: list of supported OS
:type supported_os: list
:param context: cloudshell.shell.core.driver_context.ResourceCommandContext
:type context: cloudshell.shell.core.driver_context.ResourceCommandContext
:return:
:rtype GenericNetworkingResource
"""
result = GenericNetworkingResource(shell_name=shell_name, name=context.resource.name, supported_os=supported_os)
result.address = context.resource.address
result.family = context.resource.family
result.fullname = context.resource.fullname
result.attributes = dict(context.resource.attributes)
return result | Creates an instance of Networking Resource by given context
:param shell_name: Shell Name
:type shell_name: str
:param supported_os: list of supported OS
:type supported_os: list
:param context: cloudshell.shell.core.driver_context.ResourceCommandContext
:type context: cloudshell.shell.core.driver_context.ResourceCommandContext
:return:
:rtype GenericNetworkingResource | Below is the instruction that describes the task:
### Input:
Creates an instance of Networking Resource by given context
:param shell_name: Shell Name
:type shell_name: str
:param supported_os: list of supported OS
:type supported_os: list
:param context: cloudshell.shell.core.driver_context.ResourceCommandContext
:type context: cloudshell.shell.core.driver_context.ResourceCommandContext
:return:
:rtype GenericNetworkingResource
### Response:
def create_networking_resource_from_context(shell_name, supported_os, context):
"""
Creates an instance of Networking Resource by given context
:param shell_name: Shell Name
:type shell_name: str
:param supported_os: list of supported OS
:type supported_os: list
:param context: cloudshell.shell.core.driver_context.ResourceCommandContext
:type context: cloudshell.shell.core.driver_context.ResourceCommandContext
:return:
:rtype GenericNetworkingResource
"""
result = GenericNetworkingResource(shell_name=shell_name, name=context.resource.name, supported_os=supported_os)
result.address = context.resource.address
result.family = context.resource.family
result.fullname = context.resource.fullname
result.attributes = dict(context.resource.attributes)
return result |
def pre_dissect(self, s):
"""
Check that the payload is long enough to build a NTP packet.
"""
length = len(s)
if length < _NTP_PACKET_MIN_SIZE:
err = " ({}".format(length) + " is < _NTP_PACKET_MIN_SIZE "
err += "({})).".format(_NTP_PACKET_MIN_SIZE)
raise _NTPInvalidDataException(err)
return s | Check that the payload is long enough to build a NTP packet. | Below is the instruction that describes the task:
### Input:
Check that the payload is long enough to build a NTP packet.
### Response:
def pre_dissect(self, s):
"""
Check that the payload is long enough to build a NTP packet.
"""
length = len(s)
if length < _NTP_PACKET_MIN_SIZE:
err = " ({}".format(length) + " is < _NTP_PACKET_MIN_SIZE "
err += "({})).".format(_NTP_PACKET_MIN_SIZE)
raise _NTPInvalidDataException(err)
return s |
def read(self, input_buffer, kmip_version=enums.KMIPVersion.KMIP_1_0):
"""
Read the data encoding the QueryResponsePayload object and decode it
into its constituent parts.
Args:
input_buffer (Stream): A data stream containing encoded object
data, supporting a read method; usually a BytearrayStream
object.
kmip_version (KMIPVersion): An enumeration defining the KMIP
version with which the object will be decoded. Optional,
defaults to KMIP 1.0.
"""
super(QueryResponsePayload, self).read(
input_buffer,
kmip_version=kmip_version
)
local_buffer = utils.BytearrayStream(input_buffer.read(self.length))
operations = []
while(self.is_tag_next(enums.Tags.OPERATION, local_buffer)):
operation = primitives.Enumeration(
enums.Operation,
tag=enums.Tags.OPERATION
)
operation.read(local_buffer, kmip_version=kmip_version)
operations.append(operation)
self._operations = operations
object_types = []
while(self.is_tag_next(enums.Tags.OBJECT_TYPE, local_buffer)):
object_type = primitives.Enumeration(
enums.ObjectType,
tag=enums.Tags.OBJECT_TYPE
)
object_type.read(local_buffer, kmip_version=kmip_version)
object_types.append(object_type)
self._object_types = object_types
if self.is_tag_next(enums.Tags.VENDOR_IDENTIFICATION, local_buffer):
vendor_identification = primitives.TextString(
tag=enums.Tags.VENDOR_IDENTIFICATION
)
vendor_identification.read(
local_buffer,
kmip_version=kmip_version
)
self._vendor_identification = vendor_identification
if self.is_tag_next(enums.Tags.SERVER_INFORMATION, local_buffer):
server_information = misc.ServerInformation()
server_information.read(
local_buffer,
kmip_version=kmip_version
)
self._server_information = server_information
application_namespaces = []
while(self.is_tag_next(
enums.Tags.APPLICATION_NAMESPACE,
local_buffer
)
):
application_namespace = primitives.TextString(
tag=enums.Tags.APPLICATION_NAMESPACE
)
application_namespace.read(local_buffer, kmip_version=kmip_version)
application_namespaces.append(application_namespace)
self._application_namespaces = application_namespaces
if kmip_version >= enums.KMIPVersion.KMIP_1_1:
extensions_information = []
while(self.is_tag_next(
enums.Tags.EXTENSION_INFORMATION,
local_buffer
)
):
extension_information = objects.ExtensionInformation()
extension_information.read(
local_buffer,
kmip_version=kmip_version
)
extensions_information.append(extension_information)
self._extension_information = extensions_information
if kmip_version >= enums.KMIPVersion.KMIP_1_2:
attestation_types = []
while(self.is_tag_next(enums.Tags.ATTESTATION_TYPE, local_buffer)):
attestation_type = primitives.Enumeration(
enums.AttestationType,
tag=enums.Tags.ATTESTATION_TYPE
)
attestation_type.read(local_buffer, kmip_version=kmip_version)
attestation_types.append(attestation_type)
self._attestation_types = attestation_types
if kmip_version >= enums.KMIPVersion.KMIP_1_3:
rngs_parameters = []
while(self.is_tag_next(enums.Tags.RNG_PARAMETERS, local_buffer)):
rng_parameters = objects.RNGParameters()
rng_parameters.read(local_buffer, kmip_version=kmip_version)
rngs_parameters.append(rng_parameters)
self._rng_parameters = rngs_parameters
profiles_information = []
while(self.is_tag_next(
enums.Tags.PROFILE_INFORMATION,
local_buffer
)
):
profile_information = objects.ProfileInformation()
profile_information.read(
local_buffer,
kmip_version=kmip_version
)
profiles_information.append(profile_information)
self._profile_information = profiles_information
validations_information = []
while(self.is_tag_next(
enums.Tags.VALIDATION_INFORMATION,
local_buffer
)
):
validation_information = objects.ValidationInformation()
validation_information.read(
local_buffer,
kmip_version=kmip_version
)
validations_information.append(validation_information)
self._validation_information = validations_information
capabilities_information = []
while(self.is_tag_next(
enums.Tags.CAPABILITY_INFORMATION,
local_buffer
)
):
capability_information = objects.CapabilityInformation()
capability_information.read(
local_buffer,
kmip_version=kmip_version
)
capabilities_information.append(capability_information)
self._capability_information = capabilities_information
client_registration_methods = []
while(self.is_tag_next(
enums.Tags.CLIENT_REGISTRATION_METHOD,
local_buffer
)
):
client_registration_method = primitives.Enumeration(
enums.ClientRegistrationMethod,
tag=enums.Tags.CLIENT_REGISTRATION_METHOD
)
client_registration_method.read(
local_buffer,
kmip_version=kmip_version
)
client_registration_methods.append(client_registration_method)
self._client_registration_methods = client_registration_methods
if kmip_version >= enums.KMIPVersion.KMIP_2_0:
if self.is_tag_next(enums.Tags.DEFAULTS_INFORMATION, local_buffer):
defaults_information = objects.DefaultsInformation()
defaults_information.read(
local_buffer,
kmip_version=kmip_version
)
self._defaults_information = defaults_information
storage_protection_masks = []
while(self.is_tag_next(
enums.Tags.PROTECTION_STORAGE_MASK,
local_buffer
)
):
storage_protection_mask = primitives.Integer(
tag=enums.Tags.PROTECTION_STORAGE_MASK
)
storage_protection_mask.read(
local_buffer,
kmip_version=kmip_version
)
storage_protection_masks.append(storage_protection_mask)
self._storage_protection_masks = storage_protection_masks
self.is_oversized(local_buffer) | Read the data encoding the QueryResponsePayload object and decode it
into its constituent parts.
Args:
input_buffer (Stream): A data stream containing encoded object
data, supporting a read method; usually a BytearrayStream
object.
kmip_version (KMIPVersion): An enumeration defining the KMIP
version with which the object will be decoded. Optional,
defaults to KMIP 1.0. | Below is the instruction that describes the task:
### Input:
Read the data encoding the QueryResponsePayload object and decode it
into its constituent parts.
Args:
input_buffer (Stream): A data stream containing encoded object
data, supporting a read method; usually a BytearrayStream
object.
kmip_version (KMIPVersion): An enumeration defining the KMIP
version with which the object will be decoded. Optional,
defaults to KMIP 1.0.
### Response:
def read(self, input_buffer, kmip_version=enums.KMIPVersion.KMIP_1_0):
"""
Read the data encoding the QueryResponsePayload object and decode it
into its constituent parts.
Args:
input_buffer (Stream): A data stream containing encoded object
data, supporting a read method; usually a BytearrayStream
object.
kmip_version (KMIPVersion): An enumeration defining the KMIP
version with which the object will be decoded. Optional,
defaults to KMIP 1.0.
"""
super(QueryResponsePayload, self).read(
input_buffer,
kmip_version=kmip_version
)
local_buffer = utils.BytearrayStream(input_buffer.read(self.length))
operations = []
while(self.is_tag_next(enums.Tags.OPERATION, local_buffer)):
operation = primitives.Enumeration(
enums.Operation,
tag=enums.Tags.OPERATION
)
operation.read(local_buffer, kmip_version=kmip_version)
operations.append(operation)
self._operations = operations
object_types = []
while(self.is_tag_next(enums.Tags.OBJECT_TYPE, local_buffer)):
object_type = primitives.Enumeration(
enums.ObjectType,
tag=enums.Tags.OBJECT_TYPE
)
object_type.read(local_buffer, kmip_version=kmip_version)
object_types.append(object_type)
self._object_types = object_types
if self.is_tag_next(enums.Tags.VENDOR_IDENTIFICATION, local_buffer):
vendor_identification = primitives.TextString(
tag=enums.Tags.VENDOR_IDENTIFICATION
)
vendor_identification.read(
local_buffer,
kmip_version=kmip_version
)
self._vendor_identification = vendor_identification
if self.is_tag_next(enums.Tags.SERVER_INFORMATION, local_buffer):
server_information = misc.ServerInformation()
server_information.read(
local_buffer,
kmip_version=kmip_version
)
self._server_information = server_information
application_namespaces = []
while(self.is_tag_next(
enums.Tags.APPLICATION_NAMESPACE,
local_buffer
)
):
application_namespace = primitives.TextString(
tag=enums.Tags.APPLICATION_NAMESPACE
)
application_namespace.read(local_buffer, kmip_version=kmip_version)
application_namespaces.append(application_namespace)
self._application_namespaces = application_namespaces
if kmip_version >= enums.KMIPVersion.KMIP_1_1:
extensions_information = []
while(self.is_tag_next(
enums.Tags.EXTENSION_INFORMATION,
local_buffer
)
):
extension_information = objects.ExtensionInformation()
extension_information.read(
local_buffer,
kmip_version=kmip_version
)
extensions_information.append(extension_information)
self._extension_information = extensions_information
if kmip_version >= enums.KMIPVersion.KMIP_1_2:
attestation_types = []
while(self.is_tag_next(enums.Tags.ATTESTATION_TYPE, local_buffer)):
attestation_type = primitives.Enumeration(
enums.AttestationType,
tag=enums.Tags.ATTESTATION_TYPE
)
attestation_type.read(local_buffer, kmip_version=kmip_version)
attestation_types.append(attestation_type)
self._attestation_types = attestation_types
if kmip_version >= enums.KMIPVersion.KMIP_1_3:
rngs_parameters = []
while(self.is_tag_next(enums.Tags.RNG_PARAMETERS, local_buffer)):
rng_parameters = objects.RNGParameters()
rng_parameters.read(local_buffer, kmip_version=kmip_version)
rngs_parameters.append(rng_parameters)
self._rng_parameters = rngs_parameters
profiles_information = []
while(self.is_tag_next(
enums.Tags.PROFILE_INFORMATION,
local_buffer
)
):
profile_information = objects.ProfileInformation()
profile_information.read(
local_buffer,
kmip_version=kmip_version
)
profiles_information.append(profile_information)
self._profile_information = profiles_information
validations_information = []
while(self.is_tag_next(
enums.Tags.VALIDATION_INFORMATION,
local_buffer
)
):
validation_information = objects.ValidationInformation()
validation_information.read(
local_buffer,
kmip_version=kmip_version
)
validations_information.append(validation_information)
self._validation_information = validations_information
capabilities_information = []
while(self.is_tag_next(
enums.Tags.CAPABILITY_INFORMATION,
local_buffer
)
):
capability_information = objects.CapabilityInformation()
capability_information.read(
local_buffer,
kmip_version=kmip_version
)
capabilities_information.append(capability_information)
self._capability_information = capabilities_information
client_registration_methods = []
while(self.is_tag_next(
enums.Tags.CLIENT_REGISTRATION_METHOD,
local_buffer
)
):
client_registration_method = primitives.Enumeration(
enums.ClientRegistrationMethod,
tag=enums.Tags.CLIENT_REGISTRATION_METHOD
)
client_registration_method.read(
local_buffer,
kmip_version=kmip_version
)
client_registration_methods.append(client_registration_method)
self._client_registration_methods = client_registration_methods
if kmip_version >= enums.KMIPVersion.KMIP_2_0:
if self.is_tag_next(enums.Tags.DEFAULTS_INFORMATION, local_buffer):
defaults_information = objects.DefaultsInformation()
defaults_information.read(
local_buffer,
kmip_version=kmip_version
)
self._defaults_information = defaults_information
storage_protection_masks = []
while(self.is_tag_next(
enums.Tags.PROTECTION_STORAGE_MASK,
local_buffer
)
):
storage_protection_mask = primitives.Integer(
tag=enums.Tags.PROTECTION_STORAGE_MASK
)
storage_protection_mask.read(
local_buffer,
kmip_version=kmip_version
)
storage_protection_masks.append(storage_protection_mask)
self._storage_protection_masks = storage_protection_masks
self.is_oversized(local_buffer) |
async def send(self, sender, **kwargs):
''' send a signal from the sender to all connected receivers '''
if not self.receivers:
return []
responses = []
futures = []
for receiver in self._get_receivers(sender):
method = receiver()
if callable(method):
futures.append(method(sender=sender, **kwargs))
if len(futures) > 0:
responses = await asyncio.gather(*futures)
return responses | send a signal from the sender to all connected receivers | Below is the instruction that describes the task:
### Input:
send a signal from the sender to all connected receivers
### Response:
async def send(self, sender, **kwargs):
''' send a signal from the sender to all connected receivers '''
if not self.receivers:
return []
responses = []
futures = []
for receiver in self._get_receivers(sender):
method = receiver()
if callable(method):
futures.append(method(sender=sender, **kwargs))
if len(futures) > 0:
responses = await asyncio.gather(*futures)
return responses |
def _parse_keys(row, line_num):
""" Perform some sanity checks on they keys
Each key in the row should not be named None cause
(that's an overrun). A key named `type` MUST be
present on the row & have a string value.
:param row: dict
:param line_num: int
"""
link = 'tools.ietf.org/html/rfc4180#section-2'
none_keys = [key for key in row.keys() if key is None]
if none_keys:
fail('You have more fields defined on row number {} '
'than field headers in your CSV data. Please fix '
'your request body.'.format(line_num), link)
elif not row.get('type'):
fail('Row number {} does not have a type value defined. '
'Please fix your request body.'.format(line_num), link) | Perform some sanity checks on they keys
Each key in the row should not be named None cause
(that's an overrun). A key named `type` MUST be
present on the row & have a string value.
:param row: dict
:param line_num: int | Below is the instruction that describes the task:
### Input:
Perform some sanity checks on they keys
Each key in the row should not be named None cause
(that's an overrun). A key named `type` MUST be
present on the row & have a string value.
:param row: dict
:param line_num: int
### Response:
def _parse_keys(row, line_num):
""" Perform some sanity checks on they keys
Each key in the row should not be named None cause
(that's an overrun). A key named `type` MUST be
present on the row & have a string value.
:param row: dict
:param line_num: int
"""
link = 'tools.ietf.org/html/rfc4180#section-2'
none_keys = [key for key in row.keys() if key is None]
if none_keys:
fail('You have more fields defined on row number {} '
'than field headers in your CSV data. Please fix '
'your request body.'.format(line_num), link)
elif not row.get('type'):
fail('Row number {} does not have a type value defined. '
'Please fix your request body.'.format(line_num), link) |
def serve_rpc(self):
"""Launches configured # of workers per loaded plugin."""
if cfg.CONF.QUARK_ASYNC.rpc_workers < 1:
cfg.CONF.set_override('rpc_workers', 1, "QUARK_ASYNC")
try:
rpc = service.RpcWorker(self.plugins)
launcher = common_service.ProcessLauncher(CONF, wait_interval=1.0)
launcher.launch_service(rpc, workers=CONF.QUARK_ASYNC.rpc_workers)
return launcher
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE('Unrecoverable error: please check log for '
'details.')) | Launches configured # of workers per loaded plugin. | Below is the the instruction that describes the task:
### Input:
Launches configured # of workers per loaded plugin.
### Response:
def serve_rpc(self):
"""Launches configured # of workers per loaded plugin."""
if cfg.CONF.QUARK_ASYNC.rpc_workers < 1:
cfg.CONF.set_override('rpc_workers', 1, "QUARK_ASYNC")
try:
rpc = service.RpcWorker(self.plugins)
launcher = common_service.ProcessLauncher(CONF, wait_interval=1.0)
launcher.launch_service(rpc, workers=CONF.QUARK_ASYNC.rpc_workers)
return launcher
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE('Unrecoverable error: please check log for '
'details.')) |
def write_pid_file(self, overwrite=False):
"""Create a .pid file in the pid_dir with my pid.
This must be called after pre_construct, which sets `self.pid_dir`.
This raises :exc:`PIDFileError` if the pid file exists already.
"""
pid_file = os.path.join(self.profile_dir.pid_dir, self.name + u'.pid')
if os.path.isfile(pid_file):
pid = self.get_pid_from_file()
if not overwrite:
raise PIDFileError(
'The pid file [%s] already exists. \nThis could mean that this '
'server is already running with [pid=%s].' % (pid_file, pid)
)
with open(pid_file, 'w') as f:
self.log.info("Creating pid file: %s" % pid_file)
f.write(repr(os.getpid())+'\n') | Create a .pid file in the pid_dir with my pid.
This must be called after pre_construct, which sets `self.pid_dir`.
This raises :exc:`PIDFileError` if the pid file exists already. | Below is the the instruction that describes the task:
### Input:
Create a .pid file in the pid_dir with my pid.
This must be called after pre_construct, which sets `self.pid_dir`.
This raises :exc:`PIDFileError` if the pid file exists already.
### Response:
def write_pid_file(self, overwrite=False):
"""Create a .pid file in the pid_dir with my pid.
This must be called after pre_construct, which sets `self.pid_dir`.
This raises :exc:`PIDFileError` if the pid file exists already.
"""
pid_file = os.path.join(self.profile_dir.pid_dir, self.name + u'.pid')
if os.path.isfile(pid_file):
pid = self.get_pid_from_file()
if not overwrite:
raise PIDFileError(
'The pid file [%s] already exists. \nThis could mean that this '
'server is already running with [pid=%s].' % (pid_file, pid)
)
with open(pid_file, 'w') as f:
self.log.info("Creating pid file: %s" % pid_file)
f.write(repr(os.getpid())+'\n') |
def process_response(self, request, response, resource):
"""
Do response processing
"""
origin = request.get_header('Origin')
if not settings.DEBUG:
if origin in settings.ALLOWED_ORIGINS or not origin:
response.set_header('Access-Control-Allow-Origin', origin)
else:
log.debug("CORS ERROR: %s not allowed, allowed hosts: %s" % (origin,
settings.ALLOWED_ORIGINS))
raise falcon.HTTPForbidden("Denied", "Origin not in ALLOWED_ORIGINS: %s" % origin)
# response.status = falcon.HTTP_403
else:
response.set_header('Access-Control-Allow-Origin', origin or '*')
response.set_header('Access-Control-Allow-Credentials', "true")
response.set_header('Access-Control-Allow-Headers', 'Content-Type')
# This could be overridden in the resource level
response.set_header('Access-Control-Allow-Methods', 'OPTIONS') | Do response processing | Below is the the instruction that describes the task:
### Input:
Do response processing
### Response:
def process_response(self, request, response, resource):
"""
Do response processing
"""
origin = request.get_header('Origin')
if not settings.DEBUG:
if origin in settings.ALLOWED_ORIGINS or not origin:
response.set_header('Access-Control-Allow-Origin', origin)
else:
log.debug("CORS ERROR: %s not allowed, allowed hosts: %s" % (origin,
settings.ALLOWED_ORIGINS))
raise falcon.HTTPForbidden("Denied", "Origin not in ALLOWED_ORIGINS: %s" % origin)
# response.status = falcon.HTTP_403
else:
response.set_header('Access-Control-Allow-Origin', origin or '*')
response.set_header('Access-Control-Allow-Credentials', "true")
response.set_header('Access-Control-Allow-Headers', 'Content-Type')
# This could be overridden in the resource level
response.set_header('Access-Control-Allow-Methods', 'OPTIONS') |
def get_workflow_actions_for(brain_or_object):
"""Returns a list with the actions (transitions) supported by the workflows
the object pass in is bound to. Note it returns all actions, not only those
allowed for the object based on its current state and permissions.
"""
portal_type = api.get_portal_type(brain_or_object)
actions = actions_by_type.get(portal_type, None)
if actions:
return actions
# Retrieve the actions from the workflows this object is bound to
actions = []
wf_tool = api.get_tool("portal_workflow")
for wf_id in get_workflow_ids_for(brain_or_object):
workflow = wf_tool.getWorkflowById(wf_id)
wf_actions = map(lambda action: action[0], workflow.transitions.items())
actions.extend(wf_actions)
actions = list(set(actions))
actions_by_type[portal_type] = actions
return actions | Returns a list with the actions (transitions) supported by the workflows
the object pass in is bound to. Note it returns all actions, not only those
allowed for the object based on its current state and permissions. | Below is the the instruction that describes the task:
### Input:
Returns a list with the actions (transitions) supported by the workflows
the object pass in is bound to. Note it returns all actions, not only those
allowed for the object based on its current state and permissions.
### Response:
def get_workflow_actions_for(brain_or_object):
"""Returns a list with the actions (transitions) supported by the workflows
the object pass in is bound to. Note it returns all actions, not only those
allowed for the object based on its current state and permissions.
"""
portal_type = api.get_portal_type(brain_or_object)
actions = actions_by_type.get(portal_type, None)
if actions:
return actions
# Retrieve the actions from the workflows this object is bound to
actions = []
wf_tool = api.get_tool("portal_workflow")
for wf_id in get_workflow_ids_for(brain_or_object):
workflow = wf_tool.getWorkflowById(wf_id)
wf_actions = map(lambda action: action[0], workflow.transitions.items())
actions.extend(wf_actions)
actions = list(set(actions))
actions_by_type[portal_type] = actions
return actions |
def run(samples, run_parallel, stage):
"""Run structural variation detection.
The stage indicates which level of structural variant calling to run.
- initial, callers that can be used in subsequent structural variation steps (cnvkit -> lumpy)
- standard, regular batch calling
- ensemble, post-calling, combine other callers or prioritize results
"""
to_process, extras, background = _batch_split_by_sv(samples, stage)
processed = run_parallel("detect_sv", ([xs, background, stage]
for xs in to_process.values()))
finalized = (run_parallel("finalize_sv", [([xs[0] for xs in processed], processed[0][0]["config"])])
if len(processed) > 0 else [])
return extras + finalized | Run structural variation detection.
The stage indicates which level of structural variant calling to run.
- initial, callers that can be used in subsequent structural variation steps (cnvkit -> lumpy)
- standard, regular batch calling
- ensemble, post-calling, combine other callers or prioritize results | Below is the the instruction that describes the task:
### Input:
Run structural variation detection.
The stage indicates which level of structural variant calling to run.
- initial, callers that can be used in subsequent structural variation steps (cnvkit -> lumpy)
- standard, regular batch calling
- ensemble, post-calling, combine other callers or prioritize results
### Response:
def run(samples, run_parallel, stage):
"""Run structural variation detection.
The stage indicates which level of structural variant calling to run.
- initial, callers that can be used in subsequent structural variation steps (cnvkit -> lumpy)
- standard, regular batch calling
- ensemble, post-calling, combine other callers or prioritize results
"""
to_process, extras, background = _batch_split_by_sv(samples, stage)
processed = run_parallel("detect_sv", ([xs, background, stage]
for xs in to_process.values()))
finalized = (run_parallel("finalize_sv", [([xs[0] for xs in processed], processed[0][0]["config"])])
if len(processed) > 0 else [])
return extras + finalized |
def count(cls, session: Optional[Session] = None) -> int:
"""Count all actions."""
if session is None:
session = _make_session()
count = session.query(cls).count()
session.close()
return count | Count all actions. | Below is the the instruction that describes the task:
### Input:
Count all actions.
### Response:
def count(cls, session: Optional[Session] = None) -> int:
"""Count all actions."""
if session is None:
session = _make_session()
count = session.query(cls).count()
session.close()
return count |
def restart(name, timeout=10):
'''
Restarts a container
name
Container name or ID
timeout : 10
Timeout in seconds after which the container will be killed (if it has
not yet gracefully shut down)
**RETURN DATA**
A dictionary will be returned, containing the following keys:
- ``status`` - A dictionary showing the prior state of the container as
well as the new state
- ``result`` - A boolean noting whether or not the action was successful
- ``restarted`` - If restart was successful, this key will be present and
will be set to ``True``.
CLI Examples:
.. code-block:: bash
salt myminion docker.restart mycontainer
salt myminion docker.restart mycontainer timeout=20
'''
ret = _change_state(name, 'restart', 'running', timeout=timeout)
if ret['result']:
ret['restarted'] = True
return ret | Restarts a container
name
Container name or ID
timeout : 10
Timeout in seconds after which the container will be killed (if it has
not yet gracefully shut down)
**RETURN DATA**
A dictionary will be returned, containing the following keys:
- ``status`` - A dictionary showing the prior state of the container as
well as the new state
- ``result`` - A boolean noting whether or not the action was successful
- ``restarted`` - If restart was successful, this key will be present and
will be set to ``True``.
CLI Examples:
.. code-block:: bash
salt myminion docker.restart mycontainer
salt myminion docker.restart mycontainer timeout=20 | Below is the the instruction that describes the task:
### Input:
Restarts a container
name
Container name or ID
timeout : 10
Timeout in seconds after which the container will be killed (if it has
not yet gracefully shut down)
**RETURN DATA**
A dictionary will be returned, containing the following keys:
- ``status`` - A dictionary showing the prior state of the container as
well as the new state
- ``result`` - A boolean noting whether or not the action was successful
- ``restarted`` - If restart was successful, this key will be present and
will be set to ``True``.
CLI Examples:
.. code-block:: bash
salt myminion docker.restart mycontainer
salt myminion docker.restart mycontainer timeout=20
### Response:
def restart(name, timeout=10):
'''
Restarts a container
name
Container name or ID
timeout : 10
Timeout in seconds after which the container will be killed (if it has
not yet gracefully shut down)
**RETURN DATA**
A dictionary will be returned, containing the following keys:
- ``status`` - A dictionary showing the prior state of the container as
well as the new state
- ``result`` - A boolean noting whether or not the action was successful
- ``restarted`` - If restart was successful, this key will be present and
will be set to ``True``.
CLI Examples:
.. code-block:: bash
salt myminion docker.restart mycontainer
salt myminion docker.restart mycontainer timeout=20
'''
ret = _change_state(name, 'restart', 'running', timeout=timeout)
if ret['result']:
ret['restarted'] = True
return ret |
def Append(self, component=None, **kwarg):
"""Append a new pathspec component to this pathspec."""
if component is None:
component = self.__class__(**kwarg)
if self.HasField("pathtype"):
self.last.nested_path = component
else:
for k, v in iteritems(kwarg):
setattr(self, k, v)
self.SetRawData(component.GetRawData())
return self | Append a new pathspec component to this pathspec. | Below is the the instruction that describes the task:
### Input:
Append a new pathspec component to this pathspec.
### Response:
def Append(self, component=None, **kwarg):
"""Append a new pathspec component to this pathspec."""
if component is None:
component = self.__class__(**kwarg)
if self.HasField("pathtype"):
self.last.nested_path = component
else:
for k, v in iteritems(kwarg):
setattr(self, k, v)
self.SetRawData(component.GetRawData())
return self |
def changeHS(self):
"""
Change health system interventions
https://github.com/SwissTPH/openmalaria/wiki/GeneratedSchema32Doc#change-health-system
Returns: list of HealthSystems together with timestep when they are applied
"""
health_systems = []
change_hs = self.et.find("changeHS")
if change_hs is None:
return health_systems
for health_system in change_hs.findall("timedDeployment"):
health_systems.append([int(health_system.attrib("time")), HealthSystem(self.et)])
return health_systems | Change health system interventions
https://github.com/SwissTPH/openmalaria/wiki/GeneratedSchema32Doc#change-health-system
Returns: list of HealthSystems together with timestep when they are applied | Below is the the instruction that describes the task:
### Input:
Change health system interventions
https://github.com/SwissTPH/openmalaria/wiki/GeneratedSchema32Doc#change-health-system
Returns: list of HealthSystems together with timestep when they are applied
### Response:
def changeHS(self):
"""
Change health system interventions
https://github.com/SwissTPH/openmalaria/wiki/GeneratedSchema32Doc#change-health-system
Returns: list of HealthSystems together with timestep when they are applied
"""
health_systems = []
change_hs = self.et.find("changeHS")
if change_hs is None:
return health_systems
for health_system in change_hs.findall("timedDeployment"):
health_systems.append([int(health_system.attrib("time")), HealthSystem(self.et)])
return health_systems |
def PUSH(self, params):
"""
PUSH {RPushList}
Push to the stack from a list of registers
List must contain only low registers or LR
"""
# TODO what registers are allowed to PUSH to? Low registers and LR
# TODO PUSH should reverse the list, not POP
RPushList = self.get_one_parameter(r'\s*{(.*)}(.*)', params).split(',')
RPushList = [i.strip() for i in RPushList]
# TODO should we make sure the register exists? probably not
def PUSH_func():
for register in RPushList:
self.register['SP'] -= 4
for i in range(4):
# TODO is this the same as with POP?
self.memory[self.register['SP'] + i] = ((self.register[register] >> (8 * i)) & 0xFF)
return PUSH_func | PUSH {RPushList}
Push to the stack from a list of registers
List must contain only low registers or LR | Below is the the instruction that describes the task:
### Input:
PUSH {RPushList}
Push to the stack from a list of registers
List must contain only low registers or LR
### Response:
def PUSH(self, params):
"""
PUSH {RPushList}
Push to the stack from a list of registers
List must contain only low registers or LR
"""
# TODO what registers are allowed to PUSH to? Low registers and LR
# TODO PUSH should reverse the list, not POP
RPushList = self.get_one_parameter(r'\s*{(.*)}(.*)', params).split(',')
RPushList = [i.strip() for i in RPushList]
# TODO should we make sure the register exists? probably not
def PUSH_func():
for register in RPushList:
self.register['SP'] -= 4
for i in range(4):
# TODO is this the same as with POP?
self.memory[self.register['SP'] + i] = ((self.register[register] >> (8 * i)) & 0xFF)
return PUSH_func |
def add_page(self, orientation=''):
"Start a new page"
if(self.state==0):
self.open()
family=self.font_family
if self.underline:
style = self.font_style + 'U'
else:
style = self.font_style
size=self.font_size_pt
lw=self.line_width
dc=self.draw_color
fc=self.fill_color
tc=self.text_color
cf=self.color_flag
if(self.page>0):
#Page footer
self.in_footer=1
self.footer()
self.in_footer=0
#close page
self._endpage()
#Start new page
self._beginpage(orientation)
#Set line cap style to square
self._out('2 J')
#Set line width
self.line_width=lw
self._out(sprintf('%.2f w',lw*self.k))
#Set font
if(family):
self.set_font(family,style,size)
#Set colors
self.draw_color=dc
if(dc!='0 G'):
self._out(dc)
self.fill_color=fc
if(fc!='0 g'):
self._out(fc)
self.text_color=tc
self.color_flag=cf
#Page header
self.header()
#Restore line width
if(self.line_width!=lw):
self.line_width=lw
self._out(sprintf('%.2f w',lw*self.k))
#Restore font
if(family):
self.set_font(family,style,size)
#Restore colors
if(self.draw_color!=dc):
self.draw_color=dc
self._out(dc)
if(self.fill_color!=fc):
self.fill_color=fc
self._out(fc)
self.text_color=tc
self.color_flag=cf | Start a new page | Below is the the instruction that describes the task:
### Input:
Start a new page
### Response:
def add_page(self, orientation=''):
"Start a new page"
if(self.state==0):
self.open()
family=self.font_family
if self.underline:
style = self.font_style + 'U'
else:
style = self.font_style
size=self.font_size_pt
lw=self.line_width
dc=self.draw_color
fc=self.fill_color
tc=self.text_color
cf=self.color_flag
if(self.page>0):
#Page footer
self.in_footer=1
self.footer()
self.in_footer=0
#close page
self._endpage()
#Start new page
self._beginpage(orientation)
#Set line cap style to square
self._out('2 J')
#Set line width
self.line_width=lw
self._out(sprintf('%.2f w',lw*self.k))
#Set font
if(family):
self.set_font(family,style,size)
#Set colors
self.draw_color=dc
if(dc!='0 G'):
self._out(dc)
self.fill_color=fc
if(fc!='0 g'):
self._out(fc)
self.text_color=tc
self.color_flag=cf
#Page header
self.header()
#Restore line width
if(self.line_width!=lw):
self.line_width=lw
self._out(sprintf('%.2f w',lw*self.k))
#Restore font
if(family):
self.set_font(family,style,size)
#Restore colors
if(self.draw_color!=dc):
self.draw_color=dc
self._out(dc)
if(self.fill_color!=fc):
self.fill_color=fc
self._out(fc)
self.text_color=tc
self.color_flag=cf |
def unwrap(value):
"""Iterate an NTTable
:returns: An iterator yielding an OrderedDict for each column
"""
ret = []
# build lists of column names, and value
lbl, cols = [], []
for cname, cval in value.value.items():
lbl.append(cname)
cols.append(cval)
# zip together column arrays to iterate over rows
for rval in izip(*cols):
# zip together column names and row values
ret.append(OrderedDict(zip(lbl, rval)))
return ret | Iterate an NTTable
:returns: An iterator yielding an OrderedDict for each column | Below is the the instruction that describes the task:
### Input:
Iterate an NTTable
:returns: An iterator yielding an OrderedDict for each column
### Response:
def unwrap(value):
"""Iterate an NTTable
:returns: An iterator yielding an OrderedDict for each column
"""
ret = []
# build lists of column names, and value
lbl, cols = [], []
for cname, cval in value.value.items():
lbl.append(cname)
cols.append(cval)
# zip together column arrays to iterate over rows
for rval in izip(*cols):
# zip together column names and row values
ret.append(OrderedDict(zip(lbl, rval)))
return ret |
def require_mapping(self) -> None:
"""Require the node to be a mapping."""
if not isinstance(self.yaml_node, yaml.MappingNode):
raise RecognitionError(('{}{}A mapping is required here').format(
self.yaml_node.start_mark, os.linesep)) | Require the node to be a mapping. | Below is the the instruction that describes the task:
### Input:
Require the node to be a mapping.
### Response:
def require_mapping(self) -> None:
"""Require the node to be a mapping."""
if not isinstance(self.yaml_node, yaml.MappingNode):
raise RecognitionError(('{}{}A mapping is required here').format(
self.yaml_node.start_mark, os.linesep)) |
def analysis_question_extractor(impact_report, component_metadata):
"""Extracting analysis question from the impact layer.
:param impact_report: the impact report that acts as a proxy to fetch
all the data that extractor needed
:type impact_report: safe.report.impact_report.ImpactReport
:param component_metadata: the component metadata. Used to obtain
information about the component we want to render
:type component_metadata: safe.report.report_metadata.
ReportComponentsMetadata
:return: context for rendering phase
:rtype: dict
.. versionadded:: 4.0
"""
multi_exposure = impact_report.multi_exposure_impact_function
if multi_exposure:
return multi_exposure_analysis_question_extractor(
impact_report, component_metadata)
context = {}
extra_args = component_metadata.extra_args
provenance = impact_report.impact_function.provenance
header = resolve_from_dictionary(extra_args, 'header')
analysis_question = provenance['analysis_question']
context['component_key'] = component_metadata.key
context['header'] = header
context['analysis_questions'] = [analysis_question]
return context | Extracting analysis question from the impact layer.
:param impact_report: the impact report that acts as a proxy to fetch
all the data that extractor needed
:type impact_report: safe.report.impact_report.ImpactReport
:param component_metadata: the component metadata. Used to obtain
information about the component we want to render
:type component_metadata: safe.report.report_metadata.
ReportComponentsMetadata
:return: context for rendering phase
:rtype: dict
.. versionadded:: 4.0 | Below is the the instruction that describes the task:
### Input:
Extracting analysis question from the impact layer.
:param impact_report: the impact report that acts as a proxy to fetch
all the data that extractor needed
:type impact_report: safe.report.impact_report.ImpactReport
:param component_metadata: the component metadata. Used to obtain
information about the component we want to render
:type component_metadata: safe.report.report_metadata.
ReportComponentsMetadata
:return: context for rendering phase
:rtype: dict
.. versionadded:: 4.0
### Response:
def analysis_question_extractor(impact_report, component_metadata):
"""Extracting analysis question from the impact layer.
:param impact_report: the impact report that acts as a proxy to fetch
all the data that extractor needed
:type impact_report: safe.report.impact_report.ImpactReport
:param component_metadata: the component metadata. Used to obtain
information about the component we want to render
:type component_metadata: safe.report.report_metadata.
ReportComponentsMetadata
:return: context for rendering phase
:rtype: dict
.. versionadded:: 4.0
"""
multi_exposure = impact_report.multi_exposure_impact_function
if multi_exposure:
return multi_exposure_analysis_question_extractor(
impact_report, component_metadata)
context = {}
extra_args = component_metadata.extra_args
provenance = impact_report.impact_function.provenance
header = resolve_from_dictionary(extra_args, 'header')
analysis_question = provenance['analysis_question']
context['component_key'] = component_metadata.key
context['header'] = header
context['analysis_questions'] = [analysis_question]
return context |
def license_name(self, license_name):
"""Sets the license_name of this DatasetNewRequest.
The license that should be associated with the dataset # noqa: E501
:param license_name: The license_name of this DatasetNewRequest. # noqa: E501
:type: str
"""
allowed_values = ["CC0-1.0", "CC-BY-SA-4.0", "GPL-2.0", "ODbL-1.0", "CC-BY-NC-SA-4.0", "unknown", "DbCL-1.0", "CC-BY-SA-3.0", "copyright-authors", "other", "reddit-api", "world-bank"] # noqa: E501
if license_name not in allowed_values:
raise ValueError(
"Invalid value for `license_name` ({0}), must be one of {1}" # noqa: E501
.format(license_name, allowed_values)
)
self._license_name = license_name | Sets the license_name of this DatasetNewRequest.
The license that should be associated with the dataset # noqa: E501
:param license_name: The license_name of this DatasetNewRequest. # noqa: E501
:type: str | Below is the the instruction that describes the task:
### Input:
Sets the license_name of this DatasetNewRequest.
The license that should be associated with the dataset # noqa: E501
:param license_name: The license_name of this DatasetNewRequest. # noqa: E501
:type: str
### Response:
def license_name(self, license_name):
"""Sets the license_name of this DatasetNewRequest.
The license that should be associated with the dataset # noqa: E501
:param license_name: The license_name of this DatasetNewRequest. # noqa: E501
:type: str
"""
allowed_values = ["CC0-1.0", "CC-BY-SA-4.0", "GPL-2.0", "ODbL-1.0", "CC-BY-NC-SA-4.0", "unknown", "DbCL-1.0", "CC-BY-SA-3.0", "copyright-authors", "other", "reddit-api", "world-bank"] # noqa: E501
if license_name not in allowed_values:
raise ValueError(
"Invalid value for `license_name` ({0}), must be one of {1}" # noqa: E501
.format(license_name, allowed_values)
)
self._license_name = license_name |
def write_taxonomy_and_seqinfo_files(self, taxonomies, output_taxonomy_file, output_seqinfo_file):
'''Write out taxonomy and seqinfo files as required by taxtastic
from known taxonomies
Parameters
----------
taxonomies:
hash of taxon_id to array of taxonomic information
output_taxonomy_file:
write taxtastic-compatible 'taxonomy' file here
output_seqinfo_file:
write taxtastic-compatible 'seqinfo' file here'''
first_pass_id_and_taxonomies = []
tc=TaxonomyCleaner()
max_number_of_ranks = 0
for taxon_id, tax_split in taxonomies.iteritems():
# Replace spaces with underscores e.g. 'Candidatus my_genus'
for idx, item in enumerate(tax_split):
tax_split[idx] = re.sub('\s+', '_', item.strip())
# Remove 'empty' taxononomies e.g. 's__'
tax_split = tc.remove_empty_ranks(tax_split)
# Add this fixed up list to the list
first_pass_id_and_taxonomies.append([taxon_id]+tax_split)
if len(tax_split) > max_number_of_ranks:
max_number_of_ranks = len(tax_split)
# Find taxons that have multiple parents, building a hash of parents as we go (i.e. a tree of taxonomies embedded in a hash)
#
# Assumes that no two taxonomic labels are the same when they are from different
# taxonomic levels. When there are children with multiple parents at the
# same taxonomic label then these are warned about and worked around.
parents = {} #hash of taxon to immediate parent
known_duplicates = sets.Set([])
for j, array in enumerate(first_pass_id_and_taxonomies):
taxonomy = array[1:]
for i, tax in enumerate(taxonomy):
if i==0: continue #top levels don't have parents
ancestry = taxonomy[i-1]
if parents.has_key(tax):
if parents[tax] != ancestry:
dup = "%s%s" %(parents[tax], ancestry)
# don't report the same problem several times
if dup not in known_duplicates:
print " %s '%s' with multiple parents %s and %s" % (array[0], tax, parents[tax], ancestry)
known_duplicates.add(dup)
# fix the current one
new_name_id = 1
new_name = "%se%s" % (tax, new_name_id)
while parents.has_key(new_name) and parents[new_name] != ancestry:
new_name_id += 1
new_name = "%se%s" % (tax, new_name_id)
first_pass_id_and_taxonomies[j][i+1] = new_name
taxonomy[i] = new_name
parents[new_name] = ancestry
else:
# normal case, seeing a new taxon and parent for the first time
parents[tax] = ancestry
# Write the sequence file
with open(output_seqinfo_file, 'w') as seqout:
# write header
seqout.write('seqname,tax_id\n')
# write each taxonomic association
for array in first_pass_id_and_taxonomies:
sequence_id = array[0]
if len(array)==1:
most_specific_taxonomic_affiliation = 'Root'
else:
most_specific_taxonomic_affiliation = array[-1]
seqout.write("%s,%s\n" % (array[0],
most_specific_taxonomic_affiliation))
# Write the taxonomy file
noted_taxonomies = sets.Set([])
taxonomic_level_names = ["rank_%i" % rank for rank in range(max_number_of_ranks)]
with open(output_taxonomy_file, 'w') as seqout:
# write header and root line
seqout.write(','.join(['tax_id','parent_id','rank','tax_name','root'] + taxonomic_level_names) +'\n')
seqout.write(','.join(['Root','Root','root','Root','Root'])+ ''.join([',']*max_number_of_ranks) + '\n')
# write all the taxonomies
for array in first_pass_id_and_taxonomies:
taxons = array[1:]
for i, tax in enumerate(taxons):
line = self._taxonomy_line(i, taxons[:(i+1)], taxonomic_level_names)
if line not in noted_taxonomies:
seqout.write(line+"\n")
noted_taxonomies.add(line) | Write out taxonomy and seqinfo files as required by taxtastic
from known taxonomies
Parameters
----------
taxonomies:
hash of taxon_id to array of taxonomic information
output_taxonomy_file:
write taxtastic-compatible 'taxonomy' file here
output_seqinfo_file:
write taxtastic-compatible 'seqinfo' file here | Below is the the instruction that describes the task:
### Input:
Write out taxonomy and seqinfo files as required by taxtastic
from known taxonomies
Parameters
----------
taxonomies:
hash of taxon_id to array of taxonomic information
output_taxonomy_file:
write taxtastic-compatible 'taxonomy' file here
output_seqinfo_file:
write taxtastic-compatible 'seqinfo' file here
### Response:
def write_taxonomy_and_seqinfo_files(self, taxonomies, output_taxonomy_file, output_seqinfo_file):
'''Write out taxonomy and seqinfo files as required by taxtastic
from known taxonomies
Parameters
----------
taxonomies:
hash of taxon_id to array of taxonomic information
output_taxonomy_file:
write taxtastic-compatible 'taxonomy' file here
output_seqinfo_file:
write taxtastic-compatible 'seqinfo' file here'''
first_pass_id_and_taxonomies = []
tc=TaxonomyCleaner()
max_number_of_ranks = 0
for taxon_id, tax_split in taxonomies.iteritems():
# Replace spaces with underscores e.g. 'Candidatus my_genus'
for idx, item in enumerate(tax_split):
tax_split[idx] = re.sub('\s+', '_', item.strip())
# Remove 'empty' taxononomies e.g. 's__'
tax_split = tc.remove_empty_ranks(tax_split)
# Add this fixed up list to the list
first_pass_id_and_taxonomies.append([taxon_id]+tax_split)
if len(tax_split) > max_number_of_ranks:
max_number_of_ranks = len(tax_split)
# Find taxons that have multiple parents, building a hash of parents as we go (i.e. a tree of taxonomies embedded in a hash)
#
# Assumes that no two taxonomic labels are the same when they are from different
# taxonomic levels. When there are children with multiple parents at the
# same taxonomic label then these are warned about and worked around.
parents = {} #hash of taxon to immediate parent
known_duplicates = sets.Set([])
for j, array in enumerate(first_pass_id_and_taxonomies):
taxonomy = array[1:]
for i, tax in enumerate(taxonomy):
if i==0: continue #top levels don't have parents
ancestry = taxonomy[i-1]
if parents.has_key(tax):
if parents[tax] != ancestry:
dup = "%s%s" %(parents[tax], ancestry)
# don't report the same problem several times
if dup not in known_duplicates:
print " %s '%s' with multiple parents %s and %s" % (array[0], tax, parents[tax], ancestry)
known_duplicates.add(dup)
# fix the current one
new_name_id = 1
new_name = "%se%s" % (tax, new_name_id)
while parents.has_key(new_name) and parents[new_name] != ancestry:
new_name_id += 1
new_name = "%se%s" % (tax, new_name_id)
first_pass_id_and_taxonomies[j][i+1] = new_name
taxonomy[i] = new_name
parents[new_name] = ancestry
else:
# normal case, seeing a new taxon and parent for the first time
parents[tax] = ancestry
# Write the sequence file
with open(output_seqinfo_file, 'w') as seqout:
# write header
seqout.write('seqname,tax_id\n')
# write each taxonomic association
for array in first_pass_id_and_taxonomies:
sequence_id = array[0]
if len(array)==1:
most_specific_taxonomic_affiliation = 'Root'
else:
most_specific_taxonomic_affiliation = array[-1]
seqout.write("%s,%s\n" % (array[0],
most_specific_taxonomic_affiliation))
# Write the taxonomy file
noted_taxonomies = sets.Set([])
taxonomic_level_names = ["rank_%i" % rank for rank in range(max_number_of_ranks)]
with open(output_taxonomy_file, 'w') as seqout:
# write header and root line
seqout.write(','.join(['tax_id','parent_id','rank','tax_name','root'] + taxonomic_level_names) +'\n')
seqout.write(','.join(['Root','Root','root','Root','Root'])+ ''.join([',']*max_number_of_ranks) + '\n')
# write all the taxonomies
for array in first_pass_id_and_taxonomies:
taxons = array[1:]
for i, tax in enumerate(taxons):
line = self._taxonomy_line(i, taxons[:(i+1)], taxonomic_level_names)
if line not in noted_taxonomies:
seqout.write(line+"\n")
noted_taxonomies.add(line) |
def finalize_sv(orig_vcf, data, items):
"""Finalize structural variants, adding effects and splitting if needed.
"""
paired = vcfutils.get_paired(items)
# For paired/somatic, attach combined calls to tumor sample
if paired:
sample_vcf = orig_vcf if paired.tumor_name == dd.get_sample_name(data) else None
else:
sample_vcf = "%s-%s.vcf.gz" % (utils.splitext_plus(orig_vcf)[0], dd.get_sample_name(data))
sample_vcf = vcfutils.select_sample(orig_vcf, dd.get_sample_name(data), sample_vcf, data["config"])
if sample_vcf:
effects_vcf, _ = effects.add_to_vcf(sample_vcf, data, "snpeff")
else:
effects_vcf = None
return effects_vcf or sample_vcf | Finalize structural variants, adding effects and splitting if needed. | Below is the the instruction that describes the task:
### Input:
Finalize structural variants, adding effects and splitting if needed.
### Response:
def finalize_sv(orig_vcf, data, items):
"""Finalize structural variants, adding effects and splitting if needed.
"""
paired = vcfutils.get_paired(items)
# For paired/somatic, attach combined calls to tumor sample
if paired:
sample_vcf = orig_vcf if paired.tumor_name == dd.get_sample_name(data) else None
else:
sample_vcf = "%s-%s.vcf.gz" % (utils.splitext_plus(orig_vcf)[0], dd.get_sample_name(data))
sample_vcf = vcfutils.select_sample(orig_vcf, dd.get_sample_name(data), sample_vcf, data["config"])
if sample_vcf:
effects_vcf, _ = effects.add_to_vcf(sample_vcf, data, "snpeff")
else:
effects_vcf = None
return effects_vcf or sample_vcf |
def update_item(self, jid, name = NO_CHANGE, groups = NO_CHANGE,
callback = None, error_callback = None):
"""Modify a contact in the roster.
:Parameters:
- `jid`: contact's jid
- `name`: a new name for the contact
- `groups`: a sequence of group names the contact should belong to
- `callback`: function to call when the request succeeds. It should
accept a single argument - a `RosterItem` describing the
requested change
- `error_callback`: function to call when the request fails. It
should accept a single argument - an error stanza received
(`None` in case of timeout)
:Types:
- `jid`: `JID`
- `name`: `unicode`
- `groups`: sequence of `unicode`
"""
# pylint: disable=R0913
item = self.roster[jid]
if name is NO_CHANGE and groups is NO_CHANGE:
return
if name is NO_CHANGE:
name = item.name
if groups is NO_CHANGE:
groups = item.groups
item = RosterItem(jid, name, groups)
self._roster_set(item, callback, error_callback) | Modify a contact in the roster.
:Parameters:
- `jid`: contact's jid
- `name`: a new name for the contact
- `groups`: a sequence of group names the contact should belong to
- `callback`: function to call when the request succeeds. It should
accept a single argument - a `RosterItem` describing the
requested change
- `error_callback`: function to call when the request fails. It
should accept a single argument - an error stanza received
(`None` in case of timeout)
:Types:
- `jid`: `JID`
- `name`: `unicode`
- `groups`: sequence of `unicode` | Below is the the instruction that describes the task:
### Input:
Modify a contact in the roster.
:Parameters:
- `jid`: contact's jid
- `name`: a new name for the contact
- `groups`: a sequence of group names the contact should belong to
- `callback`: function to call when the request succeeds. It should
accept a single argument - a `RosterItem` describing the
requested change
- `error_callback`: function to call when the request fails. It
should accept a single argument - an error stanza received
(`None` in case of timeout)
:Types:
- `jid`: `JID`
- `name`: `unicode`
- `groups`: sequence of `unicode`
### Response:
def update_item(self, jid, name = NO_CHANGE, groups = NO_CHANGE,
callback = None, error_callback = None):
"""Modify a contact in the roster.
:Parameters:
- `jid`: contact's jid
- `name`: a new name for the contact
- `groups`: a sequence of group names the contact should belong to
- `callback`: function to call when the request succeeds. It should
accept a single argument - a `RosterItem` describing the
requested change
- `error_callback`: function to call when the request fails. It
should accept a single argument - an error stanza received
(`None` in case of timeout)
:Types:
- `jid`: `JID`
- `name`: `unicode`
- `groups`: sequence of `unicode`
"""
# pylint: disable=R0913
item = self.roster[jid]
if name is NO_CHANGE and groups is NO_CHANGE:
return
if name is NO_CHANGE:
name = item.name
if groups is NO_CHANGE:
groups = item.groups
item = RosterItem(jid, name, groups)
self._roster_set(item, callback, error_callback) |
def has_blocking_background_send(self):
"""Check whether any blocking background commands are waiting to run.
If any are, return True. If none are, return False.
"""
for background_object in self.background_objects:
# If it's running, or not started yet, it should block other tasks.
if background_object.block_other_commands and background_object.run_state in ('S','N'):
self.shutit_obj.log('All objects are: ' + str(self),level=logging.DEBUG)
self.shutit_obj.log('The current blocking send object is: ' + str(background_object),level=logging.DEBUG)
return True
elif background_object.block_other_commands and background_object.run_state in ('F','C','T'):
assert False, shutit_util.print_debug(msg='Blocking command should have been removed, in run_state: ' + background_object.run_state)
else:
assert background_object.block_other_commands is False, shutit_util.print_debug()
return False | Check whether any blocking background commands are waiting to run.
If any are, return True. If none are, return False. | Below is the the instruction that describes the task:
### Input:
Check whether any blocking background commands are waiting to run.
If any are, return True. If none are, return False.
### Response:
def has_blocking_background_send(self):
"""Check whether any blocking background commands are waiting to run.
If any are, return True. If none are, return False.
"""
for background_object in self.background_objects:
# If it's running, or not started yet, it should block other tasks.
if background_object.block_other_commands and background_object.run_state in ('S','N'):
self.shutit_obj.log('All objects are: ' + str(self),level=logging.DEBUG)
self.shutit_obj.log('The current blocking send object is: ' + str(background_object),level=logging.DEBUG)
return True
elif background_object.block_other_commands and background_object.run_state in ('F','C','T'):
assert False, shutit_util.print_debug(msg='Blocking command should have been removed, in run_state: ' + background_object.run_state)
else:
assert background_object.block_other_commands is False, shutit_util.print_debug()
return False |
def analysis_output_to_dict_entries(
self,
inputfile: AnalysisOutput,
previous_inputfile: Optional[AnalysisOutput],
previous_issue_handles: Optional[AnalysisOutput],
linemapfile: Optional[str],
) -> DictEntries:
"""Here we take input generators and return a dict with issues,
preconditions, and postconditions separated. If there is only a single
generator file, it's simple. If we also pass in a generator from a
previous inputfile then there are a couple extra steps:
1. If an issue was seen in the previous inputfile then we won't return
it, because it's not new.
2. In addition, we take an optional linemap file that maps for each
filename, each new file line position to a list of old file line
position. This is used to adjust handles to we can recognize when issues
moved.
"""
issues = []
previous_handles: Set[str] = set()
# pyre-fixme[9]: conditions has type `Dict[ParseType, Dict[str, List[Dict[str...
conditions: Dict[ParseType, Dict[str, List[Dict[str, Any]]]] = {
ParseType.PRECONDITION: defaultdict(list),
ParseType.POSTCONDITION: defaultdict(list),
}
# If we have a mapfile, create the map.
if linemapfile:
log.info("Parsing linemap file")
with open(linemapfile, "r") as f:
linemap = json.load(f)
else:
linemap = None
# Save entry info from the parent analysis, if there is one.
# If previous issue handles file is provided, use it over
# previous_inputfile (contains the full JSON)
if previous_issue_handles:
log.info("Parsing previous issue handles")
for f in previous_issue_handles.file_handles():
handles = f.read().splitlines()
previous_handles = {handle for handle in handles}
elif previous_inputfile:
log.info("Parsing previous hh_server output")
for typ, master_key, e in self._analysis_output_to_parsed_types(
previous_inputfile
):
if typ == ParseType.ISSUE:
diff_handle = BaseParser.compute_diff_handle(
e["filename"], e["line"], e["code"]
)
previous_handles.add(diff_handle)
# Use exact handle match too in case linemap is missing.
previous_handles.add(master_key)
log.info("Parsing hh_server output")
for typ, key, e in self._analysis_output_to_parsed_types(inputfile):
if typ == ParseType.ISSUE:
# We are only interested in issues that weren't in the previous
# analysis.
if not self._is_existing_issue(linemap, previous_handles, e, key):
issues.append(e)
else:
conditions[typ][key].append(e)
return {
"issues": issues,
"preconditions": conditions[ParseType.PRECONDITION],
"postconditions": conditions[ParseType.POSTCONDITION],
} | Here we take input generators and return a dict with issues,
preconditions, and postconditions separated. If there is only a single
generator file, it's simple. If we also pass in a generator from a
previous inputfile then there are a couple extra steps:
1. If an issue was seen in the previous inputfile then we won't return
it, because it's not new.
2. In addition, we take an optional linemap file that maps for each
filename, each new file line position to a list of old file line
position. This is used to adjust handles to we can recognize when issues
moved. | Below is the the instruction that describes the task:
### Input:
Here we take input generators and return a dict with issues,
preconditions, and postconditions separated. If there is only a single
generator file, it's simple. If we also pass in a generator from a
previous inputfile then there are a couple extra steps:
1. If an issue was seen in the previous inputfile then we won't return
it, because it's not new.
2. In addition, we take an optional linemap file that maps for each
filename, each new file line position to a list of old file line
position. This is used to adjust handles to we can recognize when issues
moved.
### Response:
def analysis_output_to_dict_entries(
self,
inputfile: AnalysisOutput,
previous_inputfile: Optional[AnalysisOutput],
previous_issue_handles: Optional[AnalysisOutput],
linemapfile: Optional[str],
) -> DictEntries:
"""Here we take input generators and return a dict with issues,
preconditions, and postconditions separated. If there is only a single
generator file, it's simple. If we also pass in a generator from a
previous inputfile then there are a couple extra steps:
1. If an issue was seen in the previous inputfile then we won't return
it, because it's not new.
2. In addition, we take an optional linemap file that maps for each
filename, each new file line position to a list of old file line
position. This is used to adjust handles to we can recognize when issues
moved.
"""
issues = []
previous_handles: Set[str] = set()
# pyre-fixme[9]: conditions has type `Dict[ParseType, Dict[str, List[Dict[str...
conditions: Dict[ParseType, Dict[str, List[Dict[str, Any]]]] = {
ParseType.PRECONDITION: defaultdict(list),
ParseType.POSTCONDITION: defaultdict(list),
}
# If we have a mapfile, create the map.
if linemapfile:
log.info("Parsing linemap file")
with open(linemapfile, "r") as f:
linemap = json.load(f)
else:
linemap = None
# Save entry info from the parent analysis, if there is one.
# If previous issue handles file is provided, use it over
# previous_inputfile (contains the full JSON)
if previous_issue_handles:
log.info("Parsing previous issue handles")
for f in previous_issue_handles.file_handles():
handles = f.read().splitlines()
previous_handles = {handle for handle in handles}
elif previous_inputfile:
log.info("Parsing previous hh_server output")
for typ, master_key, e in self._analysis_output_to_parsed_types(
previous_inputfile
):
if typ == ParseType.ISSUE:
diff_handle = BaseParser.compute_diff_handle(
e["filename"], e["line"], e["code"]
)
previous_handles.add(diff_handle)
# Use exact handle match too in case linemap is missing.
previous_handles.add(master_key)
log.info("Parsing hh_server output")
for typ, key, e in self._analysis_output_to_parsed_types(inputfile):
if typ == ParseType.ISSUE:
# We are only interested in issues that weren't in the previous
# analysis.
if not self._is_existing_issue(linemap, previous_handles, e, key):
issues.append(e)
else:
conditions[typ][key].append(e)
return {
"issues": issues,
"preconditions": conditions[ParseType.PRECONDITION],
"postconditions": conditions[ParseType.POSTCONDITION],
} |
def _replace_str_html(s):
"""替换html‘"’等转义内容为正常内容
Args:
s: 文字内容
Returns:
s: 处理反转义后的文字
"""
html_str_list = [
(''', '\''),
('"', '"'),
('&', '&'),
('¥', '¥'),
('amp;', ''),
('<', '<'),
('>', '>'),
(' ', ' '),
('\\', '')
]
for i in html_str_list:
s = s.replace(i[0], i[1])
return s | 替换html‘"’等转义内容为正常内容
Args:
s: 文字内容
Returns:
s: 处理反转义后的文字 | Below is the the instruction that describes the task:
### Input:
替换html‘"’等转义内容为正常内容
Args:
s: 文字内容
Returns:
s: 处理反转义后的文字
### Response:
def _replace_str_html(s):
"""替换html‘"’等转义内容为正常内容
Args:
s: 文字内容
Returns:
s: 处理反转义后的文字
"""
html_str_list = [
(''', '\''),
('"', '"'),
('&', '&'),
('¥', '¥'),
('amp;', ''),
('<', '<'),
('>', '>'),
(' ', ' '),
('\\', '')
]
for i in html_str_list:
s = s.replace(i[0], i[1])
return s |
def service_url_parse(url):
"""
Function that parses from url the service and folder of services.
"""
endpoint = get_sanitized_endpoint(url)
url_split_list = url.split(endpoint + '/')
if len(url_split_list) != 0:
url_split_list = url_split_list[1].split('/')
else:
raise Exception('Wrong url parsed')
# Remove unnecessary items from list of the split url.
parsed_url = [s for s in url_split_list if '?' not in s if 'Server' not in s]
return parsed_url | Function that parses from url the service and folder of services. | Below is the the instruction that describes the task:
### Input:
Function that parses from url the service and folder of services.
### Response:
def service_url_parse(url):
"""
Function that parses from url the service and folder of services.
"""
endpoint = get_sanitized_endpoint(url)
url_split_list = url.split(endpoint + '/')
if len(url_split_list) != 0:
url_split_list = url_split_list[1].split('/')
else:
raise Exception('Wrong url parsed')
# Remove unnecessary items from list of the split url.
parsed_url = [s for s in url_split_list if '?' not in s if 'Server' not in s]
return parsed_url |
def _get_proto():
'''
Checks configuration to see whether the user has SSL turned on. Default is:
.. code-block:: yaml
use_ssl: True
'''
use_ssl = config.get_cloud_config_value(
'use_ssl',
get_configured_provider(),
__opts__,
search_global=False,
default=True
)
if use_ssl is True:
return 'https'
return 'http' | Checks configuration to see whether the user has SSL turned on. Default is:
.. code-block:: yaml
use_ssl: True | Below is the the instruction that describes the task:
### Input:
Checks configuration to see whether the user has SSL turned on. Default is:
.. code-block:: yaml
use_ssl: True
### Response:
def _get_proto():
'''
Checks configuration to see whether the user has SSL turned on. Default is:
.. code-block:: yaml
use_ssl: True
'''
use_ssl = config.get_cloud_config_value(
'use_ssl',
get_configured_provider(),
__opts__,
search_global=False,
default=True
)
if use_ssl is True:
return 'https'
return 'http' |
def replace_certificate_signing_request_approval(self, name, body, **kwargs):
"""
replace approval of the specified CertificateSigningRequest
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_certificate_signing_request_approval(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the CertificateSigningRequest (required)
:param V1beta1CertificateSigningRequest body: (required)
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param str pretty: If 'true', then the output is pretty printed.
:return: V1beta1CertificateSigningRequest
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.replace_certificate_signing_request_approval_with_http_info(name, body, **kwargs)
else:
(data) = self.replace_certificate_signing_request_approval_with_http_info(name, body, **kwargs)
return data | replace approval of the specified CertificateSigningRequest
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_certificate_signing_request_approval(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the CertificateSigningRequest (required)
:param V1beta1CertificateSigningRequest body: (required)
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param str pretty: If 'true', then the output is pretty printed.
:return: V1beta1CertificateSigningRequest
If the method is called asynchronously,
returns the request thread. | Below is the the instruction that describes the task:
### Input:
replace approval of the specified CertificateSigningRequest
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_certificate_signing_request_approval(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the CertificateSigningRequest (required)
:param V1beta1CertificateSigningRequest body: (required)
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param str pretty: If 'true', then the output is pretty printed.
:return: V1beta1CertificateSigningRequest
If the method is called asynchronously,
returns the request thread.
### Response:
def replace_certificate_signing_request_approval(self, name, body, **kwargs):
"""
replace approval of the specified CertificateSigningRequest
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_certificate_signing_request_approval(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the CertificateSigningRequest (required)
:param V1beta1CertificateSigningRequest body: (required)
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param str pretty: If 'true', then the output is pretty printed.
:return: V1beta1CertificateSigningRequest
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.replace_certificate_signing_request_approval_with_http_info(name, body, **kwargs)
else:
(data) = self.replace_certificate_signing_request_approval_with_http_info(name, body, **kwargs)
return data |
def alpha(self, x, y, kwargs, k=None):
"""
deflection angles
:param x: x-position (preferentially arcsec)
:type x: numpy array
:param y: y-position (preferentially arcsec)
:type y: numpy array
:param kwargs: list of keyword arguments of lens model parameters matching the lens model classes
:param k: only evaluate the k-th lens model
:return: deflection angles in units of arcsec
"""
x = np.array(x, dtype=float)
y = np.array(y, dtype=float)
bool_list = self._bool_list(k)
x_, y_, kwargs_copy = self._update_foreground(x, y, kwargs)
f_x, f_y = np.zeros_like(x_), np.zeros_like(x_)
for i, func in enumerate(self.func_list):
if bool_list[i] is True:
if self._model_list[i] == 'SHEAR':
f_x_i, f_y_i = func.derivatives(x, y, **kwargs[i])
else:
f_x_i, f_y_i = func.derivatives(x_, y_, **kwargs_copy[i])
f_x += f_x_i
f_y += f_y_i
return f_x, f_y | deflection angles
:param x: x-position (preferentially arcsec)
:type x: numpy array
:param y: y-position (preferentially arcsec)
:type y: numpy array
:param kwargs: list of keyword arguments of lens model parameters matching the lens model classes
:param k: only evaluate the k-th lens model
:return: deflection angles in units of arcsec | Below is the the instruction that describes the task:
### Input:
deflection angles
:param x: x-position (preferentially arcsec)
:type x: numpy array
:param y: y-position (preferentially arcsec)
:type y: numpy array
:param kwargs: list of keyword arguments of lens model parameters matching the lens model classes
:param k: only evaluate the k-th lens model
:return: deflection angles in units of arcsec
### Response:
def alpha(self, x, y, kwargs, k=None):
"""
deflection angles
:param x: x-position (preferentially arcsec)
:type x: numpy array
:param y: y-position (preferentially arcsec)
:type y: numpy array
:param kwargs: list of keyword arguments of lens model parameters matching the lens model classes
:param k: only evaluate the k-th lens model
:return: deflection angles in units of arcsec
"""
x = np.array(x, dtype=float)
y = np.array(y, dtype=float)
bool_list = self._bool_list(k)
x_, y_, kwargs_copy = self._update_foreground(x, y, kwargs)
f_x, f_y = np.zeros_like(x_), np.zeros_like(x_)
for i, func in enumerate(self.func_list):
if bool_list[i] is True:
if self._model_list[i] == 'SHEAR':
f_x_i, f_y_i = func.derivatives(x, y, **kwargs[i])
else:
f_x_i, f_y_i = func.derivatives(x_, y_, **kwargs_copy[i])
f_x += f_x_i
f_y += f_y_i
return f_x, f_y |
def dump_relation(api, rel_cfg, pid, data):
"""Dump a specific relation to a data dict."""
schema_class = rel_cfg.schema
if schema_class is not None:
schema = schema_class()
schema.context['pid'] = pid
result, errors = schema.dump(api)
data.setdefault(rel_cfg.name, []).append(result) | Dump a specific relation to a data dict. | Below is the the instruction that describes the task:
### Input:
Dump a specific relation to a data dict.
### Response:
def dump_relation(api, rel_cfg, pid, data):
"""Dump a specific relation to a data dict."""
schema_class = rel_cfg.schema
if schema_class is not None:
schema = schema_class()
schema.context['pid'] = pid
result, errors = schema.dump(api)
data.setdefault(rel_cfg.name, []).append(result) |
def encode_character(char):
"""Returns URL encoding for a single character
:param char (str) Single character to encode
:returns (str) URL-encoded character
"""
if char == '!': return '%21'
elif char == '"': return '%22'
elif char == '#': return '%23'
elif char == '$': return '%24'
elif char == '%': return '%25'
elif char == '&': return '%26'
elif char == '\'': return '%27'
elif char == '(': return '%28'
elif char == ')': return '%29'
elif char == '*': return '%2A'
elif char == '+': return '%2B'
elif char == ',': return '%2C'
elif char == '-': return '%2D'
elif char == '.': return '%2E'
elif char == '/': return '%2F'
elif char == ':': return '%3A'
elif char == ';': return '%3B'
elif char == '<': return '%3C'
elif char == '=': return '%3D'
elif char == '>': return '%3E'
elif char == '?': return '%3F'
elif char == '@': return '%40'
elif char == '[': return '%5B'
elif char == '\\': return '%5C'
elif char == ']': return '%5D'
elif char == '^': return '%5E'
elif char == '_': return '%5F'
elif char == '`': return '%60'
elif char == '{': return '%7B'
elif char == '|': return '%7C'
elif char == '}': return '%7D'
elif char == '~': return '%7E'
elif char == ' ': return '%7F'
else: return char | Returns URL encoding for a single character
:param char (str) Single character to encode
:returns (str) URL-encoded character | Below is the the instruction that describes the task:
### Input:
Returns URL encoding for a single character
:param char (str) Single character to encode
:returns (str) URL-encoded character
### Response:
def encode_character(char):
"""Returns URL encoding for a single character
:param char (str) Single character to encode
:returns (str) URL-encoded character
"""
if char == '!': return '%21'
elif char == '"': return '%22'
elif char == '#': return '%23'
elif char == '$': return '%24'
elif char == '%': return '%25'
elif char == '&': return '%26'
elif char == '\'': return '%27'
elif char == '(': return '%28'
elif char == ')': return '%29'
elif char == '*': return '%2A'
elif char == '+': return '%2B'
elif char == ',': return '%2C'
elif char == '-': return '%2D'
elif char == '.': return '%2E'
elif char == '/': return '%2F'
elif char == ':': return '%3A'
elif char == ';': return '%3B'
elif char == '<': return '%3C'
elif char == '=': return '%3D'
elif char == '>': return '%3E'
elif char == '?': return '%3F'
elif char == '@': return '%40'
elif char == '[': return '%5B'
elif char == '\\': return '%5C'
elif char == ']': return '%5D'
elif char == '^': return '%5E'
elif char == '_': return '%5F'
elif char == '`': return '%60'
elif char == '{': return '%7B'
elif char == '|': return '%7C'
elif char == '}': return '%7D'
elif char == '~': return '%7E'
elif char == ' ': return '%7F'
else: return char |
def deserialize_bytearray(attr):
"""Deserialize string into bytearray.
:param str attr: response string to be deserialized.
:rtype: bytearray
:raises: TypeError if string format invalid.
"""
if isinstance(attr, ET.Element):
attr = attr.text
return bytearray(b64decode(attr)) | Deserialize string into bytearray.
:param str attr: response string to be deserialized.
:rtype: bytearray
:raises: TypeError if string format invalid. | Below is the the instruction that describes the task:
### Input:
Deserialize string into bytearray.
:param str attr: response string to be deserialized.
:rtype: bytearray
:raises: TypeError if string format invalid.
### Response:
def deserialize_bytearray(attr):
"""Deserialize string into bytearray.
:param str attr: response string to be deserialized.
:rtype: bytearray
:raises: TypeError if string format invalid.
"""
if isinstance(attr, ET.Element):
attr = attr.text
return bytearray(b64decode(attr)) |
def _create_gates(self, inputs, memory):
"""Create input and forget gates for this step using `inputs` and `memory`.
Args:
inputs: Tensor input.
memory: The current state of memory.
Returns:
input_gate: A LSTM-like insert gate.
forget_gate: A LSTM-like forget gate.
"""
# We'll create the input and forget gates at once. Hence, calculate double
# the gate size.
num_gates = 2 * self._calculate_gate_size()
memory = tf.tanh(memory)
inputs = basic.BatchFlatten()(inputs)
gate_inputs = basic.BatchApply(basic.Linear(num_gates), n_dims=1)(inputs)
gate_inputs = tf.expand_dims(gate_inputs, axis=1)
gate_memory = basic.BatchApply(basic.Linear(num_gates))(memory)
gates = tf.split(gate_memory + gate_inputs, num_or_size_splits=2, axis=2)
input_gate, forget_gate = gates
input_gate = tf.sigmoid(input_gate + self._input_bias)
forget_gate = tf.sigmoid(forget_gate + self._forget_bias)
return input_gate, forget_gate | Create input and forget gates for this step using `inputs` and `memory`.
Args:
inputs: Tensor input.
memory: The current state of memory.
Returns:
input_gate: A LSTM-like insert gate.
forget_gate: A LSTM-like forget gate. | Below is the the instruction that describes the task:
### Input:
Create input and forget gates for this step using `inputs` and `memory`.
Args:
inputs: Tensor input.
memory: The current state of memory.
Returns:
input_gate: A LSTM-like insert gate.
forget_gate: A LSTM-like forget gate.
### Response:
def _create_gates(self, inputs, memory):
"""Create input and forget gates for this step using `inputs` and `memory`.
Args:
inputs: Tensor input.
memory: The current state of memory.
Returns:
input_gate: A LSTM-like insert gate.
forget_gate: A LSTM-like forget gate.
"""
# We'll create the input and forget gates at once. Hence, calculate double
# the gate size.
num_gates = 2 * self._calculate_gate_size()
memory = tf.tanh(memory)
inputs = basic.BatchFlatten()(inputs)
gate_inputs = basic.BatchApply(basic.Linear(num_gates), n_dims=1)(inputs)
gate_inputs = tf.expand_dims(gate_inputs, axis=1)
gate_memory = basic.BatchApply(basic.Linear(num_gates))(memory)
gates = tf.split(gate_memory + gate_inputs, num_or_size_splits=2, axis=2)
input_gate, forget_gate = gates
input_gate = tf.sigmoid(input_gate + self._input_bias)
forget_gate = tf.sigmoid(forget_gate + self._forget_bias)
return input_gate, forget_gate |
def run(self, agent_host):
"""run the agent on the world"""
total_reward = 0
self.prev_s = None
self.prev_a = None
is_first_action = True
# main loop:
world_state = agent_host.getWorldState()
while world_state.is_mission_running:
current_r = 0
if is_first_action:
# wait until have received a valid observation
while True:
time.sleep(0.1)
world_state = agent_host.getWorldState()
for error in world_state.errors:
self.logger.error("Error: %s" % error.text)
for reward in world_state.rewards:
current_r += reward.getValue()
if world_state.is_mission_running and len(world_state.observations)>0 and not world_state.observations[-1].text=="{}":
total_reward += self.act(world_state, agent_host, current_r)
break
if not world_state.is_mission_running:
break
is_first_action = False
else:
# wait for non-zero reward
while world_state.is_mission_running and current_r == 0:
time.sleep(0.1)
world_state = agent_host.getWorldState()
for error in world_state.errors:
self.logger.error("Error: %s" % error.text)
for reward in world_state.rewards:
current_r += reward.getValue()
# allow time to stabilise after action
while True:
time.sleep(0.1)
world_state = agent_host.getWorldState()
for error in world_state.errors:
self.logger.error("Error: %s" % error.text)
for reward in world_state.rewards:
current_r += reward.getValue()
if world_state.is_mission_running and len(world_state.observations)>0 and not world_state.observations[-1].text=="{}":
total_reward += self.act(world_state, agent_host, current_r)
break
if not world_state.is_mission_running:
break
# process final reward
self.logger.debug("Final reward: %d" % current_r)
total_reward += current_r
# update Q values
if self.prev_s is not None and self.prev_a is not None:
self.updateQTableFromTerminatingState( current_r )
self.drawQ()
return total_reward | run the agent on the world | Below is the the instruction that describes the task:
### Input:
run the agent on the world
### Response:
def run(self, agent_host):
"""run the agent on the world"""
total_reward = 0
self.prev_s = None
self.prev_a = None
is_first_action = True
# main loop:
world_state = agent_host.getWorldState()
while world_state.is_mission_running:
current_r = 0
if is_first_action:
# wait until have received a valid observation
while True:
time.sleep(0.1)
world_state = agent_host.getWorldState()
for error in world_state.errors:
self.logger.error("Error: %s" % error.text)
for reward in world_state.rewards:
current_r += reward.getValue()
if world_state.is_mission_running and len(world_state.observations)>0 and not world_state.observations[-1].text=="{}":
total_reward += self.act(world_state, agent_host, current_r)
break
if not world_state.is_mission_running:
break
is_first_action = False
else:
# wait for non-zero reward
while world_state.is_mission_running and current_r == 0:
time.sleep(0.1)
world_state = agent_host.getWorldState()
for error in world_state.errors:
self.logger.error("Error: %s" % error.text)
for reward in world_state.rewards:
current_r += reward.getValue()
# allow time to stabilise after action
while True:
time.sleep(0.1)
world_state = agent_host.getWorldState()
for error in world_state.errors:
self.logger.error("Error: %s" % error.text)
for reward in world_state.rewards:
current_r += reward.getValue()
if world_state.is_mission_running and len(world_state.observations)>0 and not world_state.observations[-1].text=="{}":
total_reward += self.act(world_state, agent_host, current_r)
break
if not world_state.is_mission_running:
break
# process final reward
self.logger.debug("Final reward: %d" % current_r)
total_reward += current_r
# update Q values
if self.prev_s is not None and self.prev_a is not None:
self.updateQTableFromTerminatingState( current_r )
self.drawQ()
return total_reward |
def remove_terms_used_in_less_than_num_docs(self, threshold):
'''
Parameters
----------
threshold: int
Minimum number of documents term should appear in to be kept
Returns
-------
TermDocMatrix, new object with terms removed.
'''
term_counts = self._X.astype(bool).astype(int).sum(axis=0).A[0]
terms_to_remove = np.where(term_counts < threshold)[0]
return self.remove_terms_by_indices(terms_to_remove) | Parameters
----------
threshold: int
Minimum number of documents term should appear in to be kept
Returns
-------
    TermDocMatrix, new object with terms removed. | Below is the instruction that describes the task:
### Input:
Parameters
----------
threshold: int
Minimum number of documents term should appear in to be kept
Returns
-------
TermDocMatrix, new object with terms removed.
### Response:
def remove_terms_used_in_less_than_num_docs(self, threshold):
'''
Parameters
----------
threshold: int
Minimum number of documents term should appear in to be kept
Returns
-------
TermDocMatrix, new object with terms removed.
'''
term_counts = self._X.astype(bool).astype(int).sum(axis=0).A[0]
terms_to_remove = np.where(term_counts < threshold)[0]
return self.remove_terms_by_indices(terms_to_remove) |
def resources_preparing_factory(app, wrapper):
""" Factory which wrap all resources in settings.
"""
settings = app.app.registry.settings
config = settings.get(CONFIG_RESOURCES, None)
if not config:
return
resources = [(k, [wrapper(r, GroupResource(k, v)) for r in v])
for k, v in config]
    settings[CONFIG_RESOURCES] = resources | Factory which wrap all resources in settings. | Below is the instruction that describes the task:
### Input:
Factory which wrap all resources in settings.
### Response:
def resources_preparing_factory(app, wrapper):
""" Factory which wrap all resources in settings.
"""
settings = app.app.registry.settings
config = settings.get(CONFIG_RESOURCES, None)
if not config:
return
resources = [(k, [wrapper(r, GroupResource(k, v)) for r in v])
for k, v in config]
settings[CONFIG_RESOURCES] = resources |
def nl_connect(sk, protocol):
"""Create file descriptor and bind socket.
https://github.com/thom311/libnl/blob/libnl3_2_25/lib/nl.c#L96
Creates a new Netlink socket using `socket.socket()` and binds the socket to the protocol and local port specified
in the `sk` socket object (if any). Fails if the socket is already connected.
Positional arguments:
sk -- Netlink socket (nl_sock class instance).
protocol -- Netlink protocol to use (integer).
Returns:
0 on success or a negative error code.
"""
flags = getattr(socket, 'SOCK_CLOEXEC', 0)
if sk.s_fd != -1:
return -NLE_BAD_SOCK
try:
sk.socket_instance = socket.socket(getattr(socket, 'AF_NETLINK', -1), socket.SOCK_RAW | flags, protocol)
except OSError as exc:
return -nl_syserr2nlerr(exc.errno)
if not sk.s_flags & NL_SOCK_BUFSIZE_SET:
err = nl_socket_set_buffer_size(sk, 0, 0)
if err < 0:
sk.socket_instance.close()
return err
try:
sk.socket_instance.bind((sk.s_local.nl_pid, sk.s_local.nl_groups))
except OSError as exc:
sk.socket_instance.close()
return -nl_syserr2nlerr(exc.errno)
sk.s_local.nl_pid = sk.socket_instance.getsockname()[0]
if sk.s_local.nl_family != socket.AF_NETLINK:
sk.socket_instance.close()
return -NLE_AF_NOSUPPORT
sk.s_proto = protocol
return 0 | Create file descriptor and bind socket.
https://github.com/thom311/libnl/blob/libnl3_2_25/lib/nl.c#L96
Creates a new Netlink socket using `socket.socket()` and binds the socket to the protocol and local port specified
in the `sk` socket object (if any). Fails if the socket is already connected.
Positional arguments:
sk -- Netlink socket (nl_sock class instance).
protocol -- Netlink protocol to use (integer).
Returns:
    0 on success or a negative error code. | Below is the instruction that describes the task:
### Input:
Create file descriptor and bind socket.
https://github.com/thom311/libnl/blob/libnl3_2_25/lib/nl.c#L96
Creates a new Netlink socket using `socket.socket()` and binds the socket to the protocol and local port specified
in the `sk` socket object (if any). Fails if the socket is already connected.
Positional arguments:
sk -- Netlink socket (nl_sock class instance).
protocol -- Netlink protocol to use (integer).
Returns:
0 on success or a negative error code.
### Response:
def nl_connect(sk, protocol):
"""Create file descriptor and bind socket.
https://github.com/thom311/libnl/blob/libnl3_2_25/lib/nl.c#L96
Creates a new Netlink socket using `socket.socket()` and binds the socket to the protocol and local port specified
in the `sk` socket object (if any). Fails if the socket is already connected.
Positional arguments:
sk -- Netlink socket (nl_sock class instance).
protocol -- Netlink protocol to use (integer).
Returns:
0 on success or a negative error code.
"""
flags = getattr(socket, 'SOCK_CLOEXEC', 0)
if sk.s_fd != -1:
return -NLE_BAD_SOCK
try:
sk.socket_instance = socket.socket(getattr(socket, 'AF_NETLINK', -1), socket.SOCK_RAW | flags, protocol)
except OSError as exc:
return -nl_syserr2nlerr(exc.errno)
if not sk.s_flags & NL_SOCK_BUFSIZE_SET:
err = nl_socket_set_buffer_size(sk, 0, 0)
if err < 0:
sk.socket_instance.close()
return err
try:
sk.socket_instance.bind((sk.s_local.nl_pid, sk.s_local.nl_groups))
except OSError as exc:
sk.socket_instance.close()
return -nl_syserr2nlerr(exc.errno)
sk.s_local.nl_pid = sk.socket_instance.getsockname()[0]
if sk.s_local.nl_family != socket.AF_NETLINK:
sk.socket_instance.close()
return -NLE_AF_NOSUPPORT
sk.s_proto = protocol
return 0 |
def SLIT_GAUSSIAN(x,g):
"""
Instrumental (slit) function.
B(x) = sqrt(ln(2)/pi)/γ*exp(-ln(2)*(x/γ)**2),
where γ/2 is a gaussian half-width at half-maximum.
"""
g /= 2
return sqrt(log(2))/(sqrt(pi)*g)*exp(-log(2)*(x/g)**2) | Instrumental (slit) function.
B(x) = sqrt(ln(2)/pi)/γ*exp(-ln(2)*(x/γ)**2),
    where γ/2 is a gaussian half-width at half-maximum. | Below is the instruction that describes the task:
### Input:
Instrumental (slit) function.
B(x) = sqrt(ln(2)/pi)/γ*exp(-ln(2)*(x/γ)**2),
where γ/2 is a gaussian half-width at half-maximum.
### Response:
def SLIT_GAUSSIAN(x,g):
"""
Instrumental (slit) function.
B(x) = sqrt(ln(2)/pi)/γ*exp(-ln(2)*(x/γ)**2),
where γ/2 is a gaussian half-width at half-maximum.
"""
g /= 2
return sqrt(log(2))/(sqrt(pi)*g)*exp(-log(2)*(x/g)**2) |
def OnSafeModeEntry(self, event):
"""Safe mode entry event handler"""
# Enable menu item for leaving safe mode
self.main_window.main_menu.enable_file_approve(True)
self.main_window.grid.Refresh()
    event.Skip() | Safe mode entry event handler | Below is the instruction that describes the task:
### Input:
Safe mode entry event handler
### Response:
def OnSafeModeEntry(self, event):
"""Safe mode entry event handler"""
# Enable menu item for leaving safe mode
self.main_window.main_menu.enable_file_approve(True)
self.main_window.grid.Refresh()
event.Skip() |
def ToJson(self):
"""
Convert object members to a dictionary that can be parsed as JSON.
Returns:
dict:
"""
json = {}
json["hash"] = self.Hash.To0xString()
json["size"] = self.Size()
json["version"] = self.Version
json["previousblockhash"] = self.PrevHash.To0xString()
json["merkleroot"] = self.MerkleRoot.To0xString()
json["time"] = self.Timestamp
json["index"] = self.Index
nonce = bytearray(self.ConsensusData.to_bytes(8, 'little'))
nonce.reverse()
json["nonce"] = nonce.hex()
json['nextconsensus'] = Crypto.ToAddress(self.NextConsensus)
# json["consensus data"] = self.ConsensusData
json["script"] = '' if not self.Script else self.Script.ToJson()
return json | Convert object members to a dictionary that can be parsed as JSON.
Returns:
        dict: | Below is the instruction that describes the task:
### Input:
Convert object members to a dictionary that can be parsed as JSON.
Returns:
dict:
### Response:
def ToJson(self):
"""
Convert object members to a dictionary that can be parsed as JSON.
Returns:
dict:
"""
json = {}
json["hash"] = self.Hash.To0xString()
json["size"] = self.Size()
json["version"] = self.Version
json["previousblockhash"] = self.PrevHash.To0xString()
json["merkleroot"] = self.MerkleRoot.To0xString()
json["time"] = self.Timestamp
json["index"] = self.Index
nonce = bytearray(self.ConsensusData.to_bytes(8, 'little'))
nonce.reverse()
json["nonce"] = nonce.hex()
json['nextconsensus'] = Crypto.ToAddress(self.NextConsensus)
# json["consensus data"] = self.ConsensusData
json["script"] = '' if not self.Script else self.Script.ToJson()
return json |
def return_resource_name(self, record, resource_type):
""" Removes the trailing AWS domain from a DNS record
to return the resource name
e.g bucketname.s3.amazonaws.com will return bucketname
Args:
record (str): DNS record
resource_type: AWS Resource type (i.e. S3 Bucket, Elastic Beanstalk, etc..)
"""
try:
if resource_type == 's3':
regex = re.compile('.*(\.(?:s3-|s3){1}(?:.*)?\.amazonaws\.com)')
bucket_name = record.replace(regex.match(record).group(1), '')
return bucket_name
except Exception as e:
self.log.error('Unable to parse DNS record {} for resource type {}/{}'.format(record, resource_type, e))
return record | Removes the trailing AWS domain from a DNS record
to return the resource name
e.g bucketname.s3.amazonaws.com will return bucketname
Args:
record (str): DNS record
        resource_type: AWS Resource type (i.e. S3 Bucket, Elastic Beanstalk, etc..) | Below is the instruction that describes the task:
### Input:
Removes the trailing AWS domain from a DNS record
to return the resource name
e.g bucketname.s3.amazonaws.com will return bucketname
Args:
record (str): DNS record
resource_type: AWS Resource type (i.e. S3 Bucket, Elastic Beanstalk, etc..)
### Response:
def return_resource_name(self, record, resource_type):
""" Removes the trailing AWS domain from a DNS record
to return the resource name
e.g bucketname.s3.amazonaws.com will return bucketname
Args:
record (str): DNS record
resource_type: AWS Resource type (i.e. S3 Bucket, Elastic Beanstalk, etc..)
"""
try:
if resource_type == 's3':
regex = re.compile('.*(\.(?:s3-|s3){1}(?:.*)?\.amazonaws\.com)')
bucket_name = record.replace(regex.match(record).group(1), '')
return bucket_name
except Exception as e:
self.log.error('Unable to parse DNS record {} for resource type {}/{}'.format(record, resource_type, e))
return record |
def _waitfor(self, mode, mask, may_block, timeout=2):
"""
Wait for the YubiKey to either turn ON or OFF certain bits in the status byte.
mode is either 'and' or 'nand'
timeout is a number of seconds (precision about ~0.5 seconds)
"""
finished = False
sleep = 0.01
# After six sleeps, we've slept 0.64 seconds.
wait_num = (timeout * 2) - 1 + 6
resp_timeout = False # YubiKey hasn't indicated RESP_TIMEOUT (yet)
while not finished:
time.sleep(sleep)
this = self._read()
flags = yubico_util.ord_byte(this[7])
if flags & yubikey_defs.RESP_TIMEOUT_WAIT_FLAG:
if not resp_timeout:
resp_timeout = True
seconds_left = flags & yubikey_defs.RESP_TIMEOUT_WAIT_MASK
self._debug("Device indicates RESP_TIMEOUT (%i seconds left)\n" \
% (seconds_left))
if may_block:
# calculate new wait_num - never more than 20 seconds
seconds_left = min(20, seconds_left)
wait_num = (seconds_left * 2) - 1 + 6
if mode is 'nand':
if not flags & mask == mask:
finished = True
else:
self._debug("Status %s (0x%x) has not cleared bits %s (0x%x)\n"
% (bin(flags), flags, bin(mask), mask))
elif mode is 'and':
if flags & mask == mask:
finished = True
else:
self._debug("Status %s (0x%x) has not set bits %s (0x%x)\n"
% (bin(flags), flags, bin(mask), mask))
else:
assert()
if not finished:
wait_num -= 1
if wait_num == 0:
if mode is 'nand':
reason = 'Timed out waiting for YubiKey to clear status 0x%x' % mask
else:
reason = 'Timed out waiting for YubiKey to set status 0x%x' % mask
raise yubikey_base.YubiKeyTimeout(reason)
sleep = min(sleep + sleep, 0.5)
else:
return this | Wait for the YubiKey to either turn ON or OFF certain bits in the status byte.
mode is either 'and' or 'nand'
    timeout is a number of seconds (precision about ~0.5 seconds) | Below is the instruction that describes the task:
### Input:
Wait for the YubiKey to either turn ON or OFF certain bits in the status byte.
mode is either 'and' or 'nand'
timeout is a number of seconds (precision about ~0.5 seconds)
### Response:
def _waitfor(self, mode, mask, may_block, timeout=2):
"""
Wait for the YubiKey to either turn ON or OFF certain bits in the status byte.
mode is either 'and' or 'nand'
timeout is a number of seconds (precision about ~0.5 seconds)
"""
finished = False
sleep = 0.01
# After six sleeps, we've slept 0.64 seconds.
wait_num = (timeout * 2) - 1 + 6
resp_timeout = False # YubiKey hasn't indicated RESP_TIMEOUT (yet)
while not finished:
time.sleep(sleep)
this = self._read()
flags = yubico_util.ord_byte(this[7])
if flags & yubikey_defs.RESP_TIMEOUT_WAIT_FLAG:
if not resp_timeout:
resp_timeout = True
seconds_left = flags & yubikey_defs.RESP_TIMEOUT_WAIT_MASK
self._debug("Device indicates RESP_TIMEOUT (%i seconds left)\n" \
% (seconds_left))
if may_block:
# calculate new wait_num - never more than 20 seconds
seconds_left = min(20, seconds_left)
wait_num = (seconds_left * 2) - 1 + 6
if mode is 'nand':
if not flags & mask == mask:
finished = True
else:
self._debug("Status %s (0x%x) has not cleared bits %s (0x%x)\n"
% (bin(flags), flags, bin(mask), mask))
elif mode is 'and':
if flags & mask == mask:
finished = True
else:
self._debug("Status %s (0x%x) has not set bits %s (0x%x)\n"
% (bin(flags), flags, bin(mask), mask))
else:
assert()
if not finished:
wait_num -= 1
if wait_num == 0:
if mode is 'nand':
reason = 'Timed out waiting for YubiKey to clear status 0x%x' % mask
else:
reason = 'Timed out waiting for YubiKey to set status 0x%x' % mask
raise yubikey_base.YubiKeyTimeout(reason)
sleep = min(sleep + sleep, 0.5)
else:
return this |
def make_federation_entity(config, eid='', httpcli=None, verify_ssl=True):
"""
Construct a :py:class:`fedoidcmsg.entity.FederationEntity` instance based
on given configuration.
:param config: Federation entity configuration
:param eid: Entity ID
:param httpcli: A http client instance to use when sending HTTP requests
:param verify_ssl: Whether TLS/SSL certificates should be verified
:return: A :py:class:`fedoidcmsg.entity.FederationEntity` instance
"""
args = {}
if not eid:
try:
eid = config['entity_id']
except KeyError:
pass
if 'self_signer' in config:
self_signer = make_internal_signing_service(config['self_signer'],
eid)
args['self_signer'] = self_signer
try:
bundle_cnf = config['fo_bundle']
except KeyError:
pass
else:
_args = dict([(k, v) for k, v in bundle_cnf.items() if k in KJ_SPECS])
if _args:
_kj = init_key_jar(**_args)
else:
_kj = None
if 'dir' in bundle_cnf:
jb = FSJWKSBundle(eid, _kj, bundle_cnf['dir'],
key_conv={'to': quote_plus, 'from': unquote_plus})
else:
jb = JWKSBundle(eid, _kj)
args['fo_bundle'] = jb
for item in ['context', 'entity_id', 'fo_priority', 'mds_owner']:
try:
args[item] = config[item]
except KeyError:
pass
if 'entity_id' not in args:
args['entity_id'] = eid
# These are mutually exclusive
if 'sms_dir' in config:
args['sms_dir'] = config['sms_dir']
return FederationEntityOOB(httpcli, iss=eid, **args)
elif 'mds_service' in config:
args['verify_ssl'] = verify_ssl
args['mds_service'] = config['mds_service']
return FederationEntityAMS(httpcli, iss=eid, **args)
elif 'mdss_endpoint' in config:
args['verify_ssl'] = verify_ssl
# These are mandatory for this type of entity
for key in ['mdss_endpoint', 'mdss_owner', 'mdss_keys']:
args[key] = config[key]
return FederationEntitySwamid(httpcli, iss=eid, **args) | Construct a :py:class:`fedoidcmsg.entity.FederationEntity` instance based
on given configuration.
:param config: Federation entity configuration
:param eid: Entity ID
:param httpcli: A http client instance to use when sending HTTP requests
:param verify_ssl: Whether TLS/SSL certificates should be verified
    :return: A :py:class:`fedoidcmsg.entity.FederationEntity` instance | Below is the instruction that describes the task:
### Input:
Construct a :py:class:`fedoidcmsg.entity.FederationEntity` instance based
on given configuration.
:param config: Federation entity configuration
:param eid: Entity ID
:param httpcli: A http client instance to use when sending HTTP requests
:param verify_ssl: Whether TLS/SSL certificates should be verified
:return: A :py:class:`fedoidcmsg.entity.FederationEntity` instance
### Response:
def make_federation_entity(config, eid='', httpcli=None, verify_ssl=True):
"""
Construct a :py:class:`fedoidcmsg.entity.FederationEntity` instance based
on given configuration.
:param config: Federation entity configuration
:param eid: Entity ID
:param httpcli: A http client instance to use when sending HTTP requests
:param verify_ssl: Whether TLS/SSL certificates should be verified
:return: A :py:class:`fedoidcmsg.entity.FederationEntity` instance
"""
args = {}
if not eid:
try:
eid = config['entity_id']
except KeyError:
pass
if 'self_signer' in config:
self_signer = make_internal_signing_service(config['self_signer'],
eid)
args['self_signer'] = self_signer
try:
bundle_cnf = config['fo_bundle']
except KeyError:
pass
else:
_args = dict([(k, v) for k, v in bundle_cnf.items() if k in KJ_SPECS])
if _args:
_kj = init_key_jar(**_args)
else:
_kj = None
if 'dir' in bundle_cnf:
jb = FSJWKSBundle(eid, _kj, bundle_cnf['dir'],
key_conv={'to': quote_plus, 'from': unquote_plus})
else:
jb = JWKSBundle(eid, _kj)
args['fo_bundle'] = jb
for item in ['context', 'entity_id', 'fo_priority', 'mds_owner']:
try:
args[item] = config[item]
except KeyError:
pass
if 'entity_id' not in args:
args['entity_id'] = eid
# These are mutually exclusive
if 'sms_dir' in config:
args['sms_dir'] = config['sms_dir']
return FederationEntityOOB(httpcli, iss=eid, **args)
elif 'mds_service' in config:
args['verify_ssl'] = verify_ssl
args['mds_service'] = config['mds_service']
return FederationEntityAMS(httpcli, iss=eid, **args)
elif 'mdss_endpoint' in config:
args['verify_ssl'] = verify_ssl
# These are mandatory for this type of entity
for key in ['mdss_endpoint', 'mdss_owner', 'mdss_keys']:
args[key] = config[key]
return FederationEntitySwamid(httpcli, iss=eid, **args) |
def stChromaFeaturesInit(nfft, fs):
"""
This function initializes the chroma matrices used in the calculation of the chroma features
"""
freqs = numpy.array([((f + 1) * fs) / (2 * nfft) for f in range(nfft)])
Cp = 27.50
nChroma = numpy.round(12.0 * numpy.log2(freqs / Cp)).astype(int)
nFreqsPerChroma = numpy.zeros((nChroma.shape[0], ))
uChroma = numpy.unique(nChroma)
for u in uChroma:
idx = numpy.nonzero(nChroma == u)
nFreqsPerChroma[idx] = idx[0].shape
    return nChroma, nFreqsPerChroma | This function initializes the chroma matrices used in the calculation of the chroma features | Below is the instruction that describes the task:
### Input:
This function initializes the chroma matrices used in the calculation of the chroma features
### Response:
def stChromaFeaturesInit(nfft, fs):
"""
This function initializes the chroma matrices used in the calculation of the chroma features
"""
freqs = numpy.array([((f + 1) * fs) / (2 * nfft) for f in range(nfft)])
Cp = 27.50
nChroma = numpy.round(12.0 * numpy.log2(freqs / Cp)).astype(int)
nFreqsPerChroma = numpy.zeros((nChroma.shape[0], ))
uChroma = numpy.unique(nChroma)
for u in uChroma:
idx = numpy.nonzero(nChroma == u)
nFreqsPerChroma[idx] = idx[0].shape
return nChroma, nFreqsPerChroma |
def member_add(self, member_id=None, params=None):
"""add new member into existing configuration"""
member_id = member_id or str(uuid4())
if self.enable_ipv6:
common.enable_ipv6_repl(params)
if 'members' in params:
# is replica set
for member in params['members']:
if not member.get('rsParams', {}).get('arbiterOnly', False):
member.setdefault('procParams', {})['shardsvr'] = True
rs_params = params.copy()
# Turn 'rs_id' -> 'id', to be consistent with 'server_id' below.
rs_params['id'] = rs_params.pop('rs_id', None)
rs_params.update({'sslParams': self.sslParams})
rs_params['version'] = params.pop('version', self._version)
rs_params['members'] = [
self._strip_auth(params) for params in rs_params['members']]
rs_id = ReplicaSets().create(rs_params)
members = ReplicaSets().members(rs_id)
cfgs = rs_id + r"/" + ','.join([item['host'] for item in members])
result = self._add(cfgs, member_id)
if result.get('ok', 0) == 1:
self._shards[result['shardAdded']] = {'isReplicaSet': True, '_id': rs_id}
# return self._shards[result['shardAdded']].copy()
return self.member_info(member_id)
else:
# is single server
params.setdefault('procParams', {})['shardsvr'] = True
params.update({'autostart': True, 'sslParams': self.sslParams})
params = params.copy()
params['procParams'] = self._strip_auth(
params.get('procParams', {}))
params.setdefault('version', self._version)
logger.debug("servers create params: {params}".format(**locals()))
server_id = Servers().create('mongod', **params)
result = self._add(Servers().hostname(server_id), member_id)
if result.get('ok', 0) == 1:
self._shards[result['shardAdded']] = {'isServer': True, '_id': server_id}
        return self.member_info(member_id) | add new member into existing configuration | Below is the instruction that describes the task:
### Input:
add new member into existing configuration
### Response:
def member_add(self, member_id=None, params=None):
"""add new member into existing configuration"""
member_id = member_id or str(uuid4())
if self.enable_ipv6:
common.enable_ipv6_repl(params)
if 'members' in params:
# is replica set
for member in params['members']:
if not member.get('rsParams', {}).get('arbiterOnly', False):
member.setdefault('procParams', {})['shardsvr'] = True
rs_params = params.copy()
# Turn 'rs_id' -> 'id', to be consistent with 'server_id' below.
rs_params['id'] = rs_params.pop('rs_id', None)
rs_params.update({'sslParams': self.sslParams})
rs_params['version'] = params.pop('version', self._version)
rs_params['members'] = [
self._strip_auth(params) for params in rs_params['members']]
rs_id = ReplicaSets().create(rs_params)
members = ReplicaSets().members(rs_id)
cfgs = rs_id + r"/" + ','.join([item['host'] for item in members])
result = self._add(cfgs, member_id)
if result.get('ok', 0) == 1:
self._shards[result['shardAdded']] = {'isReplicaSet': True, '_id': rs_id}
# return self._shards[result['shardAdded']].copy()
return self.member_info(member_id)
else:
# is single server
params.setdefault('procParams', {})['shardsvr'] = True
params.update({'autostart': True, 'sslParams': self.sslParams})
params = params.copy()
params['procParams'] = self._strip_auth(
params.get('procParams', {}))
params.setdefault('version', self._version)
logger.debug("servers create params: {params}".format(**locals()))
server_id = Servers().create('mongod', **params)
result = self._add(Servers().hostname(server_id), member_id)
if result.get('ok', 0) == 1:
self._shards[result['shardAdded']] = {'isServer': True, '_id': server_id}
return self.member_info(member_id) |
def getbroker():
'''
return settings dictionnary
'''
if not Configuration.broker_initialized:
Configuration._initconf()
Configuration.broker_settings = Configuration.settings['broker']
Configuration.broker_initialized = True
    return Configuration.broker_settings | return settings dictionnary | Below is the instruction that describes the task:
### Input:
return settings dictionnary
### Response:
def getbroker():
'''
return settings dictionnary
'''
if not Configuration.broker_initialized:
Configuration._initconf()
Configuration.broker_settings = Configuration.settings['broker']
Configuration.broker_initialized = True
return Configuration.broker_settings |
def _validate_token(self, token, scopes_required=None):
"""The actual implementation of validate_token."""
if scopes_required is None:
scopes_required = []
scopes_required = set(scopes_required)
token_info = None
valid_token = False
has_required_scopes = False
if token:
try:
token_info = self._get_token_info(token)
except Exception as ex:
token_info = {'active': False}
logger.error('ERROR: Unable to get token info')
logger.error(str(ex))
valid_token = token_info.get('active', False)
if 'aud' in token_info and \
current_app.config['OIDC_RESOURCE_CHECK_AUD']:
valid_audience = False
aud = token_info['aud']
clid = self.client_secrets['client_id']
if isinstance(aud, list):
valid_audience = clid in aud
else:
valid_audience = clid == aud
if not valid_audience:
logger.error('Refused token because of invalid '
'audience')
valid_token = False
if valid_token:
token_scopes = token_info.get('scope', '').split(' ')
else:
token_scopes = []
has_required_scopes = scopes_required.issubset(
set(token_scopes))
if not has_required_scopes:
logger.debug('Token missed required scopes')
if (valid_token and has_required_scopes):
g.oidc_token_info = token_info
return True
if not valid_token:
return 'Token required but invalid'
elif not has_required_scopes:
return 'Token does not have required scopes'
else:
        return 'Something went wrong checking your token' | The actual implementation of validate_token. | Below is the instruction that describes the task:
### Input:
The actual implementation of validate_token.
### Response:
def _validate_token(self, token, scopes_required=None):
"""The actual implementation of validate_token."""
if scopes_required is None:
scopes_required = []
scopes_required = set(scopes_required)
token_info = None
valid_token = False
has_required_scopes = False
if token:
try:
token_info = self._get_token_info(token)
except Exception as ex:
token_info = {'active': False}
logger.error('ERROR: Unable to get token info')
logger.error(str(ex))
valid_token = token_info.get('active', False)
if 'aud' in token_info and \
current_app.config['OIDC_RESOURCE_CHECK_AUD']:
valid_audience = False
aud = token_info['aud']
clid = self.client_secrets['client_id']
if isinstance(aud, list):
valid_audience = clid in aud
else:
valid_audience = clid == aud
if not valid_audience:
logger.error('Refused token because of invalid '
'audience')
valid_token = False
if valid_token:
token_scopes = token_info.get('scope', '').split(' ')
else:
token_scopes = []
has_required_scopes = scopes_required.issubset(
set(token_scopes))
if not has_required_scopes:
logger.debug('Token missed required scopes')
if (valid_token and has_required_scopes):
g.oidc_token_info = token_info
return True
if not valid_token:
return 'Token required but invalid'
elif not has_required_scopes:
return 'Token does not have required scopes'
else:
return 'Something went wrong checking your token' |
def solve_with_glpsol(glp_prob):
"""Solve glpk problem with glpsol commandline solver. Mainly for testing purposes.
# Examples
# --------
# >>> problem = glp_create_prob()
# ... glp_read_lp(problem, None, "../tests/data/model.lp")
# ... solution = solve_with_glpsol(problem)
# ... print 'asdf'
# 'asdf'
# >>> print solution
# 0.839784
# Returns
# -------
# dict
# A dictionary containing the objective value (key ='objval')
# and variable primals.
"""
from swiglpk import glp_get_row_name, glp_get_col_name, glp_write_lp, glp_get_num_rows, glp_get_num_cols
row_ids = [glp_get_row_name(glp_prob, i) for i in range(1, glp_get_num_rows(glp_prob) + 1)]
col_ids = [glp_get_col_name(glp_prob, i) for i in range(1, glp_get_num_cols(glp_prob) + 1)]
with tempfile.NamedTemporaryFile(suffix=".lp", delete=True) as tmp_file:
tmp_file_name = tmp_file.name
glp_write_lp(glp_prob, None, tmp_file_name)
cmd = ['glpsol', '--lp', tmp_file_name, '-w', tmp_file_name + '.sol', '--log', '/dev/null']
term = check_output(cmd)
log.info(term)
try:
with open(tmp_file_name + '.sol') as sol_handle:
# print sol_handle.read()
solution = dict()
for i, line in enumerate(sol_handle.readlines()):
if i <= 1 or line == '\n':
pass
elif i <= len(row_ids):
solution[row_ids[i - 2]] = line.strip().split(' ')
elif i <= len(row_ids) + len(col_ids) + 1:
solution[col_ids[i - 2 - len(row_ids)]] = line.strip().split(' ')
else:
print(i)
print(line)
raise Exception("Argggh!")
finally:
os.remove(tmp_file_name + ".sol")
return solution | Solve glpk problem with glpsol commandline solver. Mainly for testing purposes.
# Examples
# --------
# >>> problem = glp_create_prob()
# ... glp_read_lp(problem, None, "../tests/data/model.lp")
# ... solution = solve_with_glpsol(problem)
# ... print 'asdf'
# 'asdf'
# >>> print solution
# 0.839784
# Returns
# -------
# dict
# A dictionary containing the objective value (key ='objval')
    # and variable primals. | Below is the instruction that describes the task:
### Input:
Solve glpk problem with glpsol commandline solver. Mainly for testing purposes.
# Examples
# --------
# >>> problem = glp_create_prob()
# ... glp_read_lp(problem, None, "../tests/data/model.lp")
# ... solution = solve_with_glpsol(problem)
# ... print 'asdf'
# 'asdf'
# >>> print solution
# 0.839784
# Returns
# -------
# dict
# A dictionary containing the objective value (key ='objval')
# and variable primals.
### Response:
def solve_with_glpsol(glp_prob):
"""Solve glpk problem with glpsol commandline solver. Mainly for testing purposes.
# Examples
# --------
# >>> problem = glp_create_prob()
# ... glp_read_lp(problem, None, "../tests/data/model.lp")
# ... solution = solve_with_glpsol(problem)
# ... print 'asdf'
# 'asdf'
# >>> print solution
# 0.839784
# Returns
# -------
# dict
# A dictionary containing the objective value (key ='objval')
# and variable primals.
"""
from swiglpk import glp_get_row_name, glp_get_col_name, glp_write_lp, glp_get_num_rows, glp_get_num_cols
row_ids = [glp_get_row_name(glp_prob, i) for i in range(1, glp_get_num_rows(glp_prob) + 1)]
col_ids = [glp_get_col_name(glp_prob, i) for i in range(1, glp_get_num_cols(glp_prob) + 1)]
with tempfile.NamedTemporaryFile(suffix=".lp", delete=True) as tmp_file:
tmp_file_name = tmp_file.name
glp_write_lp(glp_prob, None, tmp_file_name)
cmd = ['glpsol', '--lp', tmp_file_name, '-w', tmp_file_name + '.sol', '--log', '/dev/null']
term = check_output(cmd)
log.info(term)
try:
with open(tmp_file_name + '.sol') as sol_handle:
# print sol_handle.read()
solution = dict()
for i, line in enumerate(sol_handle.readlines()):
if i <= 1 or line == '\n':
pass
elif i <= len(row_ids):
solution[row_ids[i - 2]] = line.strip().split(' ')
elif i <= len(row_ids) + len(col_ids) + 1:
solution[col_ids[i - 2 - len(row_ids)]] = line.strip().split(' ')
else:
print(i)
print(line)
raise Exception("Argggh!")
finally:
os.remove(tmp_file_name + ".sol")
return solution |
def parent(groups,ID):
"""given a groups dictionary and an ID, return its actual parent ID."""
if ID in groups.keys():
return ID # already a parent
if not ID in groups.keys():
for actualParent in groups.keys():
if ID in groups[actualParent]:
return actualParent # found the actual parent
return None | given a groups dictionary and an ID, return its actual parent ID. | Below is the the instruction that describes the task:
### Input:
given a groups dictionary and an ID, return its actual parent ID.
### Response:
def parent(groups,ID):
"""given a groups dictionary and an ID, return its actual parent ID."""
if ID in groups.keys():
return ID # already a parent
if not ID in groups.keys():
for actualParent in groups.keys():
if ID in groups[actualParent]:
return actualParent # found the actual parent
return None |
def coords_from_query(query):
"""Transform a query line into a (lng, lat) pair of coordinates."""
try:
coords = json.loads(query)
except ValueError:
vals = re.split(r'[,\s]+', query.strip())
coords = [float(v) for v in vals]
return tuple(coords[:2]) | Transform a query line into a (lng, lat) pair of coordinates. | Below is the the instruction that describes the task:
### Input:
Transform a query line into a (lng, lat) pair of coordinates.
### Response:
def coords_from_query(query):
"""Transform a query line into a (lng, lat) pair of coordinates."""
try:
coords = json.loads(query)
except ValueError:
vals = re.split(r'[,\s]+', query.strip())
coords = [float(v) for v in vals]
return tuple(coords[:2]) |
def revnet_step(name, x, hparams, reverse=True):
"""One step of glow generative flow.
Actnorm + invertible 1X1 conv + affine_coupling.
Args:
name: used for variable scope.
x: input
hparams: coupling_width is the only hparam that is being used in
this function.
reverse: forward or reverse pass.
Returns:
z: Output of one step of reversible flow.
"""
with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
if hparams.coupling == "additive":
coupling_layer = functools.partial(
additive_coupling, name="additive", reverse=reverse,
mid_channels=hparams.coupling_width,
activation=hparams.activation, dropout=hparams.coupling_dropout)
else:
coupling_layer = functools.partial(
affine_coupling, name="affine", reverse=reverse,
mid_channels=hparams.coupling_width,
activation=hparams.activation, dropout=hparams.coupling_dropout)
ops = [
functools.partial(actnorm, name="actnorm", reverse=reverse),
functools.partial(invertible_1x1_conv, name="invertible",
reverse=reverse), coupling_layer]
if reverse:
ops = ops[::-1]
objective = 0.0
for op in ops:
x, curr_obj = op(x=x)
objective += curr_obj
return x, objective | One step of glow generative flow.
Actnorm + invertible 1X1 conv + affine_coupling.
Args:
name: used for variable scope.
x: input
hparams: coupling_width is the only hparam that is being used in
this function.
reverse: forward or reverse pass.
Returns:
z: Output of one step of reversible flow. | Below is the the instruction that describes the task:
### Input:
One step of glow generative flow.
Actnorm + invertible 1X1 conv + affine_coupling.
Args:
name: used for variable scope.
x: input
hparams: coupling_width is the only hparam that is being used in
this function.
reverse: forward or reverse pass.
Returns:
z: Output of one step of reversible flow.
### Response:
def revnet_step(name, x, hparams, reverse=True):
"""One step of glow generative flow.
Actnorm + invertible 1X1 conv + affine_coupling.
Args:
name: used for variable scope.
x: input
hparams: coupling_width is the only hparam that is being used in
this function.
reverse: forward or reverse pass.
Returns:
z: Output of one step of reversible flow.
"""
with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
if hparams.coupling == "additive":
coupling_layer = functools.partial(
additive_coupling, name="additive", reverse=reverse,
mid_channels=hparams.coupling_width,
activation=hparams.activation, dropout=hparams.coupling_dropout)
else:
coupling_layer = functools.partial(
affine_coupling, name="affine", reverse=reverse,
mid_channels=hparams.coupling_width,
activation=hparams.activation, dropout=hparams.coupling_dropout)
ops = [
functools.partial(actnorm, name="actnorm", reverse=reverse),
functools.partial(invertible_1x1_conv, name="invertible",
reverse=reverse), coupling_layer]
if reverse:
ops = ops[::-1]
objective = 0.0
for op in ops:
x, curr_obj = op(x=x)
objective += curr_obj
return x, objective |
def reduce_operators(source):
"""
Remove spaces between operators in *source* and returns the result.
Example::
def foo(foo, bar, blah):
test = "This is a %s" % foo
Will become::
def foo(foo,bar,blah):
test="This is a %s"%foo
.. note::
Also removes trailing commas and joins disjointed strings like
``("foo" "bar")``.
"""
io_obj = io.StringIO(source)
prev_tok = None
out_tokens = []
out = ""
last_lineno = -1
last_col = 0
nl_types = (tokenize.NL, tokenize.NEWLINE)
joining_strings = False
new_string = ""
for tok in tokenize.generate_tokens(io_obj.readline):
token_type = tok[0]
token_string = tok[1]
start_line, start_col = tok[2]
end_line, end_col = tok[3]
if start_line > last_lineno:
last_col = 0
if token_type != tokenize.OP:
if start_col > last_col and token_type not in nl_types:
if prev_tok[0] != tokenize.OP:
out += (" " * (start_col - last_col))
if token_type == tokenize.STRING:
if prev_tok[0] == tokenize.STRING:
# Join the strings into one
string_type = token_string[0] # '' or ""
prev_string_type = prev_tok[1][0]
out = out.rstrip(" ") # Remove any spaces we inserted prev
if not joining_strings:
# Remove prev token and start the new combined string
out = out[:(len(out)-len(prev_tok[1]))]
prev_string = prev_tok[1].strip(prev_string_type)
new_string = (
prev_string + token_string.strip(string_type))
joining_strings = True
else:
new_string += token_string.strip(string_type)
else:
if token_string in ('}', ')', ']'):
if prev_tok[1] == ',':
out = out.rstrip(',')
if joining_strings:
# NOTE: Using triple quotes so that this logic works with
# mixed strings using both single quotes and double quotes.
out += "'''" + new_string + "'''"
joining_strings = False
if token_string == '@': # Decorators need special handling
if prev_tok[0] == tokenize.NEWLINE:
# Ensure it gets indented properly
out += (" " * (start_col - last_col))
if not joining_strings:
out += token_string
last_col = end_col
last_lineno = end_line
prev_tok = tok
return out | Remove spaces between operators in *source* and returns the result.
Example::
def foo(foo, bar, blah):
test = "This is a %s" % foo
Will become::
def foo(foo,bar,blah):
test="This is a %s"%foo
.. note::
Also removes trailing commas and joins disjointed strings like
``("foo" "bar")``. | Below is the the instruction that describes the task:
### Input:
Remove spaces between operators in *source* and returns the result.
Example::
def foo(foo, bar, blah):
test = "This is a %s" % foo
Will become::
def foo(foo,bar,blah):
test="This is a %s"%foo
.. note::
Also removes trailing commas and joins disjointed strings like
``("foo" "bar")``.
### Response:
def reduce_operators(source):
"""
Remove spaces between operators in *source* and returns the result.
Example::
def foo(foo, bar, blah):
test = "This is a %s" % foo
Will become::
def foo(foo,bar,blah):
test="This is a %s"%foo
.. note::
Also removes trailing commas and joins disjointed strings like
``("foo" "bar")``.
"""
io_obj = io.StringIO(source)
prev_tok = None
out_tokens = []
out = ""
last_lineno = -1
last_col = 0
nl_types = (tokenize.NL, tokenize.NEWLINE)
joining_strings = False
new_string = ""
for tok in tokenize.generate_tokens(io_obj.readline):
token_type = tok[0]
token_string = tok[1]
start_line, start_col = tok[2]
end_line, end_col = tok[3]
if start_line > last_lineno:
last_col = 0
if token_type != tokenize.OP:
if start_col > last_col and token_type not in nl_types:
if prev_tok[0] != tokenize.OP:
out += (" " * (start_col - last_col))
if token_type == tokenize.STRING:
if prev_tok[0] == tokenize.STRING:
# Join the strings into one
string_type = token_string[0] # '' or ""
prev_string_type = prev_tok[1][0]
out = out.rstrip(" ") # Remove any spaces we inserted prev
if not joining_strings:
# Remove prev token and start the new combined string
out = out[:(len(out)-len(prev_tok[1]))]
prev_string = prev_tok[1].strip(prev_string_type)
new_string = (
prev_string + token_string.strip(string_type))
joining_strings = True
else:
new_string += token_string.strip(string_type)
else:
if token_string in ('}', ')', ']'):
if prev_tok[1] == ',':
out = out.rstrip(',')
if joining_strings:
# NOTE: Using triple quotes so that this logic works with
# mixed strings using both single quotes and double quotes.
out += "'''" + new_string + "'''"
joining_strings = False
if token_string == '@': # Decorators need special handling
if prev_tok[0] == tokenize.NEWLINE:
# Ensure it gets indented properly
out += (" " * (start_col - last_col))
if not joining_strings:
out += token_string
last_col = end_col
last_lineno = end_line
prev_tok = tok
return out |
def output_channels(self):
"""Returns the number of output channels."""
if callable(self._output_channels):
self._output_channels = self._output_channels()
# Channel must be integer.
self._output_channels = int(self._output_channels)
return self._output_channels | Returns the number of output channels. | Below is the the instruction that describes the task:
### Input:
Returns the number of output channels.
### Response:
def output_channels(self):
"""Returns the number of output channels."""
if callable(self._output_channels):
self._output_channels = self._output_channels()
# Channel must be integer.
self._output_channels = int(self._output_channels)
return self._output_channels |
def slice(self, tf_tensor, tensor_shape):
""""Slice out the corresponding part of tensor given the pnum variable."""
tensor_layout = self.tensor_layout(tensor_shape)
if tensor_layout.is_fully_replicated:
return self.LaidOutTensor([tf_tensor])
else:
slice_shape = self.slice_shape(tensor_shape)
slice_begins = [
self.slice_begin(tensor_shape, pnum) for pnum in xrange(self.size)
]
slice_begins_tensor = tf.stack(slice_begins)
# slice on source device
selected_slice_begin = tf.gather(slice_begins_tensor, self.pnum_tensor)
return self.LaidOutTensor(
[tf.slice(tf_tensor, selected_slice_begin, slice_shape)]) | Slice out the corresponding part of tensor given the pnum variable. | Below is the the instruction that describes the task:
### Input:
Slice out the corresponding part of tensor given the pnum variable.
### Response:
def slice(self, tf_tensor, tensor_shape):
""""Slice out the corresponding part of tensor given the pnum variable."""
tensor_layout = self.tensor_layout(tensor_shape)
if tensor_layout.is_fully_replicated:
return self.LaidOutTensor([tf_tensor])
else:
slice_shape = self.slice_shape(tensor_shape)
slice_begins = [
self.slice_begin(tensor_shape, pnum) for pnum in xrange(self.size)
]
slice_begins_tensor = tf.stack(slice_begins)
# slice on source device
selected_slice_begin = tf.gather(slice_begins_tensor, self.pnum_tensor)
return self.LaidOutTensor(
[tf.slice(tf_tensor, selected_slice_begin, slice_shape)]) |
def remove_plugin_filepaths(self, filepaths):
"""
Removes `filepaths` from `self.plugin_filepaths`.
Recommend passing in absolute filepaths. Method will
attempt to convert to absolute paths if not passed in.
`filepaths` can be a single object or an iterable.
"""
filepaths = util.to_absolute_paths(filepaths)
self.plugin_filepaths = util.remove_from_set(self.plugin_filepaths,
filepaths) | Removes `filepaths` from `self.plugin_filepaths`.
Recommend passing in absolute filepaths. Method will
attempt to convert to absolute paths if not passed in.
`filepaths` can be a single object or an iterable. | Below is the the instruction that describes the task:
### Input:
Removes `filepaths` from `self.plugin_filepaths`.
Recommend passing in absolute filepaths. Method will
attempt to convert to absolute paths if not passed in.
`filepaths` can be a single object or an iterable.
### Response:
def remove_plugin_filepaths(self, filepaths):
"""
Removes `filepaths` from `self.plugin_filepaths`.
Recommend passing in absolute filepaths. Method will
attempt to convert to absolute paths if not passed in.
`filepaths` can be a single object or an iterable.
"""
filepaths = util.to_absolute_paths(filepaths)
self.plugin_filepaths = util.remove_from_set(self.plugin_filepaths,
filepaths) |
def publish_workflow_status(self, workflow_uuid, status,
logs='', message=None):
"""Publish workflow status using the configured.
:param workflow_uudid: String which represents the workflow UUID.
:param status: Integer which represents the status of the workflow,
this is defined in the `reana-db` `Workflow` models.
:param logs: String which represents the logs which the workflow
has produced as output.
:param message: Dictionary which includes additional information
can be attached such as the overall progress of the workflow.
"""
msg = {
"workflow_uuid": workflow_uuid,
"logs": logs,
"status": status,
"message": message
}
self._publish(msg) | Publish workflow status using the configured.
:param workflow_uudid: String which represents the workflow UUID.
:param status: Integer which represents the status of the workflow,
this is defined in the `reana-db` `Workflow` models.
:param logs: String which represents the logs which the workflow
has produced as output.
:param message: Dictionary which includes additional information
can be attached such as the overall progress of the workflow. | Below is the the instruction that describes the task:
### Input:
Publish workflow status using the configured.
:param workflow_uudid: String which represents the workflow UUID.
:param status: Integer which represents the status of the workflow,
this is defined in the `reana-db` `Workflow` models.
:param logs: String which represents the logs which the workflow
has produced as output.
:param message: Dictionary which includes additional information
can be attached such as the overall progress of the workflow.
### Response:
def publish_workflow_status(self, workflow_uuid, status,
logs='', message=None):
"""Publish workflow status using the configured.
:param workflow_uudid: String which represents the workflow UUID.
:param status: Integer which represents the status of the workflow,
this is defined in the `reana-db` `Workflow` models.
:param logs: String which represents the logs which the workflow
has produced as output.
:param message: Dictionary which includes additional information
can be attached such as the overall progress of the workflow.
"""
msg = {
"workflow_uuid": workflow_uuid,
"logs": logs,
"status": status,
"message": message
}
self._publish(msg) |
def azm(self):
"""Corrected azimuth, taking into account backsight, declination, and compass corrections."""
azm1 = self.get('BEARING', None)
azm2 = self.get('AZM2', None)
if azm1 is None and azm2 is None:
return None
if azm2 is None:
return azm1 + self.declination
if azm1 is None:
return (azm2 + 180) % 360 + self.declination
return (azm1 + (azm2 + 180) % 360) / 2.0 + self.declination | Corrected azimuth, taking into account backsight, declination, and compass corrections. | Below is the the instruction that describes the task:
### Input:
Corrected azimuth, taking into account backsight, declination, and compass corrections.
### Response:
def azm(self):
"""Corrected azimuth, taking into account backsight, declination, and compass corrections."""
azm1 = self.get('BEARING', None)
azm2 = self.get('AZM2', None)
if azm1 is None and azm2 is None:
return None
if azm2 is None:
return azm1 + self.declination
if azm1 is None:
return (azm2 + 180) % 360 + self.declination
return (azm1 + (azm2 + 180) % 360) / 2.0 + self.declination |
def render_tile(cells, ti, tj, render, params, metadata, layout, summary):
"""
Render each cell in the tile and stitch it into a single image
"""
image_size = params["cell_size"] * params["n_tile"]
tile = Image.new("RGB", (image_size, image_size), (255,255,255))
keys = cells.keys()
for i,key in enumerate(keys):
print("cell", i+1, "/", len(keys), end='\r')
cell_image = render(cells[key], params, metadata, layout, summary)
# stitch this rendering into the tile image
ci = key[0] % params["n_tile"]
cj = key[1] % params["n_tile"]
xmin = ci*params["cell_size"]
ymin = cj*params["cell_size"]
xmax = (ci+1)*params["cell_size"]
ymax = (cj+1)*params["cell_size"]
if params.get("scale_density", False):
density = len(cells[key]["gi"])
# scale = density/summary["max_density"]
scale = math.log(density)/(math.log(summary["max_density"]) or 1)
owidth = xmax - xmin
width = int(round(owidth * scale))
if(width < 1):
width = 1
offsetL = int(round((owidth - width)/2))
offsetR = owidth - width - offsetL # handle odd numbers
# print("\n")
# print("width", width, offsetL, offsetR)
box = [xmin + offsetL, ymin + offsetL, xmax - offsetR, ymax - offsetR]
resample = params.get("scale_type", Image.NEAREST)
cell_image = cell_image.resize(size=(width,width), resample=resample)
# print(cell_image)
else:
box = [xmin, ymin, xmax, ymax]
# print("box", box)
tile.paste(cell_image, box)
print("\n")
return tile | Render each cell in the tile and stitch it into a single image | Below is the the instruction that describes the task:
### Input:
Render each cell in the tile and stitch it into a single image
### Response:
def render_tile(cells, ti, tj, render, params, metadata, layout, summary):
"""
Render each cell in the tile and stitch it into a single image
"""
image_size = params["cell_size"] * params["n_tile"]
tile = Image.new("RGB", (image_size, image_size), (255,255,255))
keys = cells.keys()
for i,key in enumerate(keys):
print("cell", i+1, "/", len(keys), end='\r')
cell_image = render(cells[key], params, metadata, layout, summary)
# stitch this rendering into the tile image
ci = key[0] % params["n_tile"]
cj = key[1] % params["n_tile"]
xmin = ci*params["cell_size"]
ymin = cj*params["cell_size"]
xmax = (ci+1)*params["cell_size"]
ymax = (cj+1)*params["cell_size"]
if params.get("scale_density", False):
density = len(cells[key]["gi"])
# scale = density/summary["max_density"]
scale = math.log(density)/(math.log(summary["max_density"]) or 1)
owidth = xmax - xmin
width = int(round(owidth * scale))
if(width < 1):
width = 1
offsetL = int(round((owidth - width)/2))
offsetR = owidth - width - offsetL # handle odd numbers
# print("\n")
# print("width", width, offsetL, offsetR)
box = [xmin + offsetL, ymin + offsetL, xmax - offsetR, ymax - offsetR]
resample = params.get("scale_type", Image.NEAREST)
cell_image = cell_image.resize(size=(width,width), resample=resample)
# print(cell_image)
else:
box = [xmin, ymin, xmax, ymax]
# print("box", box)
tile.paste(cell_image, box)
print("\n")
return tile |
def do_urlize(eval_ctx, value, trim_url_limit=None, nofollow=False,
target=None, rel=None):
"""Converts URLs in plain text into clickable links.
If you pass the filter an additional integer it will shorten the urls
to that number. Also a third argument exists that makes the urls
"nofollow":
.. sourcecode:: jinja
{{ mytext|urlize(40, true) }}
links are shortened to 40 chars and defined with rel="nofollow"
If *target* is specified, the ``target`` attribute will be added to the
``<a>`` tag:
.. sourcecode:: jinja
{{ mytext|urlize(40, target='_blank') }}
.. versionchanged:: 2.8+
The *target* parameter was added.
"""
policies = eval_ctx.environment.policies
rel = set((rel or '').split() or [])
if nofollow:
rel.add('nofollow')
rel.update((policies['urlize.rel'] or '').split())
if target is None:
target = policies['urlize.target']
rel = ' '.join(sorted(rel)) or None
rv = urlize(value, trim_url_limit, rel=rel, target=target)
if eval_ctx.autoescape:
rv = Markup(rv)
return rv | Converts URLs in plain text into clickable links.
If you pass the filter an additional integer it will shorten the urls
to that number. Also a third argument exists that makes the urls
"nofollow":
.. sourcecode:: jinja
{{ mytext|urlize(40, true) }}
links are shortened to 40 chars and defined with rel="nofollow"
If *target* is specified, the ``target`` attribute will be added to the
``<a>`` tag:
.. sourcecode:: jinja
{{ mytext|urlize(40, target='_blank') }}
.. versionchanged:: 2.8+
The *target* parameter was added. | Below is the the instruction that describes the task:
### Input:
Converts URLs in plain text into clickable links.
If you pass the filter an additional integer it will shorten the urls
to that number. Also a third argument exists that makes the urls
"nofollow":
.. sourcecode:: jinja
{{ mytext|urlize(40, true) }}
links are shortened to 40 chars and defined with rel="nofollow"
If *target* is specified, the ``target`` attribute will be added to the
``<a>`` tag:
.. sourcecode:: jinja
{{ mytext|urlize(40, target='_blank') }}
.. versionchanged:: 2.8+
The *target* parameter was added.
### Response:
def do_urlize(eval_ctx, value, trim_url_limit=None, nofollow=False,
target=None, rel=None):
"""Converts URLs in plain text into clickable links.
If you pass the filter an additional integer it will shorten the urls
to that number. Also a third argument exists that makes the urls
"nofollow":
.. sourcecode:: jinja
{{ mytext|urlize(40, true) }}
links are shortened to 40 chars and defined with rel="nofollow"
If *target* is specified, the ``target`` attribute will be added to the
``<a>`` tag:
.. sourcecode:: jinja
{{ mytext|urlize(40, target='_blank') }}
.. versionchanged:: 2.8+
The *target* parameter was added.
"""
policies = eval_ctx.environment.policies
rel = set((rel or '').split() or [])
if nofollow:
rel.add('nofollow')
rel.update((policies['urlize.rel'] or '').split())
if target is None:
target = policies['urlize.target']
rel = ' '.join(sorted(rel)) or None
rv = urlize(value, trim_url_limit, rel=rel, target=target)
if eval_ctx.autoescape:
rv = Markup(rv)
return rv |
def sext(self, width):
"""Sign-extends a word to a larger width. It is an error to specify
a smaller width (use ``extract`` instead to crop off the extra bits).
"""
width = operator.index(width)
if width < self._width:
raise ValueError('sign extending to a smaller width')
return BinWord(width, self.to_sint(), trunc=True) | Sign-extends a word to a larger width. It is an error to specify
a smaller width (use ``extract`` instead to crop off the extra bits). | Below is the the instruction that describes the task:
### Input:
Sign-extends a word to a larger width. It is an error to specify
a smaller width (use ``extract`` instead to crop off the extra bits).
### Response:
def sext(self, width):
"""Sign-extends a word to a larger width. It is an error to specify
a smaller width (use ``extract`` instead to crop off the extra bits).
"""
width = operator.index(width)
if width < self._width:
raise ValueError('sign extending to a smaller width')
return BinWord(width, self.to_sint(), trunc=True) |
def _compress(self):
"""Internal method to compress the cache. This method will
expire any old items in the cache, making the cache smaller"""
# Don't compress too often
now = time.time()
if self._last_compression + self._compression_timer < now:
self._last_compression = now
for key in list(self._store.keys()):
self.get(key) | Internal method to compress the cache. This method will
expire any old items in the cache, making the cache smaller | Below is the the instruction that describes the task:
### Input:
Internal method to compress the cache. This method will
expire any old items in the cache, making the cache smaller
### Response:
def _compress(self):
"""Internal method to compress the cache. This method will
expire any old items in the cache, making the cache smaller"""
# Don't compress too often
now = time.time()
if self._last_compression + self._compression_timer < now:
self._last_compression = now
for key in list(self._store.keys()):
self.get(key) |
def human_sorting(the_list):
"""Sort the given list in the way that humans expect.
From http://stackoverflow.com/a/4623518
:param the_list: The list to sort.
:type the_list: list
:return: The new sorted list.
:rtype: list
"""
def try_int(s):
try:
return int(s)
except ValueError:
return s
def alphanum_key(s):
"""Turn a string into a list of string and number chunks.
For instance : "z23a" -> ["z", 23, "a"]
"""
return [try_int(c) for c in re.split('([0-9]+)', s)]
the_list.sort(key=alphanum_key)
return the_list | Sort the given list in the way that humans expect.
From http://stackoverflow.com/a/4623518
:param the_list: The list to sort.
:type the_list: list
:return: The new sorted list.
:rtype: list | Below is the the instruction that describes the task:
### Input:
Sort the given list in the way that humans expect.
From http://stackoverflow.com/a/4623518
:param the_list: The list to sort.
:type the_list: list
:return: The new sorted list.
:rtype: list
### Response:
def human_sorting(the_list):
"""Sort the given list in the way that humans expect.
From http://stackoverflow.com/a/4623518
:param the_list: The list to sort.
:type the_list: list
:return: The new sorted list.
:rtype: list
"""
def try_int(s):
try:
return int(s)
except ValueError:
return s
def alphanum_key(s):
"""Turn a string into a list of string and number chunks.
For instance : "z23a" -> ["z", 23, "a"]
"""
return [try_int(c) for c in re.split('([0-9]+)', s)]
the_list.sort(key=alphanum_key)
return the_list |
def getRnaQuantificationSetByName(self, name):
"""
Returns the RnaQuantification set with the specified name, or raises
an exception otherwise.
"""
if name not in self._rnaQuantificationSetNameMap:
raise exceptions.RnaQuantificationSetNameNotFoundException(name)
return self._rnaQuantificationSetNameMap[name] | Returns the RnaQuantification set with the specified name, or raises
an exception otherwise. | Below is the the instruction that describes the task:
### Input:
Returns the RnaQuantification set with the specified name, or raises
an exception otherwise.
### Response:
def getRnaQuantificationSetByName(self, name):
"""
Returns the RnaQuantification set with the specified name, or raises
an exception otherwise.
"""
if name not in self._rnaQuantificationSetNameMap:
raise exceptions.RnaQuantificationSetNameNotFoundException(name)
return self._rnaQuantificationSetNameMap[name] |
def apply_func_to_select_indices(self, axis, func, indices, keep_remaining=False):
"""Applies a function to select indices.
Note: Your internal function must take a kwarg `internal_indices` for
this to work correctly. This prevents information leakage of the
internal index to the external representation.
Args:
axis: The axis to apply the func over.
func: The function to apply to these indices.
indices: The indices to apply the function to.
keep_remaining: Whether or not to keep the other partitions.
Some operations may want to drop the remaining partitions and
keep only the results.
Returns:
A new BaseFrameManager object, the type of object that called this.
"""
if self.partitions.size == 0:
return np.array([[]])
# Handling dictionaries has to be done differently, but we still want
# to figure out the partitions that need to be applied to, so we will
# store the dictionary in a separate variable and assign `indices` to
# the keys to handle it the same as we normally would.
if isinstance(indices, dict):
dict_indices = indices
indices = list(indices.keys())
else:
dict_indices = None
if not isinstance(indices, list):
indices = [indices]
partitions_dict = self._get_dict_of_block_index(
axis, indices, ordered=not keep_remaining
)
if not axis:
partitions_for_apply = self.partitions.T
else:
partitions_for_apply = self.partitions
# We may have a command to perform different functions on different
# columns at the same time. We attempt to handle this as efficiently as
# possible here. Functions that use this in the dictionary format must
# accept a keyword argument `func_dict`.
if dict_indices is not None:
def local_to_global_idx(partition_id, local_idx):
if partition_id == 0:
return local_idx
if axis == 0:
cumulative_axis = np.cumsum(self.block_widths)
else:
cumulative_axis = np.cumsum(self.block_lengths)
return cumulative_axis[partition_id - 1] + local_idx
if not keep_remaining:
result = np.array(
[
self._apply_func_to_list_of_partitions(
func,
partitions_for_apply[o_idx],
func_dict={
i_idx: dict_indices[local_to_global_idx(o_idx, i_idx)]
for i_idx in list_to_apply
if i_idx >= 0
},
)
for o_idx, list_to_apply in partitions_dict
]
)
else:
result = np.array(
[
partitions_for_apply[i]
if i not in partitions_dict
else self._apply_func_to_list_of_partitions(
func,
partitions_for_apply[i],
func_dict={
idx: dict_indices[local_to_global_idx(i, idx)]
for idx in partitions_dict[i]
if idx >= 0
},
)
for i in range(len(partitions_for_apply))
]
)
else:
if not keep_remaining:
# We are passing internal indices in here. In order for func to
# actually be able to use this information, it must be able to take in
# the internal indices. This might mean an iloc in the case of Pandas
# or some other way to index into the internal representation.
result = np.array(
[
self._apply_func_to_list_of_partitions(
func,
partitions_for_apply[idx],
internal_indices=list_to_apply,
)
for idx, list_to_apply in partitions_dict
]
)
else:
# The difference here is that we modify a subset and return the
# remaining (non-updated) blocks in their original position.
result = np.array(
[
partitions_for_apply[i]
if i not in partitions_dict
else self._apply_func_to_list_of_partitions(
func,
partitions_for_apply[i],
internal_indices=partitions_dict[i],
)
for i in range(len(partitions_for_apply))
]
)
return (
self.__constructor__(result.T) if not axis else self.__constructor__(result)
) | Applies a function to select indices.
Note: Your internal function must take a kwarg `internal_indices` for
this to work correctly. This prevents information leakage of the
internal index to the external representation.
Args:
axis: The axis to apply the func over.
func: The function to apply to these indices.
indices: The indices to apply the function to.
keep_remaining: Whether or not to keep the other partitions.
Some operations may want to drop the remaining partitions and
keep only the results.
Returns:
A new BaseFrameManager object, the type of object that called this. | Below is the the instruction that describes the task:
### Input:
Applies a function to select indices.
Note: Your internal function must take a kwarg `internal_indices` for
this to work correctly. This prevents information leakage of the
internal index to the external representation.
Args:
axis: The axis to apply the func over.
func: The function to apply to these indices.
indices: The indices to apply the function to.
keep_remaining: Whether or not to keep the other partitions.
Some operations may want to drop the remaining partitions and
keep only the results.
Returns:
A new BaseFrameManager object, the type of object that called this.
### Response:
def apply_func_to_select_indices(self, axis, func, indices, keep_remaining=False):
    """Applies a function to select indices.
    Note: Your internal function must take a kwarg `internal_indices` for
    this to work correctly. This prevents information leakage of the
    internal index to the external representation.
    Args:
        axis: The axis to apply the func over.
        func: The function to apply to these indices.
        indices: The indices to apply the function to.
        keep_remaining: Whether or not to keep the other partitions.
            Some operations may want to drop the remaining partitions and
            keep only the results.
    Returns:
        A new BaseFrameManager object, the type of object that called this.
    """
    # Nothing to do over an empty partition grid.
    if self.partitions.size == 0:
        return np.array([[]])
    # Handling dictionaries has to be done differently, but we still want
    # to figure out the partitions that need to be applied to, so we will
    # store the dictionary in a separate variable and assign `indices` to
    # the keys to handle it the same as we normally would.
    if isinstance(indices, dict):
        dict_indices = indices
        indices = list(indices.keys())
    else:
        dict_indices = None
    if not isinstance(indices, list):
        indices = [indices]
    # Maps partition id -> the local (within-partition) indices to touch.
    partitions_dict = self._get_dict_of_block_index(
        axis, indices, ordered=not keep_remaining
    )
    # Transpose so that iteration below always walks along the apply axis.
    if not axis:
        partitions_for_apply = self.partitions.T
    else:
        partitions_for_apply = self.partitions
    # We may have a command to perform different functions on different
    # columns at the same time. We attempt to handle this as efficiently as
    # possible here. Functions that use this in the dictionary format must
    # accept a keyword argument `func_dict`.
    if dict_indices is not None:
        # Convert a (partition id, local index) pair back into a global
        # index so it can be looked up in the caller-supplied dict.
        def local_to_global_idx(partition_id, local_idx):
            if partition_id == 0:
                return local_idx
            if axis == 0:
                cumulative_axis = np.cumsum(self.block_widths)
            else:
                cumulative_axis = np.cumsum(self.block_lengths)
            return cumulative_axis[partition_id - 1] + local_idx
        if not keep_remaining:
            # Keep only the partitions that were actually touched.
            result = np.array(
                [
                    self._apply_func_to_list_of_partitions(
                        func,
                        partitions_for_apply[o_idx],
                        func_dict={
                            i_idx: dict_indices[local_to_global_idx(o_idx, i_idx)]
                            for i_idx in list_to_apply
                            if i_idx >= 0
                        },
                    )
                    for o_idx, list_to_apply in partitions_dict
                ]
            )
        else:
            # Untouched partitions are passed through in place.
            result = np.array(
                [
                    partitions_for_apply[i]
                    if i not in partitions_dict
                    else self._apply_func_to_list_of_partitions(
                        func,
                        partitions_for_apply[i],
                        func_dict={
                            idx: dict_indices[local_to_global_idx(i, idx)]
                            for idx in partitions_dict[i]
                            if idx >= 0
                        },
                    )
                    for i in range(len(partitions_for_apply))
                ]
            )
    else:
        if not keep_remaining:
            # We are passing internal indices in here. In order for func to
            # actually be able to use this information, it must be able to take in
            # the internal indices. This might mean an iloc in the case of Pandas
            # or some other way to index into the internal representation.
            result = np.array(
                [
                    self._apply_func_to_list_of_partitions(
                        func,
                        partitions_for_apply[idx],
                        internal_indices=list_to_apply,
                    )
                    for idx, list_to_apply in partitions_dict
                ]
            )
        else:
            # The difference here is that we modify a subset and return the
            # remaining (non-updated) blocks in their original position.
            result = np.array(
                [
                    partitions_for_apply[i]
                    if i not in partitions_dict
                    else self._apply_func_to_list_of_partitions(
                        func,
                        partitions_for_apply[i],
                        internal_indices=partitions_dict[i],
                    )
                    for i in range(len(partitions_for_apply))
                ]
            )
    # Undo the transpose applied above so the grid keeps its original layout.
    return (
        self.__constructor__(result.T) if not axis else self.__constructor__(result)
    )
def trigger_event(self, event, *args):
"""Dispatch an event to the proper handler method.
In the most common usage, this method is not overloaded by subclasses,
as it performs the routing of events to methods. However, this
method can be overriden if special dispatching rules are needed, or if
having a single method that catches all events is desired.
"""
handler_name = 'on_' + event
if hasattr(self, handler_name):
return getattr(self, handler_name)(*args) | Dispatch an event to the proper handler method.
In the most common usage, this method is not overloaded by subclasses,
as it performs the routing of events to methods. However, this
method can be overridden if special dispatching rules are needed, or if
having a single method that catches all events is desired. | Below is the the instruction that describes the task:
### Input:
Dispatch an event to the proper handler method.
In the most common usage, this method is not overloaded by subclasses,
as it performs the routing of events to methods. However, this
method can be overridden if special dispatching rules are needed, or if
having a single method that catches all events is desired.
### Response:
def trigger_event(self, event, *args):
    """Route *event* to its ``on_<event>`` handler method, if one exists.

    Subclasses normally rely on this default routing rather than
    overriding it, but it may be overridden to implement custom dispatch
    rules or a single catch-all handler. Returns the handler's return
    value, or ``None`` when no handler is defined.
    """
    method = 'on_' + event
    if not hasattr(self, method):
        return None
    return getattr(self, method)(*args)
def timeit(stat_tracker_func, name):
"""
Pass in a function and the name of the stat
Will time the function that this is a decorator to and send
the `name` as well as the value (in seconds) to `stat_tracker_func`
`stat_tracker_func` can be used to either print out the data or save it
"""
def _timeit(func):
def wrapper(*args, **kw):
start_time = time.time()
result = func(*args, **kw)
stop_time = time.time()
stat_tracker_func(name, stop_time - start_time)
return result
return wrapper
return _timeit | Pass in a function and the name of the stat
Will time the function that this is a decorator to and send
the `name` as well as the value (in seconds) to `stat_tracker_func`
`stat_tracker_func` can be used to either print out the data or save it | Below is the the instruction that describes the task:
### Input:
Pass in a function and the name of the stat
Will time the function that this is a decorator to and send
the `name` as well as the value (in seconds) to `stat_tracker_func`
`stat_tracker_func` can be used to either print out the data or save it
### Response:
def timeit(stat_tracker_func, name):
    """
    Decorator factory that times each call of the decorated function.

    Pass in a function and the name of the stat. The decorated function is
    timed on every call and `name` plus the elapsed time (in seconds) are
    sent to `stat_tracker_func`, which can print the data or save it.

    Args:
        stat_tracker_func: Callable invoked as
            ``stat_tracker_func(name, elapsed_seconds)`` after each call.
        name: Label of the stat reported to ``stat_tracker_func``.

    Returns:
        A decorator that wraps a function to report its wall-clock duration.
    """
    from functools import wraps

    def _timeit(func):
        # ``wraps`` preserves the wrapped function's name and docstring,
        # which the previous implementation discarded.
        @wraps(func)
        def wrapper(*args, **kw):
            start_time = time.time()
            result = func(*args, **kw)
            stop_time = time.time()
            stat_tracker_func(name, stop_time - start_time)
            return result
        return wrapper
    return _timeit
def file_exists(
download_url,
filename=None,
decompress=False,
subdir=None):
"""
Return True if a local file corresponding to these arguments
exists.
"""
filename = build_local_filename(download_url, filename, decompress)
full_path = build_path(filename, subdir)
return os.path.exists(full_path) | Return True if a local file corresponding to these arguments
exists. | Below is the instruction that describes the task:
### Input:
Return True if a local file corresponding to these arguments
exists.
### Response:
def file_exists(
        download_url,
        filename=None,
        decompress=False,
        subdir=None):
    """
    Return True if a local file corresponding to these arguments
    exists.

    Resolves the same local path a download with identical arguments
    would produce and checks whether that path is present on disk.
    """
    local_name = build_local_filename(download_url, filename, decompress)
    return os.path.exists(build_path(local_name, subdir))
def _check_remote_command(self, destination, timeout_ms, success_msgs=None):
"""Open a stream to destination, check for remote errors.
Used for reboot, remount, and root services. If this method returns, the
command was successful, otherwise an appropriate error will have been
raised.
Args:
destination: Stream destination to open.
timeout_ms: Timeout in milliseconds for the operation.
success_msgs: If provided, a list of messages that, if returned from the
device, indicate success, so don't treat them as errors.
Raises:
AdbRemoteError: If the remote command fails, will contain any message we
got back from the device.
AdbStreamUnavailableError: The service requested isn't supported.
"""
timeout = timeouts.PolledTimeout.from_millis(timeout_ms)
stream = self._adb_connection.open_stream(destination, timeout)
if not stream:
raise usb_exceptions.AdbStreamUnavailableError(
'Service %s not supported', destination)
try:
message = stream.read(timeout_ms=timeout)
# Some commands report success messages, ignore them.
if any([m in message for m in success_msgs]):
return
except usb_exceptions.CommonUsbError:
if destination.startswith('reboot:'):
# We expect this if the device is rebooting.
return
raise
raise usb_exceptions.AdbRemoteError('Device message: %s', message) | Open a stream to destination, check for remote errors.
Used for reboot, remount, and root services. If this method returns, the
command was successful, otherwise an appropriate error will have been
raised.
Args:
destination: Stream destination to open.
timeout_ms: Timeout in milliseconds for the operation.
success_msgs: If provided, a list of messages that, if returned from the
device, indicate success, so don't treat them as errors.
Raises:
AdbRemoteError: If the remote command fails, will contain any message we
got back from the device.
AdbStreamUnavailableError: The service requested isn't supported. | Below is the the instruction that describes the task:
### Input:
Open a stream to destination, check for remote errors.
Used for reboot, remount, and root services. If this method returns, the
command was successful, otherwise an appropriate error will have been
raised.
Args:
destination: Stream destination to open.
timeout_ms: Timeout in milliseconds for the operation.
success_msgs: If provided, a list of messages that, if returned from the
device, indicate success, so don't treat them as errors.
Raises:
AdbRemoteError: If the remote command fails, will contain any message we
got back from the device.
AdbStreamUnavailableError: The service requested isn't supported.
### Response:
def _check_remote_command(self, destination, timeout_ms, success_msgs=None):
    """Open a stream to destination, check for remote errors.
    Used for reboot, remount, and root services. If this method returns, the
    command was successful, otherwise an appropriate error will have been
    raised.
    Args:
        destination: Stream destination to open.
        timeout_ms: Timeout in milliseconds for the operation.
        success_msgs: If provided, a list of messages that, if returned from the
            device, indicate success, so don't treat them as errors.
    Raises:
        AdbRemoteError: If the remote command fails, will contain any message we
            got back from the device.
        AdbStreamUnavailableError: The service requested isn't supported.
    """
    timeout = timeouts.PolledTimeout.from_millis(timeout_ms)
    stream = self._adb_connection.open_stream(destination, timeout)
    if not stream:
        raise usb_exceptions.AdbStreamUnavailableError(
            'Service %s not supported', destination)
    try:
        message = stream.read(timeout_ms=timeout)
        # Some commands report success messages, ignore them. Guard against
        # success_msgs being None (the documented default), which previously
        # raised TypeError here instead of reporting the device's message.
        if success_msgs and any(m in message for m in success_msgs):
            return
    except usb_exceptions.CommonUsbError:
        if destination.startswith('reboot:'):
            # We expect this if the device is rebooting.
            return
        raise
    raise usb_exceptions.AdbRemoteError('Device message: %s', message)
def find_in_tree(tree, key, perfect=False):
"""
Helper to perform find in dictionary tree.
"""
if len(key) == 0:
if tree['item'] is not None:
return tree['item'], ()
else:
for i in range(len(tree['subtrees'])):
if not perfect and tree['subtrees'][i][0] == '*':
item, trace = find_in_tree(tree['subtrees'][i][1],
(), perfect)
return item, (i,) + trace
raise KeyError(key)
else:
head, tail = key[0], key[1:]
for i in range(len(tree['subtrees'])):
if tree['subtrees'][i][0] == head or \
not perfect and tree['subtrees'][i][0] == '*':
try:
item, trace = find_in_tree(tree['subtrees'][i][1],
tail, perfect)
return item, (i,) + trace
except KeyError:
pass
raise KeyError(key) | Helper to perform find in dictionary tree. | Below is the the instruction that describes the task:
### Input:
Helper to perform find in dictionary tree.
### Response:
def find_in_tree(tree, key, perfect=False):
    """
    Helper to perform find in dictionary tree.

    Returns a ``(item, trace)`` pair where ``trace`` is the tuple of
    subtree positions followed to reach the item. Unless ``perfect`` is
    set, ``'*'``-labelled subtrees act as wildcards. Raises ``KeyError``
    when no match exists.
    """
    if not key:
        if tree['item'] is not None:
            return tree['item'], ()
        # No item stored here: unless a perfect match is required,
        # descend into the first wildcard branch.
        for pos, entry in enumerate(tree['subtrees']):
            if not perfect and entry[0] == '*':
                item, trace = find_in_tree(entry[1], (), perfect)
                return item, (pos,) + trace
        raise KeyError(key)
    head, tail = key[0], key[1:]
    for pos, entry in enumerate(tree['subtrees']):
        label = entry[0]
        if label == head or (not perfect and label == '*'):
            try:
                item, trace = find_in_tree(entry[1], tail, perfect)
                return item, (pos,) + trace
            except KeyError:
                # Dead end down this branch; try the next candidate.
                continue
    raise KeyError(key)
def Eg(self, **kwargs):
'''
Returns the strain-shifted bandgap, ``Eg``.
'''
return self.unstrained.Eg(**kwargs) + self.Eg_strain_shift(**kwargs) | Returns the strain-shifted bandgap, ``Eg``. | Below is the the instruction that describes the task:
### Input:
Returns the strain-shifted bandgap, ``Eg``.
### Response:
def Eg(self, **kwargs):
    '''
    Returns the strain-shifted bandgap, ``Eg``: the unstrained bandgap
    plus the strain-induced shift.
    '''
    unstrained_gap = self.unstrained.Eg(**kwargs)
    return unstrained_gap + self.Eg_strain_shift(**kwargs)
def _mkdir_p(path):
"""mkdir -p path"""
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
else:
logger.info("New: %s%s", path, os.path.sep) | mkdir -p path | Below is the the instruction that describes the task:
### Input:
mkdir -p path
### Response:
def _mkdir_p(path):
"""mkdir -p path"""
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
else:
logger.info("New: %s%s", path, os.path.sep) |
def uma_rp_get_claims_gathering_url(self, ticket):
"""UMA RP function to get the claims gathering URL.
Parameters:
* **ticket (str):** ticket to pass to the auth server. for 90% of the cases, this will be obtained from 'need_info' error of get_rpt
Returns:
**string** specifying the claims gathering url
"""
params = {
'oxd_id': self.oxd_id,
'claims_redirect_uri': self.config.get('client',
'claims_redirect_uri'),
'ticket': ticket
}
logger.debug("Sending command `uma_rp_get_claims_gathering_url` with "
"params %s", params)
response = self.msgr.request("uma_rp_get_claims_gathering_url",
**params)
logger.debug("Received response: %s", response)
if response['status'] == 'error':
raise OxdServerError(response['data'])
return response['data']['url'] | UMA RP function to get the claims gathering URL.
Parameters:
* **ticket (str):** ticket to pass to the auth server. for 90% of the cases, this will be obtained from 'need_info' error of get_rpt
Returns:
**string** specifying the claims gathering url | Below is the the instruction that describes the task:
### Input:
UMA RP function to get the claims gathering URL.
Parameters:
* **ticket (str):** ticket to pass to the auth server. for 90% of the cases, this will be obtained from 'need_info' error of get_rpt
Returns:
**string** specifying the claims gathering url
### Response:
def uma_rp_get_claims_gathering_url(self, ticket):
    """UMA RP function to get the claims gathering URL.

    Parameters:
        * **ticket (str):** ticket to pass to the auth server; in most
          cases this is obtained from the 'need_info' error of get_rpt

    Returns:
        **string** specifying the claims gathering url
    """
    command = "uma_rp_get_claims_gathering_url"
    request_params = {
        'oxd_id': self.oxd_id,
        'claims_redirect_uri': self.config.get('client',
                                               'claims_redirect_uri'),
        'ticket': ticket
    }
    logger.debug("Sending command `uma_rp_get_claims_gathering_url` with "
                 "params %s", request_params)
    response = self.msgr.request(command, **request_params)
    logger.debug("Received response: %s", response)
    # The oxd server reports failures in-band via the status field.
    if response['status'] == 'error':
        raise OxdServerError(response['data'])
    return response['data']['url']
def connect(self, ctx):
"""
establish xbahn connection and store on click context
"""
if hasattr(ctx,"conn") or "host" not in ctx.params:
return
ctx.conn = conn = connect(ctx.params["host"])
lnk = link.Link()
lnk.wire("main", receive=conn, send=conn)
ctx.client = api.Client(link=lnk)
ctx.widget = ClientWidget(ctx.client, "engineer") | establish xbahn connection and store on click context | Below is the the instruction that describes the task:
### Input:
establish xbahn connection and store on click context
### Response:
def connect(self, ctx):
    """
    establish xbahn connection and store on click context

    Stores the connection as ``ctx.conn``, an api.Client as ``ctx.client``
    and a ClientWidget as ``ctx.widget``.
    """
    # Bail out if a connection is already attached to the context or no
    # host was supplied in the command-line parameters.
    if hasattr(ctx,"conn") or "host" not in ctx.params:
        return
    # NOTE(review): this method shadows the module-level `connect` helper
    # that it calls here — confirm the global is the intended target.
    ctx.conn = conn = connect(ctx.params["host"])
    lnk = link.Link()
    # The same connection carries both directions of the "main" wire.
    lnk.wire("main", receive=conn, send=conn)
    ctx.client = api.Client(link=lnk)
    # presumably "engineer" selects the widget's role/profile — TODO confirm
    ctx.widget = ClientWidget(ctx.client, "engineer")
def derive_temporalnetwork(data, params):
"""
Derives connectivity from the data. A lot of data is inherently built with edges
(e.g. communication between two individuals).
However other networks are derived from the covariance of time series
(e.g. brain networks between two regions).
Covariance based metrics deriving time-resolved networks can be done in multiple ways.
There are other methods apart from covariance based.
Derive a weight vector for each time point and then the corrrelation coefficient
for each time point.
Paramters
----------
data : array
Time series data to perform connectivity derivation on. (Default dimensions are: (time as rows, nodes as columns). Change params{'dimord'} if you want it the other way (see below).
params : dict
Parameters for each method (see below).
Necessary paramters
===================
method : str
method: "distance","slidingwindow", "taperedslidingwindow",
"jackknife", "multiplytemporalderivative". Alternatively, method can be a weight matrix of size time x time.
**Different methods have method specific paramaters (see below)**
Params for all methods (optional)
=================================
postpro : "no" (default). Other alternatives are: "fisher", "boxcox", "standardize"
and any combination seperated by a + (e,g, "fisher+boxcox").
See postpro_pipeline for more information.
dimord : str
Dimension order: 'node,time' (default) or 'time,node'. People like to represent their data differently and this is an easy way to be sure that you are inputing the data in the correct way.
analysis_id : str or int
add to identify specfic analysis. Generated report will be placed in './report/' + analysis_id + '/derivation_report.html
report : bool
False by default. If true, A report is saved in ./report/[analysis_id]/derivation_report.html if "yes"
report_path : str
String where the report is saved. Default is ./report/[analysis_id]/derivation_report.html
Methods specific parameters
===========================
method == "distance"
~~~~~~~~~~~~~~~~~~~
Distance metric calculates 1/Distance metric weights, and scales between 0 and 1.
W[t,t] is excluded from the scaling and then set to 1.
params['distance']: str
Distance metric (e.g. 'euclidean'). See teneto.utils.getDistanceFunction for more info
When method == "slidingwindow"
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
params['windowsize'] : int
Size of window.
When method == "taperedslidingwindow"
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
params['windowsize'] : int
Size of window.
params['distribution'] : str
Scipy distribution (e.g. 'norm','expon'). Any distribution here: https://docs.scipy.org/doc/scipy/reference/stats.html
params['distribution_params'] : list
Each parameter, excluding the data "x" (in their scipy function order) to generate pdf.
NOTE
!!!!!!!!!!
The data x should be considered to be centered at 0 and have a length of window size.
(i.e. a window size of 5 entails x is [-2, -1, 0, 1, 2] a window size of 6 entails [-2.5, -1.5, 0.5, 0.5, 1.5, 2.5])
Given x params['distribution_params'] contains the remaining parameters.
e.g. normal distribution requires pdf(x, loc, scale) where loc=mean and scale=std.
This means that the mean and std have to be provided in distribution_params.
Say we have a gaussian distribution, a window size of 21 and params['distribution_params'] is [0,5].
This will lead to a gaussian with its peak at in the middle of each window with a standard deviation of 5.
Instead, if we set params['distribution_params'] is [10,5] this will lead to a half gaussian with its peak at the final time point with a standard deviation of 5.
When method == "temporalderivative"
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
params['windowsize'] : int
Size of window.
When method == "jackknife"
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
No parameters are necessary.
Optional parameters:
params['weight-var'] : array, (optional)
NxN array to weight the JC estimates (standerdized-JC*W). If weightby is selected, do not standerdize in postpro.
params['weight-mean'] : array, (optional)
NxN array to weight the JC estimates (standerdized-JC+W). If weightby is selected, do not standerdize in postpro.
Returns
-------
G : array
Connectivity estimates (nodes x nodes x time)
READ MORE
---------
About the general weighted pearson approach used for most methods, see:
Thompson & Fransson (2019) A common framework for the problem of deriving estimates of dynamic functional brain connectivity.
Neuroimage. (https://doi.org/10.1016/j.neuroimage.2017.12.057)
SEE ALSO
--------
*postpro_pipeline*, *gen_report*
"""
report = {}
if 'dimord' not in params.keys():
params['dimord'] = 'node,time'
if 'report' not in params.keys():
params['report'] = False
if 'analysis_id' not in params.keys():
params['analysis_id'] = ''
if 'postpro' not in params.keys():
params['postpro'] = 'no'
if params['report'] == 'yes' or params['report'] == True:
if 'analysis_id' not in params.keys():
params['analysis_id'] = ''
if 'report_path' not in params.keys():
params['report_path'] = './report/' + params['analysis_id']
if 'report_filename' not in params.keys():
params['report_filename'] = 'derivation_report.html'
if params['dimord'] == 'node,time':
data = data.transpose()
if isinstance(params['method'], str):
if params['method'] == 'jackknife':
weights, report = _weightfun_jackknife(data.shape[0], report)
relation = 'weight'
elif params['method'] == 'sliding window' or params['method'] == 'slidingwindow':
weights, report = _weightfun_sliding_window(
data.shape[0], params, report)
relation = 'weight'
elif params['method'] == 'tapered sliding window' or params['method'] == 'taperedslidingwindow':
weights, report = _weightfun_tapered_sliding_window(
data.shape[0], params, report)
relation = 'weight'
elif params['method'] == 'distance' or params['method'] == "spatial distance" or params['method'] == "node distance" or params['method'] == "nodedistance" or params['method'] == "spatialdistance":
weights, report = _weightfun_spatial_distance(data, params, report)
relation = 'weight'
elif params['method'] == 'mtd' or params['method'] == 'multiply temporal derivative' or params['method'] == 'multiplytemporalderivative' or params['method'] == 'temporal derivative' or params['method'] == "temporalderivative":
R, report = _temporal_derivative(data, params, report)
relation = 'coupling'
else:
raise ValueError(
'Unrecognoized method. See derive_with_weighted_pearson documentation for predefined methods or enter own weight matrix')
else:
try:
weights = np.array(params['method'])
relation = 'weight'
except:
raise ValueError(
'Unrecognoized method. See documentation for predefined methods')
if weights.shape[0] != weights.shape[1]:
raise ValueError("weight matrix should be square")
if weights.shape[0] != data.shape[0]:
raise ValueError("weight matrix must equal number of time points")
if relation == 'weight':
# Loop over each weight vector and calculate pearson correlation.
# Note, should see if this can be made quicker in future.
R = np.array(
[DescrStatsW(data, weights[i, :]).corrcoef for i in range(0, weights.shape[0])])
# Make node,node,time
R = R.transpose([1, 2, 0])
# Correct jackknife direction
if params['method'] == 'jackknife':
# Correct inversion
R = R * -1
jc_z = 0
if 'weight-var' in params.keys():
R = np.transpose(R, [2, 0, 1])
R = (R - R.mean(axis=0)) / R.std(axis=0)
jc_z = 1
R = R * params['weight-var']
R = R.transpose([1, 2, 0])
if 'weight-mean' in params.keys():
R = np.transpose(R, [2, 0, 1])
if jc_z == 0:
R = (R - R.mean(axis=0)) / R.std(axis=0)
R = R + params['weight-mean']
R = np.transpose(R, [1, 2, 0])
R = set_diagonal(R, 1)
if params['postpro'] != 'no':
R, report = postpro_pipeline(
R, params['postpro'], report)
R = set_diagonal(R, 1)
if params['report'] == 'yes' or params['report'] == True:
gen_report(report, params['report_path'], params['report_filename'])
return R | Derives connectivity from the data. A lot of data is inherently built with edges
(e.g. communication between two individuals).
However other networks are derived from the covariance of time series
(e.g. brain networks between two regions).
Covariance based metrics deriving time-resolved networks can be done in multiple ways.
There are other methods apart from covariance based.
Derive a weight vector for each time point and then the correlation coefficient
for each time point.
Parameters
----------
data : array
Time series data to perform connectivity derivation on. (Default dimensions are: (time as rows, nodes as columns). Change params{'dimord'} if you want it the other way (see below).
params : dict
Parameters for each method (see below).
Necessary paramters
===================
method : str
method: "distance","slidingwindow", "taperedslidingwindow",
"jackknife", "multiplytemporalderivative". Alternatively, method can be a weight matrix of size time x time.
**Different methods have method specific paramaters (see below)**
Params for all methods (optional)
=================================
postpro : "no" (default). Other alternatives are: "fisher", "boxcox", "standardize"
and any combination seperated by a + (e,g, "fisher+boxcox").
See postpro_pipeline for more information.
dimord : str
Dimension order: 'node,time' (default) or 'time,node'. People like to represent their data differently and this is an easy way to be sure that you are inputing the data in the correct way.
analysis_id : str or int
add to identify specfic analysis. Generated report will be placed in './report/' + analysis_id + '/derivation_report.html
report : bool
False by default. If true, A report is saved in ./report/[analysis_id]/derivation_report.html if "yes"
report_path : str
String where the report is saved. Default is ./report/[analysis_id]/derivation_report.html
Methods specific parameters
===========================
method == "distance"
~~~~~~~~~~~~~~~~~~~
Distance metric calculates 1/Distance metric weights, and scales between 0 and 1.
W[t,t] is excluded from the scaling and then set to 1.
params['distance']: str
Distance metric (e.g. 'euclidean'). See teneto.utils.getDistanceFunction for more info
When method == "slidingwindow"
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
params['windowsize'] : int
Size of window.
When method == "taperedslidingwindow"
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
params['windowsize'] : int
Size of window.
params['distribution'] : str
Scipy distribution (e.g. 'norm','expon'). Any distribution here: https://docs.scipy.org/doc/scipy/reference/stats.html
params['distribution_params'] : list
Each parameter, excluding the data "x" (in their scipy function order) to generate pdf.
NOTE
!!!!!!!!!!
The data x should be considered to be centered at 0 and have a length of window size.
(i.e. a window size of 5 entails x is [-2, -1, 0, 1, 2] a window size of 6 entails [-2.5, -1.5, 0.5, 0.5, 1.5, 2.5])
Given x params['distribution_params'] contains the remaining parameters.
e.g. normal distribution requires pdf(x, loc, scale) where loc=mean and scale=std.
This means that the mean and std have to be provided in distribution_params.
Say we have a gaussian distribution, a window size of 21 and params['distribution_params'] is [0,5].
This will lead to a gaussian with its peak at in the middle of each window with a standard deviation of 5.
Instead, if we set params['distribution_params'] is [10,5] this will lead to a half gaussian with its peak at the final time point with a standard deviation of 5.
When method == "temporalderivative"
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
params['windowsize'] : int
Size of window.
When method == "jackknife"
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
No parameters are necessary.
Optional parameters:
params['weight-var'] : array, (optional)
NxN array to weight the JC estimates (standerdized-JC*W). If weightby is selected, do not standerdize in postpro.
params['weight-mean'] : array, (optional)
NxN array to weight the JC estimates (standerdized-JC+W). If weightby is selected, do not standerdize in postpro.
Returns
-------
G : array
Connectivity estimates (nodes x nodes x time)
READ MORE
---------
About the general weighted pearson approach used for most methods, see:
Thompson & Fransson (2019) A common framework for the problem of deriving estimates of dynamic functional brain connectivity.
Neuroimage. (https://doi.org/10.1016/j.neuroimage.2017.12.057)
SEE ALSO
--------
*postpro_pipeline*, *gen_report* | Below is the the instruction that describes the task:
### Input:
Derives connectivity from the data. A lot of data is inherently built with edges
(e.g. communication between two individuals).
However other networks are derived from the covariance of time series
(e.g. brain networks between two regions).
Covariance based metrics deriving time-resolved networks can be done in multiple ways.
There are other methods apart from covariance based.
Derive a weight vector for each time point and then the corrrelation coefficient
for each time point.
Paramters
----------
data : array
Time series data to perform connectivity derivation on. (Default dimensions are: (time as rows, nodes as columns). Change params{'dimord'} if you want it the other way (see below).
params : dict
Parameters for each method (see below).
Necessary paramters
===================
method : str
method: "distance","slidingwindow", "taperedslidingwindow",
"jackknife", "multiplytemporalderivative". Alternatively, method can be a weight matrix of size time x time.
**Different methods have method specific paramaters (see below)**
Params for all methods (optional)
=================================
postpro : "no" (default). Other alternatives are: "fisher", "boxcox", "standardize"
and any combination seperated by a + (e,g, "fisher+boxcox").
See postpro_pipeline for more information.
dimord : str
Dimension order: 'node,time' (default) or 'time,node'. People like to represent their data differently and this is an easy way to be sure that you are inputing the data in the correct way.
analysis_id : str or int
add to identify specfic analysis. Generated report will be placed in './report/' + analysis_id + '/derivation_report.html
report : bool
False by default. If true, A report is saved in ./report/[analysis_id]/derivation_report.html if "yes"
report_path : str
String where the report is saved. Default is ./report/[analysis_id]/derivation_report.html
Methods specific parameters
===========================
method == "distance"
~~~~~~~~~~~~~~~~~~~
Distance metric calculates 1/Distance metric weights, and scales between 0 and 1.
W[t,t] is excluded from the scaling and then set to 1.
params['distance']: str
Distance metric (e.g. 'euclidean'). See teneto.utils.getDistanceFunction for more info
When method == "slidingwindow"
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
params['windowsize'] : int
Size of window.
When method == "taperedslidingwindow"
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
params['windowsize'] : int
Size of window.
params['distribution'] : str
Scipy distribution (e.g. 'norm','expon'). Any distribution here: https://docs.scipy.org/doc/scipy/reference/stats.html
params['distribution_params'] : list
Each parameter, excluding the data "x" (in their scipy function order) to generate pdf.
NOTE
!!!!!!!!!!
The data x should be considered to be centered at 0 and have a length of window size.
(i.e. a window size of 5 entails x is [-2, -1, 0, 1, 2] a window size of 6 entails [-2.5, -1.5, 0.5, 0.5, 1.5, 2.5])
Given x params['distribution_params'] contains the remaining parameters.
e.g. normal distribution requires pdf(x, loc, scale) where loc=mean and scale=std.
This means that the mean and std have to be provided in distribution_params.
Say we have a gaussian distribution, a window size of 21 and params['distribution_params'] is [0,5].
This will lead to a gaussian with its peak at in the middle of each window with a standard deviation of 5.
Instead, if we set params['distribution_params'] is [10,5] this will lead to a half gaussian with its peak at the final time point with a standard deviation of 5.
When method == "temporalderivative"
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
params['windowsize'] : int
Size of window.
When method == "jackknife"
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
No parameters are necessary.
Optional parameters:
params['weight-var'] : array, (optional)
NxN array to weight the JC estimates (standerdized-JC*W). If weightby is selected, do not standerdize in postpro.
params['weight-mean'] : array, (optional)
NxN array to weight the JC estimates (standerdized-JC+W). If weightby is selected, do not standerdize in postpro.
Returns
-------
G : array
Connectivity estimates (nodes x nodes x time)
READ MORE
---------
About the general weighted pearson approach used for most methods, see:
Thompson & Fransson (2019) A common framework for the problem of deriving estimates of dynamic functional brain connectivity.
Neuroimage. (https://doi.org/10.1016/j.neuroimage.2017.12.057)
SEE ALSO
--------
*postpro_pipeline*, *gen_report*
### Response:
def derive_temporalnetwork(data, params):
    """
    Derive time-resolved connectivity (a temporal network) from time series data.

    Most methods compute a weight vector for each time point and then a
    weighted Pearson correlation per time point; the "multiply temporal
    derivative" method couples successive temporal derivatives instead.

    Parameters
    ----------
    data : array
        Time series to derive connectivity from. Default dimension order is
        (node, time); set ``params['dimord']`` to change this.
    params : dict
        Method name plus method-specific parameters.

        Required key
        ============
        method : str or array
            One of "distance", "slidingwindow", "taperedslidingwindow",
            "jackknife", "multiply temporal derivative" -- or a
            time x time weight matrix supplied directly.

        Optional keys (all methods)
        ===========================
        postpro : str
            "no" (default), or any combination of "fisher", "boxcox",
            "standardize" joined by "+" (e.g. "fisher+boxcox").
            See postpro_pipeline for more information.
        dimord : str
            'node,time' (default) or 'time,node'.
        analysis_id : str or int
            Identifier used in the generated report's output path.
        report : bool or "yes"
            If truthy, an HTML derivation report is saved.
        report_path : str
            Report directory. Default './report/' + analysis_id.
        report_filename : str
            Report file name. Default 'derivation_report.html'.

        Method-specific keys
        ====================
        "distance":
            params['distance'] -- distance metric name (e.g. 'euclidean');
            see teneto.utils.getDistanceFunction.
        "slidingwindow":
            params['windowsize'] -- window length.
        "taperedslidingwindow":
            params['windowsize'];
            params['distribution'] -- scipy distribution name (e.g. 'norm');
            params['distribution_params'] -- the distribution's pdf
            parameters (excluding x, which is centred on each window).
        "temporalderivative":
            params['windowsize'].
        "jackknife":
            optional params['weight-var'] / params['weight-mean'] -- N x N
            arrays weighting the jackknife estimates; if used, do not
            standardize again in postpro.

    Returns
    -------
    G : array
        Connectivity estimates, shape (node, node, time).

    See Also
    --------
    postpro_pipeline, gen_report

    References
    ----------
    Thompson & Fransson (2019) A common framework for the problem of deriving
    estimates of dynamic functional brain connectivity. Neuroimage.
    https://doi.org/10.1016/j.neuroimage.2017.12.057
    """
    report = {}
    # --- Fill in defaults for optional parameters --------------------------
    if 'dimord' not in params.keys():
        params['dimord'] = 'node,time'
    if 'report' not in params.keys():
        params['report'] = False
    if 'analysis_id' not in params.keys():
        params['analysis_id'] = ''
    if 'postpro' not in params.keys():
        params['postpro'] = 'no'
    if params['report'] == 'yes' or params['report'] == True:
        if 'analysis_id' not in params.keys():
            params['analysis_id'] = ''
        if 'report_path' not in params.keys():
            params['report_path'] = './report/' + params['analysis_id']
        if 'report_filename' not in params.keys():
            params['report_filename'] = 'derivation_report.html'
    # Internally the code expects time on the first axis.
    if params['dimord'] == 'node,time':
        data = data.transpose()
    if isinstance(params['method'], str):
        if params['method'] == 'jackknife':
            weights, report = _weightfun_jackknife(data.shape[0], report)
            relation = 'weight'
        elif params['method'] in ('sliding window', 'slidingwindow'):
            weights, report = _weightfun_sliding_window(
                data.shape[0], params, report)
            relation = 'weight'
        elif params['method'] in ('tapered sliding window', 'taperedslidingwindow'):
            weights, report = _weightfun_tapered_sliding_window(
                data.shape[0], params, report)
            relation = 'weight'
        elif params['method'] in ('distance', 'spatial distance', 'node distance',
                                  'nodedistance', 'spatialdistance'):
            weights, report = _weightfun_spatial_distance(data, params, report)
            relation = 'weight'
        elif params['method'] in ('mtd', 'multiply temporal derivative',
                                  'multiplytemporalderivative',
                                  'temporal derivative', 'temporalderivative'):
            R, report = _temporal_derivative(data, params, report)
            relation = 'coupling'
        else:
            # BUG FIX: corrected "Unrecognoized" typo in the message.
            raise ValueError(
                'Unrecognized method. See derive_with_weighted_pearson documentation for predefined methods or enter own weight matrix')
    else:
        # A user-supplied time x time weight matrix.
        try:
            weights = np.array(params['method'])
            relation = 'weight'
        except Exception as exc:
            # BUG FIX: was a bare "except:" (also swallowed KeyboardInterrupt);
            # typo corrected as above.
            raise ValueError(
                'Unrecognized method. See documentation for predefined methods') from exc
    if relation == 'weight':
        # BUG FIX: these sanity checks previously ran unconditionally and
        # raised NameError on the temporal-derivative path, where no weight
        # matrix is ever defined.
        if weights.shape[0] != weights.shape[1]:
            raise ValueError("weight matrix should be square")
        if weights.shape[0] != data.shape[0]:
            raise ValueError("weight matrix must equal number of time points")
        # Loop over each weight vector and calculate pearson correlation.
        # Note, should see if this can be made quicker in future.
        R = np.array(
            [DescrStatsW(data, weights[i, :]).corrcoef for i in range(0, weights.shape[0])])
        # Make node,node,time
        R = R.transpose([1, 2, 0])
    # Correct jackknife direction. BUG FIX: guard with isinstance so a
    # user-supplied array method is never compared against a string
    # (elementwise comparison would be ambiguous in boolean context).
    if isinstance(params['method'], str) and params['method'] == 'jackknife':
        # Correct inversion
        R = R * -1
        jc_z = 0
        if 'weight-var' in params.keys():
            R = np.transpose(R, [2, 0, 1])
            R = (R - R.mean(axis=0)) / R.std(axis=0)
            jc_z = 1
            R = R * params['weight-var']
            R = R.transpose([1, 2, 0])
        if 'weight-mean' in params.keys():
            R = np.transpose(R, [2, 0, 1])
            if jc_z == 0:
                R = (R - R.mean(axis=0)) / R.std(axis=0)
            R = R + params['weight-mean']
            R = np.transpose(R, [1, 2, 0])
    R = set_diagonal(R, 1)
    if params['postpro'] != 'no':
        R, report = postpro_pipeline(
            R, params['postpro'], report)
        R = set_diagonal(R, 1)
    if params['report'] == 'yes' or params['report'] == True:
        gen_report(report, params['report_path'], params['report_filename'])
    return R
def socket_options(instance):
"""Ensure the keys of the 'options' property of the socket-ext extension of
network-traffic objects are only valid socket options (SO_*).
"""
for key, obj in instance['objects'].items():
if ('type' in obj and obj['type'] == 'network-traffic'):
try:
options = obj['extensions']['socket-ext']['options']
except KeyError:
continue
for opt in options:
if opt not in enums.SOCKET_OPTIONS:
yield JSONError("The 'options' property of object '%s' "
"contains a key ('%s') that is not a valid"
" socket option (SO_*)."
% (key, opt), instance['id'], 'socket-options') | Ensure the keys of the 'options' property of the socket-ext extension of
network-traffic objects are only valid socket options (SO_*). | Below is the the instruction that describes the task:
### Input:
Ensure the keys of the 'options' property of the socket-ext extension of
network-traffic objects are only valid socket options (SO_*).
### Response:
def socket_options(instance):
    """Yield a JSONError for every key in the 'options' property of a
    network-traffic object's socket-ext extension that is not a valid
    SO_* socket option.
    """
    for obj_id, obj in instance['objects'].items():
        # Only network-traffic objects carry the socket-ext extension.
        if obj.get('type') != 'network-traffic':
            continue
        try:
            option_keys = obj['extensions']['socket-ext']['options']
        except KeyError:
            # No socket-ext extension (or no options property) on this object.
            continue
        invalid_names = (name for name in option_keys
                         if name not in enums.SOCKET_OPTIONS)
        for name in invalid_names:
            yield JSONError("The 'options' property of object '%s' "
                            "contains a key ('%s') that is not a valid"
                            " socket option (SO_*)."
                            % (obj_id, name), instance['id'], 'socket-options')
def cellCenters(self):
"""Get the list of cell centers of the mesh surface.
.. hint:: |delaunay2d| |delaunay2d.py|_
"""
vcen = vtk.vtkCellCenters()
vcen.SetInputData(self.polydata(True))
vcen.Update()
return vtk_to_numpy(vcen.GetOutput().GetPoints().GetData()) | Get the list of cell centers of the mesh surface.
.. hint:: |delaunay2d| |delaunay2d.py|_ | Below is the the instruction that describes the task:
### Input:
Get the list of cell centers of the mesh surface.
.. hint:: |delaunay2d| |delaunay2d.py|_
### Response:
def cellCenters(self):
    """Return the coordinates of the cell centers of the mesh surface
    as a numpy array.

    .. hint:: |delaunay2d| |delaunay2d.py|_
    """
    center_filter = vtk.vtkCellCenters()
    center_filter.SetInputData(self.polydata(True))
    center_filter.Update()
    center_points = center_filter.GetOutput().GetPoints()
    return vtk_to_numpy(center_points.GetData())
def closeEvent(self, event):
"""Verifies the stimulus before closing, warns user with a
dialog if there are any problems"""
self.ok.setText("Checking...")
QtGui.QApplication.processEvents()
self.model().cleanComponents()
self.model().purgeAutoSelected()
msg = self.model().verify()
if not msg:
msg = self.model().warning()
if msg:
warnBox = QtGui.QMessageBox( QtGui.QMessageBox.Warning, 'Warning - Invalid Settings', '{}. Do you want to change this?'.format(msg) )
yesButton = warnBox.addButton(self.tr('Edit'), QtGui.QMessageBox.YesRole)
noButton = warnBox.addButton(self.tr('Ignore'), QtGui.QMessageBox.NoRole)
warnBox.exec_()
if warnBox.clickedButton() == yesButton:
event.ignore()
self.ok.setText("OK") | Verifies the stimulus before closing, warns user with a
dialog if there are any problems | Below is the the instruction that describes the task:
### Input:
Verifies the stimulus before closing, warns user with a
dialog if there are any problems
### Response:
def closeEvent(self, event):
    """Verify the stimulus before closing; if the model reports a problem,
    ask the user whether to keep editing (ignoring the close) or close anyway.
    """
    self.ok.setText("Checking...")
    QtGui.QApplication.processEvents()
    self.model().cleanComponents()
    self.model().purgeAutoSelected()
    # A verification error takes precedence over a mere warning.
    problem = self.model().verify()
    if not problem:
        problem = self.model().warning()
    if problem:
        warn_dialog = QtGui.QMessageBox(
            QtGui.QMessageBox.Warning, 'Warning - Invalid Settings',
            '{}. Do you want to change this?'.format(problem))
        edit_button = warn_dialog.addButton(self.tr('Edit'), QtGui.QMessageBox.YesRole)
        warn_dialog.addButton(self.tr('Ignore'), QtGui.QMessageBox.NoRole)
        warn_dialog.exec_()
        # 'Edit' keeps the window open so the user can fix the settings.
        if warn_dialog.clickedButton() == edit_button:
            event.ignore()
    self.ok.setText("OK")
def memset(self, allocation, value, size):
"""set the memory in allocation to the value in value
:param allocation: An Argument for some memory allocation unit
:type allocation: Argument
:param value: The value to set the memory to
:type value: a single 8-bit unsigned int
:param size: The size of to the allocation unit in bytes
:type size: int
"""
C.memset(allocation.ctypes, value, size) | set the memory in allocation to the value in value
:param allocation: An Argument for some memory allocation unit
:type allocation: Argument
:param value: The value to set the memory to
:type value: a single 8-bit unsigned int
:param size: The size of to the allocation unit in bytes
:type size: int | Below is the the instruction that describes the task:
### Input:
set the memory in allocation to the value in value
:param allocation: An Argument for some memory allocation unit
:type allocation: Argument
:param value: The value to set the memory to
:type value: a single 8-bit unsigned int
:param size: The size of to the allocation unit in bytes
:type size: int
### Response:
def memset(self, allocation, value, size):
    """Set every byte of the memory in ``allocation`` to ``value``.

    :param allocation: An Argument wrapping some memory allocation unit
    :type allocation: Argument
    :param value: The value to set each byte of the memory to
    :type value: a single 8-bit unsigned int
    :param size: The size of the allocation unit in bytes
    :type size: int
    """
    # Delegates to the backend's C-level memset; `C` is presumably a
    # ctypes-like module bound elsewhere in this file -- TODO confirm.
    C.memset(allocation.ctypes, value, size)
def execute(self, eopatch):
""" Mask values of `feature` according to the `mask_values` in `mask_feature`
:param eopatch: `eopatch` to be processed
:return: Same `eopatch` instance with masked `feature`
"""
feature_type, feature_name, new_feature_name = next(self.feature(eopatch))
mask_feature_type, mask_feature_name = next(self.mask_feature(eopatch))
data = np.copy(eopatch[feature_type][feature_name])
mask = eopatch[mask_feature_type][mask_feature_name]
if not isinstance(self.mask_values, list):
raise ValueError('Incorrect format or values of argument `mask_values`')
for value in self.mask_values:
data[mask.squeeze() == value] = self.no_data_value
eopatch.add_feature(feature_type, new_feature_name, data)
return eopatch | Mask values of `feature` according to the `mask_values` in `mask_feature`
:param eopatch: `eopatch` to be processed
:return: Same `eopatch` instance with masked `feature` | Below is the the instruction that describes the task:
### Input:
Mask values of `feature` according to the `mask_values` in `mask_feature`
:param eopatch: `eopatch` to be processed
:return: Same `eopatch` instance with masked `feature`
### Response:
def execute(self, eopatch):
    """ Mask values of `feature` according to the `mask_values` in `mask_feature`

    :param eopatch: `eopatch` to be processed
    :return: Same `eopatch` instance with masked `feature`
    :raises ValueError: if ``self.mask_values`` is not a list
    """
    feat_type, feat_name, new_feat_name = next(self.feature(eopatch))
    mask_type, mask_name = next(self.mask_feature(eopatch))
    masked_data = np.copy(eopatch[feat_type][feat_name])
    mask = eopatch[mask_type][mask_name]
    if not isinstance(self.mask_values, list):
        raise ValueError('Incorrect format or values of argument `mask_values`')
    # Squeeze once; the same flattened mask is reused for every value.
    flat_mask = mask.squeeze()
    for mask_value in self.mask_values:
        masked_data[flat_mask == mask_value] = self.no_data_value
    eopatch.add_feature(feat_type, new_feat_name, masked_data)
    return eopatch
def DataProcessorsDelete(self, dataProcessorId):
"""
Delete a data processor in CommonSense.
@param dataProcessorId - The id of the data processor that will be deleted.
@return (bool) - Boolean indicating whether GroupsPost was successful.
"""
if self.__SenseApiCall__('/dataprocessors/{id}.json'.format(id = dataProcessorId), 'DELETE'):
return True
else:
self.__error__ = "api call unsuccessful"
return False | Delete a data processor in CommonSense.
@param dataProcessorId - The id of the data processor that will be deleted.
@return (bool) - Boolean indicating whether GroupsPost was successful. | Below is the the instruction that describes the task:
### Input:
Delete a data processor in CommonSense.
@param dataProcessorId - The id of the data processor that will be deleted.
@return (bool) - Boolean indicating whether GroupsPost was successful.
### Response:
def DataProcessorsDelete(self, dataProcessorId):
    """
    Delete a data processor in CommonSense.

    @param dataProcessorId - The id of the data processor that will be deleted.

    @return (bool) - Boolean indicating whether the delete call was successful.
    """
    url = '/dataprocessors/{id}.json'.format(id=dataProcessorId)
    success = self.__SenseApiCall__(url, 'DELETE')
    if not success:
        # Record why the call is reported as failed.
        self.__error__ = "api call unsuccessful"
    return bool(success)
def initiateCompilation(args, file):
"""
Starts the entire compilation procedure
"""
####commands = finalizeCommands(args, file)
commands = makeCommands(0, file)
if not args['concise'] and args['print_args']:
print_commands = bool(args['watch'])
response = multiCall(*commands, print_commands=print_commands)
return response | Starts the entire compilation procedure | Below is the the instruction that describes the task:
### Input:
Starts the entire compilation procedure
### Response:
def initiateCompilation(args, file):
    """
    Start the entire compilation procedure.

    :param args: parsed command-line arguments (dict-like); reads the
        'concise', 'print_args' and 'watch' keys.
    :param file: the file to compile (passed through to makeCommands).
    :return: the result of multiCall over the generated commands.
    """
    commands = makeCommands(0, file)
    # BUG FIX: print_commands was previously assigned only inside the
    # conditional below, raising NameError whenever args['concise'] was
    # true or args['print_args'] was false.
    print_commands = False
    if not args['concise'] and args['print_args']:
        print_commands = bool(args['watch'])
    response = multiCall(*commands, print_commands=print_commands)
    return response
def between(self, left, right, inclusive=True):
"""
Return boolean Series equivalent to left <= series <= right.
This function returns a boolean vector containing `True` wherever the
corresponding Series element is between the boundary values `left` and
`right`. NA values are treated as `False`.
Parameters
----------
left : scalar
Left boundary.
right : scalar
Right boundary.
inclusive : bool, default True
Include boundaries.
Returns
-------
Series
Series representing whether each element is between left and
right (inclusive).
See Also
--------
Series.gt : Greater than of series and other.
Series.lt : Less than of series and other.
Notes
-----
This function is equivalent to ``(left <= ser) & (ser <= right)``
Examples
--------
>>> s = pd.Series([2, 0, 4, 8, np.nan])
Boundary values are included by default:
>>> s.between(1, 4)
0 True
1 False
2 True
3 False
4 False
dtype: bool
With `inclusive` set to ``False`` boundary values are excluded:
>>> s.between(1, 4, inclusive=False)
0 True
1 False
2 False
3 False
4 False
dtype: bool
`left` and `right` can be any scalar value:
>>> s = pd.Series(['Alice', 'Bob', 'Carol', 'Eve'])
>>> s.between('Anna', 'Daniel')
0 False
1 True
2 True
3 False
dtype: bool
"""
if inclusive:
lmask = self >= left
rmask = self <= right
else:
lmask = self > left
rmask = self < right
return lmask & rmask | Return boolean Series equivalent to left <= series <= right.
This function returns a boolean vector containing `True` wherever the
corresponding Series element is between the boundary values `left` and
`right`. NA values are treated as `False`.
Parameters
----------
left : scalar
Left boundary.
right : scalar
Right boundary.
inclusive : bool, default True
Include boundaries.
Returns
-------
Series
Series representing whether each element is between left and
right (inclusive).
See Also
--------
Series.gt : Greater than of series and other.
Series.lt : Less than of series and other.
Notes
-----
This function is equivalent to ``(left <= ser) & (ser <= right)``
Examples
--------
>>> s = pd.Series([2, 0, 4, 8, np.nan])
Boundary values are included by default:
>>> s.between(1, 4)
0 True
1 False
2 True
3 False
4 False
dtype: bool
With `inclusive` set to ``False`` boundary values are excluded:
>>> s.between(1, 4, inclusive=False)
0 True
1 False
2 False
3 False
4 False
dtype: bool
`left` and `right` can be any scalar value:
>>> s = pd.Series(['Alice', 'Bob', 'Carol', 'Eve'])
>>> s.between('Anna', 'Daniel')
0 False
1 True
2 True
3 False
dtype: bool | Below is the the instruction that describes the task:
### Input:
Return boolean Series equivalent to left <= series <= right.
This function returns a boolean vector containing `True` wherever the
corresponding Series element is between the boundary values `left` and
`right`. NA values are treated as `False`.
Parameters
----------
left : scalar
Left boundary.
right : scalar
Right boundary.
inclusive : bool, default True
Include boundaries.
Returns
-------
Series
Series representing whether each element is between left and
right (inclusive).
See Also
--------
Series.gt : Greater than of series and other.
Series.lt : Less than of series and other.
Notes
-----
This function is equivalent to ``(left <= ser) & (ser <= right)``
Examples
--------
>>> s = pd.Series([2, 0, 4, 8, np.nan])
Boundary values are included by default:
>>> s.between(1, 4)
0 True
1 False
2 True
3 False
4 False
dtype: bool
With `inclusive` set to ``False`` boundary values are excluded:
>>> s.between(1, 4, inclusive=False)
0 True
1 False
2 False
3 False
4 False
dtype: bool
`left` and `right` can be any scalar value:
>>> s = pd.Series(['Alice', 'Bob', 'Carol', 'Eve'])
>>> s.between('Anna', 'Daniel')
0 False
1 True
2 True
3 False
dtype: bool
### Response:
def between(self, left, right, inclusive=True):
    """
    Return a boolean Series that is True where left <= value <= right.

    NA values evaluate to False. Equivalent to
    ``(left <= ser) & (ser <= right)``.

    Parameters
    ----------
    left : scalar
        Left boundary.
    right : scalar
        Right boundary.
    inclusive : bool, default True
        Whether the boundaries themselves count as in range.

    Returns
    -------
    Series
        Boolean mask of elements falling between ``left`` and ``right``.

    See Also
    --------
    Series.gt : Greater than of series and other.
    Series.lt : Less than of series and other.

    Examples
    --------
    >>> s = pd.Series([2, 0, 4, 8, np.nan])
    >>> s.between(1, 4)
    0     True
    1    False
    2     True
    3    False
    4    False
    dtype: bool

    >>> s.between(1, 4, inclusive=False)
    0     True
    1    False
    2    False
    3    False
    4    False
    dtype: bool

    `left` and `right` can be any scalar value:

    >>> pd.Series(['Alice', 'Bob', 'Carol', 'Eve']).between('Anna', 'Daniel')
    0    False
    1     True
    2     True
    3    False
    dtype: bool
    """
    if inclusive:
        in_range = (self >= left) & (self <= right)
    else:
        in_range = (self > left) & (self < right)
    return in_range
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.