code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def log_error(self, text: str) -> None:
'''
Given some error text it will log the text if self.log_errors is True
:param text: Error text to log
'''
if self.log_errors:
with self._log_fp.open('a+') as log_file:
log_file.write(f'{text}\n') | Given some error text it will log the text if self.log_errors is True
:param text: Error text to log | Below is the the instruction that describes the task:
### Input:
Given some error text it will log the text if self.log_errors is True
:param text: Error text to log
### Response:
def log_error(self, text: str) -> None:
    """Append *text* to the error log when error logging is enabled.

    :param text: Error text to log
    """
    if not self.log_errors:
        return
    # Open in append mode so successive errors accumulate in the log file.
    with self._log_fp.open('a+') as fp:
        fp.write(text + '\n')
def explore(node):
""" Given a node, explores on relatives, siblings and children
:param node: GraphNode from which to explore
:return: set of explored GraphNodes
"""
explored = set()
explored.add(node)
dfs(node, callback=lambda n: explored.add(n))
return explored | Given a node, explores on relatives, siblings and children
:param node: GraphNode from which to explore
:return: set of explored GraphNodes | Below is the the instruction that describes the task:
### Input:
Given a node, explores on relatives, siblings and children
:param node: GraphNode from which to explore
:return: set of explored GraphNodes
### Response:
def explore(node):
    """Explore relatives, siblings and children reachable from *node*.

    :param node: GraphNode from which to explore
    :return: set of explored GraphNodes
    """
    visited = {node}
    # dfs invokes the callback for every node it reaches; bound method
    # ``visited.add`` collects them without a wrapper lambda.
    dfs(node, callback=visited.add)
    return visited
def search_template_present(name, definition):
'''
Ensure that the named search template is present.
name
Name of the search template to add
definition
Required dict for creation parameters as per http://www.elastic.co/guide/en/elasticsearch/reference/current/search-template.html
**Example:**
.. code-block:: yaml
test_pipeline:
elasticsearch.search_template_present:
- definition:
inline:
size: 10
'''
ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''}
try:
template = __salt__['elasticsearch.search_template_get'](id=name)
old = {}
if template:
old = salt.utils.json.loads(template["template"])
ret['changes'] = __utils__['dictdiffer.deep_diff'](old, definition)
if ret['changes'] or not definition:
if __opts__['test']:
if not template:
ret['comment'] = 'Search template {0} does not exist and will be created'.format(name)
else:
ret['comment'] = 'Search template {0} exists with wrong configuration and will be overridden'.format(name)
ret['result'] = None
else:
output = __salt__['elasticsearch.search_template_create'](id=name, body=definition)
if output:
if not template:
ret['comment'] = 'Successfully created search template {0}'.format(name)
else:
ret['comment'] = 'Successfully replaced search template {0}'.format(name)
else:
ret['result'] = False
ret['comment'] = 'Cannot create search template {0}, {1}'.format(name, output)
else:
ret['comment'] = 'Search template {0} is already present'.format(name)
except Exception as err:
ret['result'] = False
ret['comment'] = six.text_type(err)
return ret | Ensure that the named search template is present.
name
Name of the search template to add
definition
Required dict for creation parameters as per http://www.elastic.co/guide/en/elasticsearch/reference/current/search-template.html
**Example:**
.. code-block:: yaml
test_pipeline:
elasticsearch.search_template_present:
- definition:
inline:
size: 10 | Below is the the instruction that describes the task:
### Input:
Ensure that the named search template is present.
name
Name of the search template to add
definition
Required dict for creation parameters as per http://www.elastic.co/guide/en/elasticsearch/reference/current/search-template.html
**Example:**
.. code-block:: yaml
test_pipeline:
elasticsearch.search_template_present:
- definition:
inline:
size: 10
### Response:
def search_template_present(name, definition):
    '''
    Ensure that the named search template is present.

    name
        Name of the search template to add
    definition
        Required dict for creation parameters as per http://www.elastic.co/guide/en/elasticsearch/reference/current/search-template.html

    **Example:**

    .. code-block:: yaml

        test_pipeline:
          elasticsearch.search_template_present:
            - definition:
                inline:
                  size: 10
    '''
    # Standard Salt state return structure.
    ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''}
    try:
        # Fetch the existing template (falsy if absent) so we can diff it.
        template = __salt__['elasticsearch.search_template_get'](id=name)
        old = {}
        if template:
            # The stored template body is a JSON string; decode before diffing.
            old = salt.utils.json.loads(template["template"])
        ret['changes'] = __utils__['dictdiffer.deep_diff'](old, definition)
        if ret['changes'] or not definition:
            if __opts__['test']:
                # Dry-run mode: report what would happen; result=None by convention.
                if not template:
                    ret['comment'] = 'Search template {0} does not exist and will be created'.format(name)
                else:
                    ret['comment'] = 'Search template {0} exists with wrong configuration and will be overridden'.format(name)
                ret['result'] = None
            else:
                # Create or replace the template with the desired definition.
                output = __salt__['elasticsearch.search_template_create'](id=name, body=definition)
                if output:
                    if not template:
                        ret['comment'] = 'Successfully created search template {0}'.format(name)
                    else:
                        ret['comment'] = 'Successfully replaced search template {0}'.format(name)
                else:
                    ret['result'] = False
                    ret['comment'] = 'Cannot create search template {0}, {1}'.format(name, output)
        else:
            ret['comment'] = 'Search template {0} is already present'.format(name)
    except Exception as err:
        # Surface any API/transport error as a failed state.
        ret['result'] = False
        ret['comment'] = six.text_type(err)
    return ret
def install(pkg=None,
pkgs=None,
dir=None,
runas=None,
registry=None,
env=None,
dry_run=False,
silent=True):
'''
Install an NPM package.
If no directory is specified, the package will be installed globally. If
no package is specified, the dependencies (from package.json) of the
package in the given directory will be installed.
pkg
A package name in any format accepted by NPM, including a version
identifier
pkgs
A list of package names in the same format as the ``name`` parameter
.. versionadded:: 2014.7.0
dir
The target directory in which to install the package, or None for
global installation
runas
The user to run NPM with
registry
The NPM registry to install the package from.
.. versionadded:: 2014.7.0
env
Environment variables to set when invoking npm. Uses the same ``env``
format as the :py:func:`cmd.run <salt.modules.cmdmod.run>` execution
function.
.. versionadded:: 2014.7.0
silent
Whether or not to run NPM install with --silent flag.
.. versionadded:: 2016.3.0
dry_run
Whether or not to run NPM install with --dry-run flag.
.. versionadded:: 2015.8.4
silent
Whether or not to run NPM install with --silent flag.
.. versionadded:: 2015.8.5
CLI Example:
.. code-block:: bash
salt '*' npm.install coffee-script
salt '*' npm.install coffee-script@1.0.1
'''
# Protect against injection
if pkg:
pkgs = [_cmd_quote(pkg)]
elif pkgs:
pkgs = [_cmd_quote(v) for v in pkgs]
else:
pkgs = []
if registry:
registry = _cmd_quote(registry)
cmd = ['npm', 'install', '--json']
if silent:
cmd.append('--silent')
if not dir:
cmd.append('--global')
if registry:
cmd.append('--registry="{0}"'.format(registry))
if dry_run:
cmd.append('--dry-run')
cmd.extend(pkgs)
env = env or {}
if runas:
uid = salt.utils.user.get_uid(runas)
if uid:
env.update({'SUDO_UID': uid, 'SUDO_USER': ''})
cmd = ' '.join(cmd)
result = __salt__['cmd.run_all'](cmd,
python_shell=True,
cwd=dir,
runas=runas,
env=env)
if result['retcode'] != 0:
raise CommandExecutionError(result['stderr'])
# npm >1.2.21 is putting the output to stderr even though retcode is 0
npm_output = result['stdout'] or result['stderr']
try:
return salt.utils.json.find_json(npm_output)
except ValueError:
return npm_output | Install an NPM package.
If no directory is specified, the package will be installed globally. If
no package is specified, the dependencies (from package.json) of the
package in the given directory will be installed.
pkg
A package name in any format accepted by NPM, including a version
identifier
pkgs
A list of package names in the same format as the ``name`` parameter
.. versionadded:: 2014.7.0
dir
The target directory in which to install the package, or None for
global installation
runas
The user to run NPM with
registry
The NPM registry to install the package from.
.. versionadded:: 2014.7.0
env
Environment variables to set when invoking npm. Uses the same ``env``
format as the :py:func:`cmd.run <salt.modules.cmdmod.run>` execution
function.
.. versionadded:: 2014.7.0
silent
Whether or not to run NPM install with --silent flag.
.. versionadded:: 2016.3.0
dry_run
Whether or not to run NPM install with --dry-run flag.
.. versionadded:: 2015.8.4
silent
Whether or not to run NPM install with --silent flag.
.. versionadded:: 2015.8.5
CLI Example:
.. code-block:: bash
salt '*' npm.install coffee-script
salt '*' npm.install coffee-script@1.0.1 | Below is the the instruction that describes the task:
### Input:
Install an NPM package.
If no directory is specified, the package will be installed globally. If
no package is specified, the dependencies (from package.json) of the
package in the given directory will be installed.
pkg
A package name in any format accepted by NPM, including a version
identifier
pkgs
A list of package names in the same format as the ``name`` parameter
.. versionadded:: 2014.7.0
dir
The target directory in which to install the package, or None for
global installation
runas
The user to run NPM with
registry
The NPM registry to install the package from.
.. versionadded:: 2014.7.0
env
Environment variables to set when invoking npm. Uses the same ``env``
format as the :py:func:`cmd.run <salt.modules.cmdmod.run>` execution
function.
.. versionadded:: 2014.7.0
silent
Whether or not to run NPM install with --silent flag.
.. versionadded:: 2016.3.0
dry_run
Whether or not to run NPM install with --dry-run flag.
.. versionadded:: 2015.8.4
silent
Whether or not to run NPM install with --silent flag.
.. versionadded:: 2015.8.5
CLI Example:
.. code-block:: bash
salt '*' npm.install coffee-script
salt '*' npm.install coffee-script@1.0.1
### Response:
def install(pkg=None,
            pkgs=None,
            dir=None,
            runas=None,
            registry=None,
            env=None,
            dry_run=False,
            silent=True):
    '''
    Install an NPM package.

    If no directory is specified, the package will be installed globally. If
    no package is specified, the dependencies (from package.json) of the
    package in the given directory will be installed.

    pkg
        A package name in any format accepted by NPM, including a version
        identifier
    pkgs
        A list of package names in the same format as the ``name`` parameter
        .. versionadded:: 2014.7.0
    dir
        The target directory in which to install the package, or None for
        global installation
    runas
        The user to run NPM with
    registry
        The NPM registry to install the package from.
        .. versionadded:: 2014.7.0
    env
        Environment variables to set when invoking npm. Uses the same ``env``
        format as the :py:func:`cmd.run <salt.modules.cmdmod.run>` execution
        function.
        .. versionadded:: 2014.7.0
    dry_run
        Whether or not to run NPM install with --dry-run flag.
        .. versionadded:: 2015.8.4
    silent
        Whether or not to run NPM install with --silent flag.
        .. versionadded:: 2015.8.5

    CLI Example:

    .. code-block:: bash

        salt '*' npm.install coffee-script
        salt '*' npm.install coffee-script@1.0.1
    '''
    # Protect against injection
    if pkg:
        pkgs = [_cmd_quote(pkg)]
    elif pkgs:
        pkgs = [_cmd_quote(v) for v in pkgs]
    else:
        pkgs = []
    if registry:
        registry = _cmd_quote(registry)
    # --json makes npm emit machine-parseable output.
    cmd = ['npm', 'install', '--json']
    if silent:
        cmd.append('--silent')
    if not dir:
        # No target directory means a global install.
        cmd.append('--global')
    if registry:
        cmd.append('--registry="{0}"'.format(registry))
    if dry_run:
        cmd.append('--dry-run')
    cmd.extend(pkgs)
    env = env or {}
    if runas:
        # Pass the sudo identity through so npm behaves as if run via sudo.
        uid = salt.utils.user.get_uid(runas)
        if uid:
            env.update({'SUDO_UID': uid, 'SUDO_USER': ''})
    cmd = ' '.join(cmd)
    result = __salt__['cmd.run_all'](cmd,
                                     python_shell=True,
                                     cwd=dir,
                                     runas=runas,
                                     env=env)
    if result['retcode'] != 0:
        raise CommandExecutionError(result['stderr'])
    # npm >1.2.21 is putting the output to stderr even though retcode is 0
    npm_output = result['stdout'] or result['stderr']
    try:
        # Extract the JSON document from any surrounding noise in the output.
        return salt.utils.json.find_json(npm_output)
    except ValueError:
        # Not valid JSON -- return the raw output instead.
        return npm_output
def delete_object(self, object):
""" Delete object specified by ``object``. """
#pdb.set_trace()
self.db.engine.delete_key(object)#, userid='abc123', id='1')
print('dynamo.delete_object(%s)' % object) | Delete object specified by ``object``. | Below is the the instruction that describes the task:
### Input:
Delete object specified by ``object``.
### Response:
def delete_object(self, object):
    """Delete the record specified by ``object`` from the backing store."""
    engine = self.db.engine
    engine.delete_key(object)
    # Trace the deletion on stdout.
    print('dynamo.delete_object(%s)' % object)
def min_depth_img(self, num_img=1):
"""Collect a series of depth images and return the min of the set.
Parameters
----------
num_img : int
The number of consecutive frames to process.
Returns
-------
:obj:`DepthImage`
The min DepthImage collected from the frames.
"""
depths = []
for _ in range(num_img):
_, depth, _ = self.frames()
depths.append(depth)
return Image.min_images(depths) | Collect a series of depth images and return the min of the set.
Parameters
----------
num_img : int
The number of consecutive frames to process.
Returns
-------
:obj:`DepthImage`
The min DepthImage collected from the frames. | Below is the the instruction that describes the task:
### Input:
Collect a series of depth images and return the min of the set.
Parameters
----------
num_img : int
The number of consecutive frames to process.
Returns
-------
:obj:`DepthImage`
The min DepthImage collected from the frames.
### Response:
def min_depth_img(self, num_img=1):
    """Collect a series of depth images and return the min of the set.

    Parameters
    ----------
    num_img : int
        The number of consecutive frames to process.

    Returns
    -------
    :obj:`DepthImage`
        The min DepthImage collected from the frames.
    """
    # self.frames() returns a 3-tuple; only the depth channel is kept.
    captured = [self.frames()[1] for _ in range(num_img)]
    return Image.min_images(captured)
def _required_args(fn):
"""Returns arguments of fn with default=REQUIRED_ARG."""
spec = getargspec(fn)
if not spec.defaults:
return []
arg_names = spec.args[-len(spec.defaults):]
return [name for name, val in zip(arg_names, spec.defaults)
if val is REQUIRED_ARG] | Returns arguments of fn with default=REQUIRED_ARG. | Below is the the instruction that describes the task:
### Input:
Returns arguments of fn with default=REQUIRED_ARG.
### Response:
def _required_args(fn):
"""Returns arguments of fn with default=REQUIRED_ARG."""
spec = getargspec(fn)
if not spec.defaults:
return []
arg_names = spec.args[-len(spec.defaults):]
return [name for name, val in zip(arg_names, spec.defaults)
if val is REQUIRED_ARG] |
def find(self, uid):
"""Find and load the user from database by uid(user id)"""
data = (db.select(self.table).select('username', 'email', 'real_name',
'password', 'bio', 'status', 'role', 'uid').
condition('uid', uid).execute()
)
if data:
logger.info('data %s', data)
return self.load(data[0], self.model) | Find and load the user from database by uid(user id) | Below is the the instruction that describes the task:
### Input:
Find and load the user from database by uid(user id)
### Response:
def find(self, uid):
    """Load a user record from the database by ``uid`` (user id)."""
    query = (db.select(self.table)
             .select('username', 'email', 'real_name',
                     'password', 'bio', 'status', 'role', 'uid')
             .condition('uid', uid))
    data = query.execute()
    if data:
        logger.info('data %s', data)
        # Hydrate the first (and only expected) row into the model.
        return self.load(data[0], self.model)
def custom_action(sender, action,
instance,
user=None,
**kwargs):
"""
Manually trigger a custom action (or even a standard action).
"""
opts = get_opts(instance)
model = '.'.join([opts.app_label, opts.object_name])
distill_model_event(instance, model, action, user_override=user) | Manually trigger a custom action (or even a standard action). | Below is the the instruction that describes the task:
### Input:
Manually trigger a custom action (or even a standard action).
### Response:
def custom_action(sender, action,
                  instance,
                  user=None,
                  **kwargs):
    """
    Manually trigger a custom action (or even a standard action).
    """
    meta = get_opts(instance)
    # Build the "app_label.ObjectName" model label for the event.
    model_label = '.'.join((meta.app_label, meta.object_name))
    distill_model_event(instance, model_label, action, user_override=user)
def inn(self) -> str:
"""Generate random, but valid ``INN``.
:return: INN.
"""
def control_sum(nums: list, t: str) -> int:
digits = {
'n2': [7, 2, 4, 10, 3, 5, 9, 4, 6, 8],
'n1': [3, 7, 2, 4, 10, 3, 5, 9, 4, 6, 8],
}
number = 0
length = digits[t]
for i in range(0, len(length)):
number += nums[i] * length[i]
return number % 11 % 10
numbers = []
for x in range(0, 10):
numbers.append(self.random.randint(1 if x == 0 else 0, 9))
n2 = control_sum(numbers, 'n2')
numbers.append(n2)
n1 = control_sum(numbers, 'n1')
numbers.append(n1)
return ''.join([str(x) for x in numbers]) | Generate random, but valid ``INN``.
:return: INN. | Below is the the instruction that describes the task:
### Input:
Generate random, but valid ``INN``.
:return: INN.
### Response:
def inn(self) -> str:
    """Generate random, but valid ``INN``.

    :return: INN.
    """
    # Weight vectors for the two INN control digits.
    coeffs_n2 = [7, 2, 4, 10, 3, 5, 9, 4, 6, 8]
    coeffs_n1 = [3] + coeffs_n2

    def checksum(digits: list, coeffs: list) -> int:
        return sum(d * c for d, c in zip(digits, coeffs)) % 11 % 10

    # First digit is non-zero; the remaining nine are unrestricted.
    digits = [self.random.randint(1, 9)]
    digits += [self.random.randint(0, 9) for _ in range(9)]
    digits.append(checksum(digits, coeffs_n2))
    digits.append(checksum(digits, coeffs_n1))
    return ''.join(str(d) for d in digits)
def write(self, output_buffer, kmip_version=enums.KMIPVersion.KMIP_1_3):
"""
Write the ValidationInformation structure encoding to the data stream.
Args:
output_buffer (stream): A data stream in which to encode
ValidationInformation structure data, supporting a write
method.
kmip_version (enum): A KMIPVersion enumeration defining the KMIP
version with which the object will be encoded. Optional,
defaults to KMIP 2.0.
Raises:
InvalidField: Raised if the validation authority type, validation
version major, validation type, and/or validation level fields
are not defined.
VersionNotSupported: Raised when a KMIP version is provided that
does not support the ValidationInformation structure.
"""
if kmip_version < enums.KMIPVersion.KMIP_1_3:
raise exceptions.VersionNotSupported(
"KMIP {} does not support the ValidationInformation "
"object.".format(
kmip_version.value
)
)
local_buffer = BytearrayStream()
if self._validation_authority_type:
self._validation_authority_type.write(
local_buffer,
kmip_version=kmip_version
)
else:
raise exceptions.InvalidField(
"The ValidationInformation structure is missing the "
"validation authority type field."
)
if self._validation_authority_country:
self._validation_authority_country.write(
local_buffer,
kmip_version=kmip_version
)
if self._validation_authority_uri:
self._validation_authority_uri.write(
local_buffer,
kmip_version=kmip_version
)
if self._validation_version_major:
self._validation_version_major.write(
local_buffer,
kmip_version=kmip_version
)
else:
raise exceptions.InvalidField(
"The ValidationInformation structure is missing the "
"validation version major field."
)
if self._validation_version_minor:
self._validation_version_minor.write(
local_buffer,
kmip_version=kmip_version
)
if self._validation_type:
self._validation_type.write(
local_buffer,
kmip_version=kmip_version
)
else:
raise exceptions.InvalidField(
"The ValidationInformation structure is missing the "
"validation type field."
)
if self._validation_level:
self._validation_level.write(
local_buffer,
kmip_version=kmip_version
)
else:
raise exceptions.InvalidField(
"The ValidationInformation structure is missing the "
"validation level field."
)
if self._validation_certificate_identifier:
self._validation_certificate_identifier.write(
local_buffer,
kmip_version=kmip_version
)
if self._validation_certificate_uri:
self._validation_certificate_uri.write(
local_buffer,
kmip_version=kmip_version
)
if self._validation_vendor_uri:
self._validation_vendor_uri.write(
local_buffer,
kmip_version=kmip_version
)
if self._validation_profiles:
for validation_profile in self._validation_profiles:
validation_profile.write(
local_buffer,
kmip_version=kmip_version
)
self.length = local_buffer.length()
super(ValidationInformation, self).write(
output_buffer,
kmip_version=kmip_version
)
output_buffer.write(local_buffer.buffer) | Write the ValidationInformation structure encoding to the data stream.
Args:
output_buffer (stream): A data stream in which to encode
ValidationInformation structure data, supporting a write
method.
kmip_version (enum): A KMIPVersion enumeration defining the KMIP
version with which the object will be encoded. Optional,
defaults to KMIP 2.0.
Raises:
InvalidField: Raised if the validation authority type, validation
version major, validation type, and/or validation level fields
are not defined.
VersionNotSupported: Raised when a KMIP version is provided that
does not support the ValidationInformation structure. | Below is the the instruction that describes the task:
### Input:
Write the ValidationInformation structure encoding to the data stream.
Args:
output_buffer (stream): A data stream in which to encode
ValidationInformation structure data, supporting a write
method.
kmip_version (enum): A KMIPVersion enumeration defining the KMIP
version with which the object will be encoded. Optional,
defaults to KMIP 2.0.
Raises:
InvalidField: Raised if the validation authority type, validation
version major, validation type, and/or validation level fields
are not defined.
VersionNotSupported: Raised when a KMIP version is provided that
does not support the ValidationInformation structure.
### Response:
def write(self, output_buffer, kmip_version=enums.KMIPVersion.KMIP_1_3):
    """
    Write the ValidationInformation structure encoding to the data stream.

    Args:
        output_buffer (stream): A data stream in which to encode
            ValidationInformation structure data, supporting a write
            method.
        kmip_version (enum): A KMIPVersion enumeration defining the KMIP
            version with which the object will be encoded. Optional,
            defaults to KMIP 1.3.

    Raises:
        InvalidField: Raised if the validation authority type, validation
            version major, validation type, and/or validation level fields
            are not defined.
        VersionNotSupported: Raised when a KMIP version is provided that
            does not support the ValidationInformation structure.
    """
    # ValidationInformation only exists from KMIP 1.3 onwards.
    if kmip_version < enums.KMIPVersion.KMIP_1_3:
        raise exceptions.VersionNotSupported(
            "KMIP {} does not support the ValidationInformation "
            "object.".format(
                kmip_version.value
            )
        )
    # Encode all fields into a scratch buffer first so the total length is
    # known before the TTLV header is written by the superclass.
    local_buffer = BytearrayStream()
    # Required field: validation authority type.
    if self._validation_authority_type:
        self._validation_authority_type.write(
            local_buffer,
            kmip_version=kmip_version
        )
    else:
        raise exceptions.InvalidField(
            "The ValidationInformation structure is missing the "
            "validation authority type field."
        )
    # Optional fields are written only when set.
    if self._validation_authority_country:
        self._validation_authority_country.write(
            local_buffer,
            kmip_version=kmip_version
        )
    if self._validation_authority_uri:
        self._validation_authority_uri.write(
            local_buffer,
            kmip_version=kmip_version
        )
    # Required field: validation version major.
    if self._validation_version_major:
        self._validation_version_major.write(
            local_buffer,
            kmip_version=kmip_version
        )
    else:
        raise exceptions.InvalidField(
            "The ValidationInformation structure is missing the "
            "validation version major field."
        )
    if self._validation_version_minor:
        self._validation_version_minor.write(
            local_buffer,
            kmip_version=kmip_version
        )
    # Required field: validation type.
    if self._validation_type:
        self._validation_type.write(
            local_buffer,
            kmip_version=kmip_version
        )
    else:
        raise exceptions.InvalidField(
            "The ValidationInformation structure is missing the "
            "validation type field."
        )
    # Required field: validation level.
    if self._validation_level:
        self._validation_level.write(
            local_buffer,
            kmip_version=kmip_version
        )
    else:
        raise exceptions.InvalidField(
            "The ValidationInformation structure is missing the "
            "validation level field."
        )
    if self._validation_certificate_identifier:
        self._validation_certificate_identifier.write(
            local_buffer,
            kmip_version=kmip_version
        )
    if self._validation_certificate_uri:
        self._validation_certificate_uri.write(
            local_buffer,
            kmip_version=kmip_version
        )
    if self._validation_vendor_uri:
        self._validation_vendor_uri.write(
            local_buffer,
            kmip_version=kmip_version
        )
    if self._validation_profiles:
        for validation_profile in self._validation_profiles:
            validation_profile.write(
                local_buffer,
                kmip_version=kmip_version
            )
    # Record the payload length, emit the header, then the payload bytes.
    self.length = local_buffer.length()
    super(ValidationInformation, self).write(
        output_buffer,
        kmip_version=kmip_version
    )
    output_buffer.write(local_buffer.buffer)
def _parseSimpleSelector(self, src):
"""simple_selector
: [ namespace_selector ]? element_name? [ HASH | class | attrib | pseudo ]* S*
;
"""
ctxsrc = src.lstrip()
nsPrefix, src = self._getMatchResult(self.re_namespace_selector, src)
name, src = self._getMatchResult(self.re_element_name, src)
if name:
pass # already *successfully* assigned
elif src[:1] in self.SelectorQualifiers:
name = '*'
else:
raise self.ParseError('Selector name or qualifier expected', src, ctxsrc)
name = self.cssBuilder.resolveNamespacePrefix(nsPrefix, name)
selector = self.cssBuilder.selector(name)
while src and src[:1] in self.SelectorQualifiers:
hash_, src = self._getMatchResult(self.re_hash, src)
if hash_ is not None:
selector.addHashId(hash_)
continue
class_, src = self._getMatchResult(self.re_class, src)
if class_ is not None:
selector.addClass(class_)
continue
if src.startswith('['):
src, selector = self._parseSelectorAttribute(src, selector)
elif src.startswith(':'):
src, selector = self._parseSelectorPseudo(src, selector)
else:
break
return src.lstrip(), selector | simple_selector
: [ namespace_selector ]? element_name? [ HASH | class | attrib | pseudo ]* S*
; | Below is the the instruction that describes the task:
### Input:
simple_selector
: [ namespace_selector ]? element_name? [ HASH | class | attrib | pseudo ]* S*
;
### Response:
def _parseSimpleSelector(self, src):
    """Parse one CSS simple selector from the head of *src*.

    Grammar::

        simple_selector
          : [ namespace_selector ]? element_name? [ HASH | class | attrib | pseudo ]* S*
          ;

    Returns ``(remaining_src, selector)``.
    """
    ctxsrc = src.lstrip()
    # Optional namespace prefix ("ns|") followed by an optional element name.
    nsPrefix, src = self._getMatchResult(self.re_namespace_selector, src)
    name, src = self._getMatchResult(self.re_element_name, src)
    if name:
        pass  # already *successfully* assigned
    elif src[:1] in self.SelectorQualifiers:
        # No element name but a qualifier follows: implicit universal selector.
        name = '*'
    else:
        raise self.ParseError('Selector name or qualifier expected', src, ctxsrc)
    name = self.cssBuilder.resolveNamespacePrefix(nsPrefix, name)
    selector = self.cssBuilder.selector(name)
    # Consume trailing qualifiers (#id, .class, [attr], :pseudo) in order.
    while src and src[:1] in self.SelectorQualifiers:
        hash_, src = self._getMatchResult(self.re_hash, src)
        if hash_ is not None:
            selector.addHashId(hash_)
            continue
        class_, src = self._getMatchResult(self.re_class, src)
        if class_ is not None:
            selector.addClass(class_)
            continue
        if src.startswith('['):
            src, selector = self._parseSelectorAttribute(src, selector)
        elif src.startswith(':'):
            src, selector = self._parseSelectorPseudo(src, selector)
        else:
            break
    return src.lstrip(), selector
def execute(self):
"""
Execute the actions necessary to perform a `molecule init scenario` and
returns None.
:return: None
"""
scenario_name = self._command_args['scenario_name']
role_name = os.getcwd().split(os.sep)[-1]
role_directory = util.abs_path(os.path.join(os.getcwd(), os.pardir))
msg = 'Initializing new scenario {}...'.format(scenario_name)
LOG.info(msg)
molecule_directory = config.molecule_directory(
os.path.join(role_directory, role_name))
scenario_directory = os.path.join(molecule_directory, scenario_name)
scenario_base_directory = os.path.dirname(scenario_directory)
if os.path.isdir(scenario_directory):
msg = ('The directory molecule/{} exists. '
'Cannot create new scenario.').format(scenario_name)
util.sysexit_with_message(msg)
scenario_base_directory = os.path.join(role_directory, role_name)
templates = [
'scenario/driver/{driver_name}'.format(**self._command_args),
'scenario/verifier/{verifier_name}'.format(**self._command_args),
]
for template in templates:
self._process_templates(template, self._command_args,
scenario_base_directory)
self._process_templates('molecule', self._command_args, role_directory)
role_directory = os.path.join(role_directory, role_name)
msg = 'Initialized scenario in {} successfully.'.format(
scenario_directory)
LOG.success(msg) | Execute the actions necessary to perform a `molecule init scenario` and
returns None.
:return: None | Below is the the instruction that describes the task:
### Input:
Execute the actions necessary to perform a `molecule init scenario` and
returns None.
:return: None
### Response:
def execute(self):
    """
    Execute the actions necessary to perform a `molecule init scenario` and
    returns None.

    :return: None
    """
    scenario_name = self._command_args['scenario_name']
    # The role is assumed to be the current working directory; its parent
    # is the directory the role lives in.
    role_name = os.getcwd().split(os.sep)[-1]
    role_directory = util.abs_path(os.path.join(os.getcwd(), os.pardir))
    msg = 'Initializing new scenario {}...'.format(scenario_name)
    LOG.info(msg)
    molecule_directory = config.molecule_directory(
        os.path.join(role_directory, role_name))
    scenario_directory = os.path.join(molecule_directory, scenario_name)
    # Refuse to clobber an existing scenario.
    if os.path.isdir(scenario_directory):
        msg = ('The directory molecule/{} exists. '
               'Cannot create new scenario.').format(scenario_name)
        util.sysexit_with_message(msg)
    # Templates for the driver/verifier are rendered relative to the role root.
    scenario_base_directory = os.path.join(role_directory, role_name)
    templates = [
        'scenario/driver/{driver_name}'.format(**self._command_args),
        'scenario/verifier/{verifier_name}'.format(**self._command_args),
    ]
    for template in templates:
        self._process_templates(template, self._command_args,
                                scenario_base_directory)
    self._process_templates('molecule', self._command_args, role_directory)
    msg = 'Initialized scenario in {} successfully.'.format(
        scenario_directory)
    LOG.success(msg)
def export_results(job, fsid, file_name, univ_options, subfolder=None):
"""
Write out a file to a given location. The location can be either a directory on the local
machine, or a folder with a bucket on AWS.
:param str fsid: The file store id for the file to be exported
:param str file_name: The name of the file that neeeds to be exported (path to file is also
acceptable)
:param dict univ_options: Dict of universal options used by almost all tools
:param str subfolder: A sub folder within the main folder where this data should go
:return: None
"""
job.fileStore.logToMaster('Exporting %s to output location' % fsid)
file_name = os.path.basename(file_name)
try:
assert univ_options['output_folder'], 'Need a path to a folder to write out files'
assert univ_options['storage_location'], 'Need to know where the files need to go. ' + \
'Local or AWS/Azure, etc.'
except AssertionError as err:
# This isn't a game killer. Continue the pipeline without erroring out but do inform the
# user about it.
print('ERROR:', err.message, file=sys.stderr)
return
if univ_options['output_folder'] == 'NA':
output_folder = ''
else:
output_folder = univ_options['output_folder']
output_folder = os.path.join(output_folder, univ_options['patient'])
output_folder = os.path.join(output_folder, subfolder) if subfolder else output_folder
if univ_options['storage_location'] == 'local':
# Handle Local
try:
# Create the directory if required
os.makedirs(output_folder, 0755)
except OSError as err:
if err.errno != errno.EEXIST:
raise
output_url = 'file://' + os.path.join(output_folder, file_name)
elif univ_options['storage_location'].startswith('aws'):
# Handle AWS
bucket_name = univ_options['storage_location'].split(':')[-1]
output_url = os.path.join('S3://', bucket_name, output_folder.strip('/'), file_name)
# Can't do Azure or google yet.
else:
# TODO: Azure support
print("Currently doesn't support anything but Local and aws.")
return
job.fileStore.exportFile(fsid, output_url) | Write out a file to a given location. The location can be either a directory on the local
machine, or a folder with a bucket on AWS.
:param str fsid: The file store id for the file to be exported
:param str file_name: The name of the file that neeeds to be exported (path to file is also
acceptable)
:param dict univ_options: Dict of universal options used by almost all tools
:param str subfolder: A sub folder within the main folder where this data should go
:return: None | Below is the the instruction that describes the task:
### Input:
Write out a file to a given location. The location can be either a directory on the local
machine, or a folder with a bucket on AWS.
:param str fsid: The file store id for the file to be exported
:param str file_name: The name of the file that neeeds to be exported (path to file is also
acceptable)
:param dict univ_options: Dict of universal options used by almost all tools
:param str subfolder: A sub folder within the main folder where this data should go
:return: None
### Response:
def export_results(job, fsid, file_name, univ_options, subfolder=None):
"""
Write out a file to a given location. The location can be either a directory on the local
machine, or a folder with a bucket on AWS.
:param str fsid: The file store id for the file to be exported
    :param str file_name: The name of the file that needs to be exported (path to file is also
acceptable)
:param dict univ_options: Dict of universal options used by almost all tools
:param str subfolder: A sub folder within the main folder where this data should go
:return: None
"""
job.fileStore.logToMaster('Exporting %s to output location' % fsid)
file_name = os.path.basename(file_name)
try:
assert univ_options['output_folder'], 'Need a path to a folder to write out files'
assert univ_options['storage_location'], 'Need to know where the files need to go. ' + \
'Local or AWS/Azure, etc.'
except AssertionError as err:
# This isn't a game killer. Continue the pipeline without erroring out but do inform the
# user about it.
print('ERROR:', err.message, file=sys.stderr)
return
if univ_options['output_folder'] == 'NA':
output_folder = ''
else:
output_folder = univ_options['output_folder']
output_folder = os.path.join(output_folder, univ_options['patient'])
output_folder = os.path.join(output_folder, subfolder) if subfolder else output_folder
if univ_options['storage_location'] == 'local':
# Handle Local
try:
# Create the directory if required
os.makedirs(output_folder, 0755)
except OSError as err:
if err.errno != errno.EEXIST:
raise
output_url = 'file://' + os.path.join(output_folder, file_name)
elif univ_options['storage_location'].startswith('aws'):
# Handle AWS
bucket_name = univ_options['storage_location'].split(':')[-1]
output_url = os.path.join('S3://', bucket_name, output_folder.strip('/'), file_name)
# Can't do Azure or google yet.
else:
# TODO: Azure support
print("Currently doesn't support anything but Local and aws.")
return
job.fileStore.exportFile(fsid, output_url) |
def _get_or_create_user(self, force_populate=False):
"""
Loads the User model object from the database or creates it if it
doesn't exist. Also populates the fields, subject to
AUTH_LDAP_ALWAYS_UPDATE_USER.
"""
save_user = False
username = self.backend.ldap_to_django_username(self._username)
self._user, created = self.backend.get_or_create_user(username, self)
self._user.ldap_user = self
self._user.ldap_username = self._username
should_populate = force_populate or self.settings.ALWAYS_UPDATE_USER or created
if created:
logger.debug("Created Django user %s", username)
self._user.set_unusable_password()
save_user = True
if should_populate:
logger.debug("Populating Django user %s", username)
self._populate_user()
save_user = True
if self.settings.MIRROR_GROUPS:
self._mirror_groups()
# Give the client a chance to finish populating the user just before
# saving.
if should_populate:
signal_responses = populate_user.send(self.backend.__class__, user=self._user, ldap_user=self)
if len(signal_responses) > 0:
save_user = True
if save_user:
self._user.save()
# We populate the profile after the user model is saved to give the
# client a chance to create the profile. Custom user models in Django
# 1.5 probably won't have a get_profile method.
if should_populate and self._should_populate_profile():
self._populate_and_save_user_profile() | Loads the User model object from the database or creates it if it
doesn't exist. Also populates the fields, subject to
AUTH_LDAP_ALWAYS_UPDATE_USER. | Below is the instruction that describes the task:
### Input:
Loads the User model object from the database or creates it if it
doesn't exist. Also populates the fields, subject to
AUTH_LDAP_ALWAYS_UPDATE_USER.
### Response:
def _get_or_create_user(self, force_populate=False):
"""
Loads the User model object from the database or creates it if it
doesn't exist. Also populates the fields, subject to
AUTH_LDAP_ALWAYS_UPDATE_USER.
"""
save_user = False
username = self.backend.ldap_to_django_username(self._username)
self._user, created = self.backend.get_or_create_user(username, self)
self._user.ldap_user = self
self._user.ldap_username = self._username
should_populate = force_populate or self.settings.ALWAYS_UPDATE_USER or created
if created:
logger.debug("Created Django user %s", username)
self._user.set_unusable_password()
save_user = True
if should_populate:
logger.debug("Populating Django user %s", username)
self._populate_user()
save_user = True
if self.settings.MIRROR_GROUPS:
self._mirror_groups()
# Give the client a chance to finish populating the user just before
# saving.
if should_populate:
signal_responses = populate_user.send(self.backend.__class__, user=self._user, ldap_user=self)
if len(signal_responses) > 0:
save_user = True
if save_user:
self._user.save()
# We populate the profile after the user model is saved to give the
# client a chance to create the profile. Custom user models in Django
# 1.5 probably won't have a get_profile method.
if should_populate and self._should_populate_profile():
self._populate_and_save_user_profile() |
def get_instance(self, payload):
"""
Build an instance of NotificationInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.api.v2010.account.notification.NotificationInstance
:rtype: twilio.rest.api.v2010.account.notification.NotificationInstance
"""
return NotificationInstance(self._version, payload, account_sid=self._solution['account_sid'], ) | Build an instance of NotificationInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.api.v2010.account.notification.NotificationInstance
:rtype: twilio.rest.api.v2010.account.notification.NotificationInstance | Below is the instruction that describes the task:
### Input:
Build an instance of NotificationInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.api.v2010.account.notification.NotificationInstance
:rtype: twilio.rest.api.v2010.account.notification.NotificationInstance
### Response:
def get_instance(self, payload):
"""
Build an instance of NotificationInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.api.v2010.account.notification.NotificationInstance
:rtype: twilio.rest.api.v2010.account.notification.NotificationInstance
"""
return NotificationInstance(self._version, payload, account_sid=self._solution['account_sid'], ) |
def getrruleset(self, addRDate=False):
"""
Get an rruleset created from self.
If addRDate is True, add an RDATE for dtstart if it's not included in
an RRULE, and count is decremented if it exists.
Note that for rules which don't match DTSTART, DTSTART may not appear
in list(rruleset), although it should. By default, an RDATE is not
created in these cases, and count isn't updated, so dateutil may list
a spurious occurrence.
"""
rruleset = None
for name in DATESANDRULES:
addfunc = None
for line in self.contents.get(name, ()):
# don't bother creating a rruleset unless there's a rule
if rruleset is None:
rruleset = rrule.rruleset()
if addfunc is None:
addfunc = getattr(rruleset, name)
if name in DATENAMES:
if type(line.value[0]) == datetime.datetime:
map(addfunc, line.value)
elif type(line.value[0]) == datetime.date:
for dt in line.value:
addfunc(datetime.datetime(dt.year, dt.month, dt.day))
else:
# ignore RDATEs with PERIOD values for now
pass
elif name in RULENAMES:
try:
dtstart = self.dtstart.value
except (AttributeError, KeyError):
# Special for VTODO - try DUE property instead
try:
if self.name == "VTODO":
dtstart = self.due.value
else:
# if there's no dtstart, just return None
print('failed to get dtstart with VTODO')
return None
except (AttributeError, KeyError):
# if there's no due, just return None
print('failed to find DUE at all.')
return None
# a Ruby iCalendar library escapes semi-colons in rrules,
# so also remove any backslashes
value = str_(line.value).replace('\\', '')
rule = rrule.rrulestr(value, dtstart=dtstart)
until = rule._until
if until is not None and isinstance(dtstart, datetime.datetime) and \
(until.tzinfo != dtstart.tzinfo):
# dateutil converts the UNTIL date to a datetime,
# check to see if the UNTIL parameter value was a date
vals = dict(pair.split('=') for pair in
line.value.upper().split(';'))
if len(vals.get('UNTIL', '')) == 8:
until = datetime.datetime.combine(until.date(), dtstart.time())
# While RFC2445 says UNTIL MUST be UTC, Chandler allows
# floating recurring events, and uses floating UNTIL values.
# Also, some odd floating UNTIL but timezoned DTSTART values
# have shown up in the wild, so put floating UNTIL values
# DTSTART's timezone
if until.tzinfo is None:
until = until.replace(tzinfo=dtstart.tzinfo)
if dtstart.tzinfo is not None:
until = until.astimezone(dtstart.tzinfo)
# RFC2445 actually states that UNTIL must be a UTC value. Whilst the
# changes above work OK, one problem case is if DTSTART is floating but
# UNTIL is properly specified as UTC (or with a TZID). In that case dateutil
# will fail datetime comparisons. There is no easy solution to this as
# there is no obvious timezone (at this point) to do proper floating time
                    # offset comparisons. The best we can do is treat the UNTIL value as floating.
# This could mean incorrect determination of the last instance. The better
# solution here is to encourage clients to use COUNT rather than UNTIL
# when DTSTART is floating.
if dtstart.tzinfo is None:
until = until.replace(tzinfo=None)
rule._until = until
# add the rrule or exrule to the rruleset
addfunc(rule)
if name == 'rrule' and addRDate:
try:
# dateutils does not work with all-day (datetime.date) items
# so we need to convert to a datetime.datetime
# (which is what dateutils does internally)
if not isinstance(dtstart, datetime.datetime):
adddtstart = datetime.datetime.fromordinal(dtstart.toordinal())
else:
adddtstart = dtstart
if rruleset._rrule[-1][0] != adddtstart:
rruleset.rdate(adddtstart)
added = True
else:
added = False
except IndexError:
# it's conceivable that an rrule might have 0 datetimes
added = False
if added and rruleset._rrule[-1]._count is not None:
rruleset._rrule[-1]._count -= 1
return rruleset | Get an rruleset created from self.
If addRDate is True, add an RDATE for dtstart if it's not included in
an RRULE, and count is decremented if it exists.
Note that for rules which don't match DTSTART, DTSTART may not appear
in list(rruleset), although it should. By default, an RDATE is not
created in these cases, and count isn't updated, so dateutil may list
a spurious occurrence. | Below is the instruction that describes the task:
### Input:
Get an rruleset created from self.
If addRDate is True, add an RDATE for dtstart if it's not included in
an RRULE, and count is decremented if it exists.
Note that for rules which don't match DTSTART, DTSTART may not appear
in list(rruleset), although it should. By default, an RDATE is not
created in these cases, and count isn't updated, so dateutil may list
a spurious occurrence.
### Response:
def getrruleset(self, addRDate=False):
"""
Get an rruleset created from self.
If addRDate is True, add an RDATE for dtstart if it's not included in
an RRULE, and count is decremented if it exists.
Note that for rules which don't match DTSTART, DTSTART may not appear
in list(rruleset), although it should. By default, an RDATE is not
created in these cases, and count isn't updated, so dateutil may list
a spurious occurrence.
"""
rruleset = None
for name in DATESANDRULES:
addfunc = None
for line in self.contents.get(name, ()):
# don't bother creating a rruleset unless there's a rule
if rruleset is None:
rruleset = rrule.rruleset()
if addfunc is None:
addfunc = getattr(rruleset, name)
if name in DATENAMES:
if type(line.value[0]) == datetime.datetime:
map(addfunc, line.value)
elif type(line.value[0]) == datetime.date:
for dt in line.value:
addfunc(datetime.datetime(dt.year, dt.month, dt.day))
else:
# ignore RDATEs with PERIOD values for now
pass
elif name in RULENAMES:
try:
dtstart = self.dtstart.value
except (AttributeError, KeyError):
# Special for VTODO - try DUE property instead
try:
if self.name == "VTODO":
dtstart = self.due.value
else:
# if there's no dtstart, just return None
print('failed to get dtstart with VTODO')
return None
except (AttributeError, KeyError):
# if there's no due, just return None
print('failed to find DUE at all.')
return None
# a Ruby iCalendar library escapes semi-colons in rrules,
# so also remove any backslashes
value = str_(line.value).replace('\\', '')
rule = rrule.rrulestr(value, dtstart=dtstart)
until = rule._until
if until is not None and isinstance(dtstart, datetime.datetime) and \
(until.tzinfo != dtstart.tzinfo):
# dateutil converts the UNTIL date to a datetime,
# check to see if the UNTIL parameter value was a date
vals = dict(pair.split('=') for pair in
line.value.upper().split(';'))
if len(vals.get('UNTIL', '')) == 8:
until = datetime.datetime.combine(until.date(), dtstart.time())
# While RFC2445 says UNTIL MUST be UTC, Chandler allows
# floating recurring events, and uses floating UNTIL values.
# Also, some odd floating UNTIL but timezoned DTSTART values
# have shown up in the wild, so put floating UNTIL values
# DTSTART's timezone
if until.tzinfo is None:
until = until.replace(tzinfo=dtstart.tzinfo)
if dtstart.tzinfo is not None:
until = until.astimezone(dtstart.tzinfo)
# RFC2445 actually states that UNTIL must be a UTC value. Whilst the
# changes above work OK, one problem case is if DTSTART is floating but
# UNTIL is properly specified as UTC (or with a TZID). In that case dateutil
# will fail datetime comparisons. There is no easy solution to this as
# there is no obvious timezone (at this point) to do proper floating time
                    # offset comparisons. The best we can do is treat the UNTIL value as floating.
# This could mean incorrect determination of the last instance. The better
# solution here is to encourage clients to use COUNT rather than UNTIL
# when DTSTART is floating.
if dtstart.tzinfo is None:
until = until.replace(tzinfo=None)
rule._until = until
# add the rrule or exrule to the rruleset
addfunc(rule)
if name == 'rrule' and addRDate:
try:
# dateutils does not work with all-day (datetime.date) items
# so we need to convert to a datetime.datetime
# (which is what dateutils does internally)
if not isinstance(dtstart, datetime.datetime):
adddtstart = datetime.datetime.fromordinal(dtstart.toordinal())
else:
adddtstart = dtstart
if rruleset._rrule[-1][0] != adddtstart:
rruleset.rdate(adddtstart)
added = True
else:
added = False
except IndexError:
# it's conceivable that an rrule might have 0 datetimes
added = False
if added and rruleset._rrule[-1]._count is not None:
rruleset._rrule[-1]._count -= 1
return rruleset |
def stop(self):
"""Close websocket connection."""
self.state = STATE_STOPPED
if self.transport:
            self.transport.close() | Close websocket connection. | Below is the instruction that describes the task:
### Input:
Close websocket connection.
### Response:
def stop(self):
"""Close websocket connection."""
self.state = STATE_STOPPED
if self.transport:
self.transport.close() |
def custom_getter_router(custom_getter_map, name_fn):
  """Creates a custom getter that matches requests to dict of custom getters.
Custom getters are callables which implement the
[custom getter API]
(https://www.tensorflow.org/versions/r1.0/api_docs/python/tf/get_variable).
The returned custom getter dispatches calls based on pattern matching the
name of the requested variable to the keys of custom_getter_map. For example,
{
".*/w": snt.custom_getters.stop_gradient,
}
will match all variables named with the suffix "/w". The `name_fn` is
provided to allow processing of the name, such as stripping off a scope prefix
before matching.
Args:
custom_getter_map: Mapping of regular expressions to custom getter
functions.
name_fn: Callable to map variable name through before matching to regular
expressions. This might, for example, strip off a scope prefix.
Returns:
A custom getter.
Raises:
TypeError: If an entry in `custom_getter_map` is not a callable function.
"""
for custom_getter in custom_getter_map.values():
if not callable(custom_getter):
raise TypeError("Given custom_getter is not callable.")
def _custom_getter(getter, name, *args, **kwargs):
"""A custom getter that routes based on pattern matching the variable name.
Args:
getter: The true getter to call.
name: The fully qualified variable name, i.e. including all scopes.
*args: Arguments, in the same format as tf.get_variable.
**kwargs: Keyword arguments, in the same format as tf.get_variable.
Returns:
The return value of the appropriate custom getter. If there are no
matches, it returns the return value of `getter`.
Raises:
KeyError: If more than one pattern matches the variable name.
"""
bare_name = name_fn(name)
matches = [
(custom_getter, pattern)
for pattern, custom_getter in custom_getter_map.items()
if re.match(pattern, bare_name) is not None]
num_matches = len(matches)
if num_matches == 0:
return getter(name, *args, **kwargs)
elif num_matches == 1:
custom_getter, pattern = matches[0]
return custom_getter(getter, name, *args, **kwargs)
else:
raise KeyError("More than one custom_getter matched {} ({}): {}".format(
name, bare_name, [pattern for _, pattern in matches]))
  return _custom_getter | Creates a custom getter that matches requests to dict of custom getters.
Custom getters are callables which implement the
[custom getter API]
(https://www.tensorflow.org/versions/r1.0/api_docs/python/tf/get_variable).
The returned custom getter dispatches calls based on pattern matching the
name of the requested variable to the keys of custom_getter_map. For example,
{
".*/w": snt.custom_getters.stop_gradient,
}
will match all variables named with the suffix "/w". The `name_fn` is
provided to allow processing of the name, such as stripping off a scope prefix
before matching.
Args:
custom_getter_map: Mapping of regular expressions to custom getter
functions.
name_fn: Callable to map variable name through before matching to regular
expressions. This might, for example, strip off a scope prefix.
Returns:
A custom getter.
Raises:
TypeError: If an entry in `custom_getter_map` is not a callable function. | Below is the instruction that describes the task:
### Input:
Creates a custom getter that matches requests to dict of custom getters.
Custom getters are callables which implement the
[custom getter API]
(https://www.tensorflow.org/versions/r1.0/api_docs/python/tf/get_variable).
The returned custom getter dispatches calls based on pattern matching the
name of the requested variable to the keys of custom_getter_map. For example,
{
".*/w": snt.custom_getters.stop_gradient,
}
will match all variables named with the suffix "/w". The `name_fn` is
provided to allow processing of the name, such as stripping off a scope prefix
before matching.
Args:
custom_getter_map: Mapping of regular expressions to custom getter
functions.
name_fn: Callable to map variable name through before matching to regular
expressions. This might, for example, strip off a scope prefix.
Returns:
A custom getter.
Raises:
TypeError: If an entry in `custom_getter_map` is not a callable function.
### Response:
def custom_getter_router(custom_getter_map, name_fn):
  """Creates a custom getter that matches requests to dict of custom getters.
Custom getters are callables which implement the
[custom getter API]
(https://www.tensorflow.org/versions/r1.0/api_docs/python/tf/get_variable).
The returned custom getter dispatches calls based on pattern matching the
name of the requested variable to the keys of custom_getter_map. For example,
{
".*/w": snt.custom_getters.stop_gradient,
}
will match all variables named with the suffix "/w". The `name_fn` is
provided to allow processing of the name, such as stripping off a scope prefix
before matching.
Args:
custom_getter_map: Mapping of regular expressions to custom getter
functions.
name_fn: Callable to map variable name through before matching to regular
expressions. This might, for example, strip off a scope prefix.
Returns:
A custom getter.
Raises:
TypeError: If an entry in `custom_getter_map` is not a callable function.
"""
for custom_getter in custom_getter_map.values():
if not callable(custom_getter):
raise TypeError("Given custom_getter is not callable.")
def _custom_getter(getter, name, *args, **kwargs):
"""A custom getter that routes based on pattern matching the variable name.
Args:
getter: The true getter to call.
name: The fully qualified variable name, i.e. including all scopes.
*args: Arguments, in the same format as tf.get_variable.
**kwargs: Keyword arguments, in the same format as tf.get_variable.
Returns:
The return value of the appropriate custom getter. If there are no
matches, it returns the return value of `getter`.
Raises:
KeyError: If more than one pattern matches the variable name.
"""
bare_name = name_fn(name)
matches = [
(custom_getter, pattern)
for pattern, custom_getter in custom_getter_map.items()
if re.match(pattern, bare_name) is not None]
num_matches = len(matches)
if num_matches == 0:
return getter(name, *args, **kwargs)
elif num_matches == 1:
custom_getter, pattern = matches[0]
return custom_getter(getter, name, *args, **kwargs)
else:
raise KeyError("More than one custom_getter matched {} ({}): {}".format(
name, bare_name, [pattern for _, pattern in matches]))
return _custom_getter |
def coordinates(self, reference=None):
"""
Returns the coordinates of a :Placeable: relative to :reference:
"""
coordinates = [i._coordinates for i in self.get_trace(reference)]
        return functools.reduce(lambda a, b: a + b, coordinates) | Returns the coordinates of a :Placeable: relative to :reference: | Below is the instruction that describes the task:
### Input:
Returns the coordinates of a :Placeable: relative to :reference:
### Response:
def coordinates(self, reference=None):
"""
Returns the coordinates of a :Placeable: relative to :reference:
"""
coordinates = [i._coordinates for i in self.get_trace(reference)]
return functools.reduce(lambda a, b: a + b, coordinates) |
async def _send_recipients(
self,
recipients: List[str],
options: List[str] = None,
timeout: DefaultNumType = _default,
) -> RecipientErrorsType:
"""
Send the recipients given to the server. Used as part of
:meth:`.sendmail`.
"""
recipient_errors = []
for address in recipients:
try:
await self.rcpt(address, timeout=timeout)
except SMTPRecipientRefused as exc:
recipient_errors.append(exc)
if len(recipient_errors) == len(recipients):
raise SMTPRecipientsRefused(recipient_errors)
formatted_errors = {
err.recipient: SMTPResponse(err.code, err.message)
for err in recipient_errors
}
return formatted_errors | Send the recipients given to the server. Used as part of
:meth:`.sendmail`. | Below is the instruction that describes the task:
### Input:
Send the recipients given to the server. Used as part of
:meth:`.sendmail`.
### Response:
async def _send_recipients(
self,
recipients: List[str],
options: List[str] = None,
timeout: DefaultNumType = _default,
) -> RecipientErrorsType:
"""
Send the recipients given to the server. Used as part of
:meth:`.sendmail`.
"""
recipient_errors = []
for address in recipients:
try:
await self.rcpt(address, timeout=timeout)
except SMTPRecipientRefused as exc:
recipient_errors.append(exc)
if len(recipient_errors) == len(recipients):
raise SMTPRecipientsRefused(recipient_errors)
formatted_errors = {
err.recipient: SMTPResponse(err.code, err.message)
for err in recipient_errors
}
return formatted_errors |
def setStopAction(self, action, *args, **kwargs):
"""
Set a function to call when run() is stopping, after the main action is called.
Parameters
----------
action: function pointer
The function to call.
*args
Positional arguments to pass to action.
**kwargs:
Keyword arguments to pass to action.
"""
self.stop_action = action
self.stop_args = args
self.stop_kwargs = kwargs | Set a function to call when run() is stopping, after the main action is called.
Parameters
----------
action: function pointer
The function to call.
*args
Positional arguments to pass to action.
**kwargs:
Keyword arguments to pass to action. | Below is the instruction that describes the task:
### Input:
Set a function to call when run() is stopping, after the main action is called.
Parameters
----------
action: function pointer
The function to call.
*args
Positional arguments to pass to action.
**kwargs:
Keyword arguments to pass to action.
### Response:
def setStopAction(self, action, *args, **kwargs):
"""
Set a function to call when run() is stopping, after the main action is called.
Parameters
----------
action: function pointer
The function to call.
*args
Positional arguments to pass to action.
**kwargs:
Keyword arguments to pass to action.
"""
self.stop_action = action
self.stop_args = args
self.stop_kwargs = kwargs |
def crab_request(client, action, *args):
'''
Utility function that helps making requests to the CRAB service.
:param client: A :class:`suds.client.Client` for the CRAB service.
:param string action: Which method to call, eg. `ListGewesten`
:returns: Result of the SOAP call.
.. versionadded:: 0.3.0
'''
log.debug('Calling %s on CRAB service.', action)
return getattr(client.service, action)(*args) | Utility function that helps making requests to the CRAB service.
:param client: A :class:`suds.client.Client` for the CRAB service.
:param string action: Which method to call, eg. `ListGewesten`
:returns: Result of the SOAP call.
.. versionadded:: 0.3.0 | Below is the instruction that describes the task:
### Input:
Utility function that helps making requests to the CRAB service.
:param client: A :class:`suds.client.Client` for the CRAB service.
:param string action: Which method to call, eg. `ListGewesten`
:returns: Result of the SOAP call.
.. versionadded:: 0.3.0
### Response:
def crab_request(client, action, *args):
'''
Utility function that helps making requests to the CRAB service.
:param client: A :class:`suds.client.Client` for the CRAB service.
:param string action: Which method to call, eg. `ListGewesten`
:returns: Result of the SOAP call.
.. versionadded:: 0.3.0
'''
log.debug('Calling %s on CRAB service.', action)
return getattr(client.service, action)(*args) |
def wide_to_long(df, stubnames, i, j, sep="", suffix=r'\d+'):
r"""
Wide panel to long format. Less flexible but more user-friendly than melt.
With stubnames ['A', 'B'], this function expects to find one or more
group of columns with format
A-suffix1, A-suffix2,..., B-suffix1, B-suffix2,...
You specify what you want to call this suffix in the resulting long format
with `j` (for example `j='year'`)
Each row of these wide variables are assumed to be uniquely identified by
`i` (can be a single column name or a list of column names)
All remaining variables in the data frame are left intact.
Parameters
----------
df : DataFrame
The wide-format DataFrame
stubnames : str or list-like
The stub name(s). The wide format variables are assumed to
start with the stub names.
i : str or list-like
Column(s) to use as id variable(s)
j : str
The name of the sub-observation variable. What you wish to name your
suffix in the long format.
sep : str, default ""
A character indicating the separation of the variable names
in the wide format, to be stripped from the names in the long format.
For example, if your column names are A-suffix1, A-suffix2, you
can strip the hyphen by specifying `sep='-'`
.. versionadded:: 0.20.0
suffix : str, default '\\d+'
A regular expression capturing the wanted suffixes. '\\d+' captures
numeric suffixes. Suffixes with no numbers could be specified with the
negated character class '\\D+'. You can also further disambiguate
suffixes, for example, if your wide variables are of the form
A-one, B-two,.., and you have an unrelated column A-rating, you can
ignore the last one by specifying `suffix='(!?one|two)'`
.. versionadded:: 0.20.0
.. versionchanged:: 0.23.0
When all suffixes are numeric, they are cast to int64/float64.
Returns
-------
DataFrame
A DataFrame that contains each stub name as a variable, with new index
(i, j).
Notes
-----
All extra variables are left untouched. This simply uses
`pandas.melt` under the hood, but is hard-coded to "do the right thing"
in a typical case.
Examples
--------
>>> np.random.seed(123)
>>> df = pd.DataFrame({"A1970" : {0 : "a", 1 : "b", 2 : "c"},
... "A1980" : {0 : "d", 1 : "e", 2 : "f"},
... "B1970" : {0 : 2.5, 1 : 1.2, 2 : .7},
... "B1980" : {0 : 3.2, 1 : 1.3, 2 : .1},
... "X" : dict(zip(range(3), np.random.randn(3)))
... })
>>> df["id"] = df.index
>>> df
A1970 A1980 B1970 B1980 X id
0 a d 2.5 3.2 -1.085631 0
1 b e 1.2 1.3 0.997345 1
2 c f 0.7 0.1 0.282978 2
>>> pd.wide_to_long(df, ["A", "B"], i="id", j="year")
... # doctest: +NORMALIZE_WHITESPACE
X A B
id year
0 1970 -1.085631 a 2.5
1 1970 0.997345 b 1.2
2 1970 0.282978 c 0.7
0 1980 -1.085631 d 3.2
1 1980 0.997345 e 1.3
2 1980 0.282978 f 0.1
With multiple id columns
>>> df = pd.DataFrame({
... 'famid': [1, 1, 1, 2, 2, 2, 3, 3, 3],
... 'birth': [1, 2, 3, 1, 2, 3, 1, 2, 3],
... 'ht1': [2.8, 2.9, 2.2, 2, 1.8, 1.9, 2.2, 2.3, 2.1],
... 'ht2': [3.4, 3.8, 2.9, 3.2, 2.8, 2.4, 3.3, 3.4, 2.9]
... })
>>> df
birth famid ht1 ht2
0 1 1 2.8 3.4
1 2 1 2.9 3.8
2 3 1 2.2 2.9
3 1 2 2.0 3.2
4 2 2 1.8 2.8
5 3 2 1.9 2.4
6 1 3 2.2 3.3
7 2 3 2.3 3.4
8 3 3 2.1 2.9
>>> l = pd.wide_to_long(df, stubnames='ht', i=['famid', 'birth'], j='age')
>>> l
... # doctest: +NORMALIZE_WHITESPACE
ht
famid birth age
1 1 1 2.8
2 3.4
2 1 2.9
2 3.8
3 1 2.2
2 2.9
2 1 1 2.0
2 3.2
2 1 1.8
2 2.8
3 1 1.9
2 2.4
3 1 1 2.2
2 3.3
2 1 2.3
2 3.4
3 1 2.1
2 2.9
Going from long back to wide just takes some creative use of `unstack`
>>> w = l.unstack()
>>> w.columns = w.columns.map('{0[0]}{0[1]}'.format)
>>> w.reset_index()
famid birth ht1 ht2
0 1 1 2.8 3.4
1 1 2 2.9 3.8
2 1 3 2.2 2.9
3 2 1 2.0 3.2
4 2 2 1.8 2.8
5 2 3 1.9 2.4
6 3 1 2.2 3.3
7 3 2 2.3 3.4
8 3 3 2.1 2.9
Less wieldy column names are also handled
>>> np.random.seed(0)
>>> df = pd.DataFrame({'A(quarterly)-2010': np.random.rand(3),
... 'A(quarterly)-2011': np.random.rand(3),
... 'B(quarterly)-2010': np.random.rand(3),
... 'B(quarterly)-2011': np.random.rand(3),
... 'X' : np.random.randint(3, size=3)})
>>> df['id'] = df.index
>>> df # doctest: +NORMALIZE_WHITESPACE, +ELLIPSIS
A(quarterly)-2010 A(quarterly)-2011 B(quarterly)-2010 ...
0 0.548814 0.544883 0.437587 ...
1 0.715189 0.423655 0.891773 ...
2 0.602763 0.645894 0.963663 ...
X id
0 0 0
1 1 1
2 1 2
>>> pd.wide_to_long(df, ['A(quarterly)', 'B(quarterly)'], i='id',
... j='year', sep='-')
... # doctest: +NORMALIZE_WHITESPACE
X A(quarterly) B(quarterly)
id year
0 2010 0 0.548814 0.437587
1 2010 1 0.715189 0.891773
2 2010 1 0.602763 0.963663
0 2011 0 0.544883 0.383442
1 2011 1 0.423655 0.791725
2 2011 1 0.645894 0.528895
If we have many columns, we could also use a regex to find our
stubnames and pass that list on to wide_to_long
>>> stubnames = sorted(
... set([match[0] for match in df.columns.str.findall(
... r'[A-B]\(.*\)').values if match != [] ])
... )
>>> list(stubnames)
['A(quarterly)', 'B(quarterly)']
All of the above examples have integers as suffixes. It is possible to
have non-integers as suffixes.
>>> df = pd.DataFrame({
... 'famid': [1, 1, 1, 2, 2, 2, 3, 3, 3],
... 'birth': [1, 2, 3, 1, 2, 3, 1, 2, 3],
... 'ht_one': [2.8, 2.9, 2.2, 2, 1.8, 1.9, 2.2, 2.3, 2.1],
... 'ht_two': [3.4, 3.8, 2.9, 3.2, 2.8, 2.4, 3.3, 3.4, 2.9]
... })
>>> df
birth famid ht_one ht_two
0 1 1 2.8 3.4
1 2 1 2.9 3.8
2 3 1 2.2 2.9
3 1 2 2.0 3.2
4 2 2 1.8 2.8
5 3 2 1.9 2.4
6 1 3 2.2 3.3
7 2 3 2.3 3.4
8 3 3 2.1 2.9
>>> l = pd.wide_to_long(df, stubnames='ht', i=['famid', 'birth'], j='age',
sep='_', suffix='\w')
>>> l
... # doctest: +NORMALIZE_WHITESPACE
ht
famid birth age
1 1 one 2.8
two 3.4
2 one 2.9
two 3.8
3 one 2.2
two 2.9
2 1 one 2.0
two 3.2
2 one 1.8
two 2.8
3 one 1.9
two 2.4
3 1 one 2.2
two 3.3
2 one 2.3
two 3.4
3 one 2.1
two 2.9
"""
def get_var_names(df, stub, sep, suffix):
regex = r'^{stub}{sep}{suffix}$'.format(
stub=re.escape(stub), sep=re.escape(sep), suffix=suffix)
pattern = re.compile(regex)
return [col for col in df.columns if pattern.match(col)]
def melt_stub(df, stub, i, j, value_vars, sep):
newdf = melt(df, id_vars=i, value_vars=value_vars,
value_name=stub.rstrip(sep), var_name=j)
newdf[j] = Categorical(newdf[j])
newdf[j] = newdf[j].str.replace(re.escape(stub + sep), "")
# GH17627 Cast numerics suffixes to int/float
newdf[j] = to_numeric(newdf[j], errors='ignore')
return newdf.set_index(i + [j])
if not is_list_like(stubnames):
stubnames = [stubnames]
else:
stubnames = list(stubnames)
if any(col in stubnames for col in df.columns):
raise ValueError("stubname can't be identical to a column name")
if not is_list_like(i):
i = [i]
else:
i = list(i)
if df[i].duplicated().any():
raise ValueError("the id variables need to uniquely identify each row")
value_vars = [get_var_names(df, stub, sep, suffix) for stub in stubnames]
value_vars_flattened = [e for sublist in value_vars for e in sublist]
id_vars = list(set(df.columns.tolist()).difference(value_vars_flattened))
melted = [melt_stub(df, s, i, j, v, sep)
for s, v in zip(stubnames, value_vars)]
melted = melted[0].join(melted[1:], how='outer')
if len(i) == 1:
new = df[id_vars].set_index(i).join(melted)
return new
new = df[id_vars].merge(melted.reset_index(), on=i).set_index(i + [j])
return new | r"""
Wide panel to long format. Less flexible but more user-friendly than melt.
With stubnames ['A', 'B'], this function expects to find one or more
group of columns with format
A-suffix1, A-suffix2,..., B-suffix1, B-suffix2,...
You specify what you want to call this suffix in the resulting long format
with `j` (for example `j='year'`)
Each row of these wide variables are assumed to be uniquely identified by
`i` (can be a single column name or a list of column names)
All remaining variables in the data frame are left intact.
Parameters
----------
df : DataFrame
The wide-format DataFrame
stubnames : str or list-like
The stub name(s). The wide format variables are assumed to
start with the stub names.
i : str or list-like
Column(s) to use as id variable(s)
j : str
The name of the sub-observation variable. What you wish to name your
suffix in the long format.
sep : str, default ""
A character indicating the separation of the variable names
in the wide format, to be stripped from the names in the long format.
For example, if your column names are A-suffix1, A-suffix2, you
can strip the hyphen by specifying `sep='-'`
.. versionadded:: 0.20.0
suffix : str, default '\\d+'
A regular expression capturing the wanted suffixes. '\\d+' captures
numeric suffixes. Suffixes with no numbers could be specified with the
negated character class '\\D+'. You can also further disambiguate
suffixes, for example, if your wide variables are of the form
A-one, B-two,.., and you have an unrelated column A-rating, you can
ignore the last one by specifying `suffix='(!?one|two)'`
.. versionadded:: 0.20.0
.. versionchanged:: 0.23.0
When all suffixes are numeric, they are cast to int64/float64.
Returns
-------
DataFrame
A DataFrame that contains each stub name as a variable, with new index
(i, j).
Notes
-----
All extra variables are left untouched. This simply uses
`pandas.melt` under the hood, but is hard-coded to "do the right thing"
in a typical case.
Examples
--------
>>> np.random.seed(123)
>>> df = pd.DataFrame({"A1970" : {0 : "a", 1 : "b", 2 : "c"},
... "A1980" : {0 : "d", 1 : "e", 2 : "f"},
... "B1970" : {0 : 2.5, 1 : 1.2, 2 : .7},
... "B1980" : {0 : 3.2, 1 : 1.3, 2 : .1},
... "X" : dict(zip(range(3), np.random.randn(3)))
... })
>>> df["id"] = df.index
>>> df
A1970 A1980 B1970 B1980 X id
0 a d 2.5 3.2 -1.085631 0
1 b e 1.2 1.3 0.997345 1
2 c f 0.7 0.1 0.282978 2
>>> pd.wide_to_long(df, ["A", "B"], i="id", j="year")
... # doctest: +NORMALIZE_WHITESPACE
X A B
id year
0 1970 -1.085631 a 2.5
1 1970 0.997345 b 1.2
2 1970 0.282978 c 0.7
0 1980 -1.085631 d 3.2
1 1980 0.997345 e 1.3
2 1980 0.282978 f 0.1
With multiple id columns
>>> df = pd.DataFrame({
... 'famid': [1, 1, 1, 2, 2, 2, 3, 3, 3],
... 'birth': [1, 2, 3, 1, 2, 3, 1, 2, 3],
... 'ht1': [2.8, 2.9, 2.2, 2, 1.8, 1.9, 2.2, 2.3, 2.1],
... 'ht2': [3.4, 3.8, 2.9, 3.2, 2.8, 2.4, 3.3, 3.4, 2.9]
... })
>>> df
birth famid ht1 ht2
0 1 1 2.8 3.4
1 2 1 2.9 3.8
2 3 1 2.2 2.9
3 1 2 2.0 3.2
4 2 2 1.8 2.8
5 3 2 1.9 2.4
6 1 3 2.2 3.3
7 2 3 2.3 3.4
8 3 3 2.1 2.9
>>> l = pd.wide_to_long(df, stubnames='ht', i=['famid', 'birth'], j='age')
>>> l
... # doctest: +NORMALIZE_WHITESPACE
ht
famid birth age
1 1 1 2.8
2 3.4
2 1 2.9
2 3.8
3 1 2.2
2 2.9
2 1 1 2.0
2 3.2
2 1 1.8
2 2.8
3 1 1.9
2 2.4
3 1 1 2.2
2 3.3
2 1 2.3
2 3.4
3 1 2.1
2 2.9
Going from long back to wide just takes some creative use of `unstack`
>>> w = l.unstack()
>>> w.columns = w.columns.map('{0[0]}{0[1]}'.format)
>>> w.reset_index()
famid birth ht1 ht2
0 1 1 2.8 3.4
1 1 2 2.9 3.8
2 1 3 2.2 2.9
3 2 1 2.0 3.2
4 2 2 1.8 2.8
5 2 3 1.9 2.4
6 3 1 2.2 3.3
7 3 2 2.3 3.4
8 3 3 2.1 2.9
Less wieldy column names are also handled
>>> np.random.seed(0)
>>> df = pd.DataFrame({'A(quarterly)-2010': np.random.rand(3),
... 'A(quarterly)-2011': np.random.rand(3),
... 'B(quarterly)-2010': np.random.rand(3),
... 'B(quarterly)-2011': np.random.rand(3),
... 'X' : np.random.randint(3, size=3)})
>>> df['id'] = df.index
>>> df # doctest: +NORMALIZE_WHITESPACE, +ELLIPSIS
A(quarterly)-2010 A(quarterly)-2011 B(quarterly)-2010 ...
0 0.548814 0.544883 0.437587 ...
1 0.715189 0.423655 0.891773 ...
2 0.602763 0.645894 0.963663 ...
X id
0 0 0
1 1 1
2 1 2
>>> pd.wide_to_long(df, ['A(quarterly)', 'B(quarterly)'], i='id',
... j='year', sep='-')
... # doctest: +NORMALIZE_WHITESPACE
X A(quarterly) B(quarterly)
id year
0 2010 0 0.548814 0.437587
1 2010 1 0.715189 0.891773
2 2010 1 0.602763 0.963663
0 2011 0 0.544883 0.383442
1 2011 1 0.423655 0.791725
2 2011 1 0.645894 0.528895
If we have many columns, we could also use a regex to find our
stubnames and pass that list on to wide_to_long
>>> stubnames = sorted(
... set([match[0] for match in df.columns.str.findall(
... r'[A-B]\(.*\)').values if match != [] ])
... )
>>> list(stubnames)
['A(quarterly)', 'B(quarterly)']
All of the above examples have integers as suffixes. It is possible to
have non-integers as suffixes.
>>> df = pd.DataFrame({
... 'famid': [1, 1, 1, 2, 2, 2, 3, 3, 3],
... 'birth': [1, 2, 3, 1, 2, 3, 1, 2, 3],
... 'ht_one': [2.8, 2.9, 2.2, 2, 1.8, 1.9, 2.2, 2.3, 2.1],
... 'ht_two': [3.4, 3.8, 2.9, 3.2, 2.8, 2.4, 3.3, 3.4, 2.9]
... })
>>> df
birth famid ht_one ht_two
0 1 1 2.8 3.4
1 2 1 2.9 3.8
2 3 1 2.2 2.9
3 1 2 2.0 3.2
4 2 2 1.8 2.8
5 3 2 1.9 2.4
6 1 3 2.2 3.3
7 2 3 2.3 3.4
8 3 3 2.1 2.9
>>> l = pd.wide_to_long(df, stubnames='ht', i=['famid', 'birth'], j='age',
sep='_', suffix='\w')
>>> l
... # doctest: +NORMALIZE_WHITESPACE
ht
famid birth age
1 1 one 2.8
two 3.4
2 one 2.9
two 3.8
3 one 2.2
two 2.9
2 1 one 2.0
two 3.2
2 one 1.8
two 2.8
3 one 1.9
two 2.4
3 1 one 2.2
two 3.3
2 one 2.3
two 3.4
3 one 2.1
two 2.9 | Below is the instruction that describes the task:
### Input:
r"""
Wide panel to long format. Less flexible but more user-friendly than melt.
With stubnames ['A', 'B'], this function expects to find one or more
group of columns with format
A-suffix1, A-suffix2,..., B-suffix1, B-suffix2,...
You specify what you want to call this suffix in the resulting long format
with `j` (for example `j='year'`)
Each row of these wide variables are assumed to be uniquely identified by
`i` (can be a single column name or a list of column names)
All remaining variables in the data frame are left intact.
Parameters
----------
df : DataFrame
The wide-format DataFrame
stubnames : str or list-like
The stub name(s). The wide format variables are assumed to
start with the stub names.
i : str or list-like
Column(s) to use as id variable(s)
j : str
The name of the sub-observation variable. What you wish to name your
suffix in the long format.
sep : str, default ""
A character indicating the separation of the variable names
in the wide format, to be stripped from the names in the long format.
For example, if your column names are A-suffix1, A-suffix2, you
can strip the hyphen by specifying `sep='-'`
.. versionadded:: 0.20.0
suffix : str, default '\\d+'
A regular expression capturing the wanted suffixes. '\\d+' captures
numeric suffixes. Suffixes with no numbers could be specified with the
negated character class '\\D+'. You can also further disambiguate
suffixes, for example, if your wide variables are of the form
A-one, B-two,.., and you have an unrelated column A-rating, you can
ignore the last one by specifying `suffix='(!?one|two)'`
.. versionadded:: 0.20.0
.. versionchanged:: 0.23.0
When all suffixes are numeric, they are cast to int64/float64.
Returns
-------
DataFrame
A DataFrame that contains each stub name as a variable, with new index
(i, j).
Notes
-----
All extra variables are left untouched. This simply uses
`pandas.melt` under the hood, but is hard-coded to "do the right thing"
in a typical case.
Examples
--------
>>> np.random.seed(123)
>>> df = pd.DataFrame({"A1970" : {0 : "a", 1 : "b", 2 : "c"},
... "A1980" : {0 : "d", 1 : "e", 2 : "f"},
... "B1970" : {0 : 2.5, 1 : 1.2, 2 : .7},
... "B1980" : {0 : 3.2, 1 : 1.3, 2 : .1},
... "X" : dict(zip(range(3), np.random.randn(3)))
... })
>>> df["id"] = df.index
>>> df
A1970 A1980 B1970 B1980 X id
0 a d 2.5 3.2 -1.085631 0
1 b e 1.2 1.3 0.997345 1
2 c f 0.7 0.1 0.282978 2
>>> pd.wide_to_long(df, ["A", "B"], i="id", j="year")
... # doctest: +NORMALIZE_WHITESPACE
X A B
id year
0 1970 -1.085631 a 2.5
1 1970 0.997345 b 1.2
2 1970 0.282978 c 0.7
0 1980 -1.085631 d 3.2
1 1980 0.997345 e 1.3
2 1980 0.282978 f 0.1
With multiple id columns
>>> df = pd.DataFrame({
... 'famid': [1, 1, 1, 2, 2, 2, 3, 3, 3],
... 'birth': [1, 2, 3, 1, 2, 3, 1, 2, 3],
... 'ht1': [2.8, 2.9, 2.2, 2, 1.8, 1.9, 2.2, 2.3, 2.1],
... 'ht2': [3.4, 3.8, 2.9, 3.2, 2.8, 2.4, 3.3, 3.4, 2.9]
... })
>>> df
birth famid ht1 ht2
0 1 1 2.8 3.4
1 2 1 2.9 3.8
2 3 1 2.2 2.9
3 1 2 2.0 3.2
4 2 2 1.8 2.8
5 3 2 1.9 2.4
6 1 3 2.2 3.3
7 2 3 2.3 3.4
8 3 3 2.1 2.9
>>> l = pd.wide_to_long(df, stubnames='ht', i=['famid', 'birth'], j='age')
>>> l
... # doctest: +NORMALIZE_WHITESPACE
ht
famid birth age
1 1 1 2.8
2 3.4
2 1 2.9
2 3.8
3 1 2.2
2 2.9
2 1 1 2.0
2 3.2
2 1 1.8
2 2.8
3 1 1.9
2 2.4
3 1 1 2.2
2 3.3
2 1 2.3
2 3.4
3 1 2.1
2 2.9
Going from long back to wide just takes some creative use of `unstack`
>>> w = l.unstack()
>>> w.columns = w.columns.map('{0[0]}{0[1]}'.format)
>>> w.reset_index()
famid birth ht1 ht2
0 1 1 2.8 3.4
1 1 2 2.9 3.8
2 1 3 2.2 2.9
3 2 1 2.0 3.2
4 2 2 1.8 2.8
5 2 3 1.9 2.4
6 3 1 2.2 3.3
7 3 2 2.3 3.4
8 3 3 2.1 2.9
Less wieldy column names are also handled
>>> np.random.seed(0)
>>> df = pd.DataFrame({'A(quarterly)-2010': np.random.rand(3),
... 'A(quarterly)-2011': np.random.rand(3),
... 'B(quarterly)-2010': np.random.rand(3),
... 'B(quarterly)-2011': np.random.rand(3),
... 'X' : np.random.randint(3, size=3)})
>>> df['id'] = df.index
>>> df # doctest: +NORMALIZE_WHITESPACE, +ELLIPSIS
A(quarterly)-2010 A(quarterly)-2011 B(quarterly)-2010 ...
0 0.548814 0.544883 0.437587 ...
1 0.715189 0.423655 0.891773 ...
2 0.602763 0.645894 0.963663 ...
X id
0 0 0
1 1 1
2 1 2
>>> pd.wide_to_long(df, ['A(quarterly)', 'B(quarterly)'], i='id',
... j='year', sep='-')
... # doctest: +NORMALIZE_WHITESPACE
X A(quarterly) B(quarterly)
id year
0 2010 0 0.548814 0.437587
1 2010 1 0.715189 0.891773
2 2010 1 0.602763 0.963663
0 2011 0 0.544883 0.383442
1 2011 1 0.423655 0.791725
2 2011 1 0.645894 0.528895
If we have many columns, we could also use a regex to find our
stubnames and pass that list on to wide_to_long
>>> stubnames = sorted(
... set([match[0] for match in df.columns.str.findall(
... r'[A-B]\(.*\)').values if match != [] ])
... )
>>> list(stubnames)
['A(quarterly)', 'B(quarterly)']
All of the above examples have integers as suffixes. It is possible to
have non-integers as suffixes.
>>> df = pd.DataFrame({
... 'famid': [1, 1, 1, 2, 2, 2, 3, 3, 3],
... 'birth': [1, 2, 3, 1, 2, 3, 1, 2, 3],
... 'ht_one': [2.8, 2.9, 2.2, 2, 1.8, 1.9, 2.2, 2.3, 2.1],
... 'ht_two': [3.4, 3.8, 2.9, 3.2, 2.8, 2.4, 3.3, 3.4, 2.9]
... })
>>> df
birth famid ht_one ht_two
0 1 1 2.8 3.4
1 2 1 2.9 3.8
2 3 1 2.2 2.9
3 1 2 2.0 3.2
4 2 2 1.8 2.8
5 3 2 1.9 2.4
6 1 3 2.2 3.3
7 2 3 2.3 3.4
8 3 3 2.1 2.9
>>> l = pd.wide_to_long(df, stubnames='ht', i=['famid', 'birth'], j='age',
sep='_', suffix='\w')
>>> l
... # doctest: +NORMALIZE_WHITESPACE
ht
famid birth age
1 1 one 2.8
two 3.4
2 one 2.9
two 3.8
3 one 2.2
two 2.9
2 1 one 2.0
two 3.2
2 one 1.8
two 2.8
3 one 1.9
two 2.4
3 1 one 2.2
two 3.3
2 one 2.3
two 3.4
3 one 2.1
two 2.9
### Response:
def wide_to_long(df, stubnames, i, j, sep="", suffix=r'\d+'):
    r"""
    Reshape a wide-format DataFrame into long format.

    With ``stubnames=['A', 'B']`` this expects one or more groups of
    columns named ``A<sep><suffix>`` and ``B<sep><suffix>`` (for example
    ``A-2010``, ``A-2011``) and unpivots them so that each stub becomes a
    single column, with the suffix stored in a new column named ``j``.
    Columns that match no stub pattern are carried through unchanged.

    Parameters
    ----------
    df : DataFrame
        The wide-format DataFrame.
    stubnames : str or list-like
        The stub name(s); the wide-format variables are assumed to start
        with these.
    i : str or list-like
        Column(s) that uniquely identify each row (the id variables).
    j : str
        Name to give the suffix column in the long format.
    sep : str, default ""
        Separator between stub and suffix in the wide column names; it is
        stripped from the names in the long format (e.g. ``sep='-'`` for
        columns like ``A-suffix1``).
    suffix : str, default '\\d+'
        Regular expression capturing the wanted suffixes. ``'\\d+'``
        captures numeric suffixes; non-numeric suffixes can be matched
        with e.g. ``'\\D+'``. When all suffixes are numeric they are cast
        to int64/float64.

    Returns
    -------
    DataFrame
        A long-format DataFrame indexed by ``i + [j]`` with one column
        per stub name.

    Raises
    ------
    ValueError
        If a stub name is identical to an existing column name, or if the
        id variables do not uniquely identify each row.

    Notes
    -----
    This simply uses `pandas.melt` under the hood, but is hard-coded to
    "do the right thing" in a typical case.
    """
    def _stub_columns(frame, stub):
        # Columns matching "<stub><sep><suffix>" exactly; stub and sep are
        # escaped, while `suffix` is a user-supplied regular expression.
        matcher = re.compile(r'^{stub}{sep}{suffix}$'.format(
            stub=re.escape(stub), sep=re.escape(sep), suffix=suffix))
        return [name for name in frame.columns if matcher.match(name)]

    def _melt_stub(frame, stub, columns):
        # Unpivot one stub's columns, then reduce the variable names to
        # the bare suffix (e.g. "A-2010" -> "2010").
        long_df = melt(frame, id_vars=i, value_vars=columns,
                       value_name=stub.rstrip(sep), var_name=j)
        long_df[j] = Categorical(long_df[j])
        long_df[j] = long_df[j].str.replace(re.escape(stub + sep), "")
        # GH17627 Cast numeric suffixes to int/float
        long_df[j] = to_numeric(long_df[j], errors='ignore')
        return long_df.set_index(i + [j])

    stubnames = list(stubnames) if is_list_like(stubnames) else [stubnames]
    if any(col in stubnames for col in df.columns):
        raise ValueError("stubname can't be identical to a column name")

    i = list(i) if is_list_like(i) else [i]
    if df[i].duplicated().any():
        raise ValueError("the id variables need to uniquely identify each row")

    stub_columns = [_stub_columns(df, stub) for stub in stubnames]
    consumed = [name for group in stub_columns for name in group]
    # Everything not consumed by a stub pattern is carried along as-is.
    id_vars = list(set(df.columns.tolist()).difference(consumed))

    pieces = [_melt_stub(df, stub, columns)
              for stub, columns in zip(stubnames, stub_columns)]
    combined = pieces[0].join(pieces[1:], how='outer')
    if len(i) == 1:
        return df[id_vars].set_index(i).join(combined)
    return df[id_vars].merge(combined.reset_index(), on=i).set_index(i + [j])
def _download_predicate_data(self, class_, controller):
    """Get raw predicate information for given request class, and cache for
    subsequent calls.

    :param class_: Request class whose model definition is fetched.
    :param controller: API controller path segment the class belongs to.
    :returns: The ``data`` payload of the JSON response.
    """
    # Ensure a valid session/token exists before hitting the API.
    self.authenticate()
    url = ('{0}{1}/modeldef/class/{2}'
           .format(self.base_url, controller, class_))
    # requote_uri is used for readable logging only; the raw url is fetched.
    logger.debug(requests.utils.requote_uri(url))
    resp = self._ratelimited_get(url)
    # Raise on HTTP error statuses before attempting to parse the body.
    _raise_for_status(resp)
    # NOTE(review): no caching is visible in this body despite the
    # docstring; presumably a caching decorator is applied elsewhere — confirm.
    return resp.json()['data']
subsequent calls. | Below is the instruction that describes the task:
### Input:
Get raw predicate information for given request class, and cache for
subsequent calls.
### Response:
def _download_predicate_data(self, class_, controller):
    """Fetch the raw predicate/model definition for ``class_``.

    Authenticates first, then performs a rate-limited GET against the
    controller's ``modeldef/class`` endpoint and returns the decoded
    ``data`` payload. Results are cached for subsequent calls.
    """
    self.authenticate()
    endpoint = ('{0}{1}/modeldef/class/{2}'
                .format(self.base_url, controller, class_))
    logger.debug(requests.utils.requote_uri(endpoint))
    response = self._ratelimited_get(endpoint)
    _raise_for_status(response)
    payload = response.json()
    return payload['data']
def _split_compound_string_(compound_string):
    """
    Split a compound's combined formula and phase into separate strings for
    the formula and phase.

    :param compound_string: Formula and phase of a chemical compound, e.g.
        'SiO2[S1]'.
    :returns: Formula of chemical compound.
    :returns: Phase of chemical compound.
    """
    # 'SiO2[S1]' -> 'SiO2[S1' after dropping ']'; splitting on '[' then
    # yields the formula ('SiO2') and the phase ('S1').
    formula = compound_string.replace(']', '').split('[')[0]
    phase = compound_string.replace(']', '').split('[')[1]
    # An input without '[' raises IndexError on the phase lookup above.
    return formula, phase
the formula and phase.
:param compound_string: Formula and phase of a chemical compound, e.g.
'SiO2[S1]'.
:returns: Formula of chemical compound.
:returns: Phase of chemical compound. | Below is the instruction that describes the task:
### Input:
Split a compound's combined formula and phase into separate strings for
the formula and phase.
:param compound_string: Formula and phase of a chemical compound, e.g.
'SiO2[S1]'.
:returns: Formula of chemical compound.
:returns: Phase of chemical compound.
### Response:
def _split_compound_string_(compound_string):
"""
Split a compound's combined formula and phase into separate strings for
the formula and phase.
:param compound_string: Formula and phase of a chemical compound, e.g.
'SiO2[S1]'.
:returns: Formula of chemical compound.
:returns: Phase of chemical compound.
"""
formula = compound_string.replace(']', '').split('[')[0]
phase = compound_string.replace(']', '').split('[')[1]
return formula, phase |
def write_color_old( text, attr=None):
    u'''Split *text* on terminal color escapes and resolve console attributes.

    Folds each ANSI escape sequence into the running console attribute and
    returns a list of (attribute, chunk) pairs for the non-escape chunks,
    where attribute is a hex string such as u"0xf".  (Despite the old
    docstring, the character count ``n`` is computed but not returned —
    the list ``res`` is.)
    '''
    res = []
    chunks = terminal_escape.split(text)
    n = 0 # count the characters we actually write, omitting the escapes
    if attr is None:#use attribute from initial console
        attr = 15
    for chunk in chunks:
        m = escape_parts.match(chunk)
        if m:
            # An escape chunk: update `attr` from each ';'-separated code,
            # then skip it (escapes themselves are never emitted).
            for part in m.group(1).split(u";"):
                if part == u"0": # No text attribute
                    attr = 0
                elif part == u"7": # switch on reverse
                    attr |= 0x4000
                if part == u"1": # switch on bold (i.e. intensify foreground color)
                    attr |= 0x08
                elif len(part) == 2 and u"30" <= part <= u"37": # set foreground color
                    part = int(part)-30
                    # we have to mirror bits
                    attr = (attr & ~0x07) | ((part & 0x1) << 2) | (part & 0x2) | ((part & 0x4) >> 2)
                elif len(part) == 2 and u"40" <= part <= u"47": # set background color
                    part = int(part) - 40
                    # we have to mirror bits
                    attr = (attr & ~0x70) | ((part & 0x1) << 6) | ((part & 0x2) << 4) | ((part & 0x4) << 2)
                # ignore blink, underline and anything we don't understand
            continue
        n += len(chunk)
        if chunk:
            res.append((u"0x%x"%attr, chunk))
    return res
return the number of characters written. | Below is the instruction that describes the task:
### Input:
u'''write text at current cursor position and interpret color escapes.
return the number of characters written.
### Response:
def write_color_old(text, attr=None):
    u'''Resolve ANSI color escapes in *text* against a console attribute.

    Splits *text* on terminal escape sequences, folds each escape into the
    running console attribute (defaulting to 15, the initial console
    attribute), and returns a list of (attribute-hex-string, chunk) pairs
    covering the non-escape chunks.
    '''
    if attr is None:
        # use attribute from initial console
        attr = 15
    segments = []
    written = 0  # characters actually written, escape sequences excluded
    for piece in terminal_escape.split(text):
        escape = escape_parts.match(piece)
        if escape:
            # Fold every ';'-separated code of the escape into `attr`;
            # the escape chunk itself is never emitted.
            for code in escape.group(1).split(u";"):
                if code == u"0":
                    attr = 0  # reset: no text attribute
                elif code == u"7":
                    attr |= 0x4000  # reverse video
                if code == u"1":
                    attr |= 0x08  # bold, i.e. intensified foreground
                elif len(code) == 2 and u"30" <= code <= u"37":
                    color = int(code) - 30
                    # foreground: mirror the color bits into the low nibble
                    attr = (attr & ~0x07) | ((color & 0x1) << 2) | (color & 0x2) | ((color & 0x4) >> 2)
                elif len(code) == 2 and u"40" <= code <= u"47":
                    color = int(code) - 40
                    # background: mirror the color bits into bits 4-6
                    attr = (attr & ~0x70) | ((color & 0x1) << 6) | ((color & 0x2) << 4) | ((color & 0x4) << 2)
                # blink, underline and unrecognized codes are ignored
            continue
        written += len(piece)
        if piece:
            segments.append((u"0x%x" % attr, piece))
    return segments
def get_usb_controller_by_name(self, name):
    """Returns a USB controller with the given type.

    in name of type str

    return controller of type :class:`IUSBController`

    raises :class:`VBoxErrorObjectNotFound`
        A USB controller with given name doesn't exist.
    """
    # NOTE: `basestring` implies this code targets Python 2 (or relies on
    # a py2-compat shim); under plain Python 3 this check would NameError.
    if not isinstance(name, basestring):
        raise TypeError("name can only be an instance of type basestring")
    # Delegate to the managed-object RPC, then wrap the raw handle in the
    # IUSBController interface wrapper.
    controller = self._call("getUSBControllerByName",
                            in_p=[name])
    controller = IUSBController(controller)
    return controller
in name of type str
return controller of type :class:`IUSBController`
raises :class:`VBoxErrorObjectNotFound`
A USB controller with given name doesn't exist. | Below is the instruction that describes the task:
### Input:
Returns a USB controller with the given type.
in name of type str
return controller of type :class:`IUSBController`
raises :class:`VBoxErrorObjectNotFound`
A USB controller with given name doesn't exist.
### Response:
def get_usb_controller_by_name(self, name):
    """Look up this machine's USB controller with the given name.

    in name of type str

    return controller of type :class:`IUSBController`

    raises :class:`VBoxErrorObjectNotFound`
        A USB controller with given name doesn't exist.
    """
    if not isinstance(name, basestring):
        raise TypeError("name can only be an instance of type basestring")
    raw_controller = self._call("getUSBControllerByName", in_p=[name])
    return IUSBController(raw_controller)
def get_json(self):
    """Serialize ratings object as JSON-formatted string"""
    # Snapshot the public attributes into a plain dict so the serializer
    # does not need to know about this class.
    ratings_dict = {
        'category': self.category,
        'date': self.date,
        'day': self.weekday,  # attribute is `weekday`, JSON key is 'day'
        'next week': self.next_week,
        'last week': self.last_week,
        'entries': self.entries,
        'url': self.url
    }
    return to_json(ratings_dict)
### Input:
Serialize ratings object as JSON-formatted string
### Response:
def get_json(self):
    """Serialize ratings object as JSON-formatted string"""
    # Build the payload key by key (same insertion order as before, so the
    # serialized output is unchanged).
    payload = {}
    payload['category'] = self.category
    payload['date'] = self.date
    payload['day'] = self.weekday
    payload['next week'] = self.next_week
    payload['last week'] = self.last_week
    payload['entries'] = self.entries
    payload['url'] = self.url
    return to_json(payload)
def configure(self, organization, base_url='', ttl='', max_ttl='', mount_point=DEFAULT_MOUNT_POINT):
    """Configure the connection parameters for GitHub.

    This path honors the distinction between the create and update capabilities inside ACL policies.

    Supported methods:
        POST: /auth/{mount_point}/config. Produces: 204 (empty body)

    :param organization: The organization users must be part of.
    :type organization: str | unicode
    :param base_url: The API endpoint to use. Useful if you are running GitHub Enterprise or an API-compatible
        authentication server.
    :type base_url: str | unicode
    :param ttl: Duration after which authentication will be expired.
    :type ttl: str | unicode
    :param max_ttl: Maximum duration after which authentication will
        be expired.
    :type max_ttl: str | unicode
    :param mount_point: The "path" the method/backend was mounted on.
    :type mount_point: str | unicode
    :return: The response of the configure_method request.
    :rtype: requests.Response
    """
    params = {
        'organization': organization,
        'base_url': base_url,
        'ttl': ttl,
        'max_ttl': max_ttl,
    }
    # NOTE(review): empty-string defaults are posted as-is, which may
    # clear previously configured values server-side — confirm intended.
    api_path = '/v1/auth/{mount_point}/config'.format(
        mount_point=mount_point
    )
    return self._adapter.post(
        url=api_path,
        json=params,
    )
This path honors the distinction between the create and update capabilities inside ACL policies.
Supported methods:
POST: /auth/{mount_point}/config. Produces: 204 (empty body)
:param organization: The organization users must be part of.
:type organization: str | unicode
:param base_url: The API endpoint to use. Useful if you are running GitHub Enterprise or an API-compatible
authentication server.
:type base_url: str | unicode
:param ttl: Duration after which authentication will be expired.
:type ttl: str | unicode
:param max_ttl: Maximum duration after which authentication will
be expired.
:type max_ttl: str | unicode
:param mount_point: The "path" the method/backend was mounted on.
:type mount_point: str | unicode
:return: The response of the configure_method request.
:rtype: requests.Response | Below is the instruction that describes the task:
### Input:
Configure the connection parameters for GitHub.
This path honors the distinction between the create and update capabilities inside ACL policies.
Supported methods:
POST: /auth/{mount_point}/config. Produces: 204 (empty body)
:param organization: The organization users must be part of.
:type organization: str | unicode
:param base_url: The API endpoint to use. Useful if you are running GitHub Enterprise or an API-compatible
authentication server.
:type base_url: str | unicode
:param ttl: Duration after which authentication will be expired.
:type ttl: str | unicode
:param max_ttl: Maximum duration after which authentication will
be expired.
:type max_ttl: str | unicode
:param mount_point: The "path" the method/backend was mounted on.
:type mount_point: str | unicode
:return: The response of the configure_method request.
:rtype: requests.Response
### Response:
def configure(self, organization, base_url='', ttl='', max_ttl='', mount_point=DEFAULT_MOUNT_POINT):
    """Configure the connection parameters for GitHub.

    Honors the distinction between the create and update capabilities
    inside ACL policies.

    Supported methods:
        POST: /auth/{mount_point}/config. Produces: 204 (empty body)

    :param organization: The organization users must be part of.
    :type organization: str | unicode
    :param base_url: The API endpoint to use. Useful if you are running
        GitHub Enterprise or an API-compatible authentication server.
    :type base_url: str | unicode
    :param ttl: Duration after which authentication will be expired.
    :type ttl: str | unicode
    :param max_ttl: Maximum duration after which authentication will be
        expired.
    :type max_ttl: str | unicode
    :param mount_point: The "path" the method/backend was mounted on.
    :type mount_point: str | unicode
    :return: The response of the configure_method request.
    :rtype: requests.Response
    """
    payload = {
        'organization': organization,
        'base_url': base_url,
        'ttl': ttl,
        'max_ttl': max_ttl,
    }
    endpoint = '/v1/auth/{mount_point}/config'.format(mount_point=mount_point)
    return self._adapter.post(url=endpoint, json=payload)
def startDrag(self, dropActions):
    """Reimplement Qt Method - handle drag event"""
    data = QMimeData()
    # Expose the current selection as file URLs so external drop targets
    # (e.g. a file manager) can accept the drag.
    data.setUrls([QUrl(fname) for fname in self.get_selected_filenames()])
    drag = QDrag(self)
    drag.setMimeData(data)
    # Blocks until the drag-and-drop operation completes.
    drag.exec_()
### Input:
Reimplement Qt Method - handle drag event
### Response:
def startDrag(self, dropActions):
    """Reimplement Qt Method - handle drag event"""
    # Package the selected filenames as URLs on a MIME payload and run a
    # blocking drag-and-drop operation with it.
    mime_data = QMimeData()
    selected_urls = [QUrl(fname) for fname in self.get_selected_filenames()]
    mime_data.setUrls(selected_urls)
    drag_operation = QDrag(self)
    drag_operation.setMimeData(mime_data)
    drag_operation.exec_()
def mft_record_size(self):
    """
    Returns:
        int: MFT record size in bytes
    """
    # A negative "clusters per MFT record" value in the extended BPB
    # encodes the record size directly as 2**|value| bytes.
    if self.extended_bpb.clusters_per_mft < 0:
        return 2 ** abs(self.extended_bpb.clusters_per_mft)
    else:
        # NOTE(review): this branch reads `self.clusters_per_mft` rather
        # than `self.extended_bpb.clusters_per_mft` — presumably a property
        # proxying the BPB field; confirm the two agree.
        return self.clusters_per_mft * self.sectors_per_cluster * \
            self.bytes_per_sector
int: MFT record size in bytes | Below is the instruction that describes the task:
### Input:
Returns:
int: MFT record size in bytes
### Response:
def mft_record_size(self):
"""
Returns:
int: MFT record size in bytes
"""
if self.extended_bpb.clusters_per_mft < 0:
return 2 ** abs(self.extended_bpb.clusters_per_mft)
else:
return self.clusters_per_mft * self.sectors_per_cluster * \
self.bytes_per_sector |
def activate_hopscotch(driver):
""" Allows you to use Hopscotch Tours with SeleniumBase
http://linkedin.github.io/hopscotch/
"""
hopscotch_css = constants.Hopscotch.MIN_CSS
hopscotch_js = constants.Hopscotch.MIN_JS
backdrop_style = style_sheet.hops_backdrop_style
verify_script = ("""// Verify Hopscotch activated
var hops = hopscotch.isActive;
""")
activate_bootstrap(driver)
js_utils.wait_for_ready_state_complete(driver)
js_utils.wait_for_angularjs(driver)
js_utils.add_css_style(driver, backdrop_style)
for x in range(4):
js_utils.activate_jquery(driver)
js_utils.add_css_link(driver, hopscotch_css)
js_utils.add_js_link(driver, hopscotch_js)
time.sleep(0.1)
for x in range(int(settings.MINI_TIMEOUT * 2.0)):
# Hopscotch needs a small amount of time to load & activate.
try:
driver.execute_script(verify_script)
js_utils.wait_for_ready_state_complete(driver)
js_utils.wait_for_angularjs(driver)
time.sleep(0.05)
return
except Exception:
time.sleep(0.15)
raise_unable_to_load_jquery_exception(driver) | Allows you to use Hopscotch Tours with SeleniumBase
http://linkedin.github.io/hopscotch/ | Below is the the instruction that describes the task:
### Input:
Allows you to use Hopscotch Tours with SeleniumBase
http://linkedin.github.io/hopscotch/
### Response:
def activate_hopscotch(driver):
""" Allows you to use Hopscotch Tours with SeleniumBase
http://linkedin.github.io/hopscotch/
"""
hopscotch_css = constants.Hopscotch.MIN_CSS
hopscotch_js = constants.Hopscotch.MIN_JS
backdrop_style = style_sheet.hops_backdrop_style
verify_script = ("""// Verify Hopscotch activated
var hops = hopscotch.isActive;
""")
activate_bootstrap(driver)
js_utils.wait_for_ready_state_complete(driver)
js_utils.wait_for_angularjs(driver)
js_utils.add_css_style(driver, backdrop_style)
for x in range(4):
js_utils.activate_jquery(driver)
js_utils.add_css_link(driver, hopscotch_css)
js_utils.add_js_link(driver, hopscotch_js)
time.sleep(0.1)
for x in range(int(settings.MINI_TIMEOUT * 2.0)):
# Hopscotch needs a small amount of time to load & activate.
try:
driver.execute_script(verify_script)
js_utils.wait_for_ready_state_complete(driver)
js_utils.wait_for_angularjs(driver)
time.sleep(0.05)
return
except Exception:
time.sleep(0.15)
raise_unable_to_load_jquery_exception(driver) |
def select_mastery_path(self, id, course_id, module_id, assignment_set_id=None, student_id=None):
"""
Select a mastery path.
Select a mastery path when module item includes several possible paths.
Requires Mastery Paths feature to be enabled. Returns a compound document
with the assignments included in the given path and any module items
related to those assignments
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""ID"""
path["course_id"] = course_id
# REQUIRED - PATH - module_id
"""ID"""
path["module_id"] = module_id
# REQUIRED - PATH - id
"""ID"""
path["id"] = id
# OPTIONAL - assignment_set_id
"""Assignment set chosen, as specified in the mastery_paths portion of the
context module item response"""
if assignment_set_id is not None:
data["assignment_set_id"] = assignment_set_id
# OPTIONAL - student_id
"""Which student the selection applies to. If not specified, current user is
implied."""
if student_id is not None:
data["student_id"] = student_id
self.logger.debug("POST /api/v1/courses/{course_id}/modules/{module_id}/items/{id}/select_mastery_path with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("POST", "/api/v1/courses/{course_id}/modules/{module_id}/items/{id}/select_mastery_path".format(**path), data=data, params=params, no_data=True) | Select a mastery path.
Select a mastery path when module item includes several possible paths.
Requires Mastery Paths feature to be enabled. Returns a compound document
with the assignments included in the given path and any module items
related to those assignments | Below is the the instruction that describes the task:
### Input:
Select a mastery path.
Select a mastery path when module item includes several possible paths.
Requires Mastery Paths feature to be enabled. Returns a compound document
with the assignments included in the given path and any module items
related to those assignments
### Response:
def select_mastery_path(self, id, course_id, module_id, assignment_set_id=None, student_id=None):
"""
Select a mastery path.
Select a mastery path when module item includes several possible paths.
Requires Mastery Paths feature to be enabled. Returns a compound document
with the assignments included in the given path and any module items
related to those assignments
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""ID"""
path["course_id"] = course_id
# REQUIRED - PATH - module_id
"""ID"""
path["module_id"] = module_id
# REQUIRED - PATH - id
"""ID"""
path["id"] = id
# OPTIONAL - assignment_set_id
"""Assignment set chosen, as specified in the mastery_paths portion of the
context module item response"""
if assignment_set_id is not None:
data["assignment_set_id"] = assignment_set_id
# OPTIONAL - student_id
"""Which student the selection applies to. If not specified, current user is
implied."""
if student_id is not None:
data["student_id"] = student_id
self.logger.debug("POST /api/v1/courses/{course_id}/modules/{module_id}/items/{id}/select_mastery_path with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("POST", "/api/v1/courses/{course_id}/modules/{module_id}/items/{id}/select_mastery_path".format(**path), data=data, params=params, no_data=True) |
def _list_records(self, rtype=None, name=None, content=None):
"""List all records for the hosted zone."""
records = []
paginator = RecordSetPaginator(self.r53_client, self.domain_id)
for record in paginator.all_record_sets():
if rtype is not None and record['Type'] != rtype:
continue
if name is not None and record['Name'] != self._fqdn_name(name):
continue
if record.get('AliasTarget', None) is not None:
record_content = [record['AliasTarget'].get('DNSName', None)]
if record.get('ResourceRecords', None) is not None:
record_content = [self._format_content(record['Type'], value['Value']) for value
in record['ResourceRecords']]
if content is not None and content not in record_content:
continue
LOGGER.debug('record: %s', record)
records.append({
'type': record['Type'],
'name': self._full_name(record['Name']),
'ttl': record.get('TTL', None),
'content': record_content[0] if len(record_content) == 1 else record_content,
})
LOGGER.debug('list_records: %s', records)
return records | List all records for the hosted zone. | Below is the the instruction that describes the task:
### Input:
List all records for the hosted zone.
### Response:
def _list_records(self, rtype=None, name=None, content=None):
"""List all records for the hosted zone."""
records = []
paginator = RecordSetPaginator(self.r53_client, self.domain_id)
for record in paginator.all_record_sets():
if rtype is not None and record['Type'] != rtype:
continue
if name is not None and record['Name'] != self._fqdn_name(name):
continue
if record.get('AliasTarget', None) is not None:
record_content = [record['AliasTarget'].get('DNSName', None)]
if record.get('ResourceRecords', None) is not None:
record_content = [self._format_content(record['Type'], value['Value']) for value
in record['ResourceRecords']]
if content is not None and content not in record_content:
continue
LOGGER.debug('record: %s', record)
records.append({
'type': record['Type'],
'name': self._full_name(record['Name']),
'ttl': record.get('TTL', None),
'content': record_content[0] if len(record_content) == 1 else record_content,
})
LOGGER.debug('list_records: %s', records)
return records |
def plot(self, plot_cmd=None, tf=lambda y: y):
"""plot the data we have, return ``self``"""
if not plot_cmd:
plot_cmd = self.plot_cmd
colors = 'bgrcmyk'
pyplot.hold(False)
res = self.res
flatx, flatf = self.flattened()
minf = np.inf
for i in flatf:
minf = min((minf, min(flatf[i])))
addf = 1e-9 - minf if minf <= 1e-9 else 0
for i in sorted(res.keys()): # we plot not all values here
if isinstance(i, int):
color = colors[i % len(colors)]
arx = sorted(res[i].keys())
plot_cmd(arx, [tf(np.median(res[i][x]) + addf) for x in arx], color + '-')
pyplot.text(arx[-1], tf(np.median(res[i][arx[-1]])), i)
pyplot.hold(True)
plot_cmd(flatx[i], tf(np.array(flatf[i]) + addf), color + 'o')
pyplot.ylabel('f + ' + str(addf))
pyplot.draw()
pyplot.ion()
pyplot.show()
# raw_input('press return')
return self | plot the data we have, return ``self`` | Below is the the instruction that describes the task:
### Input:
plot the data we have, return ``self``
### Response:
def plot(self, plot_cmd=None, tf=lambda y: y):
"""plot the data we have, return ``self``"""
if not plot_cmd:
plot_cmd = self.plot_cmd
colors = 'bgrcmyk'
pyplot.hold(False)
res = self.res
flatx, flatf = self.flattened()
minf = np.inf
for i in flatf:
minf = min((minf, min(flatf[i])))
addf = 1e-9 - minf if minf <= 1e-9 else 0
for i in sorted(res.keys()): # we plot not all values here
if isinstance(i, int):
color = colors[i % len(colors)]
arx = sorted(res[i].keys())
plot_cmd(arx, [tf(np.median(res[i][x]) + addf) for x in arx], color + '-')
pyplot.text(arx[-1], tf(np.median(res[i][arx[-1]])), i)
pyplot.hold(True)
plot_cmd(flatx[i], tf(np.array(flatf[i]) + addf), color + 'o')
pyplot.ylabel('f + ' + str(addf))
pyplot.draw()
pyplot.ion()
pyplot.show()
# raw_input('press return')
return self |
def build_schedule_item(name, **kwargs):
'''
Build a schedule job
CLI Example:
.. code-block:: bash
salt '*' schedule.build_schedule_item job1 function='test.ping' seconds=3600
'''
ret = {'comment': [],
'result': True}
if not name:
ret['comment'] = 'Job name is required.'
ret['result'] = False
return ret
schedule = {}
schedule[name] = salt.utils.odict.OrderedDict()
schedule[name]['function'] = kwargs['function']
time_conflict = False
for item in ['seconds', 'minutes', 'hours', 'days']:
if item in kwargs and 'when' in kwargs:
time_conflict = True
if item in kwargs and 'cron' in kwargs:
time_conflict = True
if time_conflict:
ret['result'] = False
ret['comment'] = 'Unable to use "seconds", "minutes", "hours", or "days" with "when" or "cron" options.'
return ret
if 'when' in kwargs and 'cron' in kwargs:
ret['result'] = False
ret['comment'] = 'Unable to use "when" and "cron" options together. Ignoring.'
return ret
for item in ['seconds', 'minutes', 'hours', 'days']:
if item in kwargs:
schedule[name][item] = kwargs[item]
if 'return_job' in kwargs:
schedule[name]['return_job'] = kwargs['return_job']
if 'metadata' in kwargs:
schedule[name]['metadata'] = kwargs['metadata']
if 'job_args' in kwargs:
schedule[name]['args'] = kwargs['job_args']
if 'job_kwargs' in kwargs:
schedule[name]['kwargs'] = kwargs['job_kwargs']
if 'maxrunning' in kwargs:
schedule[name]['maxrunning'] = kwargs['maxrunning']
else:
schedule[name]['maxrunning'] = 1
if 'name' in kwargs:
schedule[name]['name'] = kwargs['name']
else:
schedule[name]['name'] = name
if 'enabled' in kwargs:
schedule[name]['enabled'] = kwargs['enabled']
else:
schedule[name]['enabled'] = True
if 'jid_include' not in kwargs or kwargs['jid_include']:
schedule[name]['jid_include'] = True
if 'splay' in kwargs:
if isinstance(kwargs['splay'], dict):
# Ensure ordering of start and end arguments
schedule[name]['splay'] = salt.utils.odict.OrderedDict()
schedule[name]['splay']['start'] = kwargs['splay']['start']
schedule[name]['splay']['end'] = kwargs['splay']['end']
else:
schedule[name]['splay'] = kwargs['splay']
if 'when' in kwargs:
if not _WHEN_SUPPORTED:
ret['result'] = False
ret['comment'] = 'Missing dateutil.parser, "when" is unavailable.'
return ret
else:
validate_when = kwargs['when']
if not isinstance(validate_when, list):
validate_when = [validate_when]
for _when in validate_when:
try:
dateutil_parser.parse(_when)
except ValueError:
ret['result'] = False
ret['comment'] = 'Schedule item {0} for "when" in invalid.'.format(_when)
return ret
for item in ['range', 'when', 'once', 'once_fmt', 'cron',
'returner', 'after', 'return_config', 'return_kwargs',
'until', 'run_on_start', 'skip_during_range']:
if item in kwargs:
schedule[name][item] = kwargs[item]
return schedule[name] | Build a schedule job
CLI Example:
.. code-block:: bash
salt '*' schedule.build_schedule_item job1 function='test.ping' seconds=3600 | Below is the the instruction that describes the task:
### Input:
Build a schedule job
CLI Example:
.. code-block:: bash
salt '*' schedule.build_schedule_item job1 function='test.ping' seconds=3600
### Response:
def build_schedule_item(name, **kwargs):
'''
Build a schedule job
CLI Example:
.. code-block:: bash
salt '*' schedule.build_schedule_item job1 function='test.ping' seconds=3600
'''
ret = {'comment': [],
'result': True}
if not name:
ret['comment'] = 'Job name is required.'
ret['result'] = False
return ret
schedule = {}
schedule[name] = salt.utils.odict.OrderedDict()
schedule[name]['function'] = kwargs['function']
time_conflict = False
for item in ['seconds', 'minutes', 'hours', 'days']:
if item in kwargs and 'when' in kwargs:
time_conflict = True
if item in kwargs and 'cron' in kwargs:
time_conflict = True
if time_conflict:
ret['result'] = False
ret['comment'] = 'Unable to use "seconds", "minutes", "hours", or "days" with "when" or "cron" options.'
return ret
if 'when' in kwargs and 'cron' in kwargs:
ret['result'] = False
ret['comment'] = 'Unable to use "when" and "cron" options together. Ignoring.'
return ret
for item in ['seconds', 'minutes', 'hours', 'days']:
if item in kwargs:
schedule[name][item] = kwargs[item]
if 'return_job' in kwargs:
schedule[name]['return_job'] = kwargs['return_job']
if 'metadata' in kwargs:
schedule[name]['metadata'] = kwargs['metadata']
if 'job_args' in kwargs:
schedule[name]['args'] = kwargs['job_args']
if 'job_kwargs' in kwargs:
schedule[name]['kwargs'] = kwargs['job_kwargs']
if 'maxrunning' in kwargs:
schedule[name]['maxrunning'] = kwargs['maxrunning']
else:
schedule[name]['maxrunning'] = 1
if 'name' in kwargs:
schedule[name]['name'] = kwargs['name']
else:
schedule[name]['name'] = name
if 'enabled' in kwargs:
schedule[name]['enabled'] = kwargs['enabled']
else:
schedule[name]['enabled'] = True
if 'jid_include' not in kwargs or kwargs['jid_include']:
schedule[name]['jid_include'] = True
if 'splay' in kwargs:
if isinstance(kwargs['splay'], dict):
# Ensure ordering of start and end arguments
schedule[name]['splay'] = salt.utils.odict.OrderedDict()
schedule[name]['splay']['start'] = kwargs['splay']['start']
schedule[name]['splay']['end'] = kwargs['splay']['end']
else:
schedule[name]['splay'] = kwargs['splay']
if 'when' in kwargs:
if not _WHEN_SUPPORTED:
ret['result'] = False
ret['comment'] = 'Missing dateutil.parser, "when" is unavailable.'
return ret
else:
validate_when = kwargs['when']
if not isinstance(validate_when, list):
validate_when = [validate_when]
for _when in validate_when:
try:
dateutil_parser.parse(_when)
except ValueError:
ret['result'] = False
ret['comment'] = 'Schedule item {0} for "when" in invalid.'.format(_when)
return ret
for item in ['range', 'when', 'once', 'once_fmt', 'cron',
'returner', 'after', 'return_config', 'return_kwargs',
'until', 'run_on_start', 'skip_during_range']:
if item in kwargs:
schedule[name][item] = kwargs[item]
return schedule[name] |
def perform_action(
self, action, machines, params, progress_title, success_title):
"""Perform the action on the set of machines."""
if len(machines) == 0:
return 0
with utils.Spinner() as context:
return self._async_perform_action(
context, action, list(machines), params,
progress_title, success_title) | Perform the action on the set of machines. | Below is the the instruction that describes the task:
### Input:
Perform the action on the set of machines.
### Response:
def perform_action(
self, action, machines, params, progress_title, success_title):
"""Perform the action on the set of machines."""
if len(machines) == 0:
return 0
with utils.Spinner() as context:
return self._async_perform_action(
context, action, list(machines), params,
progress_title, success_title) |
def render(template='', data={}, partials_path='.', partials_ext='mustache',
partials_dict={}, padding='', def_ldel='{{', def_rdel='}}',
scopes=None):
"""Render a mustache template.
Renders a mustache template with a data scope and partial capability.
Given the file structure...
╷
├─╼ main.py
├─╼ main.ms
└─┮ partials
└── part.ms
then main.py would make the following call:
render(open('main.ms', 'r'), {...}, 'partials', 'ms')
Arguments:
template -- A file-like object or a string containing the template
data -- A python dictionary with your data scope
partials_path -- The path to where your partials are stored
(defaults to '.')
partials_ext -- The extension that you want the parser to look for
(defaults to 'mustache')
partials_dict -- A python dictionary which will be search for partials
before the filesystem is. {'include': 'foo'} is the same
as a file called include.mustache
(defaults to {})
padding -- This is for padding partials, and shouldn't be used
(but can be if you really want to)
def_ldel -- The default left delimiter
("{{" by default, as in spec compliant mustache)
def_rdel -- The default right delimiter
("}}" by default, as in spec compliant mustache)
scopes -- The list of scopes that get_key will look through
Returns:
A string containing the rendered template.
"""
# If the template is a seqeuence but not derived from a string
if isinstance(template, Sequence) and \
not isinstance(template, string_type):
# Then we don't need to tokenize it
# But it does need to be a generator
tokens = (token for token in template)
else:
if template in g_token_cache:
tokens = (token for token in g_token_cache[template])
else:
# Otherwise make a generator
tokens = tokenize(template, def_ldel, def_rdel)
output = unicode('', 'utf-8')
if scopes is None:
scopes = [data]
# Run through the tokens
for tag, key in tokens:
# Set the current scope
current_scope = scopes[0]
# If we're an end tag
if tag == 'end':
# Pop out of the latest scope
del scopes[0]
# If the current scope is falsy and not the only scope
elif not current_scope and len(scopes) != 1:
if tag in ['section', 'inverted section']:
# Set the most recent scope to a falsy value
# (I heard False is a good one)
scopes.insert(0, False)
# If we're a literal tag
elif tag == 'literal':
# Add padding to the key and add it to the output
if not isinstance(key, unicode_type): # python 2
key = unicode(key, 'utf-8')
output += key.replace('\n', '\n' + padding)
# If we're a variable tag
elif tag == 'variable':
# Add the html escaped key to the output
thing = _get_key(key, scopes)
if thing is True and key == '.':
# if we've coerced into a boolean by accident
# (inverted tags do this)
# then get the un-coerced object (next in the stack)
thing = scopes[1]
if not isinstance(thing, unicode_type):
thing = unicode(str(thing), 'utf-8')
output += _html_escape(thing)
# If we're a no html escape tag
elif tag == 'no escape':
# Just lookup the key and add it
thing = _get_key(key, scopes)
if not isinstance(thing, unicode_type):
thing = unicode(str(thing), 'utf-8')
output += thing
# If we're a section tag
elif tag == 'section':
# Get the sections scope
scope = _get_key(key, scopes)
# If the scope is a callable (as described in
# https://mustache.github.io/mustache.5.html)
if isinstance(scope, Callable):
# Generate template text from tags
text = unicode('', 'utf-8')
tags = []
for tag in tokens:
if tag == ('end', key):
break
tags.append(tag)
tag_type, tag_key = tag
if tag_type == 'literal':
text += tag_key
elif tag_type == 'no escape':
text += "%s& %s %s" % (def_ldel, tag_key, def_rdel)
else:
text += "%s%s %s%s" % (def_ldel, {
'commment': '!',
'section': '#',
'inverted section': '^',
'end': '/',
'partial': '>',
'set delimiter': '=',
'no escape': '&',
'variable': ''
}[tag_type], tag_key, def_rdel)
g_token_cache[text] = tags
rend = scope(text, lambda template, data=None: render(template,
data={},
partials_path=partials_path,
partials_ext=partials_ext,
partials_dict=partials_dict,
padding=padding,
def_ldel=def_ldel, def_rdel=def_rdel,
scopes=data and [data]+scopes or scopes))
if python3:
output += rend
else: # python 2
output += rend.decode('utf-8')
# If the scope is a sequence, an iterator or generator but not
# derived from a string
elif isinstance(scope, (Sequence, Iterator)) and \
not isinstance(scope, string_type):
# Then we need to do some looping
# Gather up all the tags inside the section
# (And don't be tricked by nested end tags with the same key)
# TODO: This feels like it still has edge cases, no?
tags = []
tags_with_same_key = 0
for tag in tokens:
if tag == ('section', key):
tags_with_same_key += 1
if tag == ('end', key):
tags_with_same_key -= 1
if tags_with_same_key < 0:
break
tags.append(tag)
# For every item in the scope
for thing in scope:
# Append it as the most recent scope and render
new_scope = [thing] + scopes
rend = render(template=tags, scopes=new_scope,
partials_path=partials_path,
partials_ext=partials_ext,
partials_dict=partials_dict,
def_ldel=def_ldel, def_rdel=def_rdel)
if python3:
output += rend
else: # python 2
output += rend.decode('utf-8')
else:
# Otherwise we're just a scope section
scopes.insert(0, scope)
# If we're an inverted section
elif tag == 'inverted section':
# Add the flipped scope to the scopes
scope = _get_key(key, scopes)
scopes.insert(0, not scope)
# If we're a partial
elif tag == 'partial':
# Load the partial
partial = _get_partial(key, partials_dict,
partials_path, partials_ext)
# Find what to pad the partial with
left = output.split('\n')[-1]
part_padding = padding
if left.isspace():
part_padding += left
# Render the partial
part_out = render(template=partial, partials_path=partials_path,
partials_ext=partials_ext,
partials_dict=partials_dict,
def_ldel=def_ldel, def_rdel=def_rdel,
padding=part_padding, scopes=scopes)
# If the partial was indented
if left.isspace():
# then remove the spaces from the end
part_out = part_out.rstrip(' \t')
# Add the partials output to the ouput
if python3:
output += part_out
else: # python 2
output += part_out.decode('utf-8')
if python3:
return output
else: # python 2
return output.encode('utf-8') | Render a mustache template.
Renders a mustache template with a data scope and partial capability.
Given the file structure...
╷
├─╼ main.py
├─╼ main.ms
└─┮ partials
└── part.ms
then main.py would make the following call:
render(open('main.ms', 'r'), {...}, 'partials', 'ms')
Arguments:
template -- A file-like object or a string containing the template
data -- A python dictionary with your data scope
partials_path -- The path to where your partials are stored
(defaults to '.')
partials_ext -- The extension that you want the parser to look for
(defaults to 'mustache')
partials_dict -- A python dictionary which will be search for partials
before the filesystem is. {'include': 'foo'} is the same
as a file called include.mustache
(defaults to {})
padding -- This is for padding partials, and shouldn't be used
(but can be if you really want to)
def_ldel -- The default left delimiter
("{{" by default, as in spec compliant mustache)
def_rdel -- The default right delimiter
("}}" by default, as in spec compliant mustache)
scopes -- The list of scopes that get_key will look through
Returns:
A string containing the rendered template. | Below is the the instruction that describes the task:
### Input:
Render a mustache template.
Renders a mustache template with a data scope and partial capability.
Given the file structure...
╷
├─╼ main.py
├─╼ main.ms
└─┮ partials
└── part.ms
then main.py would make the following call:
render(open('main.ms', 'r'), {...}, 'partials', 'ms')
Arguments:
template -- A file-like object or a string containing the template
data -- A python dictionary with your data scope
partials_path -- The path to where your partials are stored
(defaults to '.')
partials_ext -- The extension that you want the parser to look for
(defaults to 'mustache')
partials_dict -- A python dictionary which will be search for partials
before the filesystem is. {'include': 'foo'} is the same
as a file called include.mustache
(defaults to {})
padding -- This is for padding partials, and shouldn't be used
(but can be if you really want to)
def_ldel -- The default left delimiter
("{{" by default, as in spec compliant mustache)
def_rdel -- The default right delimiter
("}}" by default, as in spec compliant mustache)
scopes -- The list of scopes that get_key will look through
Returns:
A string containing the rendered template.
### Response:
def render(template='', data={}, partials_path='.', partials_ext='mustache',
partials_dict={}, padding='', def_ldel='{{', def_rdel='}}',
scopes=None):
"""Render a mustache template.
Renders a mustache template with a data scope and partial capability.
Given the file structure...
╷
├─╼ main.py
├─╼ main.ms
└─┮ partials
└── part.ms
then main.py would make the following call:
render(open('main.ms', 'r'), {...}, 'partials', 'ms')
Arguments:
template -- A file-like object or a string containing the template
data -- A python dictionary with your data scope
partials_path -- The path to where your partials are stored
(defaults to '.')
partials_ext -- The extension that you want the parser to look for
(defaults to 'mustache')
partials_dict -- A python dictionary which will be search for partials
before the filesystem is. {'include': 'foo'} is the same
as a file called include.mustache
(defaults to {})
padding -- This is for padding partials, and shouldn't be used
(but can be if you really want to)
def_ldel -- The default left delimiter
("{{" by default, as in spec compliant mustache)
def_rdel -- The default right delimiter
("}}" by default, as in spec compliant mustache)
scopes -- The list of scopes that get_key will look through
Returns:
A string containing the rendered template.
"""
# If the template is a seqeuence but not derived from a string
if isinstance(template, Sequence) and \
not isinstance(template, string_type):
# Then we don't need to tokenize it
# But it does need to be a generator
tokens = (token for token in template)
else:
if template in g_token_cache:
tokens = (token for token in g_token_cache[template])
else:
# Otherwise make a generator
tokens = tokenize(template, def_ldel, def_rdel)
output = unicode('', 'utf-8')
if scopes is None:
scopes = [data]
# Run through the tokens
for tag, key in tokens:
# Set the current scope
current_scope = scopes[0]
# If we're an end tag
if tag == 'end':
# Pop out of the latest scope
del scopes[0]
# If the current scope is falsy and not the only scope
elif not current_scope and len(scopes) != 1:
if tag in ['section', 'inverted section']:
# Set the most recent scope to a falsy value
# (I heard False is a good one)
scopes.insert(0, False)
# If we're a literal tag
elif tag == 'literal':
# Add padding to the key and add it to the output
if not isinstance(key, unicode_type): # python 2
key = unicode(key, 'utf-8')
output += key.replace('\n', '\n' + padding)
# If we're a variable tag
elif tag == 'variable':
# Add the html escaped key to the output
thing = _get_key(key, scopes)
if thing is True and key == '.':
# if we've coerced into a boolean by accident
# (inverted tags do this)
# then get the un-coerced object (next in the stack)
thing = scopes[1]
if not isinstance(thing, unicode_type):
thing = unicode(str(thing), 'utf-8')
output += _html_escape(thing)
# If we're a no html escape tag
elif tag == 'no escape':
# Just lookup the key and add it
thing = _get_key(key, scopes)
if not isinstance(thing, unicode_type):
thing = unicode(str(thing), 'utf-8')
output += thing
# If we're a section tag
elif tag == 'section':
# Get the sections scope
scope = _get_key(key, scopes)
# If the scope is a callable (as described in
# https://mustache.github.io/mustache.5.html)
if isinstance(scope, Callable):
# Generate template text from tags
text = unicode('', 'utf-8')
tags = []
for tag in tokens:
if tag == ('end', key):
break
tags.append(tag)
tag_type, tag_key = tag
if tag_type == 'literal':
text += tag_key
elif tag_type == 'no escape':
text += "%s& %s %s" % (def_ldel, tag_key, def_rdel)
else:
text += "%s%s %s%s" % (def_ldel, {
'commment': '!',
'section': '#',
'inverted section': '^',
'end': '/',
'partial': '>',
'set delimiter': '=',
'no escape': '&',
'variable': ''
}[tag_type], tag_key, def_rdel)
g_token_cache[text] = tags
rend = scope(text, lambda template, data=None: render(template,
data={},
partials_path=partials_path,
partials_ext=partials_ext,
partials_dict=partials_dict,
padding=padding,
def_ldel=def_ldel, def_rdel=def_rdel,
scopes=data and [data]+scopes or scopes))
if python3:
output += rend
else: # python 2
output += rend.decode('utf-8')
# If the scope is a sequence, an iterator or generator but not
# derived from a string
elif isinstance(scope, (Sequence, Iterator)) and \
not isinstance(scope, string_type):
# Then we need to do some looping
# Gather up all the tags inside the section
# (And don't be tricked by nested end tags with the same key)
# TODO: This feels like it still has edge cases, no?
tags = []
tags_with_same_key = 0
for tag in tokens:
if tag == ('section', key):
tags_with_same_key += 1
if tag == ('end', key):
tags_with_same_key -= 1
if tags_with_same_key < 0:
break
tags.append(tag)
# For every item in the scope
for thing in scope:
# Append it as the most recent scope and render
new_scope = [thing] + scopes
rend = render(template=tags, scopes=new_scope,
partials_path=partials_path,
partials_ext=partials_ext,
partials_dict=partials_dict,
def_ldel=def_ldel, def_rdel=def_rdel)
if python3:
output += rend
else: # python 2
output += rend.decode('utf-8')
else:
# Otherwise we're just a scope section
scopes.insert(0, scope)
# If we're an inverted section
elif tag == 'inverted section':
# Add the flipped scope to the scopes
scope = _get_key(key, scopes)
scopes.insert(0, not scope)
# If we're a partial
elif tag == 'partial':
# Load the partial
partial = _get_partial(key, partials_dict,
partials_path, partials_ext)
# Find what to pad the partial with
left = output.split('\n')[-1]
part_padding = padding
if left.isspace():
part_padding += left
# Render the partial
part_out = render(template=partial, partials_path=partials_path,
partials_ext=partials_ext,
partials_dict=partials_dict,
def_ldel=def_ldel, def_rdel=def_rdel,
padding=part_padding, scopes=scopes)
# If the partial was indented
if left.isspace():
# then remove the spaces from the end
part_out = part_out.rstrip(' \t')
# Add the partials output to the ouput
if python3:
output += part_out
else: # python 2
output += part_out.decode('utf-8')
if python3:
return output
else: # python 2
return output.encode('utf-8') |
def get_rps_list(self):
"""
get list of each second's rps
:returns: list of tuples (rps, duration of corresponding rps in seconds)
:rtype: list
"""
seconds = range(0, int(self.duration) + 1)
rps_groups = groupby([proper_round(self.rps_at(t)) for t in seconds],
lambda x: x)
rps_list = [(rps, len(list(rpl))) for rps, rpl in rps_groups]
return rps_list | get list of each second's rps
:returns: list of tuples (rps, duration of corresponding rps in seconds)
:rtype: list | Below is the the instruction that describes the task:
### Input:
get list of each second's rps
:returns: list of tuples (rps, duration of corresponding rps in seconds)
:rtype: list
### Response:
def get_rps_list(self):
"""
get list of each second's rps
:returns: list of tuples (rps, duration of corresponding rps in seconds)
:rtype: list
"""
seconds = range(0, int(self.duration) + 1)
rps_groups = groupby([proper_round(self.rps_at(t)) for t in seconds],
lambda x: x)
rps_list = [(rps, len(list(rpl))) for rps, rpl in rps_groups]
return rps_list |
def root_sections(h):
"""
Returns a list of all sections that have no parent.
"""
roots = []
for section in h.allsec():
sref = h.SectionRef(sec=section)
# has_parent returns a float... cast to bool
if sref.has_parent() < 0.9:
roots.append(section)
return roots | Returns a list of all sections that have no parent. | Below is the the instruction that describes the task:
### Input:
Returns a list of all sections that have no parent.
### Response:
def root_sections(h):
"""
Returns a list of all sections that have no parent.
"""
roots = []
for section in h.allsec():
sref = h.SectionRef(sec=section)
# has_parent returns a float... cast to bool
if sref.has_parent() < 0.9:
roots.append(section)
return roots |
def set_footer(self, text: str, icon_url: str = None) -> None:
"""
Sets the footer of the embed.
Parameters
----------
text: str
The footer text.
icon_url: str, optional
URL for the icon in the footer.
"""
self.footer = {
'text': text,
'icon_url': icon_url
} | Sets the footer of the embed.
Parameters
----------
text: str
The footer text.
icon_url: str, optional
URL for the icon in the footer. | Below is the the instruction that describes the task:
### Input:
Sets the footer of the embed.
Parameters
----------
text: str
The footer text.
icon_url: str, optional
URL for the icon in the footer.
### Response:
def set_footer(self, text: str, icon_url: str = None) -> None:
"""
Sets the footer of the embed.
Parameters
----------
text: str
The footer text.
icon_url: str, optional
URL for the icon in the footer.
"""
self.footer = {
'text': text,
'icon_url': icon_url
} |
def get_children(self):
"""Get the child nodes below this node.
:returns: The children.
:rtype: iterable(NodeNG)
"""
for field in self._astroid_fields:
attr = getattr(self, field)
if attr is None:
continue
if isinstance(attr, (list, tuple)):
yield from attr
else:
yield attr | Get the child nodes below this node.
:returns: The children.
:rtype: iterable(NodeNG) | Below is the the instruction that describes the task:
### Input:
Get the child nodes below this node.
:returns: The children.
:rtype: iterable(NodeNG)
### Response:
def get_children(self):
"""Get the child nodes below this node.
:returns: The children.
:rtype: iterable(NodeNG)
"""
for field in self._astroid_fields:
attr = getattr(self, field)
if attr is None:
continue
if isinstance(attr, (list, tuple)):
yield from attr
else:
yield attr |
def citation(self):
"""
Returns the contents of the citation.bib file that describes the source
and provenance of the dataset or to cite for academic work.
"""
path = find_dataset_path(
self.name, data_home=self.data_home, fname="meta.json", raises=False
)
if path is None:
return None
with open(path, 'r') as f:
return f.read() | Returns the contents of the citation.bib file that describes the source
and provenance of the dataset or to cite for academic work. | Below is the the instruction that describes the task:
### Input:
Returns the contents of the citation.bib file that describes the source
and provenance of the dataset or to cite for academic work.
### Response:
def citation(self):
"""
Returns the contents of the citation.bib file that describes the source
and provenance of the dataset or to cite for academic work.
"""
path = find_dataset_path(
self.name, data_home=self.data_home, fname="meta.json", raises=False
)
if path is None:
return None
with open(path, 'r') as f:
return f.read() |
def run(bam, chrom, pos1, pos2, reffa, chr_reffa, parameters):
"""Run mpileup on given chrom and pos"""
# check for chr ref
is_chr_query = chrom.startswith('chr')
if is_chr_query and chr_reffa is None:
chr_reffa = reffa
# check bam ref type
bam_header = subprocess.check_output("samtools view -H {}".format(bam), shell=True)
is_chr_bam = bam_header.find('SN:chr') != -1
if is_chr_bam:
reffa = chr_reffa
if not is_chr_query and is_chr_bam:
chrom = 'chr' + chrom
if is_chr_query and not is_chr_bam:
chrom = re.sub(r'^chr', '', chrom)
posmin = min(pos1, pos2)
posmax = max(pos1, pos2)
cmd = "samtools view -bh {bam} {chrom}:{pos1}-{pos2} " \
"| samtools mpileup {parameters} -f {reffa} -".format(bam=bam, chrom=chrom,
pos1=posmin, pos2=posmax,
reffa=reffa, parameters=parameters)
if pos1 == pos2:
cmd += " | awk '$2 == {pos}'".format(pos=pos1)
else:
cmd += " | tail -n +2 | awk '$2 >= {posmin} && $2 <= {posmax}'".format(posmin=posmin, posmax=posmax)
sys.stderr.write("Running:\n{}\n".format(cmd))
child = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
stdout, stderr = child.communicate()
if child.returncode != 0:
if len(stdout) == 0 and stderr is None:
warnings.warn("Command:\n{cmd}\n did not exit with zero exit code. "
"Possibly no coverage for sample.".format(cmd=cmd))
else:
raise(Exception("Command:\n{cmd}\n did not exit with zero exit code. "
"Check command.".format(cmd=cmd)))
else:
return stdout | Run mpileup on given chrom and pos | Below is the the instruction that describes the task:
### Input:
Run mpileup on given chrom and pos
### Response:
def run(bam, chrom, pos1, pos2, reffa, chr_reffa, parameters):
"""Run mpileup on given chrom and pos"""
# check for chr ref
is_chr_query = chrom.startswith('chr')
if is_chr_query and chr_reffa is None:
chr_reffa = reffa
# check bam ref type
bam_header = subprocess.check_output("samtools view -H {}".format(bam), shell=True)
is_chr_bam = bam_header.find('SN:chr') != -1
if is_chr_bam:
reffa = chr_reffa
if not is_chr_query and is_chr_bam:
chrom = 'chr' + chrom
if is_chr_query and not is_chr_bam:
chrom = re.sub(r'^chr', '', chrom)
posmin = min(pos1, pos2)
posmax = max(pos1, pos2)
cmd = "samtools view -bh {bam} {chrom}:{pos1}-{pos2} " \
"| samtools mpileup {parameters} -f {reffa} -".format(bam=bam, chrom=chrom,
pos1=posmin, pos2=posmax,
reffa=reffa, parameters=parameters)
if pos1 == pos2:
cmd += " | awk '$2 == {pos}'".format(pos=pos1)
else:
cmd += " | tail -n +2 | awk '$2 >= {posmin} && $2 <= {posmax}'".format(posmin=posmin, posmax=posmax)
sys.stderr.write("Running:\n{}\n".format(cmd))
child = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
stdout, stderr = child.communicate()
if child.returncode != 0:
if len(stdout) == 0 and stderr is None:
warnings.warn("Command:\n{cmd}\n did not exit with zero exit code. "
"Possibly no coverage for sample.".format(cmd=cmd))
else:
raise(Exception("Command:\n{cmd}\n did not exit with zero exit code. "
"Check command.".format(cmd=cmd)))
else:
return stdout |
def modify_process_summary(self, pid=None, text='', append=False):
'''
modify_process_summary(self, pid=None, text='')
Modifies the summary text of the process execution
:Parameters:
* *key* (`pid`) -- Identifier of an existing process
* *key* (`text`) -- summary text
* *append* (`boolean`) -- True to append to summary. False to override it.
'''
pid = self._get_pid(pid)
if append:
current_summary = self.get_process_info(pid).get('summary') or ''
modified_text = current_summary + '\n' + text
text = modified_text
request_data = {"id": pid, "data": str(text)}
return self._call_rest_api('post', '/processes/'+pid+'/summary', data=request_data, error='Failed to update process summary') | modify_process_summary(self, pid=None, text='')
Modifies the summary text of the process execution
:Parameters:
* *key* (`pid`) -- Identifier of an existing process
* *key* (`text`) -- summary text
* *append* (`boolean`) -- True to append to summary. False to override it. | Below is the the instruction that describes the task:
### Input:
modify_process_summary(self, pid=None, text='')
Modifies the summary text of the process execution
:Parameters:
* *key* (`pid`) -- Identifier of an existing process
* *key* (`text`) -- summary text
* *append* (`boolean`) -- True to append to summary. False to override it.
### Response:
def modify_process_summary(self, pid=None, text='', append=False):
'''
modify_process_summary(self, pid=None, text='')
Modifies the summary text of the process execution
:Parameters:
* *key* (`pid`) -- Identifier of an existing process
* *key* (`text`) -- summary text
* *append* (`boolean`) -- True to append to summary. False to override it.
'''
pid = self._get_pid(pid)
if append:
current_summary = self.get_process_info(pid).get('summary') or ''
modified_text = current_summary + '\n' + text
text = modified_text
request_data = {"id": pid, "data": str(text)}
return self._call_rest_api('post', '/processes/'+pid+'/summary', data=request_data, error='Failed to update process summary') |
def apply_T7(word):
'''If a VVV-sequence does not contain a potential /i/-final diphthong,
there is a syllable boundary between the second and third vowels, e.g.
[kau.an], [leu.an], [kiu.as].'''
T7 = ''
WORD = word.split('.')
for i, v in enumerate(WORD):
if contains_VVV(v):
for I, V in enumerate(v[::-1]):
if is_vowel(V):
WORD[i] = v[:I] + '.' + v[I:]
T7 = ' T7'
word = '.'.join(WORD)
return word, T7 | If a VVV-sequence does not contain a potential /i/-final diphthong,
there is a syllable boundary between the second and third vowels, e.g.
[kau.an], [leu.an], [kiu.as]. | Below is the the instruction that describes the task:
### Input:
If a VVV-sequence does not contain a potential /i/-final diphthong,
there is a syllable boundary between the second and third vowels, e.g.
[kau.an], [leu.an], [kiu.as].
### Response:
def apply_T7(word):
'''If a VVV-sequence does not contain a potential /i/-final diphthong,
there is a syllable boundary between the second and third vowels, e.g.
[kau.an], [leu.an], [kiu.as].'''
T7 = ''
WORD = word.split('.')
for i, v in enumerate(WORD):
if contains_VVV(v):
for I, V in enumerate(v[::-1]):
if is_vowel(V):
WORD[i] = v[:I] + '.' + v[I:]
T7 = ' T7'
word = '.'.join(WORD)
return word, T7 |
def add_compression(self, compression=True):
"""
Add an instruction enabling or disabling compression for the transmitted raster image lines.
Not all models support compression. If the specific model doesn't support it but this method
is called trying to enable it, either a warning is set or an exception is raised depending on
the value of :py:attr:`exception_on_warning`
:param bool compression: Whether compression should be on or off
"""
if self.model not in compressionsupport:
self._unsupported("Trying to set compression on a printer that doesn't support it")
return
self._compression = compression
self.data += b'\x4D' # M
self.data += bytes([compression << 1]) | Add an instruction enabling or disabling compression for the transmitted raster image lines.
Not all models support compression. If the specific model doesn't support it but this method
is called trying to enable it, either a warning is set or an exception is raised depending on
the value of :py:attr:`exception_on_warning`
:param bool compression: Whether compression should be on or off | Below is the the instruction that describes the task:
### Input:
Add an instruction enabling or disabling compression for the transmitted raster image lines.
Not all models support compression. If the specific model doesn't support it but this method
is called trying to enable it, either a warning is set or an exception is raised depending on
the value of :py:attr:`exception_on_warning`
:param bool compression: Whether compression should be on or off
### Response:
def add_compression(self, compression=True):
"""
Add an instruction enabling or disabling compression for the transmitted raster image lines.
Not all models support compression. If the specific model doesn't support it but this method
is called trying to enable it, either a warning is set or an exception is raised depending on
the value of :py:attr:`exception_on_warning`
:param bool compression: Whether compression should be on or off
"""
if self.model not in compressionsupport:
self._unsupported("Trying to set compression on a printer that doesn't support it")
return
self._compression = compression
self.data += b'\x4D' # M
self.data += bytes([compression << 1]) |
def min_cost_flow(self, display = None, **args):
'''
API:
min_cost_flow(self, display='off', **args)
Description:
Solves minimum cost flow problem using node/edge attributes with
the algorithm specified.
Pre:
(1) Assumes a directed graph in which each arc has 'capacity' and
'cost' attributes.
(2) Nodes should have 'demand' attribute. This value should be
positive for supply and negative for demand, and 0 for transhipment
nodes.
(3) The graph should be connected.
(4) Assumes (i,j) and (j,i) does not exist together. Needed when
solving max flow. (max flow problem is solved to get a feasible
flow).
Input:
display: 'off' for no display, 'pygame' for live update of tree
args: may have the following
display: display method, if not given current mode (the one
specified by __init__ or set_display) will be used.
algo: determines algorithm to use, can be one of the following
'simplex': network simplex algorithm
'cycle_canceling': cycle canceling algorithm
'simplex' is used if not given.
see Network Flows by Ahuja et al. for details of algorithms.
pivot: valid if algo is 'simlex', determines pivoting rule for
simplex, may be one of the following; 'first_eligible',
'dantzig' or 'scaled'.
'dantzig' is used if not given.
see Network Flows by Ahuja et al. for pivot rules.
root: valid if algo is 'simlex', specifies the root node for
simplex algorithm. It is name of the one of the nodes. It
will be chosen randomly if not provided.
Post:
The 'flow' attribute of each arc gives the optimal flows.
'distance' attribute of the nodes are also changed during max flow
solution process.
Examples:
g.min_cost_flow():
solves minimum cost feasible flow problem using simplex
algorithm with dantzig pivoting rule.
See pre section for details.
g.min_cost_flow(algo='cycle_canceling'):
solves minimum cost feasible flow problem using cycle canceling
agorithm.
g.min_cost_flow(algo='simplex', pivot='scaled'):
solves minimum cost feasible flow problem using network simplex
agorithm with scaled pivot rule.
'''
if display is None:
display = self.attr['display']
if 'algo' in args:
algorithm = args['algo']
else:
algorithm = 'simplex'
if algorithm is 'simplex':
if 'root' in args:
root = args['root']
else:
for k in self.neighbors:
root = k
break
if 'pivot' in args:
if not self.network_simplex(display, args['pivot'], root):
print('problem is infeasible')
else:
if not self.network_simplex(display, 'dantzig', root):
print('problem is infeasible')
elif algorithm is 'cycle_canceling':
if not self.cycle_canceling(display):
print('problem is infeasible')
else:
print(args['algo'], 'is not a defined algorithm. Exiting.')
return | API:
min_cost_flow(self, display='off', **args)
Description:
Solves minimum cost flow problem using node/edge attributes with
the algorithm specified.
Pre:
(1) Assumes a directed graph in which each arc has 'capacity' and
'cost' attributes.
(2) Nodes should have 'demand' attribute. This value should be
positive for supply and negative for demand, and 0 for transhipment
nodes.
(3) The graph should be connected.
(4) Assumes (i,j) and (j,i) does not exist together. Needed when
solving max flow. (max flow problem is solved to get a feasible
flow).
Input:
display: 'off' for no display, 'pygame' for live update of tree
args: may have the following
display: display method, if not given current mode (the one
specified by __init__ or set_display) will be used.
algo: determines algorithm to use, can be one of the following
'simplex': network simplex algorithm
'cycle_canceling': cycle canceling algorithm
'simplex' is used if not given.
see Network Flows by Ahuja et al. for details of algorithms.
pivot: valid if algo is 'simlex', determines pivoting rule for
simplex, may be one of the following; 'first_eligible',
'dantzig' or 'scaled'.
'dantzig' is used if not given.
see Network Flows by Ahuja et al. for pivot rules.
root: valid if algo is 'simlex', specifies the root node for
simplex algorithm. It is name of the one of the nodes. It
will be chosen randomly if not provided.
Post:
The 'flow' attribute of each arc gives the optimal flows.
'distance' attribute of the nodes are also changed during max flow
solution process.
Examples:
g.min_cost_flow():
solves minimum cost feasible flow problem using simplex
algorithm with dantzig pivoting rule.
See pre section for details.
g.min_cost_flow(algo='cycle_canceling'):
solves minimum cost feasible flow problem using cycle canceling
agorithm.
g.min_cost_flow(algo='simplex', pivot='scaled'):
solves minimum cost feasible flow problem using network simplex
agorithm with scaled pivot rule. | Below is the the instruction that describes the task:
### Input:
API:
min_cost_flow(self, display='off', **args)
Description:
Solves minimum cost flow problem using node/edge attributes with
the algorithm specified.
Pre:
(1) Assumes a directed graph in which each arc has 'capacity' and
'cost' attributes.
(2) Nodes should have 'demand' attribute. This value should be
positive for supply and negative for demand, and 0 for transhipment
nodes.
(3) The graph should be connected.
(4) Assumes (i,j) and (j,i) does not exist together. Needed when
solving max flow. (max flow problem is solved to get a feasible
flow).
Input:
display: 'off' for no display, 'pygame' for live update of tree
args: may have the following
display: display method, if not given current mode (the one
specified by __init__ or set_display) will be used.
algo: determines algorithm to use, can be one of the following
'simplex': network simplex algorithm
'cycle_canceling': cycle canceling algorithm
'simplex' is used if not given.
see Network Flows by Ahuja et al. for details of algorithms.
pivot: valid if algo is 'simlex', determines pivoting rule for
simplex, may be one of the following; 'first_eligible',
'dantzig' or 'scaled'.
'dantzig' is used if not given.
see Network Flows by Ahuja et al. for pivot rules.
root: valid if algo is 'simlex', specifies the root node for
simplex algorithm. It is name of the one of the nodes. It
will be chosen randomly if not provided.
Post:
The 'flow' attribute of each arc gives the optimal flows.
'distance' attribute of the nodes are also changed during max flow
solution process.
Examples:
g.min_cost_flow():
solves minimum cost feasible flow problem using simplex
algorithm with dantzig pivoting rule.
See pre section for details.
g.min_cost_flow(algo='cycle_canceling'):
solves minimum cost feasible flow problem using cycle canceling
agorithm.
g.min_cost_flow(algo='simplex', pivot='scaled'):
solves minimum cost feasible flow problem using network simplex
agorithm with scaled pivot rule.
### Response:
def min_cost_flow(self, display = None, **args):
'''
API:
min_cost_flow(self, display='off', **args)
Description:
Solves minimum cost flow problem using node/edge attributes with
the algorithm specified.
Pre:
(1) Assumes a directed graph in which each arc has 'capacity' and
'cost' attributes.
(2) Nodes should have 'demand' attribute. This value should be
positive for supply and negative for demand, and 0 for transhipment
nodes.
(3) The graph should be connected.
(4) Assumes (i,j) and (j,i) does not exist together. Needed when
solving max flow. (max flow problem is solved to get a feasible
flow).
Input:
display: 'off' for no display, 'pygame' for live update of tree
args: may have the following
display: display method, if not given current mode (the one
specified by __init__ or set_display) will be used.
algo: determines algorithm to use, can be one of the following
'simplex': network simplex algorithm
'cycle_canceling': cycle canceling algorithm
'simplex' is used if not given.
see Network Flows by Ahuja et al. for details of algorithms.
pivot: valid if algo is 'simlex', determines pivoting rule for
simplex, may be one of the following; 'first_eligible',
'dantzig' or 'scaled'.
'dantzig' is used if not given.
see Network Flows by Ahuja et al. for pivot rules.
root: valid if algo is 'simlex', specifies the root node for
simplex algorithm. It is name of the one of the nodes. It
will be chosen randomly if not provided.
Post:
The 'flow' attribute of each arc gives the optimal flows.
'distance' attribute of the nodes are also changed during max flow
solution process.
Examples:
g.min_cost_flow():
solves minimum cost feasible flow problem using simplex
algorithm with dantzig pivoting rule.
See pre section for details.
g.min_cost_flow(algo='cycle_canceling'):
solves minimum cost feasible flow problem using cycle canceling
agorithm.
g.min_cost_flow(algo='simplex', pivot='scaled'):
solves minimum cost feasible flow problem using network simplex
agorithm with scaled pivot rule.
'''
if display is None:
display = self.attr['display']
if 'algo' in args:
algorithm = args['algo']
else:
algorithm = 'simplex'
if algorithm is 'simplex':
if 'root' in args:
root = args['root']
else:
for k in self.neighbors:
root = k
break
if 'pivot' in args:
if not self.network_simplex(display, args['pivot'], root):
print('problem is infeasible')
else:
if not self.network_simplex(display, 'dantzig', root):
print('problem is infeasible')
elif algorithm is 'cycle_canceling':
if not self.cycle_canceling(display):
print('problem is infeasible')
else:
print(args['algo'], 'is not a defined algorithm. Exiting.')
return |
def composition(self):
"""
(Composition) Returns the composition
"""
elmap = collections.defaultdict(float)
for site in self:
for species, occu in site.species.items():
elmap[species] += occu
return Composition(elmap) | (Composition) Returns the composition | Below is the the instruction that describes the task:
### Input:
(Composition) Returns the composition
### Response:
def composition(self):
"""
(Composition) Returns the composition
"""
elmap = collections.defaultdict(float)
for site in self:
for species, occu in site.species.items():
elmap[species] += occu
return Composition(elmap) |
def inverse(self):
"""Returns a new instance of MarginalRateTaxScale
Invert a taxscale:
Assume tax_scale composed of bracket which thresholds are expressed in term of brut revenue.
The inverse is another MarginalTaxSclae which thresholds are expressed in terms of net revenue.
If net = revbrut - tax_scale.calc(revbrut) then brut = tax_scale.inverse().calc(net)
"""
# threshold : threshold of brut revenue
# net_threshold: threshold of net revenue
# theta : ordonnée à l'origine des segments des différents seuils dans une
# représentation du revenu imposable comme fonction linéaire par
# morceaux du revenu brut
# Actually 1 / (1- global_rate)
inverse = self.__class__(name = self.name + "'", option = self.option, unit = self.unit)
net_threshold = 0
for threshold, rate in zip(self.thresholds, self.rates):
if threshold == 0:
previous_rate = 0
theta = 0
# On calcule le seuil de revenu imposable de la tranche considérée.
net_threshold = (1 - previous_rate) * threshold + theta
inverse.add_bracket(net_threshold, 1 / (1 - rate))
theta = (rate - previous_rate) * threshold + theta
previous_rate = rate
return inverse | Returns a new instance of MarginalRateTaxScale
Invert a taxscale:
Assume tax_scale composed of bracket which thresholds are expressed in term of brut revenue.
The inverse is another MarginalTaxSclae which thresholds are expressed in terms of net revenue.
If net = revbrut - tax_scale.calc(revbrut) then brut = tax_scale.inverse().calc(net) | Below is the the instruction that describes the task:
### Input:
Returns a new instance of MarginalRateTaxScale
Invert a taxscale:
Assume tax_scale composed of bracket which thresholds are expressed in term of brut revenue.
The inverse is another MarginalTaxSclae which thresholds are expressed in terms of net revenue.
If net = revbrut - tax_scale.calc(revbrut) then brut = tax_scale.inverse().calc(net)
### Response:
def inverse(self):
"""Returns a new instance of MarginalRateTaxScale
Invert a taxscale:
Assume tax_scale composed of bracket which thresholds are expressed in term of brut revenue.
The inverse is another MarginalTaxSclae which thresholds are expressed in terms of net revenue.
If net = revbrut - tax_scale.calc(revbrut) then brut = tax_scale.inverse().calc(net)
"""
# threshold : threshold of brut revenue
# net_threshold: threshold of net revenue
# theta : ordonnée à l'origine des segments des différents seuils dans une
# représentation du revenu imposable comme fonction linéaire par
# morceaux du revenu brut
# Actually 1 / (1- global_rate)
inverse = self.__class__(name = self.name + "'", option = self.option, unit = self.unit)
net_threshold = 0
for threshold, rate in zip(self.thresholds, self.rates):
if threshold == 0:
previous_rate = 0
theta = 0
# On calcule le seuil de revenu imposable de la tranche considérée.
net_threshold = (1 - previous_rate) * threshold + theta
inverse.add_bracket(net_threshold, 1 / (1 - rate))
theta = (rate - previous_rate) * threshold + theta
previous_rate = rate
return inverse |
def get_unconnected_nodes(sentence_graph):
"""
Takes a TigerSentenceGraph and returns a list of node IDs of
unconnected nodes.
A node is unconnected, if it doesn't have any in- or outgoing edges.
A node is NOT considered unconnected, if the graph only consists of
that particular node.
Parameters
----------
sentence_graph : TigerSentenceGraph
a directed graph representing one syntax annotated sentence from
a TigerXML file
Returns
-------
unconnected_node_ids : list of str
a list of node IDs of unconnected nodes
"""
return [node for node in sentence_graph.nodes_iter()
if sentence_graph.degree(node) == 0 and
sentence_graph.number_of_nodes() > 1] | Takes a TigerSentenceGraph and returns a list of node IDs of
unconnected nodes.
A node is unconnected, if it doesn't have any in- or outgoing edges.
A node is NOT considered unconnected, if the graph only consists of
that particular node.
Parameters
----------
sentence_graph : TigerSentenceGraph
a directed graph representing one syntax annotated sentence from
a TigerXML file
Returns
-------
unconnected_node_ids : list of str
a list of node IDs of unconnected nodes | Below is the the instruction that describes the task:
### Input:
Takes a TigerSentenceGraph and returns a list of node IDs of
unconnected nodes.
A node is unconnected, if it doesn't have any in- or outgoing edges.
A node is NOT considered unconnected, if the graph only consists of
that particular node.
Parameters
----------
sentence_graph : TigerSentenceGraph
a directed graph representing one syntax annotated sentence from
a TigerXML file
Returns
-------
unconnected_node_ids : list of str
a list of node IDs of unconnected nodes
### Response:
def get_unconnected_nodes(sentence_graph):
"""
Takes a TigerSentenceGraph and returns a list of node IDs of
unconnected nodes.
A node is unconnected, if it doesn't have any in- or outgoing edges.
A node is NOT considered unconnected, if the graph only consists of
that particular node.
Parameters
----------
sentence_graph : TigerSentenceGraph
a directed graph representing one syntax annotated sentence from
a TigerXML file
Returns
-------
unconnected_node_ids : list of str
a list of node IDs of unconnected nodes
"""
return [node for node in sentence_graph.nodes_iter()
if sentence_graph.degree(node) == 0 and
sentence_graph.number_of_nodes() > 1] |
def drawBezier(self, p1, p2, p3, p4):
"""Draw a standard cubic Bezier curve.
"""
p1 = Point(p1)
p2 = Point(p2)
p3 = Point(p3)
p4 = Point(p4)
if not (self.lastPoint == p1):
self.draw_cont += "%g %g m\n" % JM_TUPLE(p1 * self.ipctm)
self.draw_cont += "%g %g %g %g %g %g c\n" % JM_TUPLE(list(p2 * self.ipctm) + \
list(p3 * self.ipctm) + \
list(p4 * self.ipctm))
self.updateRect(p1)
self.updateRect(p2)
self.updateRect(p3)
self.updateRect(p4)
self.lastPoint = p4
return self.lastPoint | Draw a standard cubic Bezier curve. | Below is the the instruction that describes the task:
### Input:
Draw a standard cubic Bezier curve.
### Response:
def drawBezier(self, p1, p2, p3, p4):
"""Draw a standard cubic Bezier curve.
"""
p1 = Point(p1)
p2 = Point(p2)
p3 = Point(p3)
p4 = Point(p4)
if not (self.lastPoint == p1):
self.draw_cont += "%g %g m\n" % JM_TUPLE(p1 * self.ipctm)
self.draw_cont += "%g %g %g %g %g %g c\n" % JM_TUPLE(list(p2 * self.ipctm) + \
list(p3 * self.ipctm) + \
list(p4 * self.ipctm))
self.updateRect(p1)
self.updateRect(p2)
self.updateRect(p3)
self.updateRect(p4)
self.lastPoint = p4
return self.lastPoint |
def parse_lrvalue_string(search_string,
delimiter=":"):
'''
The function takes a multi-line output/string with the format
"name/descr : value", and converts it to a dictionary object
with key value pairs, where key is built from the name/desc
part and value as the value.
eg: "Serial Number: FCH1724V1GT" will be translated to
dict['serial_number'] = "FCH1724V1GT"
'''
mac_search_pattern = r"(.*) *%s ([\w|\d]+.*)" % delimiter
search_pattern = r"(.*) *%s *(.*)" % delimiter
rexdict = {}
for line in search_string.splitlines():
line = line.strip()
mobj = re.match(mac_search_pattern, line)
if mobj:
key = mobj.group(1).lower()
key = "_".join(key.split()[0:3])
key = key.strip()
rexdict[key] = mobj.group(2)
continue
mobj = re.match(search_pattern, line)
if mobj:
key = mobj.group(1).lower()
key = "_".join(key.split()[0:3])
key = key.strip()
rexdict[key] = mobj.group(2)
return rexdict | The function takes a multi-line output/string with the format
"name/descr : value", and converts it to a dictionary object
with key value pairs, where key is built from the name/desc
part and value as the value.
eg: "Serial Number: FCH1724V1GT" will be translated to
dict['serial_number'] = "FCH1724V1GT" | Below is the the instruction that describes the task:
### Input:
The function takes a multi-line output/string with the format
"name/descr : value", and converts it to a dictionary object
with key value pairs, where key is built from the name/desc
part and value as the value.
eg: "Serial Number: FCH1724V1GT" will be translated to
dict['serial_number'] = "FCH1724V1GT"
### Response:
def parse_lrvalue_string(search_string,
delimiter=":"):
'''
The function takes a multi-line output/string with the format
"name/descr : value", and converts it to a dictionary object
with key value pairs, where key is built from the name/desc
part and value as the value.
eg: "Serial Number: FCH1724V1GT" will be translated to
dict['serial_number'] = "FCH1724V1GT"
'''
mac_search_pattern = r"(.*) *%s ([\w|\d]+.*)" % delimiter
search_pattern = r"(.*) *%s *(.*)" % delimiter
rexdict = {}
for line in search_string.splitlines():
line = line.strip()
mobj = re.match(mac_search_pattern, line)
if mobj:
key = mobj.group(1).lower()
key = "_".join(key.split()[0:3])
key = key.strip()
rexdict[key] = mobj.group(2)
continue
mobj = re.match(search_pattern, line)
if mobj:
key = mobj.group(1).lower()
key = "_".join(key.split()[0:3])
key = key.strip()
rexdict[key] = mobj.group(2)
return rexdict |
def calculate_ef_var(tpf, fpf):
"""
determine variance due to actives (efvar_a) decoys (efvar_d) and s2, the slope of the ROC curve tangent to the
fpf @ which the enrichment factor was calculated
:param tpf: float tpf @ which the enrichment factor was calculated
:param fpf: float fpf @ which the enrichment factor was calculated
:return efvara, efvard, s2: tuple
"""
efvara = (tpf * (1 - tpf))
efvard = (fpf * (1 - fpf))
ef = tpf / fpf
if fpf == 1:
return(0, 0, 0)
else:
s = ef * ( 1 + (np.log(ef)/np.log(fpf)))
s2 = s * s
return (efvara, efvard, s2) | determine variance due to actives (efvar_a) decoys (efvar_d) and s2, the slope of the ROC curve tangent to the
fpf @ which the enrichment factor was calculated
:param tpf: float tpf @ which the enrichment factor was calculated
:param fpf: float fpf @ which the enrichment factor was calculated
:return efvara, efvard, s2: tuple | Below is the the instruction that describes the task:
### Input:
determine variance due to actives (efvar_a) decoys (efvar_d) and s2, the slope of the ROC curve tangent to the
fpf @ which the enrichment factor was calculated
:param tpf: float tpf @ which the enrichment factor was calculated
:param fpf: float fpf @ which the enrichment factor was calculated
:return efvara, efvard, s2: tuple
### Response:
def calculate_ef_var(tpf, fpf):
"""
determine variance due to actives (efvar_a) decoys (efvar_d) and s2, the slope of the ROC curve tangent to the
fpf @ which the enrichment factor was calculated
:param tpf: float tpf @ which the enrichment factor was calculated
:param fpf: float fpf @ which the enrichment factor was calculated
:return efvara, efvard, s2: tuple
"""
efvara = (tpf * (1 - tpf))
efvard = (fpf * (1 - fpf))
ef = tpf / fpf
if fpf == 1:
return(0, 0, 0)
else:
s = ef * ( 1 + (np.log(ef)/np.log(fpf)))
s2 = s * s
return (efvara, efvard, s2) |
def video_load_time(self):
"""
Returns aggregate video load time for all pages.
"""
load_times = self.get_load_times('video')
return round(mean(load_times), self.decimal_precision) | Returns aggregate video load time for all pages. | Below is the instruction that describes the task:
### Input:
Returns aggregate video load time for all pages.
### Response:
def video_load_time(self):
"""
Returns aggregate video load time for all pages.
"""
load_times = self.get_load_times('video')
return round(mean(load_times), self.decimal_precision) |
def create_event(self, register=False):
"""Create an asyncio.Event inside the emulation loop.
This method exists as a convenience to create an Event object that is
associated with the correct EventLoop(). If you pass register=True,
then the event will be registered as an event that must be set for the
EmulationLoop to be considered idle. This means that whenever
wait_idle() is called, it will block until this event is set.
Examples of when you may want this behavior is when the event is
signaling whether a tile has completed restarting itself. The reset()
rpc cannot block until the tile has initialized since it may need to
send its own rpcs as part of the initialization process. However, we
want to retain the behavior that once the reset() rpc returns the tile
has been completely reset.
The cleanest way of achieving this is to have the tile set its
self.initialized Event when it has finished rebooting and register
that event so that wait_idle() nicely blocks until the reset process
is complete.
Args:
register (bool): Whether to register the event so that wait_idle
blocks until it is set.
Returns:
asyncio.Event: The Event object.
"""
event = asyncio.Event(loop=self._loop)
if register:
self._events.add(event)
return event | Create an asyncio.Event inside the emulation loop.
This method exists as a convenience to create an Event object that is
associated with the correct EventLoop(). If you pass register=True,
then the event will be registered as an event that must be set for the
EmulationLoop to be considered idle. This means that whenever
wait_idle() is called, it will block until this event is set.
Examples of when you may want this behavior is when the event is
signaling whether a tile has completed restarting itself. The reset()
rpc cannot block until the tile has initialized since it may need to
send its own rpcs as part of the initialization process. However, we
want to retain the behavior that once the reset() rpc returns the tile
has been completely reset.
The cleanest way of achieving this is to have the tile set its
self.initialized Event when it has finished rebooting and register
that event so that wait_idle() nicely blocks until the reset process
is complete.
Args:
register (bool): Whether to register the event so that wait_idle
blocks until it is set.
Returns:
asyncio.Event: The Event object. | Below is the instruction that describes the task:
### Input:
Create an asyncio.Event inside the emulation loop.
This method exists as a convenience to create an Event object that is
associated with the correct EventLoop(). If you pass register=True,
then the event will be registered as an event that must be set for the
EmulationLoop to be considered idle. This means that whenever
wait_idle() is called, it will block until this event is set.
Examples of when you may want this behavior is when the event is
signaling whether a tile has completed restarting itself. The reset()
rpc cannot block until the tile has initialized since it may need to
send its own rpcs as part of the initialization process. However, we
want to retain the behavior that once the reset() rpc returns the tile
has been completely reset.
The cleanest way of achieving this is to have the tile set its
self.initialized Event when it has finished rebooting and register
that event so that wait_idle() nicely blocks until the reset process
is complete.
Args:
register (bool): Whether to register the event so that wait_idle
blocks until it is set.
Returns:
asyncio.Event: The Event object.
### Response:
def create_event(self, register=False):
"""Create an asyncio.Event inside the emulation loop.
This method exists as a convenience to create an Event object that is
associated with the correct EventLoop(). If you pass register=True,
then the event will be registered as an event that must be set for the
EmulationLoop to be considered idle. This means that whenever
wait_idle() is called, it will block until this event is set.
Examples of when you may want this behavior is when the event is
signaling whether a tile has completed restarting itself. The reset()
rpc cannot block until the tile has initialized since it may need to
send its own rpcs as part of the initialization process. However, we
want to retain the behavior that once the reset() rpc returns the tile
has been completely reset.
The cleanest way of achieving this is to have the tile set its
self.initialized Event when it has finished rebooting and register
that event so that wait_idle() nicely blocks until the reset process
is complete.
Args:
register (bool): Whether to register the event so that wait_idle
blocks until it is set.
Returns:
asyncio.Event: The Event object.
"""
event = asyncio.Event(loop=self._loop)
if register:
self._events.add(event)
return event |
def venv_pth(self, dirs):
'''
Add the directories in `dirs` to the `sys.path`. A venv.pth file
will be written in the site-packages dir of this virtualenv to add
dirs to sys.path.
dirs: a list of directories.
'''
# Create venv.pth to add dirs to sys.path when using the virtualenv.
text = StringIO.StringIO()
text.write("# Autogenerated file. Do not modify.\n")
for path in dirs:
text.write('{}\n'.format(path))
put(text, os.path.join(self.site_packages_dir(), 'venv.pth'), mode=0664) | Add the directories in `dirs` to the `sys.path`. A venv.pth file
will be written in the site-packages dir of this virtualenv to add
dirs to sys.path.
dirs: a list of directories. | Below is the instruction that describes the task:
### Input:
Add the directories in `dirs` to the `sys.path`. A venv.pth file
will be written in the site-packages dir of this virtualenv to add
dirs to sys.path.
dirs: a list of directories.
### Response:
def venv_pth(self, dirs):
'''
Add the directories in `dirs` to the `sys.path`. A venv.pth file
will be written in the site-packages dir of this virtualenv to add
dirs to sys.path.
dirs: a list of directories.
'''
# Create venv.pth to add dirs to sys.path when using the virtualenv.
text = StringIO.StringIO()
text.write("# Autogenerated file. Do not modify.\n")
for path in dirs:
text.write('{}\n'.format(path))
put(text, os.path.join(self.site_packages_dir(), 'venv.pth'), mode=0664) |
def named_crumb(context, name, *args, **kwargs):
""" Resolves given named URL and returns the relevant breadcrumb label (if
available). Usage::
<a href="{% url project-detail project.slug %}">
{% named_crumb project-detail project.slug %}
</a>
"""
url = reverse(name, args=args, kwargs=kwargs)
return find_crumb(context['request'], url) | Resolves given named URL and returns the relevant breadcrumb label (if
available). Usage::
<a href="{% url project-detail project.slug %}">
{% named_crumb project-detail project.slug %}
</a> | Below is the instruction that describes the task:
### Input:
Resolves given named URL and returns the relevant breadcrumb label (if
available). Usage::
<a href="{% url project-detail project.slug %}">
{% named_crumb project-detail project.slug %}
</a>
### Response:
def named_crumb(context, name, *args, **kwargs):
""" Resolves given named URL and returns the relevant breadcrumb label (if
available). Usage::
<a href="{% url project-detail project.slug %}">
{% named_crumb project-detail project.slug %}
</a>
"""
url = reverse(name, args=args, kwargs=kwargs)
return find_crumb(context['request'], url) |
def is_businessperiod(cls, in_period):
"""
:param in_period: object to be checked
:type in_period: object, str, timedelta
:return: True if cast works
:rtype: Boolean
checks is argument con becasted to BusinessPeriod
"""
try: # to be removed
if str(in_period).upper() == '0D':
return True
else:
p = BusinessPeriod(str(in_period))
return not (p.days == 0 and p.months == 0 and p.years == 0 and p.businessdays == 0)
except:
return False | :param in_period: object to be checked
:type in_period: object, str, timedelta
:return: True if cast works
:rtype: Boolean
checks is argument con becasted to BusinessPeriod | Below is the instruction that describes the task:
### Input:
:param in_period: object to be checked
:type in_period: object, str, timedelta
:return: True if cast works
:rtype: Boolean
checks is argument con becasted to BusinessPeriod
### Response:
def is_businessperiod(cls, in_period):
"""
:param in_period: object to be checked
:type in_period: object, str, timedelta
:return: True if cast works
:rtype: Boolean
checks is argument con becasted to BusinessPeriod
"""
try: # to be removed
if str(in_period).upper() == '0D':
return True
else:
p = BusinessPeriod(str(in_period))
return not (p.days == 0 and p.months == 0 and p.years == 0 and p.businessdays == 0)
except:
return False |
def UpsertUser(self, database_link, user, options=None):
"""Upserts a user.
:param str database_link:
The link to the database.
:param dict user:
The Azure Cosmos user to upsert.
:param dict options:
The request options for the request.
:return:
The upserted User.
:rtype: dict
"""
if options is None:
options = {}
database_id, path = self._GetDatabaseIdWithPathForUser(database_link, user)
return self.Upsert(user,
path,
'users',
database_id,
None,
options) | Upserts a user.
:param str database_link:
The link to the database.
:param dict user:
The Azure Cosmos user to upsert.
:param dict options:
The request options for the request.
:return:
The upserted User.
:rtype: dict | Below is the instruction that describes the task:
### Input:
Upserts a user.
:param str database_link:
The link to the database.
:param dict user:
The Azure Cosmos user to upsert.
:param dict options:
The request options for the request.
:return:
The upserted User.
:rtype: dict
### Response:
def UpsertUser(self, database_link, user, options=None):
"""Upserts a user.
:param str database_link:
The link to the database.
:param dict user:
The Azure Cosmos user to upsert.
:param dict options:
The request options for the request.
:return:
The upserted User.
:rtype: dict
"""
if options is None:
options = {}
database_id, path = self._GetDatabaseIdWithPathForUser(database_link, user)
return self.Upsert(user,
path,
'users',
database_id,
None,
options) |
def set_from_file(file_name):
"""
Merge configuration from a file with JSON data
:param file_name: name of the file to be read
:raises TypeError: if file_name is not str
"""
if type(file_name) != str:
raise TypeError('file_name must be str')
global _config_file_name
_config_file_name = file_name
# Try to open the file and get the json data into a dictionary
with open(file_name, "r") as file:
data = yaml.load(file)
# each value found will overwrite the same value in the config
_list_merge(data, _config) | Merge configuration from a file with JSON data
:param file_name: name of the file to be read
:raises TypeError: if file_name is not str | Below is the instruction that describes the task:
### Input:
Merge configuration from a file with JSON data
:param file_name: name of the file to be read
:raises TypeError: if file_name is not str
### Response:
def set_from_file(file_name):
"""
Merge configuration from a file with JSON data
:param file_name: name of the file to be read
:raises TypeError: if file_name is not str
"""
if type(file_name) != str:
raise TypeError('file_name must be str')
global _config_file_name
_config_file_name = file_name
# Try to open the file and get the json data into a dictionary
with open(file_name, "r") as file:
data = yaml.load(file)
# each value found will overwrite the same value in the config
_list_merge(data, _config) |
def optical_flow_rad_send(self, time_usec, sensor_id, integration_time_us, integrated_x, integrated_y, integrated_xgyro, integrated_ygyro, integrated_zgyro, temperature, quality, time_delta_distance_us, distance, force_mavlink1=False):
'''
Optical flow from an angular rate flow sensor (e.g. PX4FLOW or mouse
sensor)
time_usec : Timestamp (microseconds, synced to UNIX time or since system boot) (uint64_t)
sensor_id : Sensor ID (uint8_t)
integration_time_us : Integration time in microseconds. Divide integrated_x and integrated_y by the integration time to obtain average flow. The integration time also indicates the. (uint32_t)
integrated_x : Flow in radians around X axis (Sensor RH rotation about the X axis induces a positive flow. Sensor linear motion along the positive Y axis induces a negative flow.) (float)
integrated_y : Flow in radians around Y axis (Sensor RH rotation about the Y axis induces a positive flow. Sensor linear motion along the positive X axis induces a positive flow.) (float)
integrated_xgyro : RH rotation around X axis (rad) (float)
integrated_ygyro : RH rotation around Y axis (rad) (float)
integrated_zgyro : RH rotation around Z axis (rad) (float)
temperature : Temperature * 100 in centi-degrees Celsius (int16_t)
quality : Optical flow quality / confidence. 0: no valid flow, 255: maximum quality (uint8_t)
time_delta_distance_us : Time in microseconds since the distance was sampled. (uint32_t)
distance : Distance to the center of the flow field in meters. Positive value (including zero): distance known. Negative value: Unknown distance. (float)
'''
return self.send(self.optical_flow_rad_encode(time_usec, sensor_id, integration_time_us, integrated_x, integrated_y, integrated_xgyro, integrated_ygyro, integrated_zgyro, temperature, quality, time_delta_distance_us, distance), force_mavlink1=force_mavlink1) | Optical flow from an angular rate flow sensor (e.g. PX4FLOW or mouse
sensor)
time_usec : Timestamp (microseconds, synced to UNIX time or since system boot) (uint64_t)
sensor_id : Sensor ID (uint8_t)
integration_time_us : Integration time in microseconds. Divide integrated_x and integrated_y by the integration time to obtain average flow. The integration time also indicates the. (uint32_t)
integrated_x : Flow in radians around X axis (Sensor RH rotation about the X axis induces a positive flow. Sensor linear motion along the positive Y axis induces a negative flow.) (float)
integrated_y : Flow in radians around Y axis (Sensor RH rotation about the Y axis induces a positive flow. Sensor linear motion along the positive X axis induces a positive flow.) (float)
integrated_xgyro : RH rotation around X axis (rad) (float)
integrated_ygyro : RH rotation around Y axis (rad) (float)
integrated_zgyro : RH rotation around Z axis (rad) (float)
temperature : Temperature * 100 in centi-degrees Celsius (int16_t)
quality : Optical flow quality / confidence. 0: no valid flow, 255: maximum quality (uint8_t)
time_delta_distance_us : Time in microseconds since the distance was sampled. (uint32_t)
distance : Distance to the center of the flow field in meters. Positive value (including zero): distance known. Negative value: Unknown distance. (float) | Below is the instruction that describes the task:
### Input:
Optical flow from an angular rate flow sensor (e.g. PX4FLOW or mouse
sensor)
time_usec : Timestamp (microseconds, synced to UNIX time or since system boot) (uint64_t)
sensor_id : Sensor ID (uint8_t)
integration_time_us : Integration time in microseconds. Divide integrated_x and integrated_y by the integration time to obtain average flow. The integration time also indicates the. (uint32_t)
integrated_x : Flow in radians around X axis (Sensor RH rotation about the X axis induces a positive flow. Sensor linear motion along the positive Y axis induces a negative flow.) (float)
integrated_y : Flow in radians around Y axis (Sensor RH rotation about the Y axis induces a positive flow. Sensor linear motion along the positive X axis induces a positive flow.) (float)
integrated_xgyro : RH rotation around X axis (rad) (float)
integrated_ygyro : RH rotation around Y axis (rad) (float)
integrated_zgyro : RH rotation around Z axis (rad) (float)
temperature : Temperature * 100 in centi-degrees Celsius (int16_t)
quality : Optical flow quality / confidence. 0: no valid flow, 255: maximum quality (uint8_t)
time_delta_distance_us : Time in microseconds since the distance was sampled. (uint32_t)
distance : Distance to the center of the flow field in meters. Positive value (including zero): distance known. Negative value: Unknown distance. (float)
### Response:
def optical_flow_rad_send(self, time_usec, sensor_id, integration_time_us, integrated_x, integrated_y, integrated_xgyro, integrated_ygyro, integrated_zgyro, temperature, quality, time_delta_distance_us, distance, force_mavlink1=False):
'''
Optical flow from an angular rate flow sensor (e.g. PX4FLOW or mouse
sensor)
time_usec : Timestamp (microseconds, synced to UNIX time or since system boot) (uint64_t)
sensor_id : Sensor ID (uint8_t)
integration_time_us : Integration time in microseconds. Divide integrated_x and integrated_y by the integration time to obtain average flow. The integration time also indicates the. (uint32_t)
integrated_x : Flow in radians around X axis (Sensor RH rotation about the X axis induces a positive flow. Sensor linear motion along the positive Y axis induces a negative flow.) (float)
integrated_y : Flow in radians around Y axis (Sensor RH rotation about the Y axis induces a positive flow. Sensor linear motion along the positive X axis induces a positive flow.) (float)
integrated_xgyro : RH rotation around X axis (rad) (float)
integrated_ygyro : RH rotation around Y axis (rad) (float)
integrated_zgyro : RH rotation around Z axis (rad) (float)
temperature : Temperature * 100 in centi-degrees Celsius (int16_t)
quality : Optical flow quality / confidence. 0: no valid flow, 255: maximum quality (uint8_t)
time_delta_distance_us : Time in microseconds since the distance was sampled. (uint32_t)
distance : Distance to the center of the flow field in meters. Positive value (including zero): distance known. Negative value: Unknown distance. (float)
'''
return self.send(self.optical_flow_rad_encode(time_usec, sensor_id, integration_time_us, integrated_x, integrated_y, integrated_xgyro, integrated_ygyro, integrated_zgyro, temperature, quality, time_delta_distance_us, distance), force_mavlink1=force_mavlink1) |
def run(self):
"""Starts the receiver."""
executor = concurrent.futures.ThreadPoolExecutor(max_workers=1)
loop = asyncio.get_event_loop()
loop.run_until_complete(self._run_loop(executor))
self._log.info('Shutting down...')
executor.shutdown() | Starts the receiver. | Below is the instruction that describes the task:
### Input:
Starts the receiver.
### Response:
def run(self):
"""Starts the receiver."""
executor = concurrent.futures.ThreadPoolExecutor(max_workers=1)
loop = asyncio.get_event_loop()
loop.run_until_complete(self._run_loop(executor))
self._log.info('Shutting down...')
executor.shutdown() |
def import_by_path(path):
"""Append the path to sys.path, then attempt to import module with
path's basename, finally making certain to remove appended path.
http://stackoverflow.com/questions/1096216/override-namespace-in-python"""
sys.path.append(os.path.dirname(path))
try:
return __import__(os.path.basename(path))
except ImportError:
logger.warn('unable to import {0}'.format(path))
finally:
del sys.path[-1] | Append the path to sys.path, then attempt to import module with
path's basename, finally making certain to remove appended path.
http://stackoverflow.com/questions/1096216/override-namespace-in-python | Below is the instruction that describes the task:
### Input:
Append the path to sys.path, then attempt to import module with
path's basename, finally making certain to remove appended path.
http://stackoverflow.com/questions/1096216/override-namespace-in-python
### Response:
def import_by_path(path):
"""Append the path to sys.path, then attempt to import module with
path's basename, finally making certain to remove appended path.
http://stackoverflow.com/questions/1096216/override-namespace-in-python"""
sys.path.append(os.path.dirname(path))
try:
return __import__(os.path.basename(path))
except ImportError:
logger.warn('unable to import {0}'.format(path))
finally:
del sys.path[-1] |
def tunnel(container, local_port, remote_port=None, gateway_port=None):
'''
Set up an SSH tunnel into the container, using the host as a gateway host.
Args:
* container: Container name or ID
* local_port: Local port
* remote_port=None: Port on the Docker container (defaults to local_port)
* gateway_port=None: Port on the gateway host (defaults to remote_port)
'''
if remote_port is None:
remote_port = local_port
if gateway_port is None:
gateway_port = remote_port
remote_host = get_ip(container)
command = '''
ssh -v
-o StrictHostKeyChecking=no
-i "%(key_filename)s"
-L %(local_port)s:localhost:%(gateway_port)s
%(gateway_user)s@%(gateway_host)s
sshpass -p root
ssh -o StrictHostKeyChecking=no
-L %(gateway_port)s:localhost:%(remote_port)s
root@%(remote_host)s
''' % {
'key_filename': env.key_filename,
'local_port': local_port,
'gateway_port': gateway_port,
'gateway_user': env.user,
'gateway_host': env.host,
'remote_port': remote_port,
'remote_host': remote_host,
}
command = command.replace('\n', '')
local(command) | Set up an SSH tunnel into the container, using the host as a gateway host.
Args:
* container: Container name or ID
* local_port: Local port
* remote_port=None: Port on the Docker container (defaults to local_port)
* gateway_port=None: Port on the gateway host (defaults to remote_port) | Below is the instruction that describes the task:
### Input:
Set up an SSH tunnel into the container, using the host as a gateway host.
Args:
* container: Container name or ID
* local_port: Local port
* remote_port=None: Port on the Docker container (defaults to local_port)
* gateway_port=None: Port on the gateway host (defaults to remote_port)
### Response:
def tunnel(container, local_port, remote_port=None, gateway_port=None):
'''
Set up an SSH tunnel into the container, using the host as a gateway host.
Args:
* container: Container name or ID
* local_port: Local port
* remote_port=None: Port on the Docker container (defaults to local_port)
* gateway_port=None: Port on the gateway host (defaults to remote_port)
'''
if remote_port is None:
remote_port = local_port
if gateway_port is None:
gateway_port = remote_port
remote_host = get_ip(container)
command = '''
ssh -v
-o StrictHostKeyChecking=no
-i "%(key_filename)s"
-L %(local_port)s:localhost:%(gateway_port)s
%(gateway_user)s@%(gateway_host)s
sshpass -p root
ssh -o StrictHostKeyChecking=no
-L %(gateway_port)s:localhost:%(remote_port)s
root@%(remote_host)s
''' % {
'key_filename': env.key_filename,
'local_port': local_port,
'gateway_port': gateway_port,
'gateway_user': env.user,
'gateway_host': env.host,
'remote_port': remote_port,
'remote_host': remote_host,
}
command = command.replace('\n', '')
local(command) |
def mime_type(self, path):
"""Get mime-type from filename"""
name, ext = os.path.splitext(path)
return MIME_TYPES[ext] | Get mime-type from filename | Below is the instruction that describes the task:
### Input:
Get mime-type from filename
### Response:
def mime_type(self, path):
"""Get mime-type from filename"""
name, ext = os.path.splitext(path)
return MIME_TYPES[ext] |
def hash_length(instance):
"""Ensure keys in 'hashes'-type properties are no more than 30 characters long.
"""
for key, obj in instance['objects'].items():
if 'type' not in obj:
continue
if obj['type'] == 'file':
try:
hashes = obj['hashes']
except KeyError:
pass
else:
for h in hashes:
if (len(h) > 30):
yield JSONError("Object '%s' has a 'hashes' dictionary"
" with a hash of type '%s', which is "
"longer than 30 characters."
% (key, h), instance['id'], 'hash-algo')
try:
ads = obj['extensions']['ntfs-ext']['alternate_data_streams']
except KeyError:
pass
else:
for datastream in ads:
if 'hashes' not in datastream:
continue
for h in datastream['hashes']:
if (len(h) > 30):
yield JSONError("Object '%s' has an NTFS extension"
" with an alternate data stream that has a"
" 'hashes' dictionary with a hash of type "
"'%s', which is longer than 30 "
"characters."
% (key, h), instance['id'], 'hash-algo')
try:
head_hashes = obj['extensions']['windows-pebinary-ext']['file_header_hashes']
except KeyError:
pass
else:
for h in head_hashes:
if (len(h) > 30):
yield JSONError("Object '%s' has a Windows PE Binary "
"File extension with a file header hash of "
"'%s', which is longer than 30 "
"characters."
% (key, h), instance['id'], 'hash-algo')
try:
hashes = obj['extensions']['windows-pebinary-ext']['optional_header']['hashes']
except KeyError:
pass
else:
for h in hashes:
if (len(h) > 30):
yield JSONError("Object '%s' has a Windows PE Binary "
"File extension with an optional header that "
"has a hash of '%s', which is longer "
"than 30 characters."
% (key, h), instance['id'], 'hash-algo')
try:
sections = obj['extensions']['windows-pebinary-ext']['sections']
except KeyError:
pass
else:
for s in sections:
if 'hashes' not in s:
continue
for h in s['hashes']:
if (len(h) > 30):
yield JSONError("Object '%s' has a Windows PE "
"Binary File extension with a section that"
" has a hash of '%s', which is "
"longer than 30 characters."
% (key, h), instance['id'], 'hash-algo')
elif obj['type'] == 'artifact' or obj['type'] == 'x509-certificate':
try:
hashes = obj['hashes']
except KeyError:
pass
else:
for h in hashes:
if (len(h) > 30):
yield JSONError("Object '%s' has a 'hashes' dictionary"
" with a hash of type '%s', which is "
"longer than 30 characters."
% (key, h), instance['id'], 'hash-algo') | Ensure keys in 'hashes'-type properties are no more than 30 characters long. | Below is the instruction that describes the task:
### Input:
Ensure keys in 'hashes'-type properties are no more than 30 characters long.
### Response:
def hash_length(instance):
"""Ensure keys in 'hashes'-type properties are no more than 30 characters long.
"""
for key, obj in instance['objects'].items():
if 'type' not in obj:
continue
if obj['type'] == 'file':
try:
hashes = obj['hashes']
except KeyError:
pass
else:
for h in hashes:
if (len(h) > 30):
yield JSONError("Object '%s' has a 'hashes' dictionary"
" with a hash of type '%s', which is "
"longer than 30 characters."
% (key, h), instance['id'], 'hash-algo')
try:
ads = obj['extensions']['ntfs-ext']['alternate_data_streams']
except KeyError:
pass
else:
for datastream in ads:
if 'hashes' not in datastream:
continue
for h in datastream['hashes']:
if (len(h) > 30):
yield JSONError("Object '%s' has an NTFS extension"
" with an alternate data stream that has a"
" 'hashes' dictionary with a hash of type "
"'%s', which is longer than 30 "
"characters."
% (key, h), instance['id'], 'hash-algo')
try:
head_hashes = obj['extensions']['windows-pebinary-ext']['file_header_hashes']
except KeyError:
pass
else:
for h in head_hashes:
if (len(h) > 30):
yield JSONError("Object '%s' has a Windows PE Binary "
"File extension with a file header hash of "
"'%s', which is longer than 30 "
"characters."
% (key, h), instance['id'], 'hash-algo')
try:
hashes = obj['extensions']['windows-pebinary-ext']['optional_header']['hashes']
except KeyError:
pass
else:
for h in hashes:
if (len(h) > 30):
yield JSONError("Object '%s' has a Windows PE Binary "
"File extension with an optional header that "
"has a hash of '%s', which is longer "
"than 30 characters."
% (key, h), instance['id'], 'hash-algo')
try:
sections = obj['extensions']['windows-pebinary-ext']['sections']
except KeyError:
pass
else:
for s in sections:
if 'hashes' not in s:
continue
for h in s['hashes']:
if (len(h) > 30):
yield JSONError("Object '%s' has a Windows PE "
"Binary File extension with a section that"
" has a hash of '%s', which is "
"longer than 30 characters."
% (key, h), instance['id'], 'hash-algo')
elif obj['type'] == 'artifact' or obj['type'] == 'x509-certificate':
try:
hashes = obj['hashes']
except KeyError:
pass
else:
for h in hashes:
if (len(h) > 30):
yield JSONError("Object '%s' has a 'hashes' dictionary"
" with a hash of type '%s', which is "
"longer than 30 characters."
% (key, h), instance['id'], 'hash-algo') |
def parse_log_path(args, trial_content):
'''parse log path'''
path_list = []
host_list = []
for trial in trial_content:
if args.trial_id and args.trial_id != 'all' and trial.get('id') != args.trial_id:
continue
pattern = r'(?P<head>.+)://(?P<host>.+):(?P<path>.*)'
match = re.search(pattern,trial['logPath'])
if match:
path_list.append(match.group('path'))
host_list.append(match.group('host'))
if not path_list:
print_error('Trial id %s error!' % args.trial_id)
exit(1)
return path_list, host_list | parse log path | Below is the instruction that describes the task:
### Input:
parse log path
### Response:
def parse_log_path(args, trial_content):
'''parse log path'''
path_list = []
host_list = []
for trial in trial_content:
if args.trial_id and args.trial_id != 'all' and trial.get('id') != args.trial_id:
continue
pattern = r'(?P<head>.+)://(?P<host>.+):(?P<path>.*)'
match = re.search(pattern,trial['logPath'])
if match:
path_list.append(match.group('path'))
host_list.append(match.group('host'))
if not path_list:
print_error('Trial id %s error!' % args.trial_id)
exit(1)
return path_list, host_list |
def _get_health_status(self, url, ssl_params, timeout):
"""
Don't send the "can connect" service check if we have troubles getting
the health status
"""
try:
r = self._perform_request(url, "/health", ssl_params, timeout)
# we don't use get() here so we can report a KeyError
return r.json()[self.HEALTH_KEY]
except Exception as e:
self.log.debug("Can't determine health status: {}".format(e)) | Don't send the "can connect" service check if we have troubles getting
the health status | Below is the instruction that describes the task:
### Input:
Don't send the "can connect" service check if we have troubles getting
the health status
### Response:
def _get_health_status(self, url, ssl_params, timeout):
"""
Don't send the "can connect" service check if we have troubles getting
the health status
"""
try:
r = self._perform_request(url, "/health", ssl_params, timeout)
# we don't use get() here so we can report a KeyError
return r.json()[self.HEALTH_KEY]
except Exception as e:
self.log.debug("Can't determine health status: {}".format(e)) |
def add(places, name, cmd, args, env=None, uid=None, gid=None, extras=None,
env_inherit=None):
"""Add a process.
:param places: a Places instance
:param name: string, the logical name of the process
:param cmd: string, executable
:param args: list of strings, command-line arguments
:param env: dictionary mapping strings to strings
(will be environment in subprocess)
:param uid: integer, uid to run the new process as
:param gid: integer, gid to run the new process as
:param extras: a dictionary with additional parameters
:param env_inherit: a list of environment variables to inherit
:returns: None
"""
args = [cmd]+args
config = filepath.FilePath(places.config)
fle = config.child(name)
details = dict(args=args)
if env is not None:
newEnv = {}
for thing in env:
name, value = thing.split('=', 1)
newEnv[name] = value
details['env'] = newEnv
if uid is not None:
details['uid'] = uid
if gid is not None:
details['gid'] = gid
if env_inherit is not None:
details['env_inherit'] = env_inherit
if extras is not None:
details.update(extras)
content = _dumps(details)
fle.setContent(content) | Add a process.
:param places: a Places instance
:param name: string, the logical name of the process
:param cmd: string, executable
:param args: list of strings, command-line arguments
:param env: dictionary mapping strings to strings
(will be environment in subprocess)
:param uid: integer, uid to run the new process as
:param gid: integer, gid to run the new process as
:param extras: a dictionary with additional parameters
:param env_inherit: a list of environment variables to inherit
:returns: None | Below is the instruction that describes the task:
### Input:
Add a process.
:param places: a Places instance
:param name: string, the logical name of the process
:param cmd: string, executable
:param args: list of strings, command-line arguments
:param env: dictionary mapping strings to strings
(will be environment in subprocess)
:param uid: integer, uid to run the new process as
:param gid: integer, gid to run the new process as
:param extras: a dictionary with additional parameters
:param env_inherit: a list of environment variables to inherit
:returns: None
### Response:
def add(places, name, cmd, args, env=None, uid=None, gid=None, extras=None,
env_inherit=None):
"""Add a process.
:param places: a Places instance
:param name: string, the logical name of the process
:param cmd: string, executable
:param args: list of strings, command-line arguments
:param env: dictionary mapping strings to strings
(will be environment in subprocess)
:param uid: integer, uid to run the new process as
:param gid: integer, gid to run the new process as
:param extras: a dictionary with additional parameters
:param env_inherit: a list of environment variables to inherit
:returns: None
"""
args = [cmd]+args
config = filepath.FilePath(places.config)
fle = config.child(name)
details = dict(args=args)
if env is not None:
newEnv = {}
for thing in env:
name, value = thing.split('=', 1)
newEnv[name] = value
details['env'] = newEnv
if uid is not None:
details['uid'] = uid
if gid is not None:
details['gid'] = gid
if env_inherit is not None:
details['env_inherit'] = env_inherit
if extras is not None:
details.update(extras)
content = _dumps(details)
fle.setContent(content) |
def pool_revert(self, pool_id, version_id):
"""Function to revert a specific pool (Requires login) (UNTESTED).
Parameters:
pool_id (int): Where pool_id is the pool id.
version_id (int):
"""
return self._get('pools/{0}/revert.json'.format(pool_id),
{'version_id': version_id}, method='PUT', auth=True) | Function to revert a specific pool (Requires login) (UNTESTED).
Parameters:
pool_id (int): Where pool_id is the pool id.
version_id (int): | Below is the the instruction that describes the task:
### Input:
Function to revert a specific pool (Requires login) (UNTESTED).
Parameters:
pool_id (int): Where pool_id is the pool id.
version_id (int):
### Response:
def pool_revert(self, pool_id, version_id):
"""Function to revert a specific pool (Requires login) (UNTESTED).
Parameters:
pool_id (int): Where pool_id is the pool id.
version_id (int):
"""
return self._get('pools/{0}/revert.json'.format(pool_id),
{'version_id': version_id}, method='PUT', auth=True) |
def date_time_this_century(
self,
before_now=True,
after_now=False,
tzinfo=None):
"""
Gets a DateTime object for the current century.
:param before_now: include days in current century before today
:param after_now: include days in current century after today
:param tzinfo: timezone, instance of datetime.tzinfo subclass
:example DateTime('2012-04-04 11:02:02')
:return DateTime
"""
now = datetime.now(tzinfo)
this_century_start = datetime(
now.year - (now.year % 100), 1, 1, tzinfo=tzinfo)
next_century_start = datetime(
min(this_century_start.year + 100, MAXYEAR), 1, 1, tzinfo=tzinfo)
if before_now and after_now:
return self.date_time_between_dates(
this_century_start, next_century_start, tzinfo)
elif not before_now and after_now:
return self.date_time_between_dates(now, next_century_start, tzinfo)
elif not after_now and before_now:
return self.date_time_between_dates(this_century_start, now, tzinfo)
else:
return now | Gets a DateTime object for the current century.
:param before_now: include days in current century before today
:param after_now: include days in current century after today
:param tzinfo: timezone, instance of datetime.tzinfo subclass
:example DateTime('2012-04-04 11:02:02')
:return DateTime | Below is the the instruction that describes the task:
### Input:
Gets a DateTime object for the current century.
:param before_now: include days in current century before today
:param after_now: include days in current century after today
:param tzinfo: timezone, instance of datetime.tzinfo subclass
:example DateTime('2012-04-04 11:02:02')
:return DateTime
### Response:
def date_time_this_century(
self,
before_now=True,
after_now=False,
tzinfo=None):
"""
Gets a DateTime object for the current century.
:param before_now: include days in current century before today
:param after_now: include days in current century after today
:param tzinfo: timezone, instance of datetime.tzinfo subclass
:example DateTime('2012-04-04 11:02:02')
:return DateTime
"""
now = datetime.now(tzinfo)
this_century_start = datetime(
now.year - (now.year % 100), 1, 1, tzinfo=tzinfo)
next_century_start = datetime(
min(this_century_start.year + 100, MAXYEAR), 1, 1, tzinfo=tzinfo)
if before_now and after_now:
return self.date_time_between_dates(
this_century_start, next_century_start, tzinfo)
elif not before_now and after_now:
return self.date_time_between_dates(now, next_century_start, tzinfo)
elif not after_now and before_now:
return self.date_time_between_dates(this_century_start, now, tzinfo)
else:
return now |
def get_tags_of_offer_per_page(self, offer_id, per_page=1000, page=1):
"""
Get tags of offer per page
:param per_page: How many objects per page. Default: 1000
:param page: Which page. Default: 1
:param offer_id: the offer id
:return: list
"""
return self._get_resource_per_page(
resource=OFFER_TAGS,
per_page=per_page,
page=page,
params={'offer_id': offer_id},
) | Get tags of offer per page
:param per_page: How many objects per page. Default: 1000
:param page: Which page. Default: 1
:param offer_id: the offer id
:return: list | Below is the the instruction that describes the task:
### Input:
Get tags of offer per page
:param per_page: How many objects per page. Default: 1000
:param page: Which page. Default: 1
:param offer_id: the offer id
:return: list
### Response:
def get_tags_of_offer_per_page(self, offer_id, per_page=1000, page=1):
"""
Get tags of offer per page
:param per_page: How many objects per page. Default: 1000
:param page: Which page. Default: 1
:param offer_id: the offer id
:return: list
"""
return self._get_resource_per_page(
resource=OFFER_TAGS,
per_page=per_page,
page=page,
params={'offer_id': offer_id},
) |
def factory(cfg, login, pswd, request_type):
"""
Instantiate ExportRequest
:param cfg: request configuration, should consist of request description (url and optional parameters)
:param login:
:param pswd:
:param request_type: TYPE_SET_FIELD_VALUE || TYPE_CREATE_ENTITY || TYPE_DELETE_ENTITY || TYPE_CREATE_RELATION
:return: ExportRequest instance
"""
if request_type == ExportRequest.TYPE_SET_FIELD_VALUE:
return SetFieldValueRequest(cfg, login, pswd)
elif request_type == ExportRequest.TYPE_CREATE_ENTITY:
return CreateEntityRequest(cfg, login, pswd)
elif request_type == ExportRequest.TYPE_DELETE_ENTITY:
return DeleteEntityRequest(cfg, login, pswd)
elif request_type == ExportRequest.TYPE_CREATE_RELATION:
return CreateRelationRequest(cfg, login, pswd)
else:
raise NotImplementedError('Not supported request type - {}'.format(request_type)) | Instantiate ExportRequest
:param cfg: request configuration, should consist of request description (url and optional parameters)
:param login:
:param pswd:
:param request_type: TYPE_SET_FIELD_VALUE || TYPE_CREATE_ENTITY || TYPE_DELETE_ENTITY || TYPE_CREATE_RELATION
:return: ExportRequest instance | Below is the the instruction that describes the task:
### Input:
Instantiate ExportRequest
:param cfg: request configuration, should consist of request description (url and optional parameters)
:param login:
:param pswd:
:param request_type: TYPE_SET_FIELD_VALUE || TYPE_CREATE_ENTITY || TYPE_DELETE_ENTITY || TYPE_CREATE_RELATION
:return: ExportRequest instance
### Response:
def factory(cfg, login, pswd, request_type):
"""
Instantiate ExportRequest
:param cfg: request configuration, should consist of request description (url and optional parameters)
:param login:
:param pswd:
:param request_type: TYPE_SET_FIELD_VALUE || TYPE_CREATE_ENTITY || TYPE_DELETE_ENTITY || TYPE_CREATE_RELATION
:return: ExportRequest instance
"""
if request_type == ExportRequest.TYPE_SET_FIELD_VALUE:
return SetFieldValueRequest(cfg, login, pswd)
elif request_type == ExportRequest.TYPE_CREATE_ENTITY:
return CreateEntityRequest(cfg, login, pswd)
elif request_type == ExportRequest.TYPE_DELETE_ENTITY:
return DeleteEntityRequest(cfg, login, pswd)
elif request_type == ExportRequest.TYPE_CREATE_RELATION:
return CreateRelationRequest(cfg, login, pswd)
else:
raise NotImplementedError('Not supported request type - {}'.format(request_type)) |
def convert_md_to_rst(source, destination=None, backup_dir=None):
"""Try to convert the source, an .md (markdown) file, to an .rst
(reStructuredText) file at the destination. If the destination isn't
provided, it defaults to be the same as the source path except for the
filename extension. If the destination file already exists, it will be
overwritten. In the event of an error, the destination file will be
left untouched."""
# Doing this in the function instead of the module level ensures the
# error occurs when the function is called, rather than when the module
# is evaluated.
try:
import pypandoc
except ImportError:
# Don't give up right away; first try to install the python module.
os.system("pip install pypandoc")
import pypandoc
# Set our destination path to a default, if necessary
destination = destination or (os.path.splitext(source)[0] + '.rst')
# Likewise for the backup directory
backup_dir = backup_dir or os.path.join(os.path.dirname(destination),
'bak')
bak_name = (os.path.basename(destination) +
time.strftime('.%Y%m%d%H%M%S.bak'))
bak_path = os.path.join(backup_dir, bak_name)
# If there's already a file at the destination path, move it out of the
# way, but don't delete it.
if os.path.isfile(destination):
if not os.path.isdir(os.path.dirname(bak_path)):
os.mkdir(os.path.dirname(bak_path))
os.rename(destination, bak_path)
try:
# Try to convert the file.
pypandoc.convert(
source,
'rst',
format='md',
outputfile=destination
)
except:
# If for any reason the conversion fails, try to put things back
# like we found them.
if os.path.isfile(destination):
os.remove(destination)
if os.path.isfile(bak_path):
os.rename(bak_path, destination)
raise | Try to convert the source, an .md (markdown) file, to an .rst
(reStructuredText) file at the destination. If the destination isn't
provided, it defaults to be the same as the source path except for the
filename extension. If the destination file already exists, it will be
overwritten. In the event of an error, the destination file will be
left untouched. | Below is the the instruction that describes the task:
### Input:
Try to convert the source, an .md (markdown) file, to an .rst
(reStructuredText) file at the destination. If the destination isn't
provided, it defaults to be the same as the source path except for the
filename extension. If the destination file already exists, it will be
overwritten. In the event of an error, the destination file will be
left untouched.
### Response:
def convert_md_to_rst(source, destination=None, backup_dir=None):
"""Try to convert the source, an .md (markdown) file, to an .rst
(reStructuredText) file at the destination. If the destination isn't
provided, it defaults to be the same as the source path except for the
filename extension. If the destination file already exists, it will be
overwritten. In the event of an error, the destination file will be
left untouched."""
# Doing this in the function instead of the module level ensures the
# error occurs when the function is called, rather than when the module
# is evaluated.
try:
import pypandoc
except ImportError:
# Don't give up right away; first try to install the python module.
os.system("pip install pypandoc")
import pypandoc
# Set our destination path to a default, if necessary
destination = destination or (os.path.splitext(source)[0] + '.rst')
# Likewise for the backup directory
backup_dir = backup_dir or os.path.join(os.path.dirname(destination),
'bak')
bak_name = (os.path.basename(destination) +
time.strftime('.%Y%m%d%H%M%S.bak'))
bak_path = os.path.join(backup_dir, bak_name)
# If there's already a file at the destination path, move it out of the
# way, but don't delete it.
if os.path.isfile(destination):
if not os.path.isdir(os.path.dirname(bak_path)):
os.mkdir(os.path.dirname(bak_path))
os.rename(destination, bak_path)
try:
# Try to convert the file.
pypandoc.convert(
source,
'rst',
format='md',
outputfile=destination
)
except:
# If for any reason the conversion fails, try to put things back
# like we found them.
if os.path.isfile(destination):
os.remove(destination)
if os.path.isfile(bak_path):
os.rename(bak_path, destination)
raise |
def total_variation(arr):
'''
If arr is a 2D array (N X M), assumes that arr is a spectrogram with time along axis=0.
Calculates the 1D total variation in time for each frequency and returns an array
of size M.
If arr is a 1D array, calculates total variation and returns a scalar.
Sum ( Abs(arr_i+1,j - arr_ij) )
If arr is a 2D array, it's common to take the mean of the resulting M-sized array
to calculate a scalar feature.
'''
return np.sum(np.abs(np.diff(arr, axis=0)), axis=0) | If arr is a 2D array (N X M), assumes that arr is a spectrogram with time along axis=0.
Calculates the 1D total variation in time for each frequency and returns an array
of size M.
If arr is a 1D array, calculates total variation and returns a scalar.
Sum ( Abs(arr_i+1,j - arr_ij) )
If arr is a 2D array, it's common to take the mean of the resulting M-sized array
to calculate a scalar feature. | Below is the the instruction that describes the task:
### Input:
If arr is a 2D array (N X M), assumes that arr is a spectrogram with time along axis=0.
Calculates the 1D total variation in time for each frequency and returns an array
of size M.
If arr is a 1D array, calculates total variation and returns a scalar.
Sum ( Abs(arr_i+1,j - arr_ij) )
If arr is a 2D array, it's common to take the mean of the resulting M-sized array
to calculate a scalar feature.
### Response:
def total_variation(arr):
'''
If arr is a 2D array (N X M), assumes that arr is a spectrogram with time along axis=0.
Calculates the 1D total variation in time for each frequency and returns an array
of size M.
If arr is a 1D array, calculates total variation and returns a scalar.
Sum ( Abs(arr_i+1,j - arr_ij) )
If arr is a 2D array, it's common to take the mean of the resulting M-sized array
to calculate a scalar feature.
'''
return np.sum(np.abs(np.diff(arr, axis=0)), axis=0) |
def passes(self):
"""
Returns a list structure of the appended passes and its options.
Returns (list): The appended passes.
"""
ret = []
for pass_ in self.working_list:
ret.append(pass_.dump_passes())
return ret | Returns a list structure of the appended passes and its options.
Returns (list): The appended passes. | Below is the the instruction that describes the task:
### Input:
Returns a list structure of the appended passes and its options.
Returns (list): The appended passes.
### Response:
def passes(self):
"""
Returns a list structure of the appended passes and its options.
Returns (list): The appended passes.
"""
ret = []
for pass_ in self.working_list:
ret.append(pass_.dump_passes())
return ret |
def jd_to_datetime(jd, returniso=False):
'''This converts a UTC JD to a Python `datetime` object or ISO date string.
Parameters
----------
jd : float
The Julian date measured at UTC.
returniso : bool
If False, returns a naive Python `datetime` object corresponding to
`jd`. If True, returns the ISO format string corresponding to the date
and time at UTC from `jd`.
Returns
-------
datetime or str
Depending on the value of `returniso`.
'''
tt = astime.Time(jd, format='jd', scale='utc')
if returniso:
return tt.iso
else:
return tt.datetime | This converts a UTC JD to a Python `datetime` object or ISO date string.
Parameters
----------
jd : float
The Julian date measured at UTC.
returniso : bool
If False, returns a naive Python `datetime` object corresponding to
`jd`. If True, returns the ISO format string corresponding to the date
and time at UTC from `jd`.
Returns
-------
datetime or str
Depending on the value of `returniso`. | Below is the the instruction that describes the task:
### Input:
This converts a UTC JD to a Python `datetime` object or ISO date string.
Parameters
----------
jd : float
The Julian date measured at UTC.
returniso : bool
If False, returns a naive Python `datetime` object corresponding to
`jd`. If True, returns the ISO format string corresponding to the date
and time at UTC from `jd`.
Returns
-------
datetime or str
Depending on the value of `returniso`.
### Response:
def jd_to_datetime(jd, returniso=False):
'''This converts a UTC JD to a Python `datetime` object or ISO date string.
Parameters
----------
jd : float
The Julian date measured at UTC.
returniso : bool
If False, returns a naive Python `datetime` object corresponding to
`jd`. If True, returns the ISO format string corresponding to the date
and time at UTC from `jd`.
Returns
-------
datetime or str
Depending on the value of `returniso`.
'''
tt = astime.Time(jd, format='jd', scale='utc')
if returniso:
return tt.iso
else:
return tt.datetime |
def class_dict_to_specs(mcs, class_dict):
"""Takes a class `__dict__` and returns `HeronComponentSpec` entries"""
specs = {}
for name, spec in class_dict.items():
if isinstance(spec, HeronComponentSpec):
# Use the variable name as the specification name.
if spec.name is None:
spec.name = name
if spec.name in specs:
raise ValueError("Duplicate component name: %s" % spec.name)
else:
specs[spec.name] = spec
return specs | Takes a class `__dict__` and returns `HeronComponentSpec` entries | Below is the the instruction that describes the task:
### Input:
Takes a class `__dict__` and returns `HeronComponentSpec` entries
### Response:
def class_dict_to_specs(mcs, class_dict):
"""Takes a class `__dict__` and returns `HeronComponentSpec` entries"""
specs = {}
for name, spec in class_dict.items():
if isinstance(spec, HeronComponentSpec):
# Use the variable name as the specification name.
if spec.name is None:
spec.name = name
if spec.name in specs:
raise ValueError("Duplicate component name: %s" % spec.name)
else:
specs[spec.name] = spec
return specs |
def _release(level):
"""TODO: we should make sure that we are on master release"""
version, comment = _new_version(level)
if version is not None:
run(['git',
'commit',
str(VER_PATH.relative_to(BASE_PATH)),
str(CHANGES_PATH.relative_to(BASE_PATH)),
'--amend',
'--no-edit',
])
run(['git',
'tag',
'-a',
'v' + version,
'-m',
'"' + comment + '"',
])
run(['git',
'push',
'origin',
'--tags',
])
run(['git',
'push',
'origin',
'master',
'-f',
]) | TODO: we should make sure that we are on master release | Below is the the instruction that describes the task:
### Input:
TODO: we should make sure that we are on master release
### Response:
def _release(level):
"""TODO: we should make sure that we are on master release"""
version, comment = _new_version(level)
if version is not None:
run(['git',
'commit',
str(VER_PATH.relative_to(BASE_PATH)),
str(CHANGES_PATH.relative_to(BASE_PATH)),
'--amend',
'--no-edit',
])
run(['git',
'tag',
'-a',
'v' + version,
'-m',
'"' + comment + '"',
])
run(['git',
'push',
'origin',
'--tags',
])
run(['git',
'push',
'origin',
'master',
'-f',
]) |
def pad(data, padwidth, value=0.0):
"""
Pad an array with a specific value.
Parameters
----------
data : ndarray
Numpy array of any dimension and type.
padwidth : int or tuple
If int, it will pad using this amount at the beginning and end of all
dimensions. If it is a tuple (of same length as `ndim`), then the
padding amount will be specified per axis.
value : data.dtype
The value with which to pad. Default is ``0.0``.
See also
--------
pad_to_size, pad_repeat_border, pad_repeat_border_corner
Examples
--------
>>> import deepdish as dd
>>> import numpy as np
Pad an array with zeros.
>>> x = np.ones((3, 3))
>>> dd.util.pad(x, (1, 2), value=0.0)
array([[ 0., 0., 0., 0., 0., 0., 0.],
[ 0., 0., 1., 1., 1., 0., 0.],
[ 0., 0., 1., 1., 1., 0., 0.],
[ 0., 0., 1., 1., 1., 0., 0.],
[ 0., 0., 0., 0., 0., 0., 0.]])
"""
data = np.asarray(data)
shape = data.shape
if isinstance(padwidth, int):
padwidth = (padwidth,)*len(shape)
padded_shape = tuple(map(lambda ix: ix[1]+padwidth[ix[0]]*2,
enumerate(shape)))
new_data = np.empty(padded_shape, dtype=data.dtype)
new_data[..., :] = value
new_data[[slice(w, -w) if w > 0 else slice(None) for w in padwidth]] = data
return new_data | Pad an array with a specific value.
Parameters
----------
data : ndarray
Numpy array of any dimension and type.
padwidth : int or tuple
If int, it will pad using this amount at the beginning and end of all
dimensions. If it is a tuple (of same length as `ndim`), then the
padding amount will be specified per axis.
value : data.dtype
The value with which to pad. Default is ``0.0``.
See also
--------
pad_to_size, pad_repeat_border, pad_repeat_border_corner
Examples
--------
>>> import deepdish as dd
>>> import numpy as np
Pad an array with zeros.
>>> x = np.ones((3, 3))
>>> dd.util.pad(x, (1, 2), value=0.0)
array([[ 0., 0., 0., 0., 0., 0., 0.],
[ 0., 0., 1., 1., 1., 0., 0.],
[ 0., 0., 1., 1., 1., 0., 0.],
[ 0., 0., 1., 1., 1., 0., 0.],
[ 0., 0., 0., 0., 0., 0., 0.]]) | Below is the the instruction that describes the task:
### Input:
Pad an array with a specific value.
Parameters
----------
data : ndarray
Numpy array of any dimension and type.
padwidth : int or tuple
If int, it will pad using this amount at the beginning and end of all
dimensions. If it is a tuple (of same length as `ndim`), then the
padding amount will be specified per axis.
value : data.dtype
The value with which to pad. Default is ``0.0``.
See also
--------
pad_to_size, pad_repeat_border, pad_repeat_border_corner
Examples
--------
>>> import deepdish as dd
>>> import numpy as np
Pad an array with zeros.
>>> x = np.ones((3, 3))
>>> dd.util.pad(x, (1, 2), value=0.0)
array([[ 0., 0., 0., 0., 0., 0., 0.],
[ 0., 0., 1., 1., 1., 0., 0.],
[ 0., 0., 1., 1., 1., 0., 0.],
[ 0., 0., 1., 1., 1., 0., 0.],
[ 0., 0., 0., 0., 0., 0., 0.]])
### Response:
def pad(data, padwidth, value=0.0):
"""
Pad an array with a specific value.
Parameters
----------
data : ndarray
Numpy array of any dimension and type.
padwidth : int or tuple
If int, it will pad using this amount at the beginning and end of all
dimensions. If it is a tuple (of same length as `ndim`), then the
padding amount will be specified per axis.
value : data.dtype
The value with which to pad. Default is ``0.0``.
See also
--------
pad_to_size, pad_repeat_border, pad_repeat_border_corner
Examples
--------
>>> import deepdish as dd
>>> import numpy as np
Pad an array with zeros.
>>> x = np.ones((3, 3))
>>> dd.util.pad(x, (1, 2), value=0.0)
array([[ 0., 0., 0., 0., 0., 0., 0.],
[ 0., 0., 1., 1., 1., 0., 0.],
[ 0., 0., 1., 1., 1., 0., 0.],
[ 0., 0., 1., 1., 1., 0., 0.],
[ 0., 0., 0., 0., 0., 0., 0.]])
"""
data = np.asarray(data)
shape = data.shape
if isinstance(padwidth, int):
padwidth = (padwidth,)*len(shape)
padded_shape = tuple(map(lambda ix: ix[1]+padwidth[ix[0]]*2,
enumerate(shape)))
new_data = np.empty(padded_shape, dtype=data.dtype)
new_data[..., :] = value
new_data[[slice(w, -w) if w > 0 else slice(None) for w in padwidth]] = data
return new_data |
def describe_events(SourceIdentifier=None, SourceType=None, StartTime=None, EndTime=None, Duration=None, EventCategories=None, Filters=None, MaxRecords=None, Marker=None):
"""
Lists events for a given source identifier and source type. You can also specify a start and end time. For more information on AWS DMS events, see Working with Events and Notifications .
See also: AWS API Documentation
:example: response = client.describe_events(
SourceIdentifier='string',
SourceType='replication-instance',
StartTime=datetime(2015, 1, 1),
EndTime=datetime(2015, 1, 1),
Duration=123,
EventCategories=[
'string',
],
Filters=[
{
'Name': 'string',
'Values': [
'string',
]
},
],
MaxRecords=123,
Marker='string'
)
:type SourceIdentifier: string
:param SourceIdentifier: The identifier of the event source. An identifier must begin with a letter and must contain only ASCII letters, digits, and hyphens. It cannot end with a hyphen or contain two consecutive hyphens.
:type SourceType: string
:param SourceType: The type of AWS DMS resource that generates events.
Valid values: replication-instance | migration-task
:type StartTime: datetime
:param StartTime: The start time for the events to be listed.
:type EndTime: datetime
:param EndTime: The end time for the events to be listed.
:type Duration: integer
:param Duration: The duration of the events to be listed.
:type EventCategories: list
:param EventCategories: A list of event categories for a source type that you want to subscribe to.
(string) --
:type Filters: list
:param Filters: Filters applied to the action.
(dict) --
Name (string) -- [REQUIRED]The name of the filter.
Values (list) -- [REQUIRED]The filter value.
(string) --
:type MaxRecords: integer
:param MaxRecords: The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.
Default: 100
Constraints: Minimum 20, maximum 100.
:type Marker: string
:param Marker: An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords .
:rtype: dict
:return: {
'Marker': 'string',
'Events': [
{
'SourceIdentifier': 'string',
'SourceType': 'replication-instance',
'Message': 'string',
'EventCategories': [
'string',
],
'Date': datetime(2015, 1, 1)
},
]
}
:returns:
(string) --
"""
pass | Lists events for a given source identifier and source type. You can also specify a start and end time. For more information on AWS DMS events, see Working with Events and Notifications .
See also: AWS API Documentation
:example: response = client.describe_events(
SourceIdentifier='string',
SourceType='replication-instance',
StartTime=datetime(2015, 1, 1),
EndTime=datetime(2015, 1, 1),
Duration=123,
EventCategories=[
'string',
],
Filters=[
{
'Name': 'string',
'Values': [
'string',
]
},
],
MaxRecords=123,
Marker='string'
)
:type SourceIdentifier: string
:param SourceIdentifier: The identifier of the event source. An identifier must begin with a letter and must contain only ASCII letters, digits, and hyphens. It cannot end with a hyphen or contain two consecutive hyphens.
:type SourceType: string
:param SourceType: The type of AWS DMS resource that generates events.
Valid values: replication-instance | migration-task
:type StartTime: datetime
:param StartTime: The start time for the events to be listed.
:type EndTime: datetime
:param EndTime: The end time for the events to be listed.
:type Duration: integer
:param Duration: The duration of the events to be listed.
:type EventCategories: list
:param EventCategories: A list of event categories for a source type that you want to subscribe to.
(string) --
:type Filters: list
:param Filters: Filters applied to the action.
(dict) --
Name (string) -- [REQUIRED]The name of the filter.
Values (list) -- [REQUIRED]The filter value.
(string) --
:type MaxRecords: integer
:param MaxRecords: The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.
Default: 100
Constraints: Minimum 20, maximum 100.
:type Marker: string
:param Marker: An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords .
:rtype: dict
:return: {
'Marker': 'string',
'Events': [
{
'SourceIdentifier': 'string',
'SourceType': 'replication-instance',
'Message': 'string',
'EventCategories': [
'string',
],
'Date': datetime(2015, 1, 1)
},
]
}
:returns:
(string) -- | Below is the the instruction that describes the task:
### Input:
Lists events for a given source identifier and source type. You can also specify a start and end time. For more information on AWS DMS events, see Working with Events and Notifications .
See also: AWS API Documentation
:example: response = client.describe_events(
SourceIdentifier='string',
SourceType='replication-instance',
StartTime=datetime(2015, 1, 1),
EndTime=datetime(2015, 1, 1),
Duration=123,
EventCategories=[
'string',
],
Filters=[
{
'Name': 'string',
'Values': [
'string',
]
},
],
MaxRecords=123,
Marker='string'
)
:type SourceIdentifier: string
:param SourceIdentifier: The identifier of the event source. An identifier must begin with a letter and must contain only ASCII letters, digits, and hyphens. It cannot end with a hyphen or contain two consecutive hyphens.
:type SourceType: string
:param SourceType: The type of AWS DMS resource that generates events.
Valid values: replication-instance | migration-task
:type StartTime: datetime
:param StartTime: The start time for the events to be listed.
:type EndTime: datetime
:param EndTime: The end time for the events to be listed.
:type Duration: integer
:param Duration: The duration of the events to be listed.
:type EventCategories: list
:param EventCategories: A list of event categories for a source type that you want to subscribe to.
(string) --
:type Filters: list
:param Filters: Filters applied to the action.
(dict) --
Name (string) -- [REQUIRED]The name of the filter.
Values (list) -- [REQUIRED]The filter value.
(string) --
:type MaxRecords: integer
:param MaxRecords: The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.
Default: 100
Constraints: Minimum 20, maximum 100.
:type Marker: string
:param Marker: An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords .
:rtype: dict
:return: {
'Marker': 'string',
'Events': [
{
'SourceIdentifier': 'string',
'SourceType': 'replication-instance',
'Message': 'string',
'EventCategories': [
'string',
],
'Date': datetime(2015, 1, 1)
},
]
}
:returns:
(string) --
### Response:
def describe_events(SourceIdentifier=None, SourceType=None, StartTime=None, EndTime=None, Duration=None, EventCategories=None, Filters=None, MaxRecords=None, Marker=None):
"""
Lists events for a given source identifier and source type. You can also specify a start and end time. For more information on AWS DMS events, see Working with Events and Notifications .
See also: AWS API Documentation
:example: response = client.describe_events(
SourceIdentifier='string',
SourceType='replication-instance',
StartTime=datetime(2015, 1, 1),
EndTime=datetime(2015, 1, 1),
Duration=123,
EventCategories=[
'string',
],
Filters=[
{
'Name': 'string',
'Values': [
'string',
]
},
],
MaxRecords=123,
Marker='string'
)
:type SourceIdentifier: string
:param SourceIdentifier: The identifier of the event source. An identifier must begin with a letter and must contain only ASCII letters, digits, and hyphens. It cannot end with a hyphen or contain two consecutive hyphens.
:type SourceType: string
:param SourceType: The type of AWS DMS resource that generates events.
Valid values: replication-instance | migration-task
:type StartTime: datetime
:param StartTime: The start time for the events to be listed.
:type EndTime: datetime
:param EndTime: The end time for the events to be listed.
:type Duration: integer
:param Duration: The duration of the events to be listed.
:type EventCategories: list
:param EventCategories: A list of event categories for a source type that you want to subscribe to.
(string) --
:type Filters: list
:param Filters: Filters applied to the action.
(dict) --
Name (string) -- [REQUIRED]The name of the filter.
Values (list) -- [REQUIRED]The filter value.
(string) --
:type MaxRecords: integer
:param MaxRecords: The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.
Default: 100
Constraints: Minimum 20, maximum 100.
:type Marker: string
:param Marker: An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords .
:rtype: dict
:return: {
'Marker': 'string',
'Events': [
{
'SourceIdentifier': 'string',
'SourceType': 'replication-instance',
'Message': 'string',
'EventCategories': [
'string',
],
'Date': datetime(2015, 1, 1)
},
]
}
:returns:
(string) --
"""
pass |
def __connect(host, port, username, password, private_key):
"""
Establish remote connection
:param host: Hostname or IP address to connect to
:param port: Port number to use for SSH
:param username: Username credentials for SSH access
:param password: Password credentials for SSH access (or private key passphrase)
:param private_key: Private key to bypass clear text password
:return: Paramiko SSH client instance if connection was established
:raises Exception if connection was unsuccessful
"""
# Initialize the SSH connection
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
if private_key is not None and password is not None:
private_key = paramiko.RSAKey.from_private_key_file(private_key, password)
elif private_key is not None:
private_key = paramiko.RSAKey.from_private_key_file(private_key, password)
# Establish the SSH connection
try:
ssh.connect(host, port, username, password, private_key)
except Exception as e:
raise e
# Return the established SSH connection
return ssh | Establish remote connection
:param host: Hostname or IP address to connect to
:param port: Port number to use for SSH
:param username: Username credentials for SSH access
:param password: Password credentials for SSH access (or private key passphrase)
:param private_key: Private key to bypass clear text password
:return: Paramiko SSH client instance if connection was established
:raises Exception if connection was unsuccessful | Below is the instruction that describes the task:
### Input:
Establish remote connection
:param host: Hostname or IP address to connect to
:param port: Port number to use for SSH
:param username: Username credentials for SSH access
:param password: Password credentials for SSH access (or private key passphrase)
:param private_key: Private key to bypass clear text password
:return: Paramiko SSH client instance if connection was established
:raises Exception if connection was unsuccessful
### Response:
def __connect(host, port, username, password, private_key):
    """
    Establish a remote SSH connection.

    :param host: Hostname or IP address to connect to
    :param port: Port number to use for SSH
    :param username: Username credentials for SSH access
    :param password: Password credentials for SSH access (or private key passphrase)
    :param private_key: Path to a private key file to bypass clear text password
    :return: Paramiko SSH client instance if connection was established
    :raises Exception: if connection was unsuccessful
    """
    # Initialize the SSH client and automatically trust unknown host keys.
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    # Load the private key if one was supplied.  ``password`` doubles as the
    # key passphrase and may be None for an unencrypted key, so a single
    # branch covers both cases (the original duplicated this call in an
    # identical ``elif`` branch).
    if private_key is not None:
        private_key = paramiko.RSAKey.from_private_key_file(private_key, password)
    # Establish the SSH connection.  Paramiko raises on failure; we let the
    # exception propagate to the caller (the previous ``except ... raise e``
    # was a no-op handler).
    ssh.connect(host, port=port, username=username, password=password, pkey=private_key)
    return ssh
def add_parent_commands(self, cmd_path, help=None):
"""
Create parent command object in cmd tree then return
the last parent command object.
:rtype: dict
"""
existed_cmd_end_index = self.index_in_tree(cmd_path)
new_path, existed_path = self._get_paths(
cmd_path,
existed_cmd_end_index,
)
parent_node = self.get_cmd_by_path(existed_path)
last_one_index = 1
new_path_len = len(new_path)
_kwargs = {}
for cmd_name in new_path:
if last_one_index >= new_path_len:
_kwargs['help'] = help
sub_cmd = parent_node['cmd'].add_cmd(
cmd_name, **_kwargs
)
parent_node = _mk_cmd_node(cmd_name, sub_cmd)
self._add_node(
parent_node,
existed_path + new_path[:new_path.index(cmd_name)]
)
last_one_index += 1
return parent_node | Create parent command object in cmd tree then return
the last parent command object.
:rtype: dict | Below is the instruction that describes the task:
### Input:
Create parent command object in cmd tree then return
the last parent command object.
:rtype: dict
### Response:
def add_parent_commands(self, cmd_path, help=None):
    """
    Create parent command objects in the cmd tree, then return the
    last parent command node created.

    :rtype: dict
    """
    matched_end_index = self.index_in_tree(cmd_path)
    new_path, existed_path = self._get_paths(cmd_path, matched_end_index)
    parent_node = self.get_cmd_by_path(existed_path)
    total = len(new_path)
    for position, cmd_name in enumerate(new_path, start=1):
        # Only the final command in the chain receives the help text.
        cmd_kwargs = {'help': help} if position >= total else {}
        sub_cmd = parent_node['cmd'].add_cmd(cmd_name, **cmd_kwargs)
        parent_node = _mk_cmd_node(cmd_name, sub_cmd)
        self._add_node(
            parent_node,
            existed_path + new_path[:new_path.index(cmd_name)],
        )
    return parent_node
def stash_calibration(self, attenuations, freqs, frange, calname):
"""Save it for later"""
self.calibration_vector = attenuations
self.calibration_freqs = freqs
self.calibration_frange = frange
self.calname = calname | Save it for later | Below is the instruction that describes the task:
### Input:
Save it for later
### Response:
def stash_calibration(self, attenuations, freqs, frange, calname):
    """Cache the supplied calibration data on this object for later use."""
    # Store each piece of calibration state under its attribute name.
    for attr_name, value in (
        ('calibration_vector', attenuations),
        ('calibration_freqs', freqs),
        ('calibration_frange', frange),
        ('calname', calname),
    ):
        setattr(self, attr_name, value)
def stats(self, key=None):
"""
Return server stats.
:param key: Optional if you want status from a key.
:type key: six.string_types
:return: A dict with server stats
:rtype: dict
"""
# TODO: Stats with key is not working.
returns = {}
for server in self.servers:
returns[server.server] = server.stats(key)
return returns | Return server stats.
:param key: Optional if you want status from a key.
:type key: six.string_types
:return: A dict with server stats
:rtype: dict | Below is the instruction that describes the task:
### Input:
Return server stats.
:param key: Optional if you want status from a key.
:type key: six.string_types
:return: A dict with server stats
:rtype: dict
### Response:
def stats(self, key=None):
    """
    Collect stats from every configured server.

    :param key: Optional key to request status for.
    :type key: six.string_types
    :return: A dict mapping each server's address to its stats
    :rtype: dict
    """
    # TODO: Stats with key is not working.
    return {server.server: server.stats(key) for server in self.servers}
def viterbi_decoder(self,x,metric_type='soft',quant_level=3):
"""
A method which performs Viterbi decoding of noisy bit stream,
taking as input soft bit values centered on +/-1 and returning
hard decision 0/1 bits.
Parameters
----------
x: Received noisy bit values centered on +/-1 at one sample per bit
metric_type:
'hard' - Hard decision metric. Expects binary or 0/1 input values.
'unquant' - unquantized soft decision decoding. Expects +/-1
input values.
'soft' - soft decision decoding.
quant_level: The quantization level for soft decoding. Expected
input values between 0 and 2^quant_level-1. 0 represents the most
confident 0 and 2^quant_level-1 represents the most confident 1.
Only used for 'soft' metric type.
Returns
-------
y: Decoded 0/1 bit stream
Examples
--------
>>> import numpy as np
>>> from numpy.random import randint
>>> import sk_dsp_comm.fec_conv as fec
>>> import sk_dsp_comm.digitalcom as dc
>>> import matplotlib.pyplot as plt
>>> # Soft decision rate 1/2 simulation
>>> N_bits_per_frame = 10000
>>> EbN0 = 4
>>> total_bit_errors = 0
>>> total_bit_count = 0
>>> cc1 = fec.fec_conv(('11101','10011'),25)
>>> # Encode with shift register starting state of '0000'
>>> state = '0000'
>>> while total_bit_errors < 100:
>>> # Create 100000 random 0/1 bits
>>> x = randint(0,2,N_bits_per_frame)
>>> y,state = cc1.conv_encoder(x,state)
>>> # Add channel noise to bits, include antipodal level shift to [-1,1]
>>> yn_soft = dc.cpx_AWGN(2*y-1,EbN0-3,1) # Channel SNR is 3 dB less for rate 1/2
>>> yn_hard = ((np.sign(yn_soft.real)+1)/2).astype(int)
>>> z = cc1.viterbi_decoder(yn_hard,'hard')
>>> # Count bit errors
>>> bit_count, bit_errors = dc.bit_errors(x,z)
>>> total_bit_errors += bit_errors
>>> total_bit_count += bit_count
>>> print('Bits Received = %d, Bit errors = %d, BEP = %1.2e' %\
(total_bit_count, total_bit_errors,\
total_bit_errors/total_bit_count))
>>> print('*****************************************************')
>>> print('Bits Received = %d, Bit errors = %d, BEP = %1.2e' %\
(total_bit_count, total_bit_errors,\
total_bit_errors/total_bit_count))
Rate 1/2 Object
kmax = 0, taumax = 0
Bits Received = 9976, Bit errors = 77, BEP = 7.72e-03
kmax = 0, taumax = 0
Bits Received = 19952, Bit errors = 175, BEP = 8.77e-03
*****************************************************
Bits Received = 19952, Bit errors = 175, BEP = 8.77e-03
>>> # Consider the trellis traceback after the sim completes
>>> cc1.traceback_plot()
>>> plt.show()
>>> # Compare a collection of simulation results with soft decision
>>> # bounds
>>> SNRdB = np.arange(0,12,.1)
>>> Pb_uc = fec.conv_Pb_bound(1/3,7,[4, 12, 20, 72, 225],SNRdB,2)
>>> Pb_s_third_3 = fec.conv_Pb_bound(1/3,8,[3, 0, 15],SNRdB,1)
>>> Pb_s_third_4 = fec.conv_Pb_bound(1/3,10,[6, 0, 6, 0],SNRdB,1)
>>> Pb_s_third_5 = fec.conv_Pb_bound(1/3,12,[12, 0, 12, 0, 56],SNRdB,1)
>>> Pb_s_third_6 = fec.conv_Pb_bound(1/3,13,[1, 8, 26, 20, 19, 62],SNRdB,1)
>>> Pb_s_third_7 = fec.conv_Pb_bound(1/3,14,[1, 0, 20, 0, 53, 0, 184],SNRdB,1)
>>> Pb_s_third_8 = fec.conv_Pb_bound(1/3,16,[1, 0, 24, 0, 113, 0, 287, 0],SNRdB,1)
>>> Pb_s_half = fec.conv_Pb_bound(1/2,7,[4, 12, 20, 72, 225],SNRdB,1)
>>> plt.figure(figsize=(5,5))
>>> plt.semilogy(SNRdB,Pb_uc)
>>> plt.semilogy(SNRdB,Pb_s_third_3,'--')
>>> plt.semilogy(SNRdB,Pb_s_third_4,'--')
>>> plt.semilogy(SNRdB,Pb_s_third_5,'g')
>>> plt.semilogy(SNRdB,Pb_s_third_6,'--')
>>> plt.semilogy(SNRdB,Pb_s_third_7,'--')
>>> plt.semilogy(SNRdB,Pb_s_third_8,'--')
>>> plt.semilogy([0,1,2,3,4,5],[9.08e-02,2.73e-02,6.52e-03,\
8.94e-04,8.54e-05,5e-6],'gs')
>>> plt.axis([0,12,1e-7,1e0])
>>> plt.title(r'Soft Decision Rate 1/2 Coding Measurements')
>>> plt.xlabel(r'$E_b/N_0$ (dB)')
>>> plt.ylabel(r'Symbol Error Probability')
>>> plt.legend(('Uncoded BPSK','R=1/3, K=3, Soft',\
'R=1/3, K=4, Soft','R=1/3, K=5, Soft',\
'R=1/3, K=6, Soft','R=1/3, K=7, Soft',\
'R=1/3, K=8, Soft','R=1/3, K=5, Sim', \
'Simulation'),loc='upper right')
>>> plt.grid();
>>> plt.show()
>>> # Hard decision rate 1/3 simulation
>>> N_bits_per_frame = 10000
>>> EbN0 = 3
>>> total_bit_errors = 0
>>> total_bit_count = 0
>>> cc2 = fec.fec_conv(('11111','11011','10101'),25)
>>> # Encode with shift register starting state of '0000'
>>> state = '0000'
>>> while total_bit_errors < 100:
>>> # Create 100000 random 0/1 bits
>>> x = randint(0,2,N_bits_per_frame)
>>> y,state = cc2.conv_encoder(x,state)
>>> # Add channel noise to bits, include antipodal level shift to [-1,1]
>>> yn_soft = dc.cpx_AWGN(2*y-1,EbN0-10*np.log10(3),1) # Channel SNR is 10*log10(3) dB less
>>> yn_hard = ((np.sign(yn_soft.real)+1)/2).astype(int)
>>> z = cc2.viterbi_decoder(yn_hard.real,'hard')
>>> # Count bit errors
>>> bit_count, bit_errors = dc.bit_errors(x,z)
>>> total_bit_errors += bit_errors
>>> total_bit_count += bit_count
>>> print('Bits Received = %d, Bit errors = %d, BEP = %1.2e' %\
(total_bit_count, total_bit_errors,\
total_bit_errors/total_bit_count))
>>> print('*****************************************************')
>>> print('Bits Received = %d, Bit errors = %d, BEP = %1.2e' %\
(total_bit_count, total_bit_errors,\
total_bit_errors/total_bit_count))
Rate 1/3 Object
kmax = 0, taumax = 0
Bits Received = 9976, Bit errors = 251, BEP = 2.52e-02
*****************************************************
Bits Received = 9976, Bit errors = 251, BEP = 2.52e-02
>>> # Compare a collection of simulation results with hard decision
>>> # bounds
>>> SNRdB = np.arange(0,12,.1)
>>> Pb_uc = fec.conv_Pb_bound(1/3,7,[4, 12, 20, 72, 225],SNRdB,2)
>>> Pb_s_third_3_hard = fec.conv_Pb_bound(1/3,8,[3, 0, 15, 0, 58, 0, 201, 0],SNRdB,0)
>>> Pb_s_third_5_hard = fec.conv_Pb_bound(1/3,12,[12, 0, 12, 0, 56, 0, 320, 0],SNRdB,0)
>>> Pb_s_third_7_hard = fec.conv_Pb_bound(1/3,14,[1, 0, 20, 0, 53, 0, 184],SNRdB,0)
>>> Pb_s_third_5_hard_sim = np.array([8.94e-04,1.11e-04,8.73e-06])
>>> plt.figure(figsize=(5,5))
>>> plt.semilogy(SNRdB,Pb_uc)
>>> plt.semilogy(SNRdB,Pb_s_third_3_hard,'r--')
>>> plt.semilogy(SNRdB,Pb_s_third_5_hard,'g--')
>>> plt.semilogy(SNRdB,Pb_s_third_7_hard,'k--')
>>> plt.semilogy(np.array([5,6,7]),Pb_s_third_5_hard_sim,'sg')
>>> plt.axis([0,12,1e-7,1e0])
>>> plt.title(r'Hard Decision Rate 1/3 Coding Measurements')
>>> plt.xlabel(r'$E_b/N_0$ (dB)')
>>> plt.ylabel(r'Symbol Error Probability')
>>> plt.legend(('Uncoded BPSK','R=1/3, K=3, Hard',\
'R=1/3, K=5, Hard', 'R=1/3, K=7, Hard',\
),loc='upper right')
>>> plt.grid();
>>> plt.show()
>>> # Show the traceback for the rate 1/3 hard decision case
>>> cc2.traceback_plot()
"""
if metric_type == 'hard':
# If hard decision must have 0/1 integers for input else float
if np.issubdtype(x.dtype, np.integer):
if x.max() > 1 or x.min() < 0:
raise ValueError('Integer bit values must be 0 or 1')
else:
raise ValueError('Decoder inputs must be integers on [0,1] for hard decisions')
# Initialize cumulative metrics array
cm_present = np.zeros((self.Nstates,1))
NS = len(x) # number of channel symbols to process;
# must be even for rate 1/2
# must be a multiple of 3 for rate 1/3
y = np.zeros(NS-self.decision_depth) # Decoded bit sequence
k = 0
symbolL = self.rate.denominator
# Calculate branch metrics and update traceback states and traceback bits
for n in range(0,NS,symbolL):
cm_past = self.paths.cumulative_metric[:,0]
tb_states_temp = self.paths.traceback_states[:,:-1].copy()
tb_bits_temp = self.paths.traceback_bits[:,:-1].copy()
for m in range(self.Nstates):
d1 = self.bm_calc(self.branches.bits1[m],
x[n:n+symbolL],metric_type,
quant_level)
d1 = d1 + cm_past[self.branches.states1[m]]
d2 = self.bm_calc(self.branches.bits2[m],
x[n:n+symbolL],metric_type,
quant_level)
d2 = d2 + cm_past[self.branches.states2[m]]
if d1 <= d2: # Find the survivor assuming minimum distance wins
cm_present[m] = d1
self.paths.traceback_states[m,:] = np.hstack((self.branches.states1[m],
tb_states_temp[int(self.branches.states1[m]),:]))
self.paths.traceback_bits[m,:] = np.hstack((self.branches.input1[m],
tb_bits_temp[int(self.branches.states1[m]),:]))
else:
cm_present[m] = d2
self.paths.traceback_states[m,:] = np.hstack((self.branches.states2[m],
tb_states_temp[int(self.branches.states2[m]),:]))
self.paths.traceback_bits[m,:] = np.hstack((self.branches.input2[m],
tb_bits_temp[int(self.branches.states2[m]),:]))
# Update cumulative metric history
self.paths.cumulative_metric = np.hstack((cm_present,
self.paths.cumulative_metric[:,:-1]))
# Obtain estimate of input bit sequence from the oldest bit in
# the traceback having the smallest (most likely) cumulative metric
min_metric = min(self.paths.cumulative_metric[:,0])
min_idx = np.where(self.paths.cumulative_metric[:,0] == min_metric)
if n >= symbolL*self.decision_depth-symbolL: # 2 since Rate = 1/2
y[k] = self.paths.traceback_bits[min_idx[0][0],-1]
k += 1
y = y[:k] # trim final length
return y | A method which performs Viterbi decoding of noisy bit stream,
taking as input soft bit values centered on +/-1 and returning
hard decision 0/1 bits.
Parameters
----------
x: Received noisy bit values centered on +/-1 at one sample per bit
metric_type:
'hard' - Hard decision metric. Expects binary or 0/1 input values.
'unquant' - unquantized soft decision decoding. Expects +/-1
input values.
'soft' - soft decision decoding.
quant_level: The quantization level for soft decoding. Expected
input values between 0 and 2^quant_level-1. 0 represents the most
confident 0 and 2^quant_level-1 represents the most confident 1.
Only used for 'soft' metric type.
Returns
-------
y: Decoded 0/1 bit stream
Examples
--------
>>> import numpy as np
>>> from numpy.random import randint
>>> import sk_dsp_comm.fec_conv as fec
>>> import sk_dsp_comm.digitalcom as dc
>>> import matplotlib.pyplot as plt
>>> # Soft decision rate 1/2 simulation
>>> N_bits_per_frame = 10000
>>> EbN0 = 4
>>> total_bit_errors = 0
>>> total_bit_count = 0
>>> cc1 = fec.fec_conv(('11101','10011'),25)
>>> # Encode with shift register starting state of '0000'
>>> state = '0000'
>>> while total_bit_errors < 100:
>>> # Create 100000 random 0/1 bits
>>> x = randint(0,2,N_bits_per_frame)
>>> y,state = cc1.conv_encoder(x,state)
>>> # Add channel noise to bits, include antipodal level shift to [-1,1]
>>> yn_soft = dc.cpx_AWGN(2*y-1,EbN0-3,1) # Channel SNR is 3 dB less for rate 1/2
>>> yn_hard = ((np.sign(yn_soft.real)+1)/2).astype(int)
>>> z = cc1.viterbi_decoder(yn_hard,'hard')
>>> # Count bit errors
>>> bit_count, bit_errors = dc.bit_errors(x,z)
>>> total_bit_errors += bit_errors
>>> total_bit_count += bit_count
>>> print('Bits Received = %d, Bit errors = %d, BEP = %1.2e' %\
(total_bit_count, total_bit_errors,\
total_bit_errors/total_bit_count))
>>> print('*****************************************************')
>>> print('Bits Received = %d, Bit errors = %d, BEP = %1.2e' %\
(total_bit_count, total_bit_errors,\
total_bit_errors/total_bit_count))
Rate 1/2 Object
kmax = 0, taumax = 0
Bits Received = 9976, Bit errors = 77, BEP = 7.72e-03
kmax = 0, taumax = 0
Bits Received = 19952, Bit errors = 175, BEP = 8.77e-03
*****************************************************
Bits Received = 19952, Bit errors = 175, BEP = 8.77e-03
>>> # Consider the trellis traceback after the sim completes
>>> cc1.traceback_plot()
>>> plt.show()
>>> # Compare a collection of simulation results with soft decision
>>> # bounds
>>> SNRdB = np.arange(0,12,.1)
>>> Pb_uc = fec.conv_Pb_bound(1/3,7,[4, 12, 20, 72, 225],SNRdB,2)
>>> Pb_s_third_3 = fec.conv_Pb_bound(1/3,8,[3, 0, 15],SNRdB,1)
>>> Pb_s_third_4 = fec.conv_Pb_bound(1/3,10,[6, 0, 6, 0],SNRdB,1)
>>> Pb_s_third_5 = fec.conv_Pb_bound(1/3,12,[12, 0, 12, 0, 56],SNRdB,1)
>>> Pb_s_third_6 = fec.conv_Pb_bound(1/3,13,[1, 8, 26, 20, 19, 62],SNRdB,1)
>>> Pb_s_third_7 = fec.conv_Pb_bound(1/3,14,[1, 0, 20, 0, 53, 0, 184],SNRdB,1)
>>> Pb_s_third_8 = fec.conv_Pb_bound(1/3,16,[1, 0, 24, 0, 113, 0, 287, 0],SNRdB,1)
>>> Pb_s_half = fec.conv_Pb_bound(1/2,7,[4, 12, 20, 72, 225],SNRdB,1)
>>> plt.figure(figsize=(5,5))
>>> plt.semilogy(SNRdB,Pb_uc)
>>> plt.semilogy(SNRdB,Pb_s_third_3,'--')
>>> plt.semilogy(SNRdB,Pb_s_third_4,'--')
>>> plt.semilogy(SNRdB,Pb_s_third_5,'g')
>>> plt.semilogy(SNRdB,Pb_s_third_6,'--')
>>> plt.semilogy(SNRdB,Pb_s_third_7,'--')
>>> plt.semilogy(SNRdB,Pb_s_third_8,'--')
>>> plt.semilogy([0,1,2,3,4,5],[9.08e-02,2.73e-02,6.52e-03,\
8.94e-04,8.54e-05,5e-6],'gs')
>>> plt.axis([0,12,1e-7,1e0])
>>> plt.title(r'Soft Decision Rate 1/2 Coding Measurements')
>>> plt.xlabel(r'$E_b/N_0$ (dB)')
>>> plt.ylabel(r'Symbol Error Probability')
>>> plt.legend(('Uncoded BPSK','R=1/3, K=3, Soft',\
'R=1/3, K=4, Soft','R=1/3, K=5, Soft',\
'R=1/3, K=6, Soft','R=1/3, K=7, Soft',\
'R=1/3, K=8, Soft','R=1/3, K=5, Sim', \
'Simulation'),loc='upper right')
>>> plt.grid();
>>> plt.show()
>>> # Hard decision rate 1/3 simulation
>>> N_bits_per_frame = 10000
>>> EbN0 = 3
>>> total_bit_errors = 0
>>> total_bit_count = 0
>>> cc2 = fec.fec_conv(('11111','11011','10101'),25)
>>> # Encode with shift register starting state of '0000'
>>> state = '0000'
>>> while total_bit_errors < 100:
>>> # Create 100000 random 0/1 bits
>>> x = randint(0,2,N_bits_per_frame)
>>> y,state = cc2.conv_encoder(x,state)
>>> # Add channel noise to bits, include antipodal level shift to [-1,1]
>>> yn_soft = dc.cpx_AWGN(2*y-1,EbN0-10*np.log10(3),1) # Channel SNR is 10*log10(3) dB less
>>> yn_hard = ((np.sign(yn_soft.real)+1)/2).astype(int)
>>> z = cc2.viterbi_decoder(yn_hard.real,'hard')
>>> # Count bit errors
>>> bit_count, bit_errors = dc.bit_errors(x,z)
>>> total_bit_errors += bit_errors
>>> total_bit_count += bit_count
>>> print('Bits Received = %d, Bit errors = %d, BEP = %1.2e' %\
(total_bit_count, total_bit_errors,\
total_bit_errors/total_bit_count))
>>> print('*****************************************************')
>>> print('Bits Received = %d, Bit errors = %d, BEP = %1.2e' %\
(total_bit_count, total_bit_errors,\
total_bit_errors/total_bit_count))
Rate 1/3 Object
kmax = 0, taumax = 0
Bits Received = 9976, Bit errors = 251, BEP = 2.52e-02
*****************************************************
Bits Received = 9976, Bit errors = 251, BEP = 2.52e-02
>>> # Compare a collection of simulation results with hard decision
>>> # bounds
>>> SNRdB = np.arange(0,12,.1)
>>> Pb_uc = fec.conv_Pb_bound(1/3,7,[4, 12, 20, 72, 225],SNRdB,2)
>>> Pb_s_third_3_hard = fec.conv_Pb_bound(1/3,8,[3, 0, 15, 0, 58, 0, 201, 0],SNRdB,0)
>>> Pb_s_third_5_hard = fec.conv_Pb_bound(1/3,12,[12, 0, 12, 0, 56, 0, 320, 0],SNRdB,0)
>>> Pb_s_third_7_hard = fec.conv_Pb_bound(1/3,14,[1, 0, 20, 0, 53, 0, 184],SNRdB,0)
>>> Pb_s_third_5_hard_sim = np.array([8.94e-04,1.11e-04,8.73e-06])
>>> plt.figure(figsize=(5,5))
>>> plt.semilogy(SNRdB,Pb_uc)
>>> plt.semilogy(SNRdB,Pb_s_third_3_hard,'r--')
>>> plt.semilogy(SNRdB,Pb_s_third_5_hard,'g--')
>>> plt.semilogy(SNRdB,Pb_s_third_7_hard,'k--')
>>> plt.semilogy(np.array([5,6,7]),Pb_s_third_5_hard_sim,'sg')
>>> plt.axis([0,12,1e-7,1e0])
>>> plt.title(r'Hard Decision Rate 1/3 Coding Measurements')
>>> plt.xlabel(r'$E_b/N_0$ (dB)')
>>> plt.ylabel(r'Symbol Error Probability')
>>> plt.legend(('Uncoded BPSK','R=1/3, K=3, Hard',\
'R=1/3, K=5, Hard', 'R=1/3, K=7, Hard',\
),loc='upper right')
>>> plt.grid();
>>> plt.show()
>>> # Show the traceback for the rate 1/3 hard decision case
>>> cc2.traceback_plot() | Below is the instruction that describes the task:
### Input:
A method which performs Viterbi decoding of noisy bit stream,
taking as input soft bit values centered on +/-1 and returning
hard decision 0/1 bits.
Parameters
----------
x: Received noisy bit values centered on +/-1 at one sample per bit
metric_type:
'hard' - Hard decision metric. Expects binary or 0/1 input values.
'unquant' - unquantized soft decision decoding. Expects +/-1
input values.
'soft' - soft decision decoding.
quant_level: The quantization level for soft decoding. Expected
input values between 0 and 2^quant_level-1. 0 represents the most
confident 0 and 2^quant_level-1 represents the most confident 1.
Only used for 'soft' metric type.
Returns
-------
y: Decoded 0/1 bit stream
Examples
--------
>>> import numpy as np
>>> from numpy.random import randint
>>> import sk_dsp_comm.fec_conv as fec
>>> import sk_dsp_comm.digitalcom as dc
>>> import matplotlib.pyplot as plt
>>> # Soft decision rate 1/2 simulation
>>> N_bits_per_frame = 10000
>>> EbN0 = 4
>>> total_bit_errors = 0
>>> total_bit_count = 0
>>> cc1 = fec.fec_conv(('11101','10011'),25)
>>> # Encode with shift register starting state of '0000'
>>> state = '0000'
>>> while total_bit_errors < 100:
>>> # Create 100000 random 0/1 bits
>>> x = randint(0,2,N_bits_per_frame)
>>> y,state = cc1.conv_encoder(x,state)
>>> # Add channel noise to bits, include antipodal level shift to [-1,1]
>>> yn_soft = dc.cpx_AWGN(2*y-1,EbN0-3,1) # Channel SNR is 3 dB less for rate 1/2
>>> yn_hard = ((np.sign(yn_soft.real)+1)/2).astype(int)
>>> z = cc1.viterbi_decoder(yn_hard,'hard')
>>> # Count bit errors
>>> bit_count, bit_errors = dc.bit_errors(x,z)
>>> total_bit_errors += bit_errors
>>> total_bit_count += bit_count
>>> print('Bits Received = %d, Bit errors = %d, BEP = %1.2e' %\
(total_bit_count, total_bit_errors,\
total_bit_errors/total_bit_count))
>>> print('*****************************************************')
>>> print('Bits Received = %d, Bit errors = %d, BEP = %1.2e' %\
(total_bit_count, total_bit_errors,\
total_bit_errors/total_bit_count))
Rate 1/2 Object
kmax = 0, taumax = 0
Bits Received = 9976, Bit errors = 77, BEP = 7.72e-03
kmax = 0, taumax = 0
Bits Received = 19952, Bit errors = 175, BEP = 8.77e-03
*****************************************************
Bits Received = 19952, Bit errors = 175, BEP = 8.77e-03
>>> # Consider the trellis traceback after the sim completes
>>> cc1.traceback_plot()
>>> plt.show()
>>> # Compare a collection of simulation results with soft decision
>>> # bounds
>>> SNRdB = np.arange(0,12,.1)
>>> Pb_uc = fec.conv_Pb_bound(1/3,7,[4, 12, 20, 72, 225],SNRdB,2)
>>> Pb_s_third_3 = fec.conv_Pb_bound(1/3,8,[3, 0, 15],SNRdB,1)
>>> Pb_s_third_4 = fec.conv_Pb_bound(1/3,10,[6, 0, 6, 0],SNRdB,1)
>>> Pb_s_third_5 = fec.conv_Pb_bound(1/3,12,[12, 0, 12, 0, 56],SNRdB,1)
>>> Pb_s_third_6 = fec.conv_Pb_bound(1/3,13,[1, 8, 26, 20, 19, 62],SNRdB,1)
>>> Pb_s_third_7 = fec.conv_Pb_bound(1/3,14,[1, 0, 20, 0, 53, 0, 184],SNRdB,1)
>>> Pb_s_third_8 = fec.conv_Pb_bound(1/3,16,[1, 0, 24, 0, 113, 0, 287, 0],SNRdB,1)
>>> Pb_s_half = fec.conv_Pb_bound(1/2,7,[4, 12, 20, 72, 225],SNRdB,1)
>>> plt.figure(figsize=(5,5))
>>> plt.semilogy(SNRdB,Pb_uc)
>>> plt.semilogy(SNRdB,Pb_s_third_3,'--')
>>> plt.semilogy(SNRdB,Pb_s_third_4,'--')
>>> plt.semilogy(SNRdB,Pb_s_third_5,'g')
>>> plt.semilogy(SNRdB,Pb_s_third_6,'--')
>>> plt.semilogy(SNRdB,Pb_s_third_7,'--')
>>> plt.semilogy(SNRdB,Pb_s_third_8,'--')
>>> plt.semilogy([0,1,2,3,4,5],[9.08e-02,2.73e-02,6.52e-03,\
8.94e-04,8.54e-05,5e-6],'gs')
>>> plt.axis([0,12,1e-7,1e0])
>>> plt.title(r'Soft Decision Rate 1/2 Coding Measurements')
>>> plt.xlabel(r'$E_b/N_0$ (dB)')
>>> plt.ylabel(r'Symbol Error Probability')
>>> plt.legend(('Uncoded BPSK','R=1/3, K=3, Soft',\
'R=1/3, K=4, Soft','R=1/3, K=5, Soft',\
'R=1/3, K=6, Soft','R=1/3, K=7, Soft',\
'R=1/3, K=8, Soft','R=1/3, K=5, Sim', \
'Simulation'),loc='upper right')
>>> plt.grid();
>>> plt.show()
>>> # Hard decision rate 1/3 simulation
>>> N_bits_per_frame = 10000
>>> EbN0 = 3
>>> total_bit_errors = 0
>>> total_bit_count = 0
>>> cc2 = fec.fec_conv(('11111','11011','10101'),25)
>>> # Encode with shift register starting state of '0000'
>>> state = '0000'
>>> while total_bit_errors < 100:
>>> # Create 100000 random 0/1 bits
>>> x = randint(0,2,N_bits_per_frame)
>>> y,state = cc2.conv_encoder(x,state)
>>> # Add channel noise to bits, include antipodal level shift to [-1,1]
>>> yn_soft = dc.cpx_AWGN(2*y-1,EbN0-10*np.log10(3),1) # Channel SNR is 10*log10(3) dB less
>>> yn_hard = ((np.sign(yn_soft.real)+1)/2).astype(int)
>>> z = cc2.viterbi_decoder(yn_hard.real,'hard')
>>> # Count bit errors
>>> bit_count, bit_errors = dc.bit_errors(x,z)
>>> total_bit_errors += bit_errors
>>> total_bit_count += bit_count
>>> print('Bits Received = %d, Bit errors = %d, BEP = %1.2e' %\
(total_bit_count, total_bit_errors,\
total_bit_errors/total_bit_count))
>>> print('*****************************************************')
>>> print('Bits Received = %d, Bit errors = %d, BEP = %1.2e' %\
(total_bit_count, total_bit_errors,\
total_bit_errors/total_bit_count))
Rate 1/3 Object
kmax = 0, taumax = 0
Bits Received = 9976, Bit errors = 251, BEP = 2.52e-02
*****************************************************
Bits Received = 9976, Bit errors = 251, BEP = 2.52e-02
>>> # Compare a collection of simulation results with hard decision
>>> # bounds
>>> SNRdB = np.arange(0,12,.1)
>>> Pb_uc = fec.conv_Pb_bound(1/3,7,[4, 12, 20, 72, 225],SNRdB,2)
>>> Pb_s_third_3_hard = fec.conv_Pb_bound(1/3,8,[3, 0, 15, 0, 58, 0, 201, 0],SNRdB,0)
>>> Pb_s_third_5_hard = fec.conv_Pb_bound(1/3,12,[12, 0, 12, 0, 56, 0, 320, 0],SNRdB,0)
>>> Pb_s_third_7_hard = fec.conv_Pb_bound(1/3,14,[1, 0, 20, 0, 53, 0, 184],SNRdB,0)
>>> Pb_s_third_5_hard_sim = np.array([8.94e-04,1.11e-04,8.73e-06])
>>> plt.figure(figsize=(5,5))
>>> plt.semilogy(SNRdB,Pb_uc)
>>> plt.semilogy(SNRdB,Pb_s_third_3_hard,'r--')
>>> plt.semilogy(SNRdB,Pb_s_third_5_hard,'g--')
>>> plt.semilogy(SNRdB,Pb_s_third_7_hard,'k--')
>>> plt.semilogy(np.array([5,6,7]),Pb_s_third_5_hard_sim,'sg')
>>> plt.axis([0,12,1e-7,1e0])
>>> plt.title(r'Hard Decision Rate 1/3 Coding Measurements')
>>> plt.xlabel(r'$E_b/N_0$ (dB)')
>>> plt.ylabel(r'Symbol Error Probability')
>>> plt.legend(('Uncoded BPSK','R=1/3, K=3, Hard',\
'R=1/3, K=5, Hard', 'R=1/3, K=7, Hard',\
),loc='upper right')
>>> plt.grid();
>>> plt.show()
>>> # Show the traceback for the rate 1/3 hard decision case
>>> cc2.traceback_plot()
### Response:
def viterbi_decoder(self,x,metric_type='soft',quant_level=3):
"""
A method which performs Viterbi decoding of noisy bit stream,
taking as input soft bit values centered on +/-1 and returning
hard decision 0/1 bits.
Parameters
----------
x: Received noisy bit values centered on +/-1 at one sample per bit
metric_type:
'hard' - Hard decision metric. Expects binary or 0/1 input values.
'unquant' - unquantized soft decision decoding. Expects +/-1
input values.
'soft' - soft decision decoding.
quant_level: The quantization level for soft decoding. Expected
input values between 0 and 2^quant_level-1. 0 represents the most
confident 0 and 2^quant_level-1 represents the most confident 1.
Only used for 'soft' metric type.
Returns
-------
y: Decoded 0/1 bit stream
Examples
--------
>>> import numpy as np
>>> from numpy.random import randint
>>> import sk_dsp_comm.fec_conv as fec
>>> import sk_dsp_comm.digitalcom as dc
>>> import matplotlib.pyplot as plt
>>> # Soft decision rate 1/2 simulation
>>> N_bits_per_frame = 10000
>>> EbN0 = 4
>>> total_bit_errors = 0
>>> total_bit_count = 0
>>> cc1 = fec.fec_conv(('11101','10011'),25)
>>> # Encode with shift register starting state of '0000'
>>> state = '0000'
>>> while total_bit_errors < 100:
>>> # Create 100000 random 0/1 bits
>>> x = randint(0,2,N_bits_per_frame)
>>> y,state = cc1.conv_encoder(x,state)
>>> # Add channel noise to bits, include antipodal level shift to [-1,1]
>>> yn_soft = dc.cpx_AWGN(2*y-1,EbN0-3,1) # Channel SNR is 3 dB less for rate 1/2
>>> yn_hard = ((np.sign(yn_soft.real)+1)/2).astype(int)
>>> z = cc1.viterbi_decoder(yn_hard,'hard')
>>> # Count bit errors
>>> bit_count, bit_errors = dc.bit_errors(x,z)
>>> total_bit_errors += bit_errors
>>> total_bit_count += bit_count
>>> print('Bits Received = %d, Bit errors = %d, BEP = %1.2e' %\
(total_bit_count, total_bit_errors,\
total_bit_errors/total_bit_count))
>>> print('*****************************************************')
>>> print('Bits Received = %d, Bit errors = %d, BEP = %1.2e' %\
(total_bit_count, total_bit_errors,\
total_bit_errors/total_bit_count))
Rate 1/2 Object
kmax = 0, taumax = 0
Bits Received = 9976, Bit errors = 77, BEP = 7.72e-03
kmax = 0, taumax = 0
Bits Received = 19952, Bit errors = 175, BEP = 8.77e-03
*****************************************************
Bits Received = 19952, Bit errors = 175, BEP = 8.77e-03
>>> # Consider the trellis traceback after the sim completes
>>> cc1.traceback_plot()
>>> plt.show()
>>> # Compare a collection of simulation results with soft decision
>>> # bounds
>>> SNRdB = np.arange(0,12,.1)
>>> Pb_uc = fec.conv_Pb_bound(1/3,7,[4, 12, 20, 72, 225],SNRdB,2)
>>> Pb_s_third_3 = fec.conv_Pb_bound(1/3,8,[3, 0, 15],SNRdB,1)
>>> Pb_s_third_4 = fec.conv_Pb_bound(1/3,10,[6, 0, 6, 0],SNRdB,1)
>>> Pb_s_third_5 = fec.conv_Pb_bound(1/3,12,[12, 0, 12, 0, 56],SNRdB,1)
>>> Pb_s_third_6 = fec.conv_Pb_bound(1/3,13,[1, 8, 26, 20, 19, 62],SNRdB,1)
>>> Pb_s_third_7 = fec.conv_Pb_bound(1/3,14,[1, 0, 20, 0, 53, 0, 184],SNRdB,1)
>>> Pb_s_third_8 = fec.conv_Pb_bound(1/3,16,[1, 0, 24, 0, 113, 0, 287, 0],SNRdB,1)
>>> Pb_s_half = fec.conv_Pb_bound(1/2,7,[4, 12, 20, 72, 225],SNRdB,1)
>>> plt.figure(figsize=(5,5))
>>> plt.semilogy(SNRdB,Pb_uc)
>>> plt.semilogy(SNRdB,Pb_s_third_3,'--')
>>> plt.semilogy(SNRdB,Pb_s_third_4,'--')
>>> plt.semilogy(SNRdB,Pb_s_third_5,'g')
>>> plt.semilogy(SNRdB,Pb_s_third_6,'--')
>>> plt.semilogy(SNRdB,Pb_s_third_7,'--')
>>> plt.semilogy(SNRdB,Pb_s_third_8,'--')
>>> plt.semilogy([0,1,2,3,4,5],[9.08e-02,2.73e-02,6.52e-03,\
8.94e-04,8.54e-05,5e-6],'gs')
>>> plt.axis([0,12,1e-7,1e0])
>>> plt.title(r'Soft Decision Rate 1/2 Coding Measurements')
>>> plt.xlabel(r'$E_b/N_0$ (dB)')
>>> plt.ylabel(r'Symbol Error Probability')
>>> plt.legend(('Uncoded BPSK','R=1/3, K=3, Soft',\
'R=1/3, K=4, Soft','R=1/3, K=5, Soft',\
'R=1/3, K=6, Soft','R=1/3, K=7, Soft',\
'R=1/3, K=8, Soft','R=1/3, K=5, Sim', \
'Simulation'),loc='upper right')
>>> plt.grid();
>>> plt.show()
>>> # Hard decision rate 1/3 simulation
>>> N_bits_per_frame = 10000
>>> EbN0 = 3
>>> total_bit_errors = 0
>>> total_bit_count = 0
>>> cc2 = fec.fec_conv(('11111','11011','10101'),25)
>>> # Encode with shift register starting state of '0000'
>>> state = '0000'
>>> while total_bit_errors < 100:
>>> # Create 100000 random 0/1 bits
>>> x = randint(0,2,N_bits_per_frame)
>>> y,state = cc2.conv_encoder(x,state)
>>> # Add channel noise to bits, include antipodal level shift to [-1,1]
>>> yn_soft = dc.cpx_AWGN(2*y-1,EbN0-10*np.log10(3),1) # Channel SNR is 10*log10(3) dB less
>>> yn_hard = ((np.sign(yn_soft.real)+1)/2).astype(int)
>>> z = cc2.viterbi_decoder(yn_hard.real,'hard')
>>> # Count bit errors
>>> bit_count, bit_errors = dc.bit_errors(x,z)
>>> total_bit_errors += bit_errors
>>> total_bit_count += bit_count
>>> print('Bits Received = %d, Bit errors = %d, BEP = %1.2e' %\
(total_bit_count, total_bit_errors,\
total_bit_errors/total_bit_count))
>>> print('*****************************************************')
>>> print('Bits Received = %d, Bit errors = %d, BEP = %1.2e' %\
(total_bit_count, total_bit_errors,\
total_bit_errors/total_bit_count))
Rate 1/3 Object
kmax = 0, taumax = 0
Bits Received = 9976, Bit errors = 251, BEP = 2.52e-02
*****************************************************
Bits Received = 9976, Bit errors = 251, BEP = 2.52e-02
>>> # Compare a collection of simulation results with hard decision
>>> # bounds
>>> SNRdB = np.arange(0,12,.1)
>>> Pb_uc = fec.conv_Pb_bound(1/3,7,[4, 12, 20, 72, 225],SNRdB,2)
>>> Pb_s_third_3_hard = fec.conv_Pb_bound(1/3,8,[3, 0, 15, 0, 58, 0, 201, 0],SNRdB,0)
>>> Pb_s_third_5_hard = fec.conv_Pb_bound(1/3,12,[12, 0, 12, 0, 56, 0, 320, 0],SNRdB,0)
>>> Pb_s_third_7_hard = fec.conv_Pb_bound(1/3,14,[1, 0, 20, 0, 53, 0, 184],SNRdB,0)
>>> Pb_s_third_5_hard_sim = np.array([8.94e-04,1.11e-04,8.73e-06])
>>> plt.figure(figsize=(5,5))
>>> plt.semilogy(SNRdB,Pb_uc)
>>> plt.semilogy(SNRdB,Pb_s_third_3_hard,'r--')
>>> plt.semilogy(SNRdB,Pb_s_third_5_hard,'g--')
>>> plt.semilogy(SNRdB,Pb_s_third_7_hard,'k--')
>>> plt.semilogy(np.array([5,6,7]),Pb_s_third_5_hard_sim,'sg')
>>> plt.axis([0,12,1e-7,1e0])
>>> plt.title(r'Hard Decision Rate 1/3 Coding Measurements')
>>> plt.xlabel(r'$E_b/N_0$ (dB)')
>>> plt.ylabel(r'Symbol Error Probability')
>>> plt.legend(('Uncoded BPSK','R=1/3, K=3, Hard',\
'R=1/3, K=5, Hard', 'R=1/3, K=7, Hard',\
),loc='upper right')
>>> plt.grid();
>>> plt.show()
>>> # Show the traceback for the rate 1/3 hard decision case
>>> cc2.traceback_plot()
"""
if metric_type == 'hard':
# If hard decision must have 0/1 integers for input else float
if np.issubdtype(x.dtype, np.integer):
if x.max() > 1 or x.min() < 0:
raise ValueError('Integer bit values must be 0 or 1')
else:
raise ValueError('Decoder inputs must be integers on [0,1] for hard decisions')
# Initialize cumulative metrics array
cm_present = np.zeros((self.Nstates,1))
NS = len(x) # number of channel symbols to process;
# must be even for rate 1/2
# must be a multiple of 3 for rate 1/3
y = np.zeros(NS-self.decision_depth) # Decoded bit sequence
k = 0
symbolL = self.rate.denominator
# Calculate branch metrics and update traceback states and traceback bits
for n in range(0,NS,symbolL):
cm_past = self.paths.cumulative_metric[:,0]
tb_states_temp = self.paths.traceback_states[:,:-1].copy()
tb_bits_temp = self.paths.traceback_bits[:,:-1].copy()
for m in range(self.Nstates):
d1 = self.bm_calc(self.branches.bits1[m],
x[n:n+symbolL],metric_type,
quant_level)
d1 = d1 + cm_past[self.branches.states1[m]]
d2 = self.bm_calc(self.branches.bits2[m],
x[n:n+symbolL],metric_type,
quant_level)
d2 = d2 + cm_past[self.branches.states2[m]]
if d1 <= d2: # Find the survivor assuming minimum distance wins
cm_present[m] = d1
self.paths.traceback_states[m,:] = np.hstack((self.branches.states1[m],
tb_states_temp[int(self.branches.states1[m]),:]))
self.paths.traceback_bits[m,:] = np.hstack((self.branches.input1[m],
tb_bits_temp[int(self.branches.states1[m]),:]))
else:
cm_present[m] = d2
self.paths.traceback_states[m,:] = np.hstack((self.branches.states2[m],
tb_states_temp[int(self.branches.states2[m]),:]))
self.paths.traceback_bits[m,:] = np.hstack((self.branches.input2[m],
tb_bits_temp[int(self.branches.states2[m]),:]))
# Update cumulative metric history
self.paths.cumulative_metric = np.hstack((cm_present,
self.paths.cumulative_metric[:,:-1]))
# Obtain estimate of input bit sequence from the oldest bit in
# the traceback having the smallest (most likely) cumulative metric
min_metric = min(self.paths.cumulative_metric[:,0])
min_idx = np.where(self.paths.cumulative_metric[:,0] == min_metric)
if n >= symbolL*self.decision_depth-symbolL: # 2 since Rate = 1/2
y[k] = self.paths.traceback_bits[min_idx[0][0],-1]
k += 1
y = y[:k] # trim final length
return y |
def assertDutTraceDoesNotContain(dut, message, bench):
"""
Raise TestStepFail if bench.verify_trace does not find message from dut traces.
:param dut: Dut object.
:param message: Message to look for.
:param: Bench, must contain verify_trace method.
:raises: AttributeError if bench does not contain verify_trace method.
TestStepFail if verify_trace returns True.
"""
if not hasattr(bench, "verify_trace"):
raise AttributeError("Bench object does not contain verify_trace method!")
if bench.verify_trace(dut, message, False):
raise TestStepFail('Assert: Message(s) "%s" in response' % message) | Raise TestStepFail if bench.verify_trace does not find message from dut traces.
:param dut: Dut object.
:param message: Message to look for.
:param: Bench, must contain verify_trace method.
:raises: AttributeError if bench does not contain verify_trace method.
TestStepFail if verify_trace returns True. | Below is the the instruction that describes the task:
### Input:
Raise TestStepFail if bench.verify_trace does not find message from dut traces.
:param dut: Dut object.
:param message: Message to look for.
:param: Bench, must contain verify_trace method.
:raises: AttributeError if bench does not contain verify_trace method.
TestStepFail if verify_trace returns True.
### Response:
def assertDutTraceDoesNotContain(dut, message, bench):
"""
Raise TestStepFail if bench.verify_trace does not find message from dut traces.
:param dut: Dut object.
:param message: Message to look for.
:param: Bench, must contain verify_trace method.
:raises: AttributeError if bench does not contain verify_trace method.
TestStepFail if verify_trace returns True.
"""
if not hasattr(bench, "verify_trace"):
raise AttributeError("Bench object does not contain verify_trace method!")
if bench.verify_trace(dut, message, False):
raise TestStepFail('Assert: Message(s) "%s" in response' % message) |
def merge_graphs(main_graph, addition_graph):
"""Merges an ''addition_graph'' into the ''main_graph''.
Returns a tuple of dictionaries, mapping old node ids and edge ids to new ids.
"""
node_mapping = {}
edge_mapping = {}
for node in addition_graph.get_all_node_objects():
node_id = node['id']
new_id = main_graph.new_node()
node_mapping[node_id] = new_id
for edge in addition_graph.get_all_edge_objects():
edge_id = edge['id']
old_vertex_a_id, old_vertex_b_id = edge['vertices']
new_vertex_a_id = node_mapping[old_vertex_a_id]
new_vertex_b_id = node_mapping[old_vertex_b_id]
new_edge_id = main_graph.new_edge(new_vertex_a_id, new_vertex_b_id)
edge_mapping[edge_id] = new_edge_id
return node_mapping, edge_mapping | Merges an ''addition_graph'' into the ''main_graph''.
Returns a tuple of dictionaries, mapping old node ids and edge ids to new ids. | Below is the the instruction that describes the task:
### Input:
Merges an ''addition_graph'' into the ''main_graph''.
Returns a tuple of dictionaries, mapping old node ids and edge ids to new ids.
### Response:
def merge_graphs(main_graph, addition_graph):
"""Merges an ''addition_graph'' into the ''main_graph''.
Returns a tuple of dictionaries, mapping old node ids and edge ids to new ids.
"""
node_mapping = {}
edge_mapping = {}
for node in addition_graph.get_all_node_objects():
node_id = node['id']
new_id = main_graph.new_node()
node_mapping[node_id] = new_id
for edge in addition_graph.get_all_edge_objects():
edge_id = edge['id']
old_vertex_a_id, old_vertex_b_id = edge['vertices']
new_vertex_a_id = node_mapping[old_vertex_a_id]
new_vertex_b_id = node_mapping[old_vertex_b_id]
new_edge_id = main_graph.new_edge(new_vertex_a_id, new_vertex_b_id)
edge_mapping[edge_id] = new_edge_id
return node_mapping, edge_mapping |
def _terminate(self):
'''Shutdown agent gently removing the descriptor and
notifying partners.'''
def generate_body():
d = defer.succeed(None)
d.addBoth(defer.drop_param, self.agent.shutdown_agent)
# Delete the descriptor
d.addBoth(lambda _: self.delete_document(self._descriptor))
return d
return self._terminate_procedure(generate_body) | Shutdown agent gently removing the descriptor and
notifying partners. | Below is the the instruction that describes the task:
### Input:
Shutdown agent gently removing the descriptor and
notifying partners.
### Response:
def _terminate(self):
'''Shutdown agent gently removing the descriptor and
notifying partners.'''
def generate_body():
d = defer.succeed(None)
d.addBoth(defer.drop_param, self.agent.shutdown_agent)
# Delete the descriptor
d.addBoth(lambda _: self.delete_document(self._descriptor))
return d
return self._terminate_procedure(generate_body) |
def _load_templates(workflow: dict, templates_root: str):
"""Load templates keys."""
workflow_template_path = join(templates_root, workflow['id'],
workflow['version'])
for i, stage_config in enumerate(workflow['stages']):
stage_template_path = join(workflow_template_path,
stage_config['id'],
stage_config['version'])
for config_type in ['ee_config', 'app_config']:
for key, value in stage_config[config_type].items():
if 'template' in key:
template_file = join(stage_template_path, value)
with open(template_file, 'r') as file:
template_str = file.read()
workflow['stages'][i][config_type][key] = template_str | Load templates keys. | Below is the the instruction that describes the task:
### Input:
Load templates keys.
### Response:
def _load_templates(workflow: dict, templates_root: str):
"""Load templates keys."""
workflow_template_path = join(templates_root, workflow['id'],
workflow['version'])
for i, stage_config in enumerate(workflow['stages']):
stage_template_path = join(workflow_template_path,
stage_config['id'],
stage_config['version'])
for config_type in ['ee_config', 'app_config']:
for key, value in stage_config[config_type].items():
if 'template' in key:
template_file = join(stage_template_path, value)
with open(template_file, 'r') as file:
template_str = file.read()
workflow['stages'][i][config_type][key] = template_str |
def make_op_return_tx(data, private_key,
blockchain_client=BlockchainInfoClient(), fee=OP_RETURN_FEE,
change_address=None, format='bin'):
""" Builds and signs an OP_RETURN transaction.
"""
# get out the private key object, sending address, and inputs
private_key_obj, from_address, inputs = analyze_private_key(private_key,
blockchain_client)
# get the change address
if not change_address:
change_address = from_address
# create the outputs
outputs = make_op_return_outputs(data, inputs, change_address,
fee=fee, format=format)
# serialize the transaction
unsigned_tx = serialize_transaction(inputs, outputs)
# generate a scriptSig for each input
for i in xrange(0, len(inputs)):
signed_tx = sign_transaction(unsigned_tx, i, private_key_obj.to_hex())
unsigned_tx = signed_tx
# return the signed tx
return signed_tx | Builds and signs an OP_RETURN transaction. | Below is the the instruction that describes the task:
### Input:
Builds and signs an OP_RETURN transaction.
### Response:
def make_op_return_tx(data, private_key,
blockchain_client=BlockchainInfoClient(), fee=OP_RETURN_FEE,
change_address=None, format='bin'):
""" Builds and signs an OP_RETURN transaction.
"""
# get out the private key object, sending address, and inputs
private_key_obj, from_address, inputs = analyze_private_key(private_key,
blockchain_client)
# get the change address
if not change_address:
change_address = from_address
# create the outputs
outputs = make_op_return_outputs(data, inputs, change_address,
fee=fee, format=format)
# serialize the transaction
unsigned_tx = serialize_transaction(inputs, outputs)
# generate a scriptSig for each input
for i in xrange(0, len(inputs)):
signed_tx = sign_transaction(unsigned_tx, i, private_key_obj.to_hex())
unsigned_tx = signed_tx
# return the signed tx
return signed_tx |
def table(T_table_world=RigidTransform(from_frame='table', to_frame='world'), dim=0.16, color=(0,0,0)):
"""Plot a table mesh in 3D.
Parameters
----------
T_table_world : autolab_core.RigidTransform
Pose of table relative to world.
dim : float
The side-length for the table.
color : 3-tuple
Color tuple.
"""
table_vertices = np.array([[ dim, dim, 0],
[ dim, -dim, 0],
[-dim, dim, 0],
[-dim, -dim, 0]]).astype('float')
table_tris = np.array([[0, 1, 2], [1, 2, 3]])
table_mesh = trimesh.Trimesh(table_vertices, table_tris)
table_mesh.apply_transform(T_table_world.matrix)
Visualizer3D.mesh(table_mesh, style='surface', smooth=True, color=color) | Plot a table mesh in 3D.
Parameters
----------
T_table_world : autolab_core.RigidTransform
Pose of table relative to world.
dim : float
The side-length for the table.
color : 3-tuple
Color tuple. | Below is the the instruction that describes the task:
### Input:
Plot a table mesh in 3D.
Parameters
----------
T_table_world : autolab_core.RigidTransform
Pose of table relative to world.
dim : float
The side-length for the table.
color : 3-tuple
Color tuple.
### Response:
def table(T_table_world=RigidTransform(from_frame='table', to_frame='world'), dim=0.16, color=(0,0,0)):
"""Plot a table mesh in 3D.
Parameters
----------
T_table_world : autolab_core.RigidTransform
Pose of table relative to world.
dim : float
The side-length for the table.
color : 3-tuple
Color tuple.
"""
table_vertices = np.array([[ dim, dim, 0],
[ dim, -dim, 0],
[-dim, dim, 0],
[-dim, -dim, 0]]).astype('float')
table_tris = np.array([[0, 1, 2], [1, 2, 3]])
table_mesh = trimesh.Trimesh(table_vertices, table_tris)
table_mesh.apply_transform(T_table_world.matrix)
Visualizer3D.mesh(table_mesh, style='surface', smooth=True, color=color) |
def get_string_polyglot_attack(self, obj):
"""
Return a polyglot attack containing the original object
"""
return self.polyglot_attacks[random.choice(self.config.techniques)] % obj | Return a polyglot attack containing the original object | Below is the the instruction that describes the task:
### Input:
Return a polyglot attack containing the original object
### Response:
def get_string_polyglot_attack(self, obj):
"""
Return a polyglot attack containing the original object
"""
return self.polyglot_attacks[random.choice(self.config.techniques)] % obj |
def get_depth_term(self, C, rup):
"""
Returns depth term (dependent on top of rupture depth) as given
in equations 1
Note that there is a ztor cap of 100 km that is introduced in the
Fortran code but not mentioned in the original paper!
"""
if rup.ztor > 100.0:
return C["bSLH"] * 100.0
else:
return C["bSLH"] * rup.ztor | Returns depth term (dependent on top of rupture depth) as given
in equations 1
Note that there is a ztor cap of 100 km that is introduced in the
Fortran code but not mentioned in the original paper! | Below is the the instruction that describes the task:
### Input:
Returns depth term (dependent on top of rupture depth) as given
in equations 1
Note that there is a ztor cap of 100 km that is introduced in the
Fortran code but not mentioned in the original paper!
### Response:
def get_depth_term(self, C, rup):
"""
Returns depth term (dependent on top of rupture depth) as given
in equations 1
Note that there is a ztor cap of 100 km that is introduced in the
Fortran code but not mentioned in the original paper!
"""
if rup.ztor > 100.0:
return C["bSLH"] * 100.0
else:
return C["bSLH"] * rup.ztor |
def reply(self, timeout=None):
"""
Returns the initial reply. This is emitted before any subscription
data is emitted. This function raises an exception if the subscription
attempt failed.
"""
self._wait_on_signal(self._response_received)
if self._response_exception is not None:
msg = self._response_exception.message
raise YamcsError(msg)
return self._response_reply | Returns the initial reply. This is emitted before any subscription
data is emitted. This function raises an exception if the subscription
attempt failed. | Below is the the instruction that describes the task:
### Input:
Returns the initial reply. This is emitted before any subscription
data is emitted. This function raises an exception if the subscription
attempt failed.
### Response:
def reply(self, timeout=None):
"""
Returns the initial reply. This is emitted before any subscription
data is emitted. This function raises an exception if the subscription
attempt failed.
"""
self._wait_on_signal(self._response_received)
if self._response_exception is not None:
msg = self._response_exception.message
raise YamcsError(msg)
return self._response_reply |
def create_full_tear_sheet(factor_data,
long_short=True,
group_neutral=False,
by_group=False):
"""
Creates a full tear sheet for analysis and evaluating single
return predicting (alpha) factor.
Parameters
----------
factor_data : pd.DataFrame - MultiIndex
A MultiIndex DataFrame indexed by date (level 0) and asset (level 1),
containing the values for a single alpha factor, forward returns for
each period, the factor quantile/bin that factor value belongs to, and
(optionally) the group the asset belongs to.
- See full explanation in utils.get_clean_factor_and_forward_returns
long_short : bool
Should this computation happen on a long short portfolio?
- See tears.create_returns_tear_sheet for details on how this flag
affects returns analysis
group_neutral : bool
Should this computation happen on a group neutral portfolio?
- See tears.create_returns_tear_sheet for details on how this flag
affects returns analysis
- See tears.create_information_tear_sheet for details on how this
flag affects information analysis
by_group : bool
If True, display graphs separately for each group.
"""
plotting.plot_quantile_statistics_table(factor_data)
create_returns_tear_sheet(factor_data,
long_short,
group_neutral,
by_group,
set_context=False)
create_information_tear_sheet(factor_data,
group_neutral,
by_group,
set_context=False)
create_turnover_tear_sheet(factor_data, set_context=False) | Creates a full tear sheet for analysis and evaluating single
return predicting (alpha) factor.
Parameters
----------
factor_data : pd.DataFrame - MultiIndex
A MultiIndex DataFrame indexed by date (level 0) and asset (level 1),
containing the values for a single alpha factor, forward returns for
each period, the factor quantile/bin that factor value belongs to, and
(optionally) the group the asset belongs to.
- See full explanation in utils.get_clean_factor_and_forward_returns
long_short : bool
Should this computation happen on a long short portfolio?
- See tears.create_returns_tear_sheet for details on how this flag
affects returns analysis
group_neutral : bool
Should this computation happen on a group neutral portfolio?
- See tears.create_returns_tear_sheet for details on how this flag
affects returns analysis
- See tears.create_information_tear_sheet for details on how this
flag affects information analysis
by_group : bool
If True, display graphs separately for each group. | Below is the the instruction that describes the task:
### Input:
Creates a full tear sheet for analysis and evaluating single
return predicting (alpha) factor.
Parameters
----------
factor_data : pd.DataFrame - MultiIndex
A MultiIndex DataFrame indexed by date (level 0) and asset (level 1),
containing the values for a single alpha factor, forward returns for
each period, the factor quantile/bin that factor value belongs to, and
(optionally) the group the asset belongs to.
- See full explanation in utils.get_clean_factor_and_forward_returns
long_short : bool
Should this computation happen on a long short portfolio?
- See tears.create_returns_tear_sheet for details on how this flag
affects returns analysis
group_neutral : bool
Should this computation happen on a group neutral portfolio?
- See tears.create_returns_tear_sheet for details on how this flag
affects returns analysis
- See tears.create_information_tear_sheet for details on how this
flag affects information analysis
by_group : bool
If True, display graphs separately for each group.
### Response:
def create_full_tear_sheet(factor_data,
long_short=True,
group_neutral=False,
by_group=False):
"""
Creates a full tear sheet for analysis and evaluating single
return predicting (alpha) factor.
Parameters
----------
factor_data : pd.DataFrame - MultiIndex
A MultiIndex DataFrame indexed by date (level 0) and asset (level 1),
containing the values for a single alpha factor, forward returns for
each period, the factor quantile/bin that factor value belongs to, and
(optionally) the group the asset belongs to.
- See full explanation in utils.get_clean_factor_and_forward_returns
long_short : bool
Should this computation happen on a long short portfolio?
- See tears.create_returns_tear_sheet for details on how this flag
affects returns analysis
group_neutral : bool
Should this computation happen on a group neutral portfolio?
- See tears.create_returns_tear_sheet for details on how this flag
affects returns analysis
- See tears.create_information_tear_sheet for details on how this
flag affects information analysis
by_group : bool
If True, display graphs separately for each group.
"""
plotting.plot_quantile_statistics_table(factor_data)
create_returns_tear_sheet(factor_data,
long_short,
group_neutral,
by_group,
set_context=False)
create_information_tear_sheet(factor_data,
group_neutral,
by_group,
set_context=False)
create_turnover_tear_sheet(factor_data, set_context=False) |
def execute(self, conn, app, release_version, pset_hash, output_label, global_tag, transaction = False):
"""
returns id for a given application
This always requires all four variables to be set, because
you better have them in blockInsert
"""
binds = {}
binds["app_name"]=app
binds["release_version"]=release_version
binds["pset_hash"]=pset_hash
binds["output_module_label"]=output_label
binds["global_tag"]=global_tag
result = self.dbi.processData(self.sql, binds, conn, transaction)
plist = self.formatDict(result)
if len(plist) < 1: return -1
return plist[0]["output_mod_config_id"] | returns id for a given application
This always requires all four variables to be set, because
you better have them in blockInsert | Below is the the instruction that describes the task:
### Input:
returns id for a given application
This always requires all four variables to be set, because
you better have them in blockInsert
### Response:
def execute(self, conn, app, release_version, pset_hash, output_label, global_tag, transaction = False):
"""
returns id for a given application
This always requires all four variables to be set, because
you better have them in blockInsert
"""
binds = {}
binds["app_name"]=app
binds["release_version"]=release_version
binds["pset_hash"]=pset_hash
binds["output_module_label"]=output_label
binds["global_tag"]=global_tag
result = self.dbi.processData(self.sql, binds, conn, transaction)
plist = self.formatDict(result)
if len(plist) < 1: return -1
return plist[0]["output_mod_config_id"] |
def write(self):
"""
Writes the ``.sln`` file to disk.
"""
filters = {
'MSGUID': lambda x: ('{%s}' % x).upper(),
'relslnfile': lambda x: os.path.relpath(x, os.path.dirname(self.FileName))
}
context = {
'sln': self
}
return self.render(self.__jinja_template__, self.FileName, context, filters) | Writes the ``.sln`` file to disk. | Below is the the instruction that describes the task:
### Input:
Writes the ``.sln`` file to disk.
### Response:
def write(self):
"""
Writes the ``.sln`` file to disk.
"""
filters = {
'MSGUID': lambda x: ('{%s}' % x).upper(),
'relslnfile': lambda x: os.path.relpath(x, os.path.dirname(self.FileName))
}
context = {
'sln': self
}
return self.render(self.__jinja_template__, self.FileName, context, filters) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.