code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def std(self) -> Optional[float]: #, ddof=0):
"""Standard deviation of all values entered into histogram.
This number is precise, because we keep the necessary data
separate from bin contents.
Returns
-------
float
"""
# TODO: Add DOF
if self._stats:
return np.sqrt(self.variance())
else:
return None | Standard deviation of all values entered into histogram.
This number is precise, because we keep the necessary data
separate from bin contents.
Returns
-------
float | Below is the the instruction that describes the task:
### Input:
Standard deviation of all values entered into histogram.
This number is precise, because we keep the necessary data
separate from bin contents.
Returns
-------
float
### Response:
def std(self) -> Optional[float]: #, ddof=0):
"""Standard deviation of all values entered into histogram.
This number is precise, because we keep the necessary data
separate from bin contents.
Returns
-------
float
"""
# TODO: Add DOF
if self._stats:
return np.sqrt(self.variance())
else:
return None |
def read(self, entity=None, attrs=None, ignore=None, params=None):
"""Fetch an attribute missing from the server's response.
For more information, see `Bugzilla #1237257
<https://bugzilla.redhat.com/show_bug.cgi?id=1237257>`_.
Add content_view_component to the response if needed, as
:meth:`nailgun.entity_mixins.EntityReadMixin.read` can't initialize
content_view_component.
"""
if attrs is None:
attrs = self.read_json()
if _get_version(self._server_config) < Version('6.1'):
org = _get_org(self._server_config, attrs['organization']['label'])
attrs['organization'] = org.get_values()
if ignore is None:
ignore = set()
ignore.add('content_view_component')
result = super(ContentView, self).read(entity, attrs, ignore, params)
if 'content_view_components' in attrs and attrs['content_view_components']:
result.content_view_component = [
ContentViewComponent(
self._server_config,
composite_content_view=result.id,
id=content_view_component['id'],
)
for content_view_component in attrs['content_view_components']
]
return result | Fetch an attribute missing from the server's response.
For more information, see `Bugzilla #1237257
<https://bugzilla.redhat.com/show_bug.cgi?id=1237257>`_.
Add content_view_component to the response if needed, as
:meth:`nailgun.entity_mixins.EntityReadMixin.read` can't initialize
content_view_component. | Below is the the instruction that describes the task:
### Input:
Fetch an attribute missing from the server's response.
For more information, see `Bugzilla #1237257
<https://bugzilla.redhat.com/show_bug.cgi?id=1237257>`_.
Add content_view_component to the response if needed, as
:meth:`nailgun.entity_mixins.EntityReadMixin.read` can't initialize
content_view_component.
### Response:
def read(self, entity=None, attrs=None, ignore=None, params=None):
"""Fetch an attribute missing from the server's response.
For more information, see `Bugzilla #1237257
<https://bugzilla.redhat.com/show_bug.cgi?id=1237257>`_.
Add content_view_component to the response if needed, as
:meth:`nailgun.entity_mixins.EntityReadMixin.read` can't initialize
content_view_component.
"""
if attrs is None:
attrs = self.read_json()
if _get_version(self._server_config) < Version('6.1'):
org = _get_org(self._server_config, attrs['organization']['label'])
attrs['organization'] = org.get_values()
if ignore is None:
ignore = set()
ignore.add('content_view_component')
result = super(ContentView, self).read(entity, attrs, ignore, params)
if 'content_view_components' in attrs and attrs['content_view_components']:
result.content_view_component = [
ContentViewComponent(
self._server_config,
composite_content_view=result.id,
id=content_view_component['id'],
)
for content_view_component in attrs['content_view_components']
]
return result |
def configure(**kwargs):
"""Global configuration for event handling."""
for key in kwargs:
if key == 'is_logging_enabled':
Event.is_logging_enabled = kwargs[key]
elif key == 'collector_queue':
Event.collector_queue = kwargs[key]
else:
Logger.get_logger(__name__).error("Unknown key %s in configure or bad type %s",
key, type(kwargs[key])) | Global configuration for event handling. | Below is the the instruction that describes the task:
### Input:
Global configuration for event handling.
### Response:
def configure(**kwargs):
"""Global configuration for event handling."""
for key in kwargs:
if key == 'is_logging_enabled':
Event.is_logging_enabled = kwargs[key]
elif key == 'collector_queue':
Event.collector_queue = kwargs[key]
else:
Logger.get_logger(__name__).error("Unknown key %s in configure or bad type %s",
key, type(kwargs[key])) |
def set_path(dicts, keys, v):
""" Helper function for modifying nested dictionaries
:param dicts: dict: the given dictionary
:param keys: list str: path to added value
:param v: str: value to be added
Example:
>>> d = dict()
>>> set_path(d, ['a', 'b', 'c'], 'd')
>>> d
{'a': {'b': {'c': ['d']}}}
In case of duplicate paths, the additional value will
be added to the leaf node rather than simply replace it:
>>> set_path(d, ['a', 'b', 'c'], 'e')
>>> d
{'a': {'b': {'c': ['d', 'e']}}}
"""
for key in keys[:-1]:
dicts = dicts.setdefault(key, dict())
dicts = dicts.setdefault(keys[-1], list())
dicts.append(v) | Helper function for modifying nested dictionaries
:param dicts: dict: the given dictionary
:param keys: list str: path to added value
:param v: str: value to be added
Example:
>>> d = dict()
>>> set_path(d, ['a', 'b', 'c'], 'd')
>>> d
{'a': {'b': {'c': ['d']}}}
In case of duplicate paths, the additional value will
be added to the leaf node rather than simply replace it:
>>> set_path(d, ['a', 'b', 'c'], 'e')
>>> d
{'a': {'b': {'c': ['d', 'e']}}} | Below is the the instruction that describes the task:
### Input:
Helper function for modifying nested dictionaries
:param dicts: dict: the given dictionary
:param keys: list str: path to added value
:param v: str: value to be added
Example:
>>> d = dict()
>>> set_path(d, ['a', 'b', 'c'], 'd')
>>> d
{'a': {'b': {'c': ['d']}}}
In case of duplicate paths, the additional value will
be added to the leaf node rather than simply replace it:
>>> set_path(d, ['a', 'b', 'c'], 'e')
>>> d
{'a': {'b': {'c': ['d', 'e']}}}
### Response:
def set_path(dicts, keys, v):
""" Helper function for modifying nested dictionaries
:param dicts: dict: the given dictionary
:param keys: list str: path to added value
:param v: str: value to be added
Example:
>>> d = dict()
>>> set_path(d, ['a', 'b', 'c'], 'd')
>>> d
{'a': {'b': {'c': ['d']}}}
In case of duplicate paths, the additional value will
be added to the leaf node rather than simply replace it:
>>> set_path(d, ['a', 'b', 'c'], 'e')
>>> d
{'a': {'b': {'c': ['d', 'e']}}}
"""
for key in keys[:-1]:
dicts = dicts.setdefault(key, dict())
dicts = dicts.setdefault(keys[-1], list())
dicts.append(v) |
def summarize(objects):
"""Summarize an objects list.
Return a list of lists, whereas each row consists of::
[str(type), number of objects of this type, total size of these objects].
No guarantee regarding the order is given.
"""
count = {}
total_size = {}
for o in objects:
otype = _repr(o)
if otype in count:
count[otype] += 1
total_size[otype] += _getsizeof(o)
else:
count[otype] = 1
total_size[otype] = _getsizeof(o)
rows = []
for otype in count:
rows.append([otype, count[otype], total_size[otype]])
return rows | Summarize an objects list.
Return a list of lists, whereas each row consists of::
[str(type), number of objects of this type, total size of these objects].
No guarantee regarding the order is given. | Below is the the instruction that describes the task:
### Input:
Summarize an objects list.
Return a list of lists, whereas each row consists of::
[str(type), number of objects of this type, total size of these objects].
No guarantee regarding the order is given.
### Response:
def summarize(objects):
"""Summarize an objects list.
Return a list of lists, whereas each row consists of::
[str(type), number of objects of this type, total size of these objects].
No guarantee regarding the order is given.
"""
count = {}
total_size = {}
for o in objects:
otype = _repr(o)
if otype in count:
count[otype] += 1
total_size[otype] += _getsizeof(o)
else:
count[otype] = 1
total_size[otype] = _getsizeof(o)
rows = []
for otype in count:
rows.append([otype, count[otype], total_size[otype]])
return rows |
def delete(self, *args, **kwargs):
"""Handles updating the related `votes` and `score` fields attached to the model."""
# XXX: circular import
from fields import RatingField
qs = self.distinct().values_list('content_type', 'object_id').order_by('content_type')
to_update = []
for content_type, objects in itertools.groupby(qs, key=lambda x: x[0]):
model_class = ContentType.objects.get(pk=content_type).model_class()
if model_class:
to_update.extend(list(model_class.objects.filter(pk__in=list(objects)[0])))
retval = super(VoteQuerySet, self).delete(*args, **kwargs)
# TODO: this could be improved
for obj in to_update:
for field in getattr(obj, '_djangoratings', []):
getattr(obj, field.name)._update(commit=False)
obj.save()
return retval | Handles updating the related `votes` and `score` fields attached to the model. | Below is the the instruction that describes the task:
### Input:
Handles updating the related `votes` and `score` fields attached to the model.
### Response:
def delete(self, *args, **kwargs):
"""Handles updating the related `votes` and `score` fields attached to the model."""
# XXX: circular import
from fields import RatingField
qs = self.distinct().values_list('content_type', 'object_id').order_by('content_type')
to_update = []
for content_type, objects in itertools.groupby(qs, key=lambda x: x[0]):
model_class = ContentType.objects.get(pk=content_type).model_class()
if model_class:
to_update.extend(list(model_class.objects.filter(pk__in=list(objects)[0])))
retval = super(VoteQuerySet, self).delete(*args, **kwargs)
# TODO: this could be improved
for obj in to_update:
for field in getattr(obj, '_djangoratings', []):
getattr(obj, field.name)._update(commit=False)
obj.save()
return retval |
def config_(name: str, local: bool, package: str, section: str,
key: Optional[str]):
"""Extract or list values from config."""
cfg = config.read_configs(package, name, local=local)
if key:
with suppress(NoOptionError, NoSectionError):
echo(cfg.get(section, key))
else:
with suppress(NoSectionError):
for opt in cfg.options(section):
colourise.pinfo(opt)
echo(' {}'.format(cfg.get(section, opt))) | Extract or list values from config. | Below is the the instruction that describes the task:
### Input:
Extract or list values from config.
### Response:
def config_(name: str, local: bool, package: str, section: str,
key: Optional[str]):
"""Extract or list values from config."""
cfg = config.read_configs(package, name, local=local)
if key:
with suppress(NoOptionError, NoSectionError):
echo(cfg.get(section, key))
else:
with suppress(NoSectionError):
for opt in cfg.options(section):
colourise.pinfo(opt)
echo(' {}'.format(cfg.get(section, opt))) |
def delete_blob(call=None, kwargs=None): # pylint: disable=unused-argument
'''
Delete a blob from a container.
'''
if kwargs is None:
kwargs = {}
if 'container' not in kwargs:
raise SaltCloudSystemExit(
'A container must be specified'
)
if 'blob' not in kwargs:
raise SaltCloudSystemExit(
'A blob must be specified'
)
storageservice = _get_block_blob_service(kwargs)
storageservice.delete_blob(kwargs['container'], kwargs['blob'])
return True | Delete a blob from a container. | Below is the the instruction that describes the task:
### Input:
Delete a blob from a container.
### Response:
def delete_blob(call=None, kwargs=None): # pylint: disable=unused-argument
'''
Delete a blob from a container.
'''
if kwargs is None:
kwargs = {}
if 'container' not in kwargs:
raise SaltCloudSystemExit(
'A container must be specified'
)
if 'blob' not in kwargs:
raise SaltCloudSystemExit(
'A blob must be specified'
)
storageservice = _get_block_blob_service(kwargs)
storageservice.delete_blob(kwargs['container'], kwargs['blob'])
return True |
def ReplaceHomoglyphs(s):
"""Returns s with unicode homoglyphs replaced by ascii equivalents."""
homoglyphs = {
'\xa0': ' ', # ?
'\u00e3': '', # TODO(gsfowler) drop after .proto spurious char elided
'\u00a0': ' ', # ?
'\u00a9': '(C)', # COPYRIGHT SIGN (would you believe "asciiglyph"?)
'\u00ae': '(R)', # REGISTERED SIGN (would you believe "asciiglyph"?)
'\u2014': '-', # EM DASH
'\u2018': "'", # LEFT SINGLE QUOTATION MARK
'\u2019': "'", # RIGHT SINGLE QUOTATION MARK
'\u201c': '"', # LEFT DOUBLE QUOTATION MARK
'\u201d': '"', # RIGHT DOUBLE QUOTATION MARK
'\u2026': '...', # HORIZONTAL ELLIPSIS
'\u2e3a': '-', # TWO-EM DASH
}
def _ReplaceOne(c):
"""Returns the homoglyph or escaped replacement for c."""
equiv = homoglyphs.get(c)
if equiv is not None:
return equiv
try:
c.encode('ascii')
return c
except UnicodeError:
pass
try:
return c.encode('unicode-escape').decode('ascii')
except UnicodeError:
return '?'
return ''.join([_ReplaceOne(c) for c in s]) | Returns s with unicode homoglyphs replaced by ascii equivalents. | Below is the the instruction that describes the task:
### Input:
Returns s with unicode homoglyphs replaced by ascii equivalents.
### Response:
def ReplaceHomoglyphs(s):
"""Returns s with unicode homoglyphs replaced by ascii equivalents."""
homoglyphs = {
'\xa0': ' ', # ?
'\u00e3': '', # TODO(gsfowler) drop after .proto spurious char elided
'\u00a0': ' ', # ?
'\u00a9': '(C)', # COPYRIGHT SIGN (would you believe "asciiglyph"?)
'\u00ae': '(R)', # REGISTERED SIGN (would you believe "asciiglyph"?)
'\u2014': '-', # EM DASH
'\u2018': "'", # LEFT SINGLE QUOTATION MARK
'\u2019': "'", # RIGHT SINGLE QUOTATION MARK
'\u201c': '"', # LEFT DOUBLE QUOTATION MARK
'\u201d': '"', # RIGHT DOUBLE QUOTATION MARK
'\u2026': '...', # HORIZONTAL ELLIPSIS
'\u2e3a': '-', # TWO-EM DASH
}
def _ReplaceOne(c):
"""Returns the homoglyph or escaped replacement for c."""
equiv = homoglyphs.get(c)
if equiv is not None:
return equiv
try:
c.encode('ascii')
return c
except UnicodeError:
pass
try:
return c.encode('unicode-escape').decode('ascii')
except UnicodeError:
return '?'
return ''.join([_ReplaceOne(c) for c in s]) |
def generate_template(template_name, **context):
"""Load and generate a template."""
context.update(href=href, format_datetime=format_datetime)
return template_loader.load(template_name).generate(**context) | Load and generate a template. | Below is the the instruction that describes the task:
### Input:
Load and generate a template.
### Response:
def generate_template(template_name, **context):
"""Load and generate a template."""
context.update(href=href, format_datetime=format_datetime)
return template_loader.load(template_name).generate(**context) |
def graph_from_dot_file(path, encoding=None):
"""Load graphs from DOT file at `path`.
@param path: to DOT file
@param encoding: as passed to `io.open`.
For example, `'utf-8'`.
@return: Graphs that result from parsing.
@rtype: `list` of `pydot.Dot`
"""
with io.open(path, 'rt', encoding=encoding) as f:
s = f.read()
if not PY3:
s = unicode(s)
graphs = graph_from_dot_data(s)
return graphs | Load graphs from DOT file at `path`.
@param path: to DOT file
@param encoding: as passed to `io.open`.
For example, `'utf-8'`.
@return: Graphs that result from parsing.
@rtype: `list` of `pydot.Dot` | Below is the the instruction that describes the task:
### Input:
Load graphs from DOT file at `path`.
@param path: to DOT file
@param encoding: as passed to `io.open`.
For example, `'utf-8'`.
@return: Graphs that result from parsing.
@rtype: `list` of `pydot.Dot`
### Response:
def graph_from_dot_file(path, encoding=None):
"""Load graphs from DOT file at `path`.
@param path: to DOT file
@param encoding: as passed to `io.open`.
For example, `'utf-8'`.
@return: Graphs that result from parsing.
@rtype: `list` of `pydot.Dot`
"""
with io.open(path, 'rt', encoding=encoding) as f:
s = f.read()
if not PY3:
s = unicode(s)
graphs = graph_from_dot_data(s)
return graphs |
def _compile_prefixes(self):
'''
Create a dict of all OS prefixes and their compiled regexs
'''
self.compiled_prefixes = {}
for dev_os, os_config in self.config.items():
if not os_config:
continue
self.compiled_prefixes[dev_os] = []
for prefix in os_config.get('prefixes', []):
values = prefix.get('values', {})
line = prefix.get('line', '')
if prefix.get('__python_fun__'):
self.compiled_prefixes[dev_os].append({
'__python_fun__': prefix['__python_fun__'],
'__python_mod__': prefix['__python_mod__']
})
continue # if python profiler defined for this prefix,
# no need to go further, but jump to the next prefix
# Add 'pri' and 'message' to the line, and values
line = '{{pri}}{}{{message}}'.format(line)
# PRI https://tools.ietf.org/html/rfc5424#section-6.2.1
values['pri'] = r'\<(\d+)\>'
values['message'] = '(.*)'
# We will now figure out which position each value is in so we can use it with the match statement
position = {}
for key in values.keys():
position[line.find('{' + key + '}')] = key
sorted_position = {}
for i, elem in enumerate(sorted(position.items())):
sorted_position[elem[1]] = i + 1
# Escape the line, then remove the escape for the curly bracets so they can be used when formatting
escaped = re.escape(line).replace(r'\{', '{').replace(r'\}', '}')
# Replace a whitespace with \s+
escaped = escaped.replace(r'\ ', r'\s+')
self.compiled_prefixes[dev_os].append({
'prefix': re.compile(escaped.format(**values)),
'prefix_positions': sorted_position,
'raw_prefix': escaped.format(**values),
'values': values
}) | Create a dict of all OS prefixes and their compiled regexs | Below is the the instruction that describes the task:
### Input:
Create a dict of all OS prefixes and their compiled regexs
### Response:
def _compile_prefixes(self):
'''
Create a dict of all OS prefixes and their compiled regexs
'''
self.compiled_prefixes = {}
for dev_os, os_config in self.config.items():
if not os_config:
continue
self.compiled_prefixes[dev_os] = []
for prefix in os_config.get('prefixes', []):
values = prefix.get('values', {})
line = prefix.get('line', '')
if prefix.get('__python_fun__'):
self.compiled_prefixes[dev_os].append({
'__python_fun__': prefix['__python_fun__'],
'__python_mod__': prefix['__python_mod__']
})
continue # if python profiler defined for this prefix,
# no need to go further, but jump to the next prefix
# Add 'pri' and 'message' to the line, and values
line = '{{pri}}{}{{message}}'.format(line)
# PRI https://tools.ietf.org/html/rfc5424#section-6.2.1
values['pri'] = r'\<(\d+)\>'
values['message'] = '(.*)'
# We will now figure out which position each value is in so we can use it with the match statement
position = {}
for key in values.keys():
position[line.find('{' + key + '}')] = key
sorted_position = {}
for i, elem in enumerate(sorted(position.items())):
sorted_position[elem[1]] = i + 1
# Escape the line, then remove the escape for the curly bracets so they can be used when formatting
escaped = re.escape(line).replace(r'\{', '{').replace(r'\}', '}')
# Replace a whitespace with \s+
escaped = escaped.replace(r'\ ', r'\s+')
self.compiled_prefixes[dev_os].append({
'prefix': re.compile(escaped.format(**values)),
'prefix_positions': sorted_position,
'raw_prefix': escaped.format(**values),
'values': values
}) |
def read_limits(self, limit=None, page_size=None):
"""
Takes a limit on the max number of records to read and a max page_size
and calculates the max number of pages to read.
:param int limit: Max number of records to read.
:param int page_size: Max page size.
:return dict: A dictionary of paging limits.
"""
page_limit = values.unset
if limit is not None:
if page_size is None:
page_size = limit
page_limit = int(ceil(limit / float(page_size)))
return {
'limit': limit or values.unset,
'page_size': page_size or values.unset,
'page_limit': page_limit,
} | Takes a limit on the max number of records to read and a max page_size
and calculates the max number of pages to read.
:param int limit: Max number of records to read.
:param int page_size: Max page size.
:return dict: A dictionary of paging limits. | Below is the the instruction that describes the task:
### Input:
Takes a limit on the max number of records to read and a max page_size
and calculates the max number of pages to read.
:param int limit: Max number of records to read.
:param int page_size: Max page size.
:return dict: A dictionary of paging limits.
### Response:
def read_limits(self, limit=None, page_size=None):
"""
Takes a limit on the max number of records to read and a max page_size
and calculates the max number of pages to read.
:param int limit: Max number of records to read.
:param int page_size: Max page size.
:return dict: A dictionary of paging limits.
"""
page_limit = values.unset
if limit is not None:
if page_size is None:
page_size = limit
page_limit = int(ceil(limit / float(page_size)))
return {
'limit': limit or values.unset,
'page_size': page_size or values.unset,
'page_limit': page_limit,
} |
def container_rename_folder(object_id, input_params={}, always_retry=False, **kwargs):
"""
Invokes the /container-xxxx/renameFolder API method.
For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Folders-and-Deletion#API-method%3A-%2Fclass-xxxx%2FrenameFolder
"""
return DXHTTPRequest('/%s/renameFolder' % object_id, input_params, always_retry=always_retry, **kwargs) | Invokes the /container-xxxx/renameFolder API method.
For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Folders-and-Deletion#API-method%3A-%2Fclass-xxxx%2FrenameFolder | Below is the the instruction that describes the task:
### Input:
Invokes the /container-xxxx/renameFolder API method.
For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Folders-and-Deletion#API-method%3A-%2Fclass-xxxx%2FrenameFolder
### Response:
def container_rename_folder(object_id, input_params={}, always_retry=False, **kwargs):
"""
Invokes the /container-xxxx/renameFolder API method.
For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Folders-and-Deletion#API-method%3A-%2Fclass-xxxx%2FrenameFolder
"""
return DXHTTPRequest('/%s/renameFolder' % object_id, input_params, always_retry=always_retry, **kwargs) |
def _clean_post_content(blog_url, content):
"""
Replace import path with something relative to blog.
"""
content = re.sub(
"<img.src=\"%s(.*)\"" % blog_url,
lambda s: "<img src=\"%s\"" % _get_relative_upload(s.groups(1)[0]),
content)
return content | Replace import path with something relative to blog. | Below is the the instruction that describes the task:
### Input:
Replace import path with something relative to blog.
### Response:
def _clean_post_content(blog_url, content):
"""
Replace import path with something relative to blog.
"""
content = re.sub(
"<img.src=\"%s(.*)\"" % blog_url,
lambda s: "<img src=\"%s\"" % _get_relative_upload(s.groups(1)[0]),
content)
return content |
def clear_search_defaults(self, args=None):
"""
Clear all search defaults specified by the list of parameter names
given as ``args``. If ``args`` is not given, then clear all existing
search defaults.
Examples::
conn.set_search_defaults(scope=ldap.SCOPE_BASE, attrs=['cn'])
conn.clear_search_defaults(['scope'])
conn.clear_search_defaults()
"""
if args is None:
self._search_defaults.clear()
else:
for arg in args:
if arg in self._search_defaults:
del self._search_defaults[arg] | Clear all search defaults specified by the list of parameter names
given as ``args``. If ``args`` is not given, then clear all existing
search defaults.
Examples::
conn.set_search_defaults(scope=ldap.SCOPE_BASE, attrs=['cn'])
conn.clear_search_defaults(['scope'])
conn.clear_search_defaults() | Below is the the instruction that describes the task:
### Input:
Clear all search defaults specified by the list of parameter names
given as ``args``. If ``args`` is not given, then clear all existing
search defaults.
Examples::
conn.set_search_defaults(scope=ldap.SCOPE_BASE, attrs=['cn'])
conn.clear_search_defaults(['scope'])
conn.clear_search_defaults()
### Response:
def clear_search_defaults(self, args=None):
"""
Clear all search defaults specified by the list of parameter names
given as ``args``. If ``args`` is not given, then clear all existing
search defaults.
Examples::
conn.set_search_defaults(scope=ldap.SCOPE_BASE, attrs=['cn'])
conn.clear_search_defaults(['scope'])
conn.clear_search_defaults()
"""
if args is None:
self._search_defaults.clear()
else:
for arg in args:
if arg in self._search_defaults:
del self._search_defaults[arg] |
def _mapFuture(callable_, *iterables):
"""Similar to the built-in map function, but each of its
iteration will spawn a separate independent parallel Future that will run
either locally or remotely as `callable(*args)`.
:param callable: Any callable object (function or class object with *__call__*
method); this object will be called to execute each Future.
:param iterables: A tuple of iterable objects; each will be zipped
to form an iterable of arguments tuples that will be passed to the
callable object as a separate Future.
:returns: A list of Future objects, each corresponding to an iteration of
map.
On return, the Futures are pending execution locally, but may also be
transfered remotely depending on global load. Execution may be carried on
with any further computations. To retrieve the map results, you need to
either wait for or join with the spawned Futures. See functions waitAny,
waitAll, or joinAll. Alternatively, You may also use functions mapWait or
mapJoin that will wait or join before returning."""
childrenList = []
for args in zip(*iterables):
childrenList.append(submit(callable_, *args))
return childrenList | Similar to the built-in map function, but each of its
iteration will spawn a separate independent parallel Future that will run
either locally or remotely as `callable(*args)`.
:param callable: Any callable object (function or class object with *__call__*
method); this object will be called to execute each Future.
:param iterables: A tuple of iterable objects; each will be zipped
to form an iterable of arguments tuples that will be passed to the
callable object as a separate Future.
:returns: A list of Future objects, each corresponding to an iteration of
map.
On return, the Futures are pending execution locally, but may also be
transfered remotely depending on global load. Execution may be carried on
with any further computations. To retrieve the map results, you need to
either wait for or join with the spawned Futures. See functions waitAny,
waitAll, or joinAll. Alternatively, You may also use functions mapWait or
mapJoin that will wait or join before returning. | Below is the the instruction that describes the task:
### Input:
Similar to the built-in map function, but each of its
iteration will spawn a separate independent parallel Future that will run
either locally or remotely as `callable(*args)`.
:param callable: Any callable object (function or class object with *__call__*
method); this object will be called to execute each Future.
:param iterables: A tuple of iterable objects; each will be zipped
to form an iterable of arguments tuples that will be passed to the
callable object as a separate Future.
:returns: A list of Future objects, each corresponding to an iteration of
map.
On return, the Futures are pending execution locally, but may also be
transfered remotely depending on global load. Execution may be carried on
with any further computations. To retrieve the map results, you need to
either wait for or join with the spawned Futures. See functions waitAny,
waitAll, or joinAll. Alternatively, You may also use functions mapWait or
mapJoin that will wait or join before returning.
### Response:
def _mapFuture(callable_, *iterables):
"""Similar to the built-in map function, but each of its
iteration will spawn a separate independent parallel Future that will run
either locally or remotely as `callable(*args)`.
:param callable: Any callable object (function or class object with *__call__*
method); this object will be called to execute each Future.
:param iterables: A tuple of iterable objects; each will be zipped
to form an iterable of arguments tuples that will be passed to the
callable object as a separate Future.
:returns: A list of Future objects, each corresponding to an iteration of
map.
On return, the Futures are pending execution locally, but may also be
transfered remotely depending on global load. Execution may be carried on
with any further computations. To retrieve the map results, you need to
either wait for or join with the spawned Futures. See functions waitAny,
waitAll, or joinAll. Alternatively, You may also use functions mapWait or
mapJoin that will wait or join before returning."""
childrenList = []
for args in zip(*iterables):
childrenList.append(submit(callable_, *args))
return childrenList |
def disassemble_string(self, lpAddress, code):
"""
Disassemble instructions from a block of binary code.
@type lpAddress: int
@param lpAddress: Memory address where the code was read from.
@type code: str
@param code: Binary code to disassemble.
@rtype: list of tuple( long, int, str, str )
@return: List of tuples. Each tuple represents an assembly instruction
and contains:
- Memory address of instruction.
- Size of instruction in bytes.
- Disassembly line of instruction.
- Hexadecimal dump of instruction.
@raise NotImplementedError:
No compatible disassembler was found for the current platform.
"""
try:
disasm = self.__disasm
except AttributeError:
disasm = self.__disasm = Disassembler( self.get_arch() )
return disasm.decode(lpAddress, code) | Disassemble instructions from a block of binary code.
@type lpAddress: int
@param lpAddress: Memory address where the code was read from.
@type code: str
@param code: Binary code to disassemble.
@rtype: list of tuple( long, int, str, str )
@return: List of tuples. Each tuple represents an assembly instruction
and contains:
- Memory address of instruction.
- Size of instruction in bytes.
- Disassembly line of instruction.
- Hexadecimal dump of instruction.
@raise NotImplementedError:
No compatible disassembler was found for the current platform. | Below is the the instruction that describes the task:
### Input:
Disassemble instructions from a block of binary code.
@type lpAddress: int
@param lpAddress: Memory address where the code was read from.
@type code: str
@param code: Binary code to disassemble.
@rtype: list of tuple( long, int, str, str )
@return: List of tuples. Each tuple represents an assembly instruction
and contains:
- Memory address of instruction.
- Size of instruction in bytes.
- Disassembly line of instruction.
- Hexadecimal dump of instruction.
@raise NotImplementedError:
No compatible disassembler was found for the current platform.
### Response:
def disassemble_string(self, lpAddress, code):
"""
Disassemble instructions from a block of binary code.
@type lpAddress: int
@param lpAddress: Memory address where the code was read from.
@type code: str
@param code: Binary code to disassemble.
@rtype: list of tuple( long, int, str, str )
@return: List of tuples. Each tuple represents an assembly instruction
and contains:
- Memory address of instruction.
- Size of instruction in bytes.
- Disassembly line of instruction.
- Hexadecimal dump of instruction.
@raise NotImplementedError:
No compatible disassembler was found for the current platform.
"""
try:
disasm = self.__disasm
except AttributeError:
disasm = self.__disasm = Disassembler( self.get_arch() )
return disasm.decode(lpAddress, code) |
def cmd_fernet_genkey(writecfg):
"""Generate a new Fernet Key, optionally write it to ~/.habu.json
Example:
\b
$ habu.fernet.genkey
xgvWCIvjwe9Uq7NBvwO796iI4dsGD623QOT9GWqnuhg=
"""
key = Fernet.generate_key()
print(key.decode())
if writecfg:
habucfg = loadcfg(environment=False)
habucfg['FERNET_KEY'] = key.decode()
with Path('~/.habu.json').expanduser().open('w') as f:
f.write(json.dumps(habucfg, indent=4, sort_keys=True)) | Generate a new Fernet Key, optionally write it to ~/.habu.json
Example:
\b
$ habu.fernet.genkey
xgvWCIvjwe9Uq7NBvwO796iI4dsGD623QOT9GWqnuhg= | Below is the the instruction that describes the task:
### Input:
Generate a new Fernet Key, optionally write it to ~/.habu.json
Example:
\b
$ habu.fernet.genkey
xgvWCIvjwe9Uq7NBvwO796iI4dsGD623QOT9GWqnuhg=
### Response:
def cmd_fernet_genkey(writecfg):
"""Generate a new Fernet Key, optionally write it to ~/.habu.json
Example:
\b
$ habu.fernet.genkey
xgvWCIvjwe9Uq7NBvwO796iI4dsGD623QOT9GWqnuhg=
"""
# Fernet.generate_key() returns bytes; decode to str for printing and
# for JSON serialization below.
key = Fernet.generate_key()
print(key.decode())
if writecfg:
# Load the existing config without environment-variable overrides so
# the new key is merged in rather than replacing other settings.
habucfg = loadcfg(environment=False)
habucfg['FERNET_KEY'] = key.decode()
with Path('~/.habu.json').expanduser().open('w') as f:
f.write(json.dumps(habucfg, indent=4, sort_keys=True))
def iso8601_date(d):
"""
Return a string representation of a date that the Twilio API understands
Format is YYYY-MM-DD. Returns None if d is not a string, datetime, or date
"""
if d == values.unset:
return d
elif isinstance(d, datetime.datetime):
return str(d.date())
elif isinstance(d, datetime.date):
return str(d)
elif isinstance(d, str):
return d | Return a string representation of a date that the Twilio API understands
Format is YYYY-MM-DD. Returns None if d is not a string, datetime, or date | Below is the instruction that describes the task:
### Input:
Return a string representation of a date that the Twilio API understands
Format is YYYY-MM-DD. Returns None if d is not a string, datetime, or date
### Response:
def iso8601_date(d):
    """
    Return a string representation of a date that the Twilio API understands
    Format is YYYY-MM-DD. Returns None if d is not a string, datetime, or date
    """
    # The unset sentinel passes through untouched.
    if d == values.unset:
        return d
    # Check datetime before date: datetime is a date subclass.
    if isinstance(d, datetime.datetime):
        return str(d.date())
    if isinstance(d, datetime.date):
        return str(d)
    if isinstance(d, str):
        return d
    # Anything else is unsupported.
    return None
def get_items(self) -> Iterator[StoryItem]:
"""Retrieve all items from a story."""
yield from (StoryItem(self._context, item, self.owner_profile) for item in reversed(self._node['items'])) | Retrieve all items from a story. | Below is the instruction that describes the task:
### Input:
Retrieve all items from a story.
### Response:
def get_items(self) -> Iterator[StoryItem]:
    """Yield every item of the story, iterating the node's item list in reverse."""
    for raw_item in reversed(self._node['items']):
        yield StoryItem(self._context, raw_item, self.owner_profile)
def main(output):
"""
Generate a c7n-org gcp projects config file
"""
client = Session().client('cloudresourcemanager', 'v1', 'projects')
results = []
for page in client.execute_paged_query('list', {}):
for project in page.get('projects', []):
if project['lifecycleState'] != 'ACTIVE':
continue
project_info = {
'project_id': project['projectId'],
'name': project['name'],
}
if 'labels' in project:
project_info['tags'] = [
'label:%s:%s' % (k, v) for k, v in project.get('labels', {}).items()]
results.append(project_info)
output.write(
yaml.safe_dump({'projects': results}, default_flow_style=False)) | Generate a c7n-org gcp projects config file | Below is the instruction that describes the task:
### Input:
Generate a c7n-org gcp projects config file
### Response:
def main(output):
"""
Generate a c7n-org gcp projects config file
"""
client = Session().client('cloudresourcemanager', 'v1', 'projects')
results = []
# Walk every page of the project listing; only ACTIVE projects are kept.
for page in client.execute_paged_query('list', {}):
for project in page.get('projects', []):
if project['lifecycleState'] != 'ACTIVE':
continue
project_info = {
'project_id': project['projectId'],
'name': project['name'],
}
if 'labels' in project:
# Expose GCP labels as c7n-org tags: "label:<key>:<value>".
project_info['tags'] = [
'label:%s:%s' % (k, v) for k, v in project.get('labels', {}).items()]
results.append(project_info)
output.write(
yaml.safe_dump({'projects': results}, default_flow_style=False))
def comment_create(self, post_id, body, do_not_bump_post=None):
"""Action to lets you create a comment (Requires login).
Parameters:
post_id (int):
body (str):
do_not_bump_post (bool): Set to 1 if you do not want the post to be
bumped to the top of the comment listing.
"""
params = {
'comment[post_id]': post_id,
'comment[body]': body,
'comment[do_not_bump_post]': do_not_bump_post
}
return self._get('comments.json', params, 'POST', auth=True) | Action to lets you create a comment (Requires login).
Parameters:
post_id (int):
body (str):
do_not_bump_post (bool): Set to 1 if you do not want the post to be
bumped to the top of the comment listing. | Below is the instruction that describes the task:
### Input:
Action to lets you create a comment (Requires login).
Parameters:
post_id (int):
body (str):
do_not_bump_post (bool): Set to 1 if you do not want the post to be
bumped to the top of the comment listing.
### Response:
def comment_create(self, post_id, body, do_not_bump_post=None):
    """Create a comment on a post (requires login).

    Parameters:
        post_id (int): ID of the post to comment on.
        body (str): Comment text.
        do_not_bump_post (bool): Set to 1 if you do not want the post to be
            bumped to the top of the comment listing.
    """
    payload = {
        'comment[post_id]': post_id,
        'comment[body]': body,
        'comment[do_not_bump_post]': do_not_bump_post,
    }
    return self._get('comments.json', payload, 'POST', auth=True)
def set_meta(mcs, bases, attr):
"""
Get all of the ``Meta`` classes from bases and combine them with this
class.
Pops or creates ``Meta`` from attributes, combines all bases, adds
``_meta`` to attributes with all meta
:param bases: bases of this class
:param attr: class attributes
:return: attributes with ``Meta`` class from combined parents
"""
# pop the meta class from the attributes
meta = attr.pop(mcs._meta_cls, types.ClassType(mcs._meta_cls, (), {}))
# get a list of the meta public class attributes
meta_attrs = get_public_attributes(meta)
# check all bases for meta
for base in bases:
base_meta = getattr(base, mcs._meta_cls, None)
# skip if base has no meta
if base_meta is None:
continue
# loop over base meta
for a in get_public_attributes(base_meta, as_list=False):
# skip if already in meta
if a in meta_attrs:
continue
# copy meta-option attribute from base
setattr(meta, a, getattr(base_meta, a))
attr[mcs._meta_attr] = meta # set _meta combined from bases
return attr | Get all of the ``Meta`` classes from bases and combine them with this
class.
Pops or creates ``Meta`` from attributes, combines all bases, adds
``_meta`` to attributes with all meta
:param bases: bases of this class
:param attr: class attributes
:return: attributes with ``Meta`` class from combined parents | Below is the instruction that describes the task:
### Input:
Get all of the ``Meta`` classes from bases and combine them with this
class.
Pops or creates ``Meta`` from attributes, combines all bases, adds
``_meta`` to attributes with all meta
:param bases: bases of this class
:param attr: class attributes
:return: attributes with ``Meta`` class from combined parents
### Response:
def set_meta(mcs, bases, attr):
"""
Get all of the ``Meta`` classes from bases and combine them with this
class.
Pops or creates ``Meta`` from attributes, combines all bases, adds
``_meta`` to attributes with all meta
:param bases: bases of this class
:param attr: class attributes
:return: attributes with ``Meta`` class from combined parents
"""
# pop the meta class from the attributes
# NOTE(review): types.ClassType exists only on Python 2 -- this helper
# cannot run on Python 3 as written.
meta = attr.pop(mcs._meta_cls, types.ClassType(mcs._meta_cls, (), {}))
# get a list of the meta public class attributes
meta_attrs = get_public_attributes(meta)
# check all bases for meta
for base in bases:
base_meta = getattr(base, mcs._meta_cls, None)
# skip if base has no meta
if base_meta is None:
continue
# loop over base meta
for a in get_public_attributes(base_meta, as_list=False):
# skip if already in meta
# NOTE(review): meta_attrs is never updated with copied names, so a
# later base can overwrite an attribute copied from an earlier base
# -- confirm that is the intended precedence.
if a in meta_attrs:
continue
# copy meta-option attribute from base
setattr(meta, a, getattr(base_meta, a))
attr[mcs._meta_attr] = meta # set _meta combined from bases
return attr
def _match_national_number(number, number_desc, allow_prefix_match):
"""Returns whether the given national number (a string containing only decimal digits) matches
the national number pattern defined in the given PhoneNumberDesc object.
"""
# We don't want to consider it a prefix match when matching non-empty input against an empty
# pattern.
if number_desc is None or number_desc.national_number_pattern is None or len(number_desc.national_number_pattern) == 0:
return False
return _match(number, re.compile(number_desc.national_number_pattern), allow_prefix_match) | Returns whether the given national number (a string containing only decimal digits) matches
the national number pattern defined in the given PhoneNumberDesc object. | Below is the instruction that describes the task:
### Input:
Returns whether the given national number (a string containing only decimal digits) matches
the national number pattern defined in the given PhoneNumberDesc object.
### Response:
def _match_national_number(number, number_desc, allow_prefix_match):
    """Check a national number (a digits-only string) against the national
    number pattern of the given PhoneNumberDesc object, if any.
    """
    # An absent or empty pattern never matches: non-empty input must not
    # count as a "prefix match" against an empty pattern.
    if number_desc is None:
        return False
    pattern = number_desc.national_number_pattern
    if pattern is None or len(pattern) == 0:
        return False
    return _match(number, re.compile(pattern), allow_prefix_match)
def ExportingCursorWrapper(cursor_class, alias, vendor):
"""Returns a CursorWrapper class that knows its database's alias and
vendor name.
"""
class CursorWrapper(cursor_class):
"""Extends the base CursorWrapper to count events."""
def execute(self, *args, **kwargs):
execute_total.labels(alias, vendor).inc()
with ExceptionCounterByType(errors_total, extra_labels={
'alias': alias, 'vendor': vendor}):
return super(CursorWrapper, self).execute(*args, **kwargs)
def executemany(self, query, param_list, *args, **kwargs):
execute_total.labels(alias, vendor).inc(len(param_list))
execute_many_total.labels(alias, vendor).inc(len(param_list))
with ExceptionCounterByType(errors_total, extra_labels={
'alias': alias, 'vendor': vendor}):
return super(CursorWrapper, self).executemany(
query, param_list, *args, **kwargs)
return CursorWrapper | Returns a CursorWrapper class that knows its database's alias and
vendor name. | Below is the instruction that describes the task:
### Input:
Returns a CursorWrapper class that knows its database's alias and
vendor name.
### Response:
def ExportingCursorWrapper(cursor_class, alias, vendor):
"""Returns a CursorWrapper class that knows its database's alias and
vendor name.
"""
# The returned class closes over alias/vendor, so every metric it emits
# is labelled with the database it belongs to.
class CursorWrapper(cursor_class):
"""Extends the base CursorWrapper to count events."""
def execute(self, *args, **kwargs):
# One execute() call counts as one operation.
execute_total.labels(alias, vendor).inc()
with ExceptionCounterByType(errors_total, extra_labels={
'alias': alias, 'vendor': vendor}):
return super(CursorWrapper, self).execute(*args, **kwargs)
def executemany(self, query, param_list, *args, **kwargs):
# executemany() counts one execution per parameter set, in both the
# overall and the executemany-specific counters.
execute_total.labels(alias, vendor).inc(len(param_list))
execute_many_total.labels(alias, vendor).inc(len(param_list))
with ExceptionCounterByType(errors_total, extra_labels={
'alias': alias, 'vendor': vendor}):
return super(CursorWrapper, self).executemany(
query, param_list, *args, **kwargs)
return CursorWrapper
def markers_pass(self, req, extras=None):
"""
Evaluate markers for req against each extra that
demanded it.
Return False if the req has a marker and fails
evaluation. Otherwise, return True.
"""
extra_evals = (
req.marker.evaluate({'extra': extra})
for extra in self.get(req, ()) + (extras or (None,))
)
return not req.marker or any(extra_evals) | Evaluate markers for req against each extra that
demanded it.
Return False if the req has a marker and fails
evaluation. Otherwise, return True. | Below is the instruction that describes the task:
### Input:
Evaluate markers for req against each extra that
demanded it.
Return False if the req has a marker and fails
evaluation. Otherwise, return True.
### Response:
def markers_pass(self, req, extras=None):
"""
Evaluate markers for req against each extra that
demanded it.
Return False if the req has a marker and fails
evaluation. Otherwise, return True.
"""
# self.get(req, ()) is presumably the tuple of extras that demanded this
# req; a (None,) sentinel is appended when no caller extras are given so
# the marker is evaluated at least once. The generator is lazy, so any()
# stops at the first extra whose marker evaluates true.
extra_evals = (
req.marker.evaluate({'extra': extra})
for extra in self.get(req, ()) + (extras or (None,))
)
return not req.marker or any(extra_evals)
def gradient(self):
"""Gradient operator of the functional."""
functional = self
class KLCrossEntCCGradient(Operator):
"""The gradient operator of this functional."""
def __init__(self):
"""Initialize a new instance."""
super(KLCrossEntCCGradient, self).__init__(
functional.domain, functional.domain, linear=False)
def _call(self, x):
"""Apply the gradient operator to the given point."""
if functional.prior is None:
return self.domain.element(np.exp(x))
else:
return functional.prior * np.exp(x)
return KLCrossEntCCGradient() | Gradient operator of the functional. | Below is the instruction that describes the task:
### Input:
Gradient operator of the functional.
### Response:
def gradient(self):
"""Gradient operator of the functional."""
# Capture the enclosing functional so the nested Operator can reach its
# domain and prior.
functional = self
class KLCrossEntCCGradient(Operator):
"""The gradient operator of this functional."""
def __init__(self):
"""Initialize a new instance."""
super(KLCrossEntCCGradient, self).__init__(
functional.domain, functional.domain, linear=False)
def _call(self, x):
"""Apply the gradient operator to the given point."""
# Gradient value is exp(x), scaled element-wise by the prior
# when one is set (a prior of None acts as 1).
if functional.prior is None:
return self.domain.element(np.exp(x))
else:
return functional.prior * np.exp(x)
return KLCrossEntCCGradient()
def check_proxy_code(self, address) -> bool:
"""
Check if proxy is valid
:param address: address of the proxy
:return: True if proxy is valid, False otherwise
"""
deployed_proxy_code = self.w3.eth.getCode(address)
proxy_code_fns = (get_paying_proxy_deployed_bytecode,
get_proxy_factory_contract(self.w3,
self.proxy_factory_address).functions.proxyRuntimeCode().call)
for proxy_code_fn in proxy_code_fns:
if deployed_proxy_code == proxy_code_fn():
return True
return False | Check if proxy is valid
:param address: address of the proxy
:return: True if proxy is valid, False otherwise | Below is the instruction that describes the task:
### Input:
Check if proxy is valid
:param address: address of the proxy
:return: True if proxy is valid, False otherwise
### Response:
def check_proxy_code(self, address) -> bool:
"""
Check if proxy is valid
:param address: address of the proxy
:return: True if proxy is valid, False otherwise
"""
deployed_proxy_code = self.w3.eth.getCode(address)
# Each entry is a zero-argument callable producing a known-good proxy
# bytecode: the static paying-proxy bytecode and the proxy factory's
# runtime code (fetched on-chain via .call).
proxy_code_fns = (get_paying_proxy_deployed_bytecode,
get_proxy_factory_contract(self.w3,
self.proxy_factory_address).functions.proxyRuntimeCode().call)
# Accept the address if its deployed code matches any known bytecode.
for proxy_code_fn in proxy_code_fns:
if deployed_proxy_code == proxy_code_fn():
return True
return False
def remove_opinion_layer(self):
"""
Removes the opinion layer (if exists) of the object (in memory)
"""
if self.opinion_layer is not None:
this_node = self.opinion_layer.get_node()
self.root.remove(this_node)
self.opinion_layer = None
if self.header is not None:
self.header.remove_lp('opinions') | Removes the opinion layer (if exists) of the object (in memory) | Below is the instruction that describes the task:
### Input:
Removes the opinion layer (if exists) of the object (in memory)
### Response:
def remove_opinion_layer(self):
    """
    Removes the opinion layer (if exists) of the object (in memory)
    """
    layer = self.opinion_layer
    if layer is not None:
        # Detach the layer's node from the document root and drop the
        # cached reference.
        self.root.remove(layer.get_node())
        self.opinion_layer = None
    if self.header is not None:
        # Keep the header's linguistic-processor entries in sync.
        self.header.remove_lp('opinions')
def is_valid_hendecasyllables(self, scanned_line: str) -> bool:
"""Determine if a scansion pattern is one of the valid Hendecasyllables metrical patterns
:param scanned_line: a line containing a sequence of stressed and unstressed syllables
:return bool
>>> print(MetricalValidator().is_valid_hendecasyllables("-U-UU-U-U-U"))
True
"""
line = scanned_line.replace(self.constants.FOOT_SEPARATOR, "")
line = line.replace(" ", "")
if len(line) < 11:
return False
line = line[:-1] + self.constants.OPTIONAL_ENDING
return self.VALID_HENDECASYLLABLES.__contains__(line) | Determine if a scansion pattern is one of the valid Hendecasyllables metrical patterns
:param scanned_line: a line containing a sequence of stressed and unstressed syllables
:return bool
>>> print(MetricalValidator().is_valid_hendecasyllables("-U-UU-U-U-U"))
True | Below is the instruction that describes the task:
### Input:
Determine if a scansion pattern is one of the valid Hendecasyllables metrical patterns
:param scanned_line: a line containing a sequence of stressed and unstressed syllables
:return bool
>>> print(MetricalValidator().is_valid_hendecasyllables("-U-UU-U-U-U"))
True
### Response:
def is_valid_hendecasyllables(self, scanned_line: str) -> bool:
    """Determine if a scansion pattern is one of the valid Hendecasyllables metrical patterns

    :param scanned_line: a line containing a sequence of stressed and unstressed syllables
    :return bool

    >>> print(MetricalValidator().is_valid_hendecasyllables("-U-UU-U-U-U"))
    True
    """
    # Strip foot separators and spaces so only the syllable marks remain.
    pattern = scanned_line.replace(self.constants.FOOT_SEPARATOR, "").replace(" ", "")
    if len(pattern) < 11:
        return False
    # Normalize the final syllable to the anceps marker before lookup.
    candidate = pattern[:-1] + self.constants.OPTIONAL_ENDING
    return candidate in self.VALID_HENDECASYLLABLES
def int80(self, cpu):
"""
32 bit dispatcher.
:param cpu: current CPU.
_terminate, transmit, receive, fdwait, allocate, deallocate and random
"""
syscalls = {0x00000001: self.sys_terminate,
0x00000002: self.sys_transmit,
0x00000003: self.sys_receive,
0x00000004: self.sys_fdwait,
0x00000005: self.sys_allocate,
0x00000006: self.sys_deallocate,
0x00000007: self.sys_random,
}
if cpu.EAX not in syscalls.keys():
raise TerminateState(f"32 bit DECREE system call number {cpu.EAX} Not Implemented")
func = syscalls[cpu.EAX]
logger.debug("SYSCALL32: %s (nargs: %d)", func.__name__, func.__code__.co_argcount)
nargs = func.__code__.co_argcount
args = [cpu, cpu.EBX, cpu.ECX, cpu.EDX, cpu.ESI, cpu.EDI, cpu.EBP]
cpu.EAX = func(*args[:nargs - 1]) | 32 bit dispatcher.
:param cpu: current CPU.
_terminate, transmit, receive, fdwait, allocate, deallocate and random | Below is the instruction that describes the task:
### Input:
32 bit dispatcher.
:param cpu: current CPU.
_terminate, transmit, receive, fdwait, allocate, deallocate and random
### Response:
def int80(self, cpu):
"""
32 bit dispatcher.
:param cpu: current CPU.
_terminate, transmit, receive, fdwait, allocate, deallocate and random
"""
# DECREE syscall number (in EAX) -> bound handler method.
syscalls = {0x00000001: self.sys_terminate,
0x00000002: self.sys_transmit,
0x00000003: self.sys_receive,
0x00000004: self.sys_fdwait,
0x00000005: self.sys_allocate,
0x00000006: self.sys_deallocate,
0x00000007: self.sys_random,
}
if cpu.EAX not in syscalls.keys():
raise TerminateState(f"32 bit DECREE system call number {cpu.EAX} Not Implemented")
func = syscalls[cpu.EAX]
logger.debug("SYSCALL32: %s (nargs: %d)", func.__name__, func.__code__.co_argcount)
# co_argcount includes `self`, which the bound method supplies
# implicitly; the `nargs - 1` slice therefore passes cpu plus as many
# register values as the handler declares. Result goes back in EAX.
nargs = func.__code__.co_argcount
args = [cpu, cpu.EBX, cpu.ECX, cpu.EDX, cpu.ESI, cpu.EDI, cpu.EBP]
cpu.EAX = func(*args[:nargs - 1])
def aes_cbc_no_padding_decrypt(key, data, iv):
"""
Decrypts AES ciphertext in CBC mode using a 128, 192 or 256 bit key and no
padding.
:param key:
The encryption key - a byte string either 16, 24 or 32 bytes long
:param data:
The ciphertext - a byte string
:param iv:
The initialization vector - a byte string 16-bytes long
:raises:
ValueError - when any of the parameters contain an invalid value
TypeError - when any of the parameters are of the wrong type
OSError - when an error is returned by the OS crypto library
:return:
A byte string of the plaintext
"""
if len(key) not in [16, 24, 32]:
raise ValueError(pretty_message(
'''
key must be either 16, 24 or 32 bytes (128, 192 or 256 bits)
long - is %s
''',
len(key)
))
if len(iv) != 16:
raise ValueError(pretty_message(
'''
iv must be 16 bytes long - is %s
''',
len(iv)
))
return _decrypt('aes', key, data, iv, False) | Decrypts AES ciphertext in CBC mode using a 128, 192 or 256 bit key and no
padding.
:param key:
The encryption key - a byte string either 16, 24 or 32 bytes long
:param data:
The ciphertext - a byte string
:param iv:
The initialization vector - a byte string 16-bytes long
:raises:
ValueError - when any of the parameters contain an invalid value
TypeError - when any of the parameters are of the wrong type
OSError - when an error is returned by the OS crypto library
:return:
A byte string of the plaintext | Below is the instruction that describes the task:
### Input:
Decrypts AES ciphertext in CBC mode using a 128, 192 or 256 bit key and no
padding.
:param key:
The encryption key - a byte string either 16, 24 or 32 bytes long
:param data:
The ciphertext - a byte string
:param iv:
The initialization vector - a byte string 16-bytes long
:raises:
ValueError - when any of the parameters contain an invalid value
TypeError - when any of the parameters are of the wrong type
OSError - when an error is returned by the OS crypto library
:return:
A byte string of the plaintext
### Response:
def aes_cbc_no_padding_decrypt(key, data, iv):
"""
Decrypts AES ciphertext in CBC mode using a 128, 192 or 256 bit key and no
padding.
:param key:
The encryption key - a byte string either 16, 24 or 32 bytes long
:param data:
The ciphertext - a byte string
:param iv:
The initialization vector - a byte string 16-bytes long
:raises:
ValueError - when any of the parameters contain an invalid value
TypeError - when any of the parameters are of the wrong type
OSError - when an error is returned by the OS crypto library
:return:
A byte string of the plaintext
"""
# Key length selects AES-128/192/256; the IV must match the 16-byte
# AES block size.
if len(key) not in [16, 24, 32]:
raise ValueError(pretty_message(
'''
key must be either 16, 24 or 32 bytes (128, 192 or 256 bits)
long - is %s
''',
len(key)
))
if len(iv) != 16:
raise ValueError(pretty_message(
'''
iv must be 16 bytes long - is %s
''',
len(iv)
))
# NOTE(review): data length is not validated here to be a multiple of 16;
# presumably _decrypt rejects misaligned ciphertext. The final False
# argument disables padding in the shared _decrypt helper.
return _decrypt('aes', key, data, iv, False)
def heron_class(class_name, lib_jars, extra_jars=None, args=None, java_defines=None):
'''
Execute a heron class given the args and the jars needed for class path
:param class_name:
:param lib_jars:
:param extra_jars:
:param args:
:param java_defines:
:return:
'''
# default optional params to empty list if not provided
if extra_jars is None:
extra_jars = []
if args is None:
args = []
if java_defines is None:
java_defines = []
# Format all java -D options that need to be passed while running
# the class locally.
java_opts = ['-D' + opt for opt in java_defines]
# Construct the command line for the sub process to run
# Because of the way Python execute works,
# the java opts must be passed as part of the list
all_args = [config.get_java_path(), "-client", "-Xmx1g"] + \
java_opts + \
["-cp", config.get_classpath(extra_jars + lib_jars)]
all_args += [class_name] + list(args)
# set heron_config environment variable
heron_env = os.environ.copy()
heron_env['HERON_OPTIONS'] = opts.get_heron_config()
# print the verbose message
Log.debug("Invoking class using command: ``%s''", ' '.join(all_args))
Log.debug("Heron options: {%s}", str(heron_env["HERON_OPTIONS"]))
# invoke the command with subprocess and print error message, if any
process = subprocess.Popen(all_args, env=heron_env, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, bufsize=1)
# stdout message has the information Java program sends back
# stderr message has extra information, such as debugging message
return ProcessResult(process) | Execute a heron class given the args and the jars needed for class path
:param class_name:
:param lib_jars:
:param extra_jars:
:param args:
:param java_defines:
:return: | Below is the instruction that describes the task:
### Input:
Execute a heron class given the args and the jars needed for class path
:param class_name:
:param lib_jars:
:param extra_jars:
:param args:
:param java_defines:
:return:
### Response:
def heron_class(class_name, lib_jars, extra_jars=None, args=None, java_defines=None):
'''
Execute a heron class given the args and the jars needed for class path
:param class_name: fully qualified Java class to run
:param lib_jars: jars always placed on the classpath
:param extra_jars: additional jars prepended to lib_jars (default: [])
:param args: command-line arguments passed to the Java class (default: [])
:param java_defines: java -D option strings, without the -D prefix (default: [])
:return: a ProcessResult wrapping the spawned java subprocess
'''
# default optional params to empty list if not provided
if extra_jars is None:
extra_jars = []
if args is None:
args = []
if java_defines is None:
java_defines = []
# Format all java -D options that need to be passed while running
# the class locally.
java_opts = ['-D' + opt for opt in java_defines]
# Construct the command line for the sub process to run
# Because of the way Python execute works,
# the java opts must be passed as part of the list
all_args = [config.get_java_path(), "-client", "-Xmx1g"] + \
java_opts + \
["-cp", config.get_classpath(extra_jars + lib_jars)]
all_args += [class_name] + list(args)
# set heron_config environment variable
heron_env = os.environ.copy()
heron_env['HERON_OPTIONS'] = opts.get_heron_config()
# print the verbose message
Log.debug("Invoking class using command: ``%s''", ' '.join(all_args))
Log.debug("Heron options: {%s}", str(heron_env["HERON_OPTIONS"]))
# invoke the command with subprocess and print error message, if any
# (bufsize=1 requests line buffering on the pipes)
process = subprocess.Popen(all_args, env=heron_env, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, bufsize=1)
# stdout message has the information Java program sends back
# stderr message has extra information, such as debugging message
return ProcessResult(process)
def run_checked (cmd, ret_ok=(0,), **kwargs):
"""Run command and raise PatoolError on error."""
retcode = run(cmd, **kwargs)
if retcode not in ret_ok:
msg = "Command `%s' returned non-zero exit status %d" % (cmd, retcode)
raise PatoolError(msg)
return retcode | Run command and raise PatoolError on error. | Below is the instruction that describes the task:
### Input:
Run command and raise PatoolError on error.
### Response:
def run_checked(cmd, ret_ok=(0,), **kwargs):
    """Run command and raise PatoolError on error."""
    retcode = run(cmd, **kwargs)
    # Any exit status listed in ret_ok counts as success.
    if retcode in ret_ok:
        return retcode
    raise PatoolError("Command `%s' returned non-zero exit status %d" % (cmd, retcode))
def setup(self, address, port):
"""Connects to server at `address`:`port`.
Connects to a TCP server listening at `address`:`port` that implements
the protocol described in the file "Generic TCP I:O Protocol.md"
@arg address IP or address to connect to.
@arg port port to connect to.
@throw RuntimeError if connection was successful but protocol isn't
supported.
@throw any exception thrown by `socket.socket`'s methods.
"""
address = str(address)
port = int(port)
self._socket = socket.socket()
self._socket.connect((address, port))
self._socket.send(b'HELLO 1.0\n')
with self._socket.makefile() as f:
if f.readline().strip() != 'OK':
raise RuntimeError('Protocol not supported') | Connects to server at `address`:`port`.
Connects to a TCP server listening at `address`:`port` that implements
the protocol described in the file "Generic TCP I:O Protocol.md"
@arg address IP or address to connect to.
@arg port port to connect to.
@throw RuntimeError if connection was successful but protocol isn't
supported.
@throw any exception thrown by `socket.socket`'s methods. | Below is the instruction that describes the task:
### Input:
Connects to server at `address`:`port`.
Connects to a TCP server listening at `address`:`port` that implements
the protocol described in the file "Generic TCP I:O Protocol.md"
@arg address IP or address to connect to.
@arg port port to connect to.
@throw RuntimeError if connection was successful but protocol isn't
supported.
@throw any exception thrown by `socket.socket`'s methods.
### Response:
def setup(self, address, port):
"""Connects to server at `address`:`port`.
Connects to a TCP server listening at `address`:`port` that implements
the protocol described in the file "Generic TCP I:O Protocol.md"
@arg address IP or address to connect to.
@arg port port to connect to.
@throw RuntimeError if connection was successful but protocol isn't
supported.
@throw any exception thrown by `socket.socket`'s methods.
"""
# Normalize the arguments early so a bad port fails before any I/O.
address = str(address)
port = int(port)
self._socket = socket.socket()
self._socket.connect((address, port))
# Handshake: announce the protocol version we speak...
self._socket.send(b'HELLO 1.0\n')
# ...and expect a single "OK" line back from the server.
with self._socket.makefile() as f:
if f.readline().strip() != 'OK':
raise RuntimeError('Protocol not supported')
def _handle_ls(self):
""" Handles the ls subsubcommand. """
try:
arg1 = self.argument(1)
arg2 = self.argument(2)
todos = []
if arg2 == 'to' or arg1 == 'before':
# dep ls 1 to OR dep ls before 1
number = arg1 if arg2 == 'to' else arg2
todo = self.todolist.todo(number)
todos = self.todolist.children(todo)
elif arg1 in {'to', 'after'}:
# dep ls to 1 OR dep ls after 1
number = arg2
todo = self.todolist.todo(number)
todos = self.todolist.parents(todo)
else:
raise InvalidCommandArgument
sorter = Sorter(config().sort_string())
instance_filter = Filter.InstanceFilter(todos)
view = View(sorter, [instance_filter], self.todolist)
self.out(self.printer.print_list(view.todos))
except InvalidTodoException:
self.error("Invalid todo number given.")
except InvalidCommandArgument:
self.error(self.usage()) | Handles the ls subsubcommand. | Below is the instruction that describes the task:
### Input:
Handles the ls subsubcommand.
### Response:
def _handle_ls(self):
""" Handles the ls subsubcommand. """
try:
arg1 = self.argument(1)
arg2 = self.argument(2)
todos = []
if arg2 == 'to' or arg1 == 'before':
# dep ls 1 to OR dep ls before 1
# collect the children of the given todo
number = arg1 if arg2 == 'to' else arg2
todo = self.todolist.todo(number)
todos = self.todolist.children(todo)
elif arg1 in {'to', 'after'}:
# dep ls to 1 OR dep ls after 1
# collect the parents of the given todo
number = arg2
todo = self.todolist.todo(number)
todos = self.todolist.parents(todo)
else:
raise InvalidCommandArgument
# Render only the collected todos, in the configured sort order.
sorter = Sorter(config().sort_string())
instance_filter = Filter.InstanceFilter(todos)
view = View(sorter, [instance_filter], self.todolist)
self.out(self.printer.print_list(view.todos))
except InvalidTodoException:
self.error("Invalid todo number given.")
except InvalidCommandArgument:
self.error(self.usage())
def trifurcate_base(cls, newick):
""" Rewrites a newick string so that the base is a trifurcation
(usually means an unrooted tree) """
t = cls(newick)
t._tree.deroot()
return t.newick | Rewrites a newick string so that the base is a trifurcation
(usually means an unrooted tree) | Below is the instruction that describes the task:
### Input:
Rewrites a newick string so that the base is a trifurcation
(usually means an unrooted tree)
### Response:
def trifurcate_base(cls, newick):
    """Rewrite a newick string so that the base is a trifurcation
    (usually means an unrooted tree)."""
    # Parse, deroot the underlying tree in place, then re-serialize.
    tree = cls(newick)
    tree._tree.deroot()
    return tree.newick
def run(self):
"""
Starts a development server for the zengine application
"""
from zengine.wf_daemon import run_workers, Worker
worker_count = int(self.manager.args.workers or 1)
if not self.manager.args.daemonize:
print("Starting worker(s)")
if worker_count > 1 or self.manager.args.autoreload:
run_workers(worker_count,
self.manager.args.paths.split(' '),
self.manager.args.daemonize)
else:
worker = Worker()
worker.run() | Starts a development server for the zengine application | Below is the instruction that describes the task:
### Input:
Starts a development server for the zengine application
### Response:
def run(self):
"""
Starts a development server for the zengine application
"""
from zengine.wf_daemon import run_workers, Worker
worker_count = int(self.manager.args.workers or 1)
if not self.manager.args.daemonize:
print("Starting worker(s)")
if worker_count > 1 or self.manager.args.autoreload:
# Multiple workers (or autoreload) are delegated to run_workers,
# which also receives the daemonize flag.
run_workers(worker_count,
self.manager.args.paths.split(' '),
self.manager.args.daemonize)
else:
# NOTE(review): a single worker without autoreload runs in-process;
# the daemonize flag is not consulted on this path -- confirm intended.
worker = Worker()
worker.run()
def hit_ratio_table(self, train=False, valid=False, xval=False):
"""
Retrieve the Hit Ratios.
If all are False (default), then return the training metric value.
If more than one options is set to True, then return a dictionary of metrics where the keys are "train",
"valid", and "xval".
:param train: If train is True, then return the hit ratio value for the training data.
:param valid: If valid is True, then return the hit ratio value for the validation data.
:param xval: If xval is True, then return the hit ratio value for the cross validation data.
:return: The hit ratio for this regression model.
"""
tm = ModelBase._get_metrics(self, train, valid, xval)
m = {}
for k, v in zip(list(tm.keys()), list(tm.values())): m[k] = None if v is None else v.hit_ratio_table()
return list(m.values())[0] if len(m) == 1 else m | Retrieve the Hit Ratios.
If all are False (default), then return the training metric value.
If more than one options is set to True, then return a dictionary of metrics where the keys are "train",
"valid", and "xval".
:param train: If train is True, then return the hit ratio value for the training data.
:param valid: If valid is True, then return the hit ratio value for the validation data.
:param xval: If xval is True, then return the hit ratio value for the cross validation data.
:return: The hit ratio for this regression model. | Below is the instruction that describes the task:
### Input:
Retrieve the Hit Ratios.
If all are False (default), then return the training metric value.
If more than one options is set to True, then return a dictionary of metrics where the keys are "train",
"valid", and "xval".
:param train: If train is True, then return the hit ratio value for the training data.
:param valid: If valid is True, then return the hit ratio value for the validation data.
:param xval: If xval is True, then return the hit ratio value for the cross validation data.
:return: The hit ratio for this regression model.
### Response:
def hit_ratio_table(self, train=False, valid=False, xval=False):
"""
Retrieve the Hit Ratios.
If all are False (default), then return the training metric value.
If more than one options is set to True, then return a dictionary of metrics where the keys are "train",
"valid", and "xval".
:param train: If train is True, then return the hit ratio value for the training data.
:param valid: If valid is True, then return the hit ratio value for the validation data.
:param xval: If xval is True, then return the hit ratio value for the cross validation data.
:return: The hit ratio for this regression model.
"""
tm = ModelBase._get_metrics(self, train, valid, xval)
m = {}
for k, v in zip(list(tm.keys()), list(tm.values())): m[k] = None if v is None else v.hit_ratio_table()
return list(m.values())[0] if len(m) == 1 else m |
def plot_weight_posteriors(names, qm_vals, qs_vals, fname):
"""Save a PNG plot with histograms of weight means and stddevs.
Args:
names: A Python `iterable` of `str` variable names.
qm_vals: A Python `iterable`, the same length as `names`,
whose elements are Numpy `array`s, of any shape, containing
posterior means of weight varibles.
qs_vals: A Python `iterable`, the same length as `names`,
whose elements are Numpy `array`s, of any shape, containing
posterior standard deviations of weight varibles.
fname: Python `str` filename to save the plot to.
"""
fig = figure.Figure(figsize=(6, 3))
canvas = backend_agg.FigureCanvasAgg(fig)
ax = fig.add_subplot(1, 2, 1)
for n, qm in zip(names, qm_vals):
sns.distplot(qm.flatten(), ax=ax, label=n)
ax.set_title("weight means")
ax.set_xlim([-1.5, 1.5])
ax.legend()
ax = fig.add_subplot(1, 2, 2)
for n, qs in zip(names, qs_vals):
sns.distplot(qs.flatten(), ax=ax)
ax.set_title("weight stddevs")
ax.set_xlim([0, 1.])
fig.tight_layout()
canvas.print_figure(fname, format="png")
print("saved {}".format(fname)) | Save a PNG plot with histograms of weight means and stddevs.
Args:
names: A Python `iterable` of `str` variable names.
qm_vals: A Python `iterable`, the same length as `names`,
whose elements are Numpy `array`s, of any shape, containing
posterior means of weight varibles.
qs_vals: A Python `iterable`, the same length as `names`,
whose elements are Numpy `array`s, of any shape, containing
posterior standard deviations of weight varibles.
fname: Python `str` filename to save the plot to. | Below is the instruction that describes the task:
### Input:
Save a PNG plot with histograms of weight means and stddevs.
Args:
names: A Python `iterable` of `str` variable names.
qm_vals: A Python `iterable`, the same length as `names`,
whose elements are Numpy `array`s, of any shape, containing
posterior means of weight varibles.
qs_vals: A Python `iterable`, the same length as `names`,
whose elements are Numpy `array`s, of any shape, containing
posterior standard deviations of weight varibles.
fname: Python `str` filename to save the plot to.
### Response:
def plot_weight_posteriors(names, qm_vals, qs_vals, fname):
"""Save a PNG plot with histograms of weight means and stddevs.
Args:
names: A Python `iterable` of `str` variable names.
qm_vals: A Python `iterable`, the same length as `names`,
whose elements are Numpy `array`s, of any shape, containing
posterior means of weight varibles.
qs_vals: A Python `iterable`, the same length as `names`,
whose elements are Numpy `array`s, of any shape, containing
posterior standard deviations of weight varibles.
fname: Python `str` filename to save the plot to.
"""
fig = figure.Figure(figsize=(6, 3))
canvas = backend_agg.FigureCanvasAgg(fig)
ax = fig.add_subplot(1, 2, 1)
for n, qm in zip(names, qm_vals):
sns.distplot(qm.flatten(), ax=ax, label=n)
ax.set_title("weight means")
ax.set_xlim([-1.5, 1.5])
ax.legend()
ax = fig.add_subplot(1, 2, 2)
for n, qs in zip(names, qs_vals):
sns.distplot(qs.flatten(), ax=ax)
ax.set_title("weight stddevs")
ax.set_xlim([0, 1.])
fig.tight_layout()
canvas.print_figure(fname, format="png")
print("saved {}".format(fname)) |
def count_elements(fname, element):
"""
returns (511, 35082) for ANC__WhereToHongKong.xml
"""
num = 0
tot = 0
for event, elem in iterparse(fname):
tot += 1
if elem.text != '':
#print(' tag = ', elem.tag)
#print(' event = ', event # always end
#print(' text = ', elem.text)
pass
if element in elem.tag:
#print(elem.xpath( 'description/text( )' ))
#print(elem.text)
num += 1
elem.clear()
return num, tot | returns (511, 35082) for ANC__WhereToHongKong.xml | Below is the instruction that describes the task:
### Input:
returns (511, 35082) for ANC__WhereToHongKong.xml
### Response:
def count_elements(fname, element):
"""
returns (511, 35082) for ANC__WhereToHongKong.xml
"""
num = 0
tot = 0
for event, elem in iterparse(fname):
tot += 1
if elem.text != '':
#print(' tag = ', elem.tag)
#print(' event = ', event # always end
#print(' text = ', elem.text)
pass
if element in elem.tag:
#print(elem.xpath( 'description/text( )' ))
#print(elem.text)
num += 1
elem.clear()
return num, tot |
def user_exists(name, password=None, htpasswd_file=None, options='',
force=False, runas=None, update=False):
'''
Make sure the user is inside the specified htpasswd file
name
User name
password
User password
htpasswd_file
Path to the htpasswd file
options
See :mod:`salt.modules.htpasswd.useradd`
force
Touch the file even if user already created
runas
The system user to run htpasswd command with
update
Update an existing user's password if it's different from what's in
the htpasswd file (unlike force, which updates regardless)
'''
ret = {'name': name,
'changes': {},
'comment': '',
'result': None}
exists = __salt__['file.grep'](
htpasswd_file, '^{0}:'.format(name))['retcode'] == 0
# If user exists, but we're supposed to update the password, find out if
# it's changed, but not if we're forced to update the file regardless.
password_changed = False
if exists and update and not force:
password_changed = not __salt__['webutil.verify'](
htpasswd_file, name, password, opts=options, runas=runas)
if not exists or password_changed or force:
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'User \'{0}\' is set to be added to htpasswd file'.format(name)
ret['changes'] = {name: True}
return ret
useradd_ret = __salt__['webutil.useradd'](htpasswd_file, name,
password, opts=options,
runas=runas)
if useradd_ret['retcode'] == 0:
ret['result'] = True
ret['comment'] = useradd_ret['stderr']
ret['changes'] = {name: True}
return ret
else:
ret['result'] = False
ret['comment'] = useradd_ret['stderr']
return ret
if __opts__['test'] and ret['changes']:
ret['result'] = None
else:
ret['result'] = True
ret['comment'] = 'User already known'
return ret | Make sure the user is inside the specified htpasswd file
name
User name
password
User password
htpasswd_file
Path to the htpasswd file
options
See :mod:`salt.modules.htpasswd.useradd`
force
Touch the file even if user already created
runas
The system user to run htpasswd command with
update
Update an existing user's password if it's different from what's in
the htpasswd file (unlike force, which updates regardless) | Below is the instruction that describes the task:
### Input:
Make sure the user is inside the specified htpasswd file
name
User name
password
User password
htpasswd_file
Path to the htpasswd file
options
See :mod:`salt.modules.htpasswd.useradd`
force
Touch the file even if user already created
runas
The system user to run htpasswd command with
update
Update an existing user's password if it's different from what's in
the htpasswd file (unlike force, which updates regardless)
### Response:
def user_exists(name, password=None, htpasswd_file=None, options='',
force=False, runas=None, update=False):
'''
Make sure the user is inside the specified htpasswd file
name
User name
password
User password
htpasswd_file
Path to the htpasswd file
options
See :mod:`salt.modules.htpasswd.useradd`
force
Touch the file even if user already created
runas
The system user to run htpasswd command with
update
Update an existing user's password if it's different from what's in
the htpasswd file (unlike force, which updates regardless)
'''
ret = {'name': name,
'changes': {},
'comment': '',
'result': None}
exists = __salt__['file.grep'](
htpasswd_file, '^{0}:'.format(name))['retcode'] == 0
# If user exists, but we're supposed to update the password, find out if
# it's changed, but not if we're forced to update the file regardless.
password_changed = False
if exists and update and not force:
password_changed = not __salt__['webutil.verify'](
htpasswd_file, name, password, opts=options, runas=runas)
if not exists or password_changed or force:
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'User \'{0}\' is set to be added to htpasswd file'.format(name)
ret['changes'] = {name: True}
return ret
useradd_ret = __salt__['webutil.useradd'](htpasswd_file, name,
password, opts=options,
runas=runas)
if useradd_ret['retcode'] == 0:
ret['result'] = True
ret['comment'] = useradd_ret['stderr']
ret['changes'] = {name: True}
return ret
else:
ret['result'] = False
ret['comment'] = useradd_ret['stderr']
return ret
if __opts__['test'] and ret['changes']:
ret['result'] = None
else:
ret['result'] = True
ret['comment'] = 'User already known'
return ret |
def is_server_or_cluster_db_address(value):
"""
checks if the specified value is in the form of
[server or cluster id][/database]
"""
# check if value is an id string
id_path = value.split("/")
id = id_path[0]
return len(id_path) <= 2 and (repository.lookup_server(id) or
repository.lookup_cluster(id)) | checks if the specified value is in the form of
[server or cluster id][/database] | Below is the instruction that describes the task:
### Input:
checks if the specified value is in the form of
[server or cluster id][/database]
### Response:
def is_server_or_cluster_db_address(value):
"""
checks if the specified value is in the form of
[server or cluster id][/database]
"""
# check if value is an id string
id_path = value.split("/")
id = id_path[0]
return len(id_path) <= 2 and (repository.lookup_server(id) or
repository.lookup_cluster(id)) |
def filepaths(path, exclude=(), hidden=True, empty=True):
"""
Return list of absolute, sorted file paths
path: Path to file or directory
exclude: List of file name patterns to exclude
hidden: Whether to include hidden files
empty: Whether to include empty files
Raise PathNotFoundError if path doesn't exist.
"""
if not os.path.exists(path):
raise error.PathNotFoundError(path)
elif not os.access(path, os.R_OK,
effective_ids=os.access in os.supports_effective_ids):
raise error.ReadError(errno.EACCES, path)
if os.path.isfile(path):
return [path]
else:
filepaths = []
for dirpath, dirnames, filenames in os.walk(path):
# Ignore hidden directory
if not hidden and is_hidden(dirpath):
continue
for filename in filenames:
# Ignore hidden file
if not hidden and is_hidden(filename):
continue
filepath = os.path.join(dirpath, filename)
# Ignore excluded file
if any(is_match(filepath, pattern) for pattern in exclude):
continue
else:
# Ignore empty file
if empty or os.path.getsize(os.path.realpath(filepath)) > 0:
filepaths.append(filepath)
return sorted(filepaths, key=lambda fp: fp.casefold()) | Return list of absolute, sorted file paths
path: Path to file or directory
exclude: List of file name patterns to exclude
hidden: Whether to include hidden files
empty: Whether to include empty files
Raise PathNotFoundError if path doesn't exist. | Below is the instruction that describes the task:
### Input:
Return list of absolute, sorted file paths
path: Path to file or directory
exclude: List of file name patterns to exclude
hidden: Whether to include hidden files
empty: Whether to include empty files
Raise PathNotFoundError if path doesn't exist.
### Response:
def filepaths(path, exclude=(), hidden=True, empty=True):
"""
Return list of absolute, sorted file paths
path: Path to file or directory
exclude: List of file name patterns to exclude
hidden: Whether to include hidden files
empty: Whether to include empty files
Raise PathNotFoundError if path doesn't exist.
"""
if not os.path.exists(path):
raise error.PathNotFoundError(path)
elif not os.access(path, os.R_OK,
effective_ids=os.access in os.supports_effective_ids):
raise error.ReadError(errno.EACCES, path)
if os.path.isfile(path):
return [path]
else:
filepaths = []
for dirpath, dirnames, filenames in os.walk(path):
# Ignore hidden directory
if not hidden and is_hidden(dirpath):
continue
for filename in filenames:
# Ignore hidden file
if not hidden and is_hidden(filename):
continue
filepath = os.path.join(dirpath, filename)
# Ignore excluded file
if any(is_match(filepath, pattern) for pattern in exclude):
continue
else:
# Ignore empty file
if empty or os.path.getsize(os.path.realpath(filepath)) > 0:
filepaths.append(filepath)
return sorted(filepaths, key=lambda fp: fp.casefold()) |
def intensity(image):
'''Calculates the average intensity of the pixels in an image.
Accepts both RGB and grayscale images.
:param image: numpy.ndarray
:returns: image intensity
:rtype: float
'''
if len(image.shape) > 2:
# Convert to grayscale
image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY) / 255
elif issubclass(image.dtype.type, np.integer):
image /= 255
return float(np.sum(image) / np.prod(image.shape)) | Calculates the average intensity of the pixels in an image.
Accepts both RGB and grayscale images.
:param image: numpy.ndarray
:returns: image intensity
:rtype: float | Below is the instruction that describes the task:
### Input:
Calculates the average intensity of the pixels in an image.
Accepts both RGB and grayscale images.
:param image: numpy.ndarray
:returns: image intensity
:rtype: float
### Response:
def intensity(image):
'''Calculates the average intensity of the pixels in an image.
Accepts both RGB and grayscale images.
:param image: numpy.ndarray
:returns: image intensity
:rtype: float
'''
if len(image.shape) > 2:
# Convert to grayscale
image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY) / 255
elif issubclass(image.dtype.type, np.integer):
image /= 255
return float(np.sum(image) / np.prod(image.shape)) |
def make_container_tree(folders,files,path_delim="/",parse_files=True):
'''make_container_tree will convert a list of folders and files into a json structure that represents a graph.
:param folders: a list of folders in the image
:param files: a list of files in the folder
:param parse_files: return 'files' lookup in result, to associate ID of node with files (default True)
:param path_delim: the path delimiter, default is '/'
'''
nodes = {} # first we will make a list of nodes
lookup = {}
count = 1 # count will hold an id for nodes
max_depth = 0
for folder in folders:
if folder != ".":
folder = re.sub("^[.]/","",folder)
path_components = folder.split(path_delim)
for p in range(len(path_components)):
path_component = path_components[p]
fullpath = path_delim.join(path_components[0:p+1])
# Have we created the node yet?
if fullpath not in lookup:
lookup[fullpath] = count
node = {"id":count,"name":path_component,"path":fullpath,"level":p,"children":[]}
count +=1
# Did we find a deeper level?
if p > max_depth:
max_depth = p
# Does the node have a parent?
if p==0: # base node, no parent
parent_id = 0
else: # look up the parent id
parent_path = path_delim.join(path_components[0:p])
parent_id = lookup[parent_path]
node["parent"] = parent_id
nodes[node['id']] = node
# Now make the graph, we simply append children to their parents
seen = []
graph = []
iters = list(range(max_depth+1)) # 0,1,2,3...
iters.reverse() # ...3,2,1,0
iters.pop() # remove 0
for level in iters:
children = {x:y for x,y in nodes.items() if y['level'] == level}
seen = seen + [y['id'] for x,y in children.items()]
nodes = {x:y for x,y in nodes.items() if y['id'] not in seen}
for node_id,child_node in children.items():
if node_id == 0: #base node
graph[node_id] = child_node
else:
parent_id = child_node['parent']
nodes[parent_id]["children"].append(child_node)
# Now add the parents to graph, with name as main lookup
for parent,parent_info in nodes.items():
graph.append(parent_info)
graph = {"name":"base","children":graph}
result = {"graph":graph,"lookup":lookup,"depth":max_depth+1}
# Parse files to include in tree
if parse_files == True:
file_lookup = {}
for filey in files:
filey = re.sub("^[.]/","",filey)
filepath,filename = os.path.split(filey)
if filepath in lookup:
folder_id = lookup[filepath]
if folder_id in file_lookup:
file_lookup[folder_id].append(filename)
else:
file_lookup[folder_id] = [filename]
elif filepath == '': # base folder
if 0 in file_lookup:
file_lookup[0].append(filename)
else:
file_lookup[0] = [filename]
result['files'] = file_lookup
return result | make_container_tree will convert a list of folders and files into a json structure that represents a graph.
:param folders: a list of folders in the image
:param files: a list of files in the folder
:param parse_files: return 'files' lookup in result, to associate ID of node with files (default True)
:param path_delim: the path delimiter, default is '/' | Below is the instruction that describes the task:
### Input:
make_container_tree will convert a list of folders and files into a json structure that represents a graph.
:param folders: a list of folders in the image
:param files: a list of files in the folder
:param parse_files: return 'files' lookup in result, to associate ID of node with files (default True)
:param path_delim: the path delimiter, default is '/'
### Response:
def make_container_tree(folders,files,path_delim="/",parse_files=True):
'''make_container_tree will convert a list of folders and files into a json structure that represents a graph.
:param folders: a list of folders in the image
:param files: a list of files in the folder
:param parse_files: return 'files' lookup in result, to associate ID of node with files (default True)
:param path_delim: the path delimiter, default is '/'
'''
nodes = {} # first we will make a list of nodes
lookup = {}
count = 1 # count will hold an id for nodes
max_depth = 0
for folder in folders:
if folder != ".":
folder = re.sub("^[.]/","",folder)
path_components = folder.split(path_delim)
for p in range(len(path_components)):
path_component = path_components[p]
fullpath = path_delim.join(path_components[0:p+1])
# Have we created the node yet?
if fullpath not in lookup:
lookup[fullpath] = count
node = {"id":count,"name":path_component,"path":fullpath,"level":p,"children":[]}
count +=1
# Did we find a deeper level?
if p > max_depth:
max_depth = p
# Does the node have a parent?
if p==0: # base node, no parent
parent_id = 0
else: # look up the parent id
parent_path = path_delim.join(path_components[0:p])
parent_id = lookup[parent_path]
node["parent"] = parent_id
nodes[node['id']] = node
# Now make the graph, we simply append children to their parents
seen = []
graph = []
iters = list(range(max_depth+1)) # 0,1,2,3...
iters.reverse() # ...3,2,1,0
iters.pop() # remove 0
for level in iters:
children = {x:y for x,y in nodes.items() if y['level'] == level}
seen = seen + [y['id'] for x,y in children.items()]
nodes = {x:y for x,y in nodes.items() if y['id'] not in seen}
for node_id,child_node in children.items():
if node_id == 0: #base node
graph[node_id] = child_node
else:
parent_id = child_node['parent']
nodes[parent_id]["children"].append(child_node)
# Now add the parents to graph, with name as main lookup
for parent,parent_info in nodes.items():
graph.append(parent_info)
graph = {"name":"base","children":graph}
result = {"graph":graph,"lookup":lookup,"depth":max_depth+1}
# Parse files to include in tree
if parse_files == True:
file_lookup = {}
for filey in files:
filey = re.sub("^[.]/","",filey)
filepath,filename = os.path.split(filey)
if filepath in lookup:
folder_id = lookup[filepath]
if folder_id in file_lookup:
file_lookup[folder_id].append(filename)
else:
file_lookup[folder_id] = [filename]
elif filepath == '': # base folder
if 0 in file_lookup:
file_lookup[0].append(filename)
else:
file_lookup[0] = [filename]
result['files'] = file_lookup
return result |
def getAttributesList(self):
'''
getAttributesList - Get a copy of all attributes as a list of tuples (name, value)
ALL values are converted to string and copied, so modifications will not affect the original attributes.
If you want types like "style" to work as before, you'll need to recreate those elements (like StyleAttribute(strValue) ).
@return list< tuple< str(name), str(value) > > - A list of tuples of attrName, attrValue pairs, all converted to strings.
This is suitable for passing back into AdvancedTag when creating a new tag.
'''
return [ (tostr(name)[:], tostr(value)[:]) for name, value in self._attributes.items() ] | getAttributesList - Get a copy of all attributes as a list of tuples (name, value)
ALL values are converted to string and copied, so modifications will not affect the original attributes.
If you want types like "style" to work as before, you'll need to recreate those elements (like StyleAttribute(strValue) ).
@return list< tuple< str(name), str(value) > > - A list of tuples of attrName, attrValue pairs, all converted to strings.
This is suitable for passing back into AdvancedTag when creating a new tag. | Below is the instruction that describes the task:
### Input:
getAttributesList - Get a copy of all attributes as a list of tuples (name, value)
ALL values are converted to string and copied, so modifications will not affect the original attributes.
If you want types like "style" to work as before, you'll need to recreate those elements (like StyleAttribute(strValue) ).
@return list< tuple< str(name), str(value) > > - A list of tuples of attrName, attrValue pairs, all converted to strings.
This is suitable for passing back into AdvancedTag when creating a new tag.
### Response:
def getAttributesList(self):
'''
getAttributesList - Get a copy of all attributes as a list of tuples (name, value)
ALL values are converted to string and copied, so modifications will not affect the original attributes.
If you want types like "style" to work as before, you'll need to recreate those elements (like StyleAttribute(strValue) ).
@return list< tuple< str(name), str(value) > > - A list of tuples of attrName, attrValue pairs, all converted to strings.
This is suitable for passing back into AdvancedTag when creating a new tag.
'''
return [ (tostr(name)[:], tostr(value)[:]) for name, value in self._attributes.items() ] |
def _display_interval(i):
"""Convert a time interval into a human-readable string.
:param i: The interval to convert, in seconds.
"""
sigils = ["d", "h", "m", "s"]
factors = [24 * 60 * 60, 60 * 60, 60, 1]
remain = int(i)
result = ""
for fac, sig in zip(factors, sigils):
if remain < fac:
continue
result += "{}{}".format(remain // fac, sig)
remain = remain % fac
return result | Convert a time interval into a human-readable string.
:param i: The interval to convert, in seconds. | Below is the instruction that describes the task:
### Input:
Convert a time interval into a human-readable string.
:param i: The interval to convert, in seconds.
### Response:
def _display_interval(i):
"""Convert a time interval into a human-readable string.
:param i: The interval to convert, in seconds.
"""
sigils = ["d", "h", "m", "s"]
factors = [24 * 60 * 60, 60 * 60, 60, 1]
remain = int(i)
result = ""
for fac, sig in zip(factors, sigils):
if remain < fac:
continue
result += "{}{}".format(remain // fac, sig)
remain = remain % fac
return result |
def accpro_results(self):
"""Parse the ACCpro output file and return a dict of secondary structure compositions.
"""
return ssbio.protein.sequence.utils.fasta.load_fasta_file_as_dict_of_seqs(self.out_accpro) | Parse the ACCpro output file and return a dict of secondary structure compositions. | Below is the instruction that describes the task:
### Input:
Parse the ACCpro output file and return a dict of secondary structure compositions.
### Response:
def accpro_results(self):
"""Parse the ACCpro output file and return a dict of secondary structure compositions.
"""
return ssbio.protein.sequence.utils.fasta.load_fasta_file_as_dict_of_seqs(self.out_accpro) |
def copy(self, new_grab=None):
"""
Clone the Response object.
"""
obj = self.__class__()
obj.process_grab(new_grab if new_grab else self.grab)
copy_keys = ('status', 'code', 'head', 'body', 'total_time',
'connect_time', 'name_lookup_time',
'url', 'charset', '_unicode_body',
'_grab_config')
for key in copy_keys:
setattr(obj, key, getattr(self, key))
obj.headers = copy(self.headers)
# TODO: Maybe, deepcopy?
obj.cookies = copy(self.cookies)
return obj | Clone the Response object. | Below is the instruction that describes the task:
### Input:
Clone the Response object.
### Response:
def copy(self, new_grab=None):
"""
Clone the Response object.
"""
obj = self.__class__()
obj.process_grab(new_grab if new_grab else self.grab)
copy_keys = ('status', 'code', 'head', 'body', 'total_time',
'connect_time', 'name_lookup_time',
'url', 'charset', '_unicode_body',
'_grab_config')
for key in copy_keys:
setattr(obj, key, getattr(self, key))
obj.headers = copy(self.headers)
# TODO: Maybe, deepcopy?
obj.cookies = copy(self.cookies)
return obj |
def _transform_value(value, policy, transform_type):
'''
helper function to transform the policy value into something that more
closely matches how the policy is displayed in the gpedit GUI
'''
t_kwargs = {}
if 'Transform' in policy:
if transform_type in policy['Transform']:
_policydata = _policy_info()
if transform_type + 'Args' in policy['Transform']:
t_kwargs = policy['Transform'][transform_type + 'Args']
return getattr(_policydata, policy['Transform'][transform_type])(value, **t_kwargs)
else:
return value
else:
if 'Registry' in policy:
if value == '(value not set)':
return 'Not Defined'
return value | helper function to transform the policy value into something that more
closely matches how the policy is displayed in the gpedit GUI | Below is the instruction that describes the task:
### Input:
helper function to transform the policy value into something that more
closely matches how the policy is displayed in the gpedit GUI
### Response:
def _transform_value(value, policy, transform_type):
'''
helper function to transform the policy value into something that more
closely matches how the policy is displayed in the gpedit GUI
'''
t_kwargs = {}
if 'Transform' in policy:
if transform_type in policy['Transform']:
_policydata = _policy_info()
if transform_type + 'Args' in policy['Transform']:
t_kwargs = policy['Transform'][transform_type + 'Args']
return getattr(_policydata, policy['Transform'][transform_type])(value, **t_kwargs)
else:
return value
else:
if 'Registry' in policy:
if value == '(value not set)':
return 'Not Defined'
return value |
def login(self, broker_name, account_cookie, account=None):
"""login 登录到交易前置
2018-07-02 在实盘中,登录到交易前置后,需要同步资产状态
Arguments:
broker_name {[type]} -- [description]
account_cookie {[type]} -- [description]
Keyword Arguments:
account {[type]} -- [description] (default: {None})
Returns:
[type] -- [description]
"""
res = False
if account is None:
if account_cookie not in self.session.keys():
self.session[account_cookie] = QA_Account(
account_cookie=account_cookie,
broker=broker_name
)
if self.sync_account(broker_name, account_cookie):
res = True
if self.if_start_orderthreading and res:
#
self.order_handler.subscribe(
self.session[account_cookie],
self.broker[broker_name]
)
else:
if account_cookie not in self.session.keys():
account.broker = broker_name
self.session[account_cookie] = account
if self.sync_account(broker_name, account_cookie):
res = True
if self.if_start_orderthreading and res:
#
self.order_handler.subscribe(
account,
self.broker[broker_name]
)
if res:
return res
else:
try:
self.session.pop(account_cookie)
except:
pass
return False | login 登录到交易前置
2018-07-02 在实盘中,登录到交易前置后,需要同步资产状态
Arguments:
broker_name {[type]} -- [description]
account_cookie {[type]} -- [description]
Keyword Arguments:
account {[type]} -- [description] (default: {None})
Returns:
[type] -- [description] | Below is the instruction that describes the task:
### Input:
login 登录到交易前置
2018-07-02 在实盘中,登录到交易前置后,需要同步资产状态
Arguments:
broker_name {[type]} -- [description]
account_cookie {[type]} -- [description]
Keyword Arguments:
account {[type]} -- [description] (default: {None})
Returns:
[type] -- [description]
### Response:
def login(self, broker_name, account_cookie, account=None):
"""login 登录到交易前置
2018-07-02 在实盘中,登录到交易前置后,需要同步资产状态
Arguments:
broker_name {[type]} -- [description]
account_cookie {[type]} -- [description]
Keyword Arguments:
account {[type]} -- [description] (default: {None})
Returns:
[type] -- [description]
"""
res = False
if account is None:
if account_cookie not in self.session.keys():
self.session[account_cookie] = QA_Account(
account_cookie=account_cookie,
broker=broker_name
)
if self.sync_account(broker_name, account_cookie):
res = True
if self.if_start_orderthreading and res:
#
self.order_handler.subscribe(
self.session[account_cookie],
self.broker[broker_name]
)
else:
if account_cookie not in self.session.keys():
account.broker = broker_name
self.session[account_cookie] = account
if self.sync_account(broker_name, account_cookie):
res = True
if self.if_start_orderthreading and res:
#
self.order_handler.subscribe(
account,
self.broker[broker_name]
)
if res:
return res
else:
try:
self.session.pop(account_cookie)
except:
pass
return False |
def _ReadUUIDDataTypeDefinition(
self, definitions_registry, definition_values, definition_name,
is_member=False):
"""Reads an UUID data type definition.
Args:
definitions_registry (DataTypeDefinitionsRegistry): data type definitions
registry.
definition_values (dict[str, object]): definition values.
definition_name (str): name of the definition.
is_member (Optional[bool]): True if the data type definition is a member
data type definition.
Returns:
UUIDDataTypeDefinition: UUID data type definition.
Raises:
DefinitionReaderError: if the definitions values are missing or if
the format is incorrect.
"""
return self._ReadFixedSizeDataTypeDefinition(
definitions_registry, definition_values,
data_types.UUIDDefinition, definition_name,
self._SUPPORTED_ATTRIBUTES_FIXED_SIZE_DATA_TYPE, default_size=16,
is_member=is_member, supported_size_values=(16, )) | Reads an UUID data type definition.
Args:
definitions_registry (DataTypeDefinitionsRegistry): data type definitions
registry.
definition_values (dict[str, object]): definition values.
definition_name (str): name of the definition.
is_member (Optional[bool]): True if the data type definition is a member
data type definition.
Returns:
UUIDDataTypeDefinition: UUID data type definition.
Raises:
DefinitionReaderError: if the definitions values are missing or if
the format is incorrect. | Below is the the instruction that describes the task:
### Input:
Reads an UUID data type definition.
Args:
definitions_registry (DataTypeDefinitionsRegistry): data type definitions
registry.
definition_values (dict[str, object]): definition values.
definition_name (str): name of the definition.
is_member (Optional[bool]): True if the data type definition is a member
data type definition.
Returns:
UUIDDataTypeDefinition: UUID data type definition.
Raises:
DefinitionReaderError: if the definitions values are missing or if
the format is incorrect.
### Response:
def _ReadUUIDDataTypeDefinition(
self, definitions_registry, definition_values, definition_name,
is_member=False):
"""Reads an UUID data type definition.
Args:
definitions_registry (DataTypeDefinitionsRegistry): data type definitions
registry.
definition_values (dict[str, object]): definition values.
definition_name (str): name of the definition.
is_member (Optional[bool]): True if the data type definition is a member
data type definition.
Returns:
UUIDDataTypeDefinition: UUID data type definition.
Raises:
DefinitionReaderError: if the definitions values are missing or if
the format is incorrect.
"""
return self._ReadFixedSizeDataTypeDefinition(
definitions_registry, definition_values,
data_types.UUIDDefinition, definition_name,
self._SUPPORTED_ATTRIBUTES_FIXED_SIZE_DATA_TYPE, default_size=16,
is_member=is_member, supported_size_values=(16, )) |
def _flush(self):
"""
Decorator for flushing handlers with an lock, catching exceptions
"""
if not self.enabled:
return
try:
try:
self.lock.acquire()
self.flush()
except Exception:
self.log.error(traceback.format_exc())
finally:
if self.lock.locked():
self.lock.release() | Decorator for flushing handlers with an lock, catching exceptions | Below is the the instruction that describes the task:
### Input:
Decorator for flushing handlers with an lock, catching exceptions
### Response:
def _flush(self):
"""
Decorator for flushing handlers with an lock, catching exceptions
"""
if not self.enabled:
return
try:
try:
self.lock.acquire()
self.flush()
except Exception:
self.log.error(traceback.format_exc())
finally:
if self.lock.locked():
self.lock.release() |
async def close(self):
"""This function is a coroutine.
Closes all connections."""
if self._is_closed:
return
else:
await self.http.close()
self._is_closed = True | This function is a coroutine.
Closes all connections. | Below is the the instruction that describes the task:
### Input:
This function is a coroutine.
Closes all connections.
### Response:
async def close(self):
"""This function is a coroutine.
Closes all connections."""
if self._is_closed:
return
else:
await self.http.close()
self._is_closed = True |
def get_lines(self, first, last):
"""Return SourceLines for lines between and including first & last."""
line = 1
linestring = []
linestrings = []
for char in self.string:
if line >= first and line <= last:
linestring.append(char)
if char == '\n':
linestrings.append((''.join(linestring), line))
linestring = []
elif line > last:
break
if char == '\n':
line += 1
if linestring:
linestrings.append((''.join(linestring), line))
elif not linestrings:
return None
return [SourceLine(string, lineno) for string, lineno in linestrings] | Return SourceLines for lines between and including first & last. | Below is the the instruction that describes the task:
### Input:
Return SourceLines for lines between and including first & last.
### Response:
def get_lines(self, first, last):
"""Return SourceLines for lines between and including first & last."""
line = 1
linestring = []
linestrings = []
for char in self.string:
if line >= first and line <= last:
linestring.append(char)
if char == '\n':
linestrings.append((''.join(linestring), line))
linestring = []
elif line > last:
break
if char == '\n':
line += 1
if linestring:
linestrings.append((''.join(linestring), line))
elif not linestrings:
return None
return [SourceLine(string, lineno) for string, lineno in linestrings] |
def list(self) -> Iterable[ListEntry]:
"""Return all the entries in the list tree."""
for entry in self._iter(self._root, ''):
yield entry | Return all the entries in the list tree. | Below is the the instruction that describes the task:
### Input:
Return all the entries in the list tree.
### Response:
def list(self) -> Iterable[ListEntry]:
"""Return all the entries in the list tree."""
for entry in self._iter(self._root, ''):
yield entry |
def _request(self, typ, id=0, method='GET', params=None, data=None, url=None):
"""
send the request, return response obj
"""
headers = { "Accept": "application/json" }
auth = None
if self.user:
auth = (self.user, self.password)
if not url:
if id:
url = "%s/%s/%s" % (self.url, typ, id)
else:
url = "%s/%s" % (self.url, typ)
return requests.request(method, url, params=params, data=data, auth=auth, headers=headers) | send the request, return response obj | Below is the the instruction that describes the task:
### Input:
send the request, return response obj
### Response:
def _request(self, typ, id=0, method='GET', params=None, data=None, url=None):
"""
send the request, return response obj
"""
headers = { "Accept": "application/json" }
auth = None
if self.user:
auth = (self.user, self.password)
if not url:
if id:
url = "%s/%s/%s" % (self.url, typ, id)
else:
url = "%s/%s" % (self.url, typ)
return requests.request(method, url, params=params, data=data, auth=auth, headers=headers) |
def add_overlay(orig, area, coast_dir, color=(0, 0, 0), width=0.5, resolution=None,
level_coast=1, level_borders=1, fill_value=None,
grid=None):
"""Add coastline, political borders and grid(graticules) to image.
Uses ``color`` for feature colors where ``color`` is a 3-element tuple
of integers between 0 and 255 representing (R, G, B).
.. warning::
This function currently loses the data mask (alpha band).
``resolution`` is chosen automatically if None (default), otherwise it should be one of:
+-----+-------------------------+---------+
| 'f' | Full resolution | 0.04 km |
| 'h' | High resolution | 0.2 km |
| 'i' | Intermediate resolution | 1.0 km |
| 'l' | Low resolution | 5.0 km |
| 'c' | Crude resolution | 25 km |
+-----+-------------------------+---------+
``grid`` is a dictionary with key values as documented in detail in pycoast
eg. overlay={'grid': {'major_lonlat': (10, 10),
'write_text': False,
'outline': (224, 224, 224),
'width': 0.5}}
Here major_lonlat is plotted every 10 deg for both longitude and latitude,
no labels for the grid lines are plotted, the color used for the grid lines
is light gray, and the width of the gratucules is 0.5 pixels.
For grid if aggdraw is used, font option is mandatory, if not write_text is set to False
eg. font = aggdraw.Font('black', '/usr/share/fonts/truetype/msttcorefonts/Arial.ttf',
opacity=127, size=16)
"""
if area is None:
raise ValueError("Area of image is None, can't add overlay.")
from pycoast import ContourWriterAGG
if isinstance(area, str):
area = get_area_def(area)
LOG.info("Add coastlines and political borders to image.")
if resolution is None:
x_resolution = ((area.area_extent[2] -
area.area_extent[0]) /
area.x_size)
y_resolution = ((area.area_extent[3] -
area.area_extent[1]) /
area.y_size)
res = min(x_resolution, y_resolution)
if res > 25000:
resolution = "c"
elif res > 5000:
resolution = "l"
elif res > 1000:
resolution = "i"
elif res > 200:
resolution = "h"
else:
resolution = "f"
LOG.debug("Automagically choose resolution %s", resolution)
if hasattr(orig, 'convert'):
# image must be in RGB space to work with pycoast/pydecorate
orig = orig.convert('RGBA' if orig.mode.endswith('A') else 'RGB')
elif not orig.mode.startswith('RGB'):
raise RuntimeError("'trollimage' 1.6+ required to support adding "
"overlays/decorations to non-RGB data.")
img = orig.pil_image(fill_value=fill_value)
cw_ = ContourWriterAGG(coast_dir)
cw_.add_coastlines(img, area, outline=color,
resolution=resolution, width=width, level=level_coast)
cw_.add_borders(img, area, outline=color,
resolution=resolution, width=width, level=level_borders)
# Only add grid if major_lonlat is given.
if grid and 'major_lonlat' in grid and grid['major_lonlat']:
major_lonlat = grid.pop('major_lonlat')
minor_lonlat = grid.pop('minor_lonlat', major_lonlat)
cw_.add_grid(img, area, major_lonlat, minor_lonlat, **grid)
arr = da.from_array(np.array(img) / 255.0, chunks=CHUNK_SIZE)
new_data = xr.DataArray(arr, dims=['y', 'x', 'bands'],
coords={'y': orig.data.coords['y'],
'x': orig.data.coords['x'],
'bands': list(img.mode)},
attrs=orig.data.attrs)
return XRImage(new_data) | Add coastline, political borders and grid(graticules) to image.
Uses ``color`` for feature colors where ``color`` is a 3-element tuple
of integers between 0 and 255 representing (R, G, B).
.. warning::
This function currently loses the data mask (alpha band).
``resolution`` is chosen automatically if None (default), otherwise it should be one of:
+-----+-------------------------+---------+
| 'f' | Full resolution | 0.04 km |
| 'h' | High resolution | 0.2 km |
| 'i' | Intermediate resolution | 1.0 km |
| 'l' | Low resolution | 5.0 km |
| 'c' | Crude resolution | 25 km |
+-----+-------------------------+---------+
``grid`` is a dictionary with key values as documented in detail in pycoast
eg. overlay={'grid': {'major_lonlat': (10, 10),
'write_text': False,
'outline': (224, 224, 224),
'width': 0.5}}
Here major_lonlat is plotted every 10 deg for both longitude and latitude,
no labels for the grid lines are plotted, the color used for the grid lines
is light gray, and the width of the gratucules is 0.5 pixels.
For grid if aggdraw is used, font option is mandatory, if not write_text is set to False
eg. font = aggdraw.Font('black', '/usr/share/fonts/truetype/msttcorefonts/Arial.ttf',
opacity=127, size=16) | Below is the the instruction that describes the task:
### Input:
Add coastline, political borders and grid(graticules) to image.
Uses ``color`` for feature colors where ``color`` is a 3-element tuple
of integers between 0 and 255 representing (R, G, B).
.. warning::
This function currently loses the data mask (alpha band).
``resolution`` is chosen automatically if None (default), otherwise it should be one of:
+-----+-------------------------+---------+
| 'f' | Full resolution | 0.04 km |
| 'h' | High resolution | 0.2 km |
| 'i' | Intermediate resolution | 1.0 km |
| 'l' | Low resolution | 5.0 km |
| 'c' | Crude resolution | 25 km |
+-----+-------------------------+---------+
``grid`` is a dictionary with key values as documented in detail in pycoast
eg. overlay={'grid': {'major_lonlat': (10, 10),
'write_text': False,
'outline': (224, 224, 224),
'width': 0.5}}
Here major_lonlat is plotted every 10 deg for both longitude and latitude,
no labels for the grid lines are plotted, the color used for the grid lines
is light gray, and the width of the gratucules is 0.5 pixels.
For grid if aggdraw is used, font option is mandatory, if not write_text is set to False
eg. font = aggdraw.Font('black', '/usr/share/fonts/truetype/msttcorefonts/Arial.ttf',
opacity=127, size=16)
### Response:
def add_overlay(orig, area, coast_dir, color=(0, 0, 0), width=0.5, resolution=None,
level_coast=1, level_borders=1, fill_value=None,
grid=None):
"""Add coastline, political borders and grid(graticules) to image.
Uses ``color`` for feature colors where ``color`` is a 3-element tuple
of integers between 0 and 255 representing (R, G, B).
.. warning::
This function currently loses the data mask (alpha band).
``resolution`` is chosen automatically if None (default), otherwise it should be one of:
+-----+-------------------------+---------+
| 'f' | Full resolution | 0.04 km |
| 'h' | High resolution | 0.2 km |
| 'i' | Intermediate resolution | 1.0 km |
| 'l' | Low resolution | 5.0 km |
| 'c' | Crude resolution | 25 km |
+-----+-------------------------+---------+
``grid`` is a dictionary with key values as documented in detail in pycoast
eg. overlay={'grid': {'major_lonlat': (10, 10),
'write_text': False,
'outline': (224, 224, 224),
'width': 0.5}}
Here major_lonlat is plotted every 10 deg for both longitude and latitude,
no labels for the grid lines are plotted, the color used for the grid lines
is light gray, and the width of the gratucules is 0.5 pixels.
For grid if aggdraw is used, font option is mandatory, if not write_text is set to False
eg. font = aggdraw.Font('black', '/usr/share/fonts/truetype/msttcorefonts/Arial.ttf',
opacity=127, size=16)
"""
if area is None:
raise ValueError("Area of image is None, can't add overlay.")
from pycoast import ContourWriterAGG
if isinstance(area, str):
area = get_area_def(area)
LOG.info("Add coastlines and political borders to image.")
if resolution is None:
x_resolution = ((area.area_extent[2] -
area.area_extent[0]) /
area.x_size)
y_resolution = ((area.area_extent[3] -
area.area_extent[1]) /
area.y_size)
res = min(x_resolution, y_resolution)
if res > 25000:
resolution = "c"
elif res > 5000:
resolution = "l"
elif res > 1000:
resolution = "i"
elif res > 200:
resolution = "h"
else:
resolution = "f"
LOG.debug("Automagically choose resolution %s", resolution)
if hasattr(orig, 'convert'):
# image must be in RGB space to work with pycoast/pydecorate
orig = orig.convert('RGBA' if orig.mode.endswith('A') else 'RGB')
elif not orig.mode.startswith('RGB'):
raise RuntimeError("'trollimage' 1.6+ required to support adding "
"overlays/decorations to non-RGB data.")
img = orig.pil_image(fill_value=fill_value)
cw_ = ContourWriterAGG(coast_dir)
cw_.add_coastlines(img, area, outline=color,
resolution=resolution, width=width, level=level_coast)
cw_.add_borders(img, area, outline=color,
resolution=resolution, width=width, level=level_borders)
# Only add grid if major_lonlat is given.
if grid and 'major_lonlat' in grid and grid['major_lonlat']:
major_lonlat = grid.pop('major_lonlat')
minor_lonlat = grid.pop('minor_lonlat', major_lonlat)
cw_.add_grid(img, area, major_lonlat, minor_lonlat, **grid)
arr = da.from_array(np.array(img) / 255.0, chunks=CHUNK_SIZE)
new_data = xr.DataArray(arr, dims=['y', 'x', 'bands'],
coords={'y': orig.data.coords['y'],
'x': orig.data.coords['x'],
'bands': list(img.mode)},
attrs=orig.data.attrs)
return XRImage(new_data) |
def generate_kubernetes(self):
""" Generate a sample kubernetes
"""
example = {}
example['spec'] = {}
example['spec']['containers'] = []
example['spec']['containers'].append({"name": '', "image": '', "env": []})
for key, value in self.spec.items():
if value['type'] in (dict, list):
kvalue = f"\'{json.dumps(value.get('example', ''))}\'"
else:
kvalue = f"{value.get('example', '')}"
entry = {"name": f"{self.env_prefix}_{key.upper()}", "value": kvalue}
example['spec']['containers'][0]['env'].append(entry)
print(yaml.dump(example, default_flow_style=False)) | Generate a sample kubernetes | Below is the the instruction that describes the task:
### Input:
Generate a sample kubernetes
### Response:
def generate_kubernetes(self):
""" Generate a sample kubernetes
"""
example = {}
example['spec'] = {}
example['spec']['containers'] = []
example['spec']['containers'].append({"name": '', "image": '', "env": []})
for key, value in self.spec.items():
if value['type'] in (dict, list):
kvalue = f"\'{json.dumps(value.get('example', ''))}\'"
else:
kvalue = f"{value.get('example', '')}"
entry = {"name": f"{self.env_prefix}_{key.upper()}", "value": kvalue}
example['spec']['containers'][0]['env'].append(entry)
print(yaml.dump(example, default_flow_style=False)) |
def _CreateMultipleValuesCondition(self, values, operator):
"""Creates a condition with the provided list of values and operator."""
values = ['"%s"' % value if isinstance(value, str) or
isinstance(value, unicode) else str(value) for value in values]
return '%s %s [%s]' % (self._field, operator, ', '.join(values)) | Creates a condition with the provided list of values and operator. | Below is the the instruction that describes the task:
### Input:
Creates a condition with the provided list of values and operator.
### Response:
def _CreateMultipleValuesCondition(self, values, operator):
"""Creates a condition with the provided list of values and operator."""
values = ['"%s"' % value if isinstance(value, str) or
isinstance(value, unicode) else str(value) for value in values]
return '%s %s [%s]' % (self._field, operator, ', '.join(values)) |
def set_container_setting(name, container, settings):
'''
Set the value of the setting for an IIS container.
.. versionadded:: 2016.11.0
Args:
name (str): The name of the IIS container.
container (str): The type of IIS container. The container types are:
AppPools, Sites, SslBindings
settings (dict): A dictionary of the setting names and their values.
Returns:
bool: True if successful, otherwise False
CLI Example:
.. code-block:: bash
salt '*' win_iis.set_container_setting name='MyTestPool' container='AppPools'
settings="{'managedPipeLineMode': 'Integrated'}"
'''
identityType_map2string = {'0': 'LocalSystem', '1': 'LocalService', '2': 'NetworkService', '3': 'SpecificUser', '4': 'ApplicationPoolIdentity'}
identityType_map2numeric = {'LocalSystem': '0', 'LocalService': '1', 'NetworkService': '2', 'SpecificUser': '3', 'ApplicationPoolIdentity': '4'}
ps_cmd = list()
container_path = r"IIS:\{0}\{1}".format(container, name)
if not settings:
log.warning('No settings provided')
return False
# Treat all values as strings for the purpose of comparing them to existing values.
for setting in settings:
settings[setting] = six.text_type(settings[setting])
current_settings = get_container_setting(
name=name, container=container, settings=settings.keys())
if settings == current_settings:
log.debug('Settings already contain the provided values.')
return True
for setting in settings:
# If the value is numeric, don't treat it as a string in PowerShell.
try:
complex(settings[setting])
value = settings[setting]
except ValueError:
value = "'{0}'".format(settings[setting])
# Map to numeric to support server 2008
if setting == 'processModel.identityType' and settings[setting] in identityType_map2numeric.keys():
value = identityType_map2numeric[settings[setting]]
ps_cmd.extend(['Set-ItemProperty',
'-Path', "'{0}'".format(container_path),
'-Name', "'{0}'".format(setting),
'-Value', '{0};'.format(value)])
cmd_ret = _srvmgr(ps_cmd)
if cmd_ret['retcode'] != 0:
msg = 'Unable to set settings for {0}: {1}'.format(container, name)
raise CommandExecutionError(msg)
# Get the fields post-change so that we can verify tht all values
# were modified successfully. Track the ones that weren't.
new_settings = get_container_setting(
name=name, container=container, settings=settings.keys())
failed_settings = dict()
for setting in settings:
# map identity type from numeric to string for comparing
if setting == 'processModel.identityType' and settings[setting] in identityType_map2string.keys():
settings[setting] = identityType_map2string[settings[setting]]
if six.text_type(settings[setting]) != six.text_type(new_settings[setting]):
failed_settings[setting] = settings[setting]
if failed_settings:
log.error('Failed to change settings: %s', failed_settings)
return False
log.debug('Settings configured successfully: %s', settings.keys())
return True | Set the value of the setting for an IIS container.
.. versionadded:: 2016.11.0
Args:
name (str): The name of the IIS container.
container (str): The type of IIS container. The container types are:
AppPools, Sites, SslBindings
settings (dict): A dictionary of the setting names and their values.
Returns:
bool: True if successful, otherwise False
CLI Example:
.. code-block:: bash
salt '*' win_iis.set_container_setting name='MyTestPool' container='AppPools'
settings="{'managedPipeLineMode': 'Integrated'}" | Below is the the instruction that describes the task:
### Input:
Set the value of the setting for an IIS container.
.. versionadded:: 2016.11.0
Args:
name (str): The name of the IIS container.
container (str): The type of IIS container. The container types are:
AppPools, Sites, SslBindings
settings (dict): A dictionary of the setting names and their values.
Returns:
bool: True if successful, otherwise False
CLI Example:
.. code-block:: bash
salt '*' win_iis.set_container_setting name='MyTestPool' container='AppPools'
settings="{'managedPipeLineMode': 'Integrated'}"
### Response:
def set_container_setting(name, container, settings):
'''
Set the value of the setting for an IIS container.
.. versionadded:: 2016.11.0
Args:
name (str): The name of the IIS container.
container (str): The type of IIS container. The container types are:
AppPools, Sites, SslBindings
settings (dict): A dictionary of the setting names and their values.
Returns:
bool: True if successful, otherwise False
CLI Example:
.. code-block:: bash
salt '*' win_iis.set_container_setting name='MyTestPool' container='AppPools'
settings="{'managedPipeLineMode': 'Integrated'}"
'''
identityType_map2string = {'0': 'LocalSystem', '1': 'LocalService', '2': 'NetworkService', '3': 'SpecificUser', '4': 'ApplicationPoolIdentity'}
identityType_map2numeric = {'LocalSystem': '0', 'LocalService': '1', 'NetworkService': '2', 'SpecificUser': '3', 'ApplicationPoolIdentity': '4'}
ps_cmd = list()
container_path = r"IIS:\{0}\{1}".format(container, name)
if not settings:
log.warning('No settings provided')
return False
# Treat all values as strings for the purpose of comparing them to existing values.
for setting in settings:
settings[setting] = six.text_type(settings[setting])
current_settings = get_container_setting(
name=name, container=container, settings=settings.keys())
if settings == current_settings:
log.debug('Settings already contain the provided values.')
return True
for setting in settings:
# If the value is numeric, don't treat it as a string in PowerShell.
try:
complex(settings[setting])
value = settings[setting]
except ValueError:
value = "'{0}'".format(settings[setting])
# Map to numeric to support server 2008
if setting == 'processModel.identityType' and settings[setting] in identityType_map2numeric.keys():
value = identityType_map2numeric[settings[setting]]
ps_cmd.extend(['Set-ItemProperty',
'-Path', "'{0}'".format(container_path),
'-Name', "'{0}'".format(setting),
'-Value', '{0};'.format(value)])
cmd_ret = _srvmgr(ps_cmd)
if cmd_ret['retcode'] != 0:
msg = 'Unable to set settings for {0}: {1}'.format(container, name)
raise CommandExecutionError(msg)
# Get the fields post-change so that we can verify tht all values
# were modified successfully. Track the ones that weren't.
new_settings = get_container_setting(
name=name, container=container, settings=settings.keys())
failed_settings = dict()
for setting in settings:
# map identity type from numeric to string for comparing
if setting == 'processModel.identityType' and settings[setting] in identityType_map2string.keys():
settings[setting] = identityType_map2string[settings[setting]]
if six.text_type(settings[setting]) != six.text_type(new_settings[setting]):
failed_settings[setting] = settings[setting]
if failed_settings:
log.error('Failed to change settings: %s', failed_settings)
return False
log.debug('Settings configured successfully: %s', settings.keys())
return True |
def traverse(self, node):
"""Traverse the document tree rooted at node.
node(node): docutils node.
"""
self.find_and_replace(node)
for c in node.children:
self.traverse(c) | Traverse the document tree rooted at node.
node(node): docutils node. | Below is the the instruction that describes the task:
### Input:
Traverse the document tree rooted at node.
node(node): docutils node.
### Response:
def traverse(self, node):
"""Traverse the document tree rooted at node.
node(node): docutils node.
"""
self.find_and_replace(node)
for c in node.children:
self.traverse(c) |
def stab(nick, rest):
"Stab, shank or shiv some(one|thing)!"
if rest:
stabee = rest
else:
stabee = 'wildly at anything'
if random.random() < 0.9:
karma.Karma.store.change(stabee, -1)
weapon = random.choice(phrases.weapon_opts)
weaponadj = random.choice(phrases.weapon_adjs)
violentact = random.choice(phrases.violent_acts)
return "/me grabs a %s %s and %s %s!" % (
weaponadj, weapon, violentact, stabee)
elif random.random() < 0.6:
karma.Karma.store.change(stabee, -1)
return (
"/me is going to become rich and famous after i invent a "
"device that allows you to stab people in the face over the "
"internet")
else:
karma.Karma.store.change(nick, -1)
return (
"/me turns on its master and shivs %s. This is reality man, "
"and you never know what you're going to get!" % nick) | Stab, shank or shiv some(one|thing)! | Below is the the instruction that describes the task:
### Input:
Stab, shank or shiv some(one|thing)!
### Response:
def stab(nick, rest):
"Stab, shank or shiv some(one|thing)!"
if rest:
stabee = rest
else:
stabee = 'wildly at anything'
if random.random() < 0.9:
karma.Karma.store.change(stabee, -1)
weapon = random.choice(phrases.weapon_opts)
weaponadj = random.choice(phrases.weapon_adjs)
violentact = random.choice(phrases.violent_acts)
return "/me grabs a %s %s and %s %s!" % (
weaponadj, weapon, violentact, stabee)
elif random.random() < 0.6:
karma.Karma.store.change(stabee, -1)
return (
"/me is going to become rich and famous after i invent a "
"device that allows you to stab people in the face over the "
"internet")
else:
karma.Karma.store.change(nick, -1)
return (
"/me turns on its master and shivs %s. This is reality man, "
"and you never know what you're going to get!" % nick) |
def iou_coe(output, target, threshold=0.5, axis=(1, 2, 3), smooth=1e-5):
"""Non-differentiable Intersection over Union (IoU) for comparing the
similarity of two batch of data, usually be used for evaluating binary image segmentation.
The coefficient between 0 to 1, and 1 means totally match.
Parameters
-----------
output : tensor
A batch of distribution with shape: [batch_size, ....], (any dimensions).
target : tensor
The target distribution, format the same with `output`.
threshold : float
The threshold value to be true.
axis : tuple of integer
All dimensions are reduced, default ``(1,2,3)``.
smooth : float
This small value will be added to the numerator and denominator, see ``dice_coe``.
Notes
------
- IoU cannot be used as training loss, people usually use dice coefficient for training, IoU and hard-dice for evaluating.
"""
pre = tf.cast(output > threshold, dtype=tf.float32)
truth = tf.cast(target > threshold, dtype=tf.float32)
inse = tf.reduce_sum(tf.multiply(pre, truth), axis=axis) # AND
union = tf.reduce_sum(tf.cast(tf.add(pre, truth) >= 1, dtype=tf.float32), axis=axis) # OR
# old axis=[0,1,2,3]
# epsilon = 1e-5
# batch_iou = inse / (union + epsilon)
# new haodong
batch_iou = (inse + smooth) / (union + smooth)
iou = tf.reduce_mean(batch_iou, name='iou_coe')
return iou | Non-differentiable Intersection over Union (IoU) for comparing the
similarity of two batch of data, usually be used for evaluating binary image segmentation.
The coefficient between 0 to 1, and 1 means totally match.
Parameters
-----------
output : tensor
A batch of distribution with shape: [batch_size, ....], (any dimensions).
target : tensor
The target distribution, format the same with `output`.
threshold : float
The threshold value to be true.
axis : tuple of integer
All dimensions are reduced, default ``(1,2,3)``.
smooth : float
This small value will be added to the numerator and denominator, see ``dice_coe``.
Notes
------
- IoU cannot be used as training loss, people usually use dice coefficient for training, IoU and hard-dice for evaluating. | Below is the the instruction that describes the task:
### Input:
Non-differentiable Intersection over Union (IoU) for comparing the
similarity of two batch of data, usually be used for evaluating binary image segmentation.
The coefficient between 0 to 1, and 1 means totally match.
Parameters
-----------
output : tensor
A batch of distribution with shape: [batch_size, ....], (any dimensions).
target : tensor
The target distribution, format the same with `output`.
threshold : float
The threshold value to be true.
axis : tuple of integer
All dimensions are reduced, default ``(1,2,3)``.
smooth : float
This small value will be added to the numerator and denominator, see ``dice_coe``.
Notes
------
- IoU cannot be used as training loss, people usually use dice coefficient for training, IoU and hard-dice for evaluating.
### Response:
def iou_coe(output, target, threshold=0.5, axis=(1, 2, 3), smooth=1e-5):
"""Non-differentiable Intersection over Union (IoU) for comparing the
similarity of two batch of data, usually be used for evaluating binary image segmentation.
The coefficient between 0 to 1, and 1 means totally match.
Parameters
-----------
output : tensor
A batch of distribution with shape: [batch_size, ....], (any dimensions).
target : tensor
The target distribution, format the same with `output`.
threshold : float
The threshold value to be true.
axis : tuple of integer
All dimensions are reduced, default ``(1,2,3)``.
smooth : float
This small value will be added to the numerator and denominator, see ``dice_coe``.
Notes
------
- IoU cannot be used as training loss, people usually use dice coefficient for training, IoU and hard-dice for evaluating.
"""
pre = tf.cast(output > threshold, dtype=tf.float32)
truth = tf.cast(target > threshold, dtype=tf.float32)
inse = tf.reduce_sum(tf.multiply(pre, truth), axis=axis) # AND
union = tf.reduce_sum(tf.cast(tf.add(pre, truth) >= 1, dtype=tf.float32), axis=axis) # OR
# old axis=[0,1,2,3]
# epsilon = 1e-5
# batch_iou = inse / (union + epsilon)
# new haodong
batch_iou = (inse + smooth) / (union + smooth)
iou = tf.reduce_mean(batch_iou, name='iou_coe')
return iou |
def main():
"""Handles external calling for this module
Execute this python module and provide the args shown below to
external call this module to send email messages!
:return: None
"""
log = logging.getLogger(mod_logger + '.main')
parser = argparse.ArgumentParser(description='This module allows sending email messages.')
parser.add_argument('-f', '--file', help='Full path to a plain text file', required=False)
parser.add_argument('-s', '--sender', help='Email address of the sender', required=False)
parser.add_argument('-r', '--recipient', help='Email address of the recipient', required=False)
args = parser.parse_args()
am = AssetMailer()
err = None
if args.file:
try:
am.send_text_file(text_file=args.file, sender=args.sender, recipient=args.recipient)
except AssetMailerError:
_, ex, trace = sys.exc_info()
err = '{n}: There was a problem sending email with file {f} from sender {s} to recipient {r}:\n{e}'.format(
n=ex.__class__.__name__, f=args.file, s=args.sender, r=args.recipient, e=str(ex))
log.error(err)
else:
try:
am.send_cons3rt_agent_logs()
except AssetMailerError:
_, ex, trace = sys.exc_info()
err = '{n}: There was a problem sending cons3rt agent log files:\n{e}'.format(
n=ex.__class__.__name__, e=str(ex))
log.error(err)
if err is None:
log.info('Successfully send email') | Handles external calling for this module
Execute this python module and provide the args shown below to
external call this module to send email messages!
:return: None | Below is the the instruction that describes the task:
### Input:
Handles external calling for this module
Execute this python module and provide the args shown below to
external call this module to send email messages!
:return: None
### Response:
def main():
"""Handles external calling for this module
Execute this python module and provide the args shown below to
external call this module to send email messages!
:return: None
"""
log = logging.getLogger(mod_logger + '.main')
parser = argparse.ArgumentParser(description='This module allows sending email messages.')
parser.add_argument('-f', '--file', help='Full path to a plain text file', required=False)
parser.add_argument('-s', '--sender', help='Email address of the sender', required=False)
parser.add_argument('-r', '--recipient', help='Email address of the recipient', required=False)
args = parser.parse_args()
am = AssetMailer()
err = None
if args.file:
try:
am.send_text_file(text_file=args.file, sender=args.sender, recipient=args.recipient)
except AssetMailerError:
_, ex, trace = sys.exc_info()
err = '{n}: There was a problem sending email with file {f} from sender {s} to recipient {r}:\n{e}'.format(
n=ex.__class__.__name__, f=args.file, s=args.sender, r=args.recipient, e=str(ex))
log.error(err)
else:
try:
am.send_cons3rt_agent_logs()
except AssetMailerError:
_, ex, trace = sys.exc_info()
err = '{n}: There was a problem sending cons3rt agent log files:\n{e}'.format(
n=ex.__class__.__name__, e=str(ex))
log.error(err)
if err is None:
log.info('Successfully send email') |
def split_window(self, attach=False, vertical=True, start_directory=None):
"""
Split window at pane and return newly created :class:`Pane`.
Parameters
----------
attach : bool, optional
Attach / select pane after creation.
vertical : bool, optional
split vertically
start_directory : str, optional
specifies the working directory in which the new pane is created.
Returns
-------
:class:`Pane`
"""
return self.window.split_window(
target=self.get('pane_id'),
start_directory=start_directory,
attach=attach,
vertical=vertical,
) | Split window at pane and return newly created :class:`Pane`.
Parameters
----------
attach : bool, optional
Attach / select pane after creation.
vertical : bool, optional
split vertically
start_directory : str, optional
specifies the working directory in which the new pane is created.
Returns
-------
:class:`Pane` | Below is the the instruction that describes the task:
### Input:
Split window at pane and return newly created :class:`Pane`.
Parameters
----------
attach : bool, optional
Attach / select pane after creation.
vertical : bool, optional
split vertically
start_directory : str, optional
specifies the working directory in which the new pane is created.
Returns
-------
:class:`Pane`
### Response:
def split_window(self, attach=False, vertical=True, start_directory=None):
"""
Split window at pane and return newly created :class:`Pane`.
Parameters
----------
attach : bool, optional
Attach / select pane after creation.
vertical : bool, optional
split vertically
start_directory : str, optional
specifies the working directory in which the new pane is created.
Returns
-------
:class:`Pane`
"""
return self.window.split_window(
target=self.get('pane_id'),
start_directory=start_directory,
attach=attach,
vertical=vertical,
) |
def load_module(self, filename):
'''Load a benchmark module from file'''
if not isinstance(filename, string_types):
return filename
basename = os.path.splitext(os.path.basename(filename))[0]
basename = basename.replace('.bench', '')
modulename = 'benchmarks.{0}'.format(basename)
return load_module(modulename, filename) | Load a benchmark module from file | Below is the the instruction that describes the task:
### Input:
Load a benchmark module from file
### Response:
def load_module(self, filename):
'''Load a benchmark module from file'''
if not isinstance(filename, string_types):
return filename
basename = os.path.splitext(os.path.basename(filename))[0]
basename = basename.replace('.bench', '')
modulename = 'benchmarks.{0}'.format(basename)
return load_module(modulename, filename) |
def loaded_instruments(self) -> Dict[str, Optional['InstrumentContext']]:
""" Get the instruments that have been loaded into the protocol.
:returns: A dict mapping mount names in lowercase to the instrument
in that mount, or `None` if no instrument is present.
"""
return {mount.name.lower(): instr for mount, instr
in self._instruments.items()} | Get the instruments that have been loaded into the protocol.
:returns: A dict mapping mount names in lowercase to the instrument
in that mount, or `None` if no instrument is present. | Below is the the instruction that describes the task:
### Input:
Get the instruments that have been loaded into the protocol.
:returns: A dict mapping mount names in lowercase to the instrument
in that mount, or `None` if no instrument is present.
### Response:
def loaded_instruments(self) -> Dict[str, Optional['InstrumentContext']]:
""" Get the instruments that have been loaded into the protocol.
:returns: A dict mapping mount names in lowercase to the instrument
in that mount, or `None` if no instrument is present.
"""
return {mount.name.lower(): instr for mount, instr
in self._instruments.items()} |
def numenta(self, X, Y):
r"""
Method that updates the network's connections.
Numenta's classic update:
- Visible to hidden: $ \Delta W_{ij} = y_i \cdot ( \varepsilon_{\small{+}} \ x_j - \varepsilon_{\small{-}} \ \bar x_j ) $
- Bias: $ b_{i} = B_b \cdot \alpha_i $
"""
batchSize = len(X)
W = self.connections.visible_to_hidden
n, m = W.shape
bias = self.connections.hidden_bias
incr = self.weight_incr
decr = self.weight_decr
boost_bias = self.boost_strength_bias
alpha = self.average_activity
#---------------------------
# visible-to-hidden updates
#---------------------------
for i in range(batchSize):
y = Y[i]
x = X[i]
x_bar = np.ones(m) - x
# Hebbian-like update
W[ np.where(y == 1)[0] ] += incr*x - decr*x_bar
# Clip the visible-to-hidden connections
# to be between $0$ and $1$
tooSmall = np.where(W < 0.)
tooBig = np.where(W > 1.)
W[ tooSmall ] = 0.
W[ tooBig ] = 1.
#---------------
# (Hidden) Bias
#---------------
for i in range(n):
bias[i] = boost_bias * alpha[i,i] | r"""
Method that updates the network's connections.
Numenta's classic update:
- Visible to hidden: $ \Delta W_{ij} = y_i \cdot ( \varepsilon_{\small{+}} \ x_j - \varepsilon_{\small{-}} \ \bar x_j ) $
- Bias: $ b_{i} = B_b \cdot \alpha_i $ | Below is the the instruction that describes the task:
### Input:
r"""
Method that updates the network's connections.
Numenta's classic update:
- Visible to hidden: $ \Delta W_{ij} = y_i \cdot ( \varepsilon_{\small{+}} \ x_j - \varepsilon_{\small{-}} \ \bar x_j ) $
- Bias: $ b_{i} = B_b \cdot \alpha_i $
### Response:
def numenta(self, X, Y):
r"""
Method that updates the network's connections.
Numenta's classic update:
- Visible to hidden: $ \Delta W_{ij} = y_i \cdot ( \varepsilon_{\small{+}} \ x_j - \varepsilon_{\small{-}} \ \bar x_j ) $
- Bias: $ b_{i} = B_b \cdot \alpha_i $
"""
batchSize = len(X)
W = self.connections.visible_to_hidden
n, m = W.shape
bias = self.connections.hidden_bias
incr = self.weight_incr
decr = self.weight_decr
boost_bias = self.boost_strength_bias
alpha = self.average_activity
#---------------------------
# visible-to-hidden updates
#---------------------------
for i in range(batchSize):
y = Y[i]
x = X[i]
x_bar = np.ones(m) - x
# Hebbian-like update
W[ np.where(y == 1)[0] ] += incr*x - decr*x_bar
# Clip the visible-to-hidden connections
# to be between $0$ and $1$
tooSmall = np.where(W < 0.)
tooBig = np.where(W > 1.)
W[ tooSmall ] = 0.
W[ tooBig ] = 1.
#---------------
# (Hidden) Bias
#---------------
for i in range(n):
bias[i] = boost_bias * alpha[i,i] |
def filter_dict_by_key(d, keys):
"""Filter the dict *d* to remove keys not in *keys*."""
return {k: v for k, v in d.items() if k in keys} | Filter the dict *d* to remove keys not in *keys*. | Below is the the instruction that describes the task:
### Input:
Filter the dict *d* to remove keys not in *keys*.
### Response:
def filter_dict_by_key(d, keys):
"""Filter the dict *d* to remove keys not in *keys*."""
return {k: v for k, v in d.items() if k in keys} |
def get_composers(self, *args, **kwargs):
"""Convenience method for `get_music_library_information`
with ``search_type='composers'``. For details of other arguments,
see `that method
<#soco.music_library.MusicLibrary.get_music_library_information>`_.
"""
args = tuple(['composers'] + list(args))
return self.get_music_library_information(*args, **kwargs) | Convenience method for `get_music_library_information`
with ``search_type='composers'``. For details of other arguments,
see `that method
<#soco.music_library.MusicLibrary.get_music_library_information>`_. | Below is the the instruction that describes the task:
### Input:
Convenience method for `get_music_library_information`
with ``search_type='composers'``. For details of other arguments,
see `that method
<#soco.music_library.MusicLibrary.get_music_library_information>`_.
### Response:
def get_composers(self, *args, **kwargs):
"""Convenience method for `get_music_library_information`
with ``search_type='composers'``. For details of other arguments,
see `that method
<#soco.music_library.MusicLibrary.get_music_library_information>`_.
"""
args = tuple(['composers'] + list(args))
return self.get_music_library_information(*args, **kwargs) |
def set_(key, value, profile=None):
'''
Set a key/value pair in the vault service
'''
if '?' in key:
__utils__['versions.warn_until'](
'Neon',
(
'Using ? to seperate between the path and key for vault has been deprecated '
'and will be removed in {version}. Please just use a /.'
),
)
path, key = key.split('?')
else:
path, key = key.rsplit('/', 1)
try:
url = 'v1/{0}'.format(path)
data = {key: value}
response = __utils__['vault.make_request'](
'POST',
url,
profile,
json=data)
if response.status_code != 204:
response.raise_for_status()
return True
except Exception as e:
log.error('Failed to write secret! %s: %s', type(e).__name__, e)
raise salt.exceptions.CommandExecutionError(e) | Set a key/value pair in the vault service | Below is the the instruction that describes the task:
### Input:
Set a key/value pair in the vault service
### Response:
def set_(key, value, profile=None):
'''
Set a key/value pair in the vault service
'''
if '?' in key:
__utils__['versions.warn_until'](
'Neon',
(
'Using ? to seperate between the path and key for vault has been deprecated '
'and will be removed in {version}. Please just use a /.'
),
)
path, key = key.split('?')
else:
path, key = key.rsplit('/', 1)
try:
url = 'v1/{0}'.format(path)
data = {key: value}
response = __utils__['vault.make_request'](
'POST',
url,
profile,
json=data)
if response.status_code != 204:
response.raise_for_status()
return True
except Exception as e:
log.error('Failed to write secret! %s: %s', type(e).__name__, e)
raise salt.exceptions.CommandExecutionError(e) |
def return_dat(self, chan, begsam, endsam):
"""Return the data as 2D numpy.ndarray.
Parameters
----------
chan : list of int
index (indices) of the channels to read
begsam : int
index of the first sample (inclusively)
endsam : int
index of the last sample (exclusively)
Returns
-------
numpy.ndarray
A 2d matrix, with dimension chan X samples.
"""
#n_sam = self.hdr[4]
interval = endsam - begsam
dat = empty((len(chan), interval))
#beg_block = floor((begsam / n_sam) * n_block)
#end_block = floor((endsam / n_sam) * n_block)
for i, chan in enumerate(chan):
k = 0
with open(self.chan_files[chan], 'rt') as f:
f.readline()
for j, datum in enumerate(f):
if begsam <= j + 1 < endsam:
dat[i, k] = float64(datum)
k += 1
if k == interval:
break
# calibration
phys_range = self.phys_max - self.phys_min
dig_range = self.dig_max - self.dig_min
gain = phys_range / dig_range
dat *= gain
return dat | Return the data as 2D numpy.ndarray.
Parameters
----------
chan : list of int
index (indices) of the channels to read
begsam : int
index of the first sample (inclusively)
endsam : int
index of the last sample (exclusively)
Returns
-------
numpy.ndarray
A 2d matrix, with dimension chan X samples. | Below is the the instruction that describes the task:
### Input:
Return the data as 2D numpy.ndarray.
Parameters
----------
chan : list of int
index (indices) of the channels to read
begsam : int
index of the first sample (inclusively)
endsam : int
index of the last sample (exclusively)
Returns
-------
numpy.ndarray
A 2d matrix, with dimension chan X samples.
### Response:
def return_dat(self, chan, begsam, endsam):
"""Return the data as 2D numpy.ndarray.
Parameters
----------
chan : list of int
index (indices) of the channels to read
begsam : int
index of the first sample (inclusively)
endsam : int
index of the last sample (exclusively)
Returns
-------
numpy.ndarray
A 2d matrix, with dimension chan X samples.
"""
#n_sam = self.hdr[4]
interval = endsam - begsam
dat = empty((len(chan), interval))
#beg_block = floor((begsam / n_sam) * n_block)
#end_block = floor((endsam / n_sam) * n_block)
for i, chan in enumerate(chan):
k = 0
with open(self.chan_files[chan], 'rt') as f:
f.readline()
for j, datum in enumerate(f):
if begsam <= j + 1 < endsam:
dat[i, k] = float64(datum)
k += 1
if k == interval:
break
# calibration
phys_range = self.phys_max - self.phys_min
dig_range = self.dig_max - self.dig_min
gain = phys_range / dig_range
dat *= gain
return dat |
def file_fingerprint(fullpath):
""" Get a metadata fingerprint for a file """
stat = os.stat(fullpath)
return ','.join([str(value) for value in [stat.st_ino, stat.st_mtime, stat.st_size] if value]) | Get a metadata fingerprint for a file | Below is the the instruction that describes the task:
### Input:
Get a metadata fingerprint for a file
### Response:
def file_fingerprint(fullpath):
""" Get a metadata fingerprint for a file """
stat = os.stat(fullpath)
return ','.join([str(value) for value in [stat.st_ino, stat.st_mtime, stat.st_size] if value]) |
def initWithComplexQuery(query):
"""
create a query using a complex article query
"""
q = QueryArticles()
# provided an instance of ComplexArticleQuery
if isinstance(query, ComplexArticleQuery):
q._setVal("query", json.dumps(query.getQuery()))
# provided query as a string containing the json object
elif isinstance(query, six.string_types):
foo = json.loads(query)
q._setVal("query", query)
# provided query as a python dict
elif isinstance(query, dict):
q._setVal("query", json.dumps(query))
else:
assert False, "The instance of query parameter was not a ComplexArticleQuery, a string or a python dict"
return q | create a query using a complex article query | Below is the the instruction that describes the task:
### Input:
create a query using a complex article query
### Response:
def initWithComplexQuery(query):
"""
create a query using a complex article query
"""
q = QueryArticles()
# provided an instance of ComplexArticleQuery
if isinstance(query, ComplexArticleQuery):
q._setVal("query", json.dumps(query.getQuery()))
# provided query as a string containing the json object
elif isinstance(query, six.string_types):
foo = json.loads(query)
q._setVal("query", query)
# provided query as a python dict
elif isinstance(query, dict):
q._setVal("query", json.dumps(query))
else:
assert False, "The instance of query parameter was not a ComplexArticleQuery, a string or a python dict"
return q |
def p_arg1(p):
"""
arg1 : STRING
| NUMBER
| IDENT
| GLOBAL
"""
# a hack to support "clear global"
p[0] = node.string(value=str(p[1]), lineno=p.lineno(1), lexpos=p.lexpos(1)) | arg1 : STRING
| NUMBER
| IDENT
| GLOBAL | Below is the the instruction that describes the task:
### Input:
arg1 : STRING
| NUMBER
| IDENT
| GLOBAL
### Response:
def p_arg1(p):
"""
arg1 : STRING
| NUMBER
| IDENT
| GLOBAL
"""
# a hack to support "clear global"
p[0] = node.string(value=str(p[1]), lineno=p.lineno(1), lexpos=p.lexpos(1)) |
def reference(self, ):
"""Reference a file
:returns: None
:rtype: None
:raises: None
"""
tfi = self.get_taskfileinfo_selection()
if tfi:
self.reftrack.reference(tfi) | Reference a file
:returns: None
:rtype: None
:raises: None | Below is the the instruction that describes the task:
### Input:
Reference a file
:returns: None
:rtype: None
:raises: None
### Response:
def reference(self, ):
"""Reference a file
:returns: None
:rtype: None
:raises: None
"""
tfi = self.get_taskfileinfo_selection()
if tfi:
self.reftrack.reference(tfi) |
def spatial_slice_zeros(x):
"""Experimental summary that shows how many planes are unused for a batch."""
return tf.cast(tf.reduce_all(tf.less_equal(x, 0.0), [0, 1, 2]),
tf.float32) | Experimental summary that shows how many planes are unused for a batch. | Below is the the instruction that describes the task:
### Input:
Experimental summary that shows how many planes are unused for a batch.
### Response:
def spatial_slice_zeros(x):
"""Experimental summary that shows how many planes are unused for a batch."""
return tf.cast(tf.reduce_all(tf.less_equal(x, 0.0), [0, 1, 2]),
tf.float32) |
def vT(self,*args,**kwargs):
"""
NAME:
vT
PURPOSE:
return tangential velocity at time t
INPUT:
t - (optional) time at which to get the tangential velocity
vo= (Object-wide default) physical scale for velocities to use to convert
use_physical= use to override Object-wide default for using a physical scale for output
OUTPUT:
vT(t)
HISTORY:
2010-09-21 - Written - Bovy (NYU)
"""
thiso= self(*args,**kwargs)
onet= (len(thiso.shape) == 1)
if onet: return thiso[2]
else: return thiso[2,:] | NAME:
vT
PURPOSE:
return tangential velocity at time t
INPUT:
t - (optional) time at which to get the tangential velocity
vo= (Object-wide default) physical scale for velocities to use to convert
use_physical= use to override Object-wide default for using a physical scale for output
OUTPUT:
vT(t)
HISTORY:
2010-09-21 - Written - Bovy (NYU) | Below is the the instruction that describes the task:
### Input:
NAME:
vT
PURPOSE:
return tangential velocity at time t
INPUT:
t - (optional) time at which to get the tangential velocity
vo= (Object-wide default) physical scale for velocities to use to convert
use_physical= use to override Object-wide default for using a physical scale for output
OUTPUT:
vT(t)
HISTORY:
2010-09-21 - Written - Bovy (NYU)
### Response:
def vT(self,*args,**kwargs):
"""
NAME:
vT
PURPOSE:
return tangential velocity at time t
INPUT:
t - (optional) time at which to get the tangential velocity
vo= (Object-wide default) physical scale for velocities to use to convert
use_physical= use to override Object-wide default for using a physical scale for output
OUTPUT:
vT(t)
HISTORY:
2010-09-21 - Written - Bovy (NYU)
"""
thiso= self(*args,**kwargs)
onet= (len(thiso.shape) == 1)
if onet: return thiso[2]
else: return thiso[2,:] |
def setup(self):
"""
Called by pytest to setup the collector cells in .
Here we start a kernel and setup the sanitize patterns.
"""
if self.parent.config.option.current_env:
kernel_name = CURRENT_ENV_KERNEL_NAME
else:
kernel_name = self.nb.metadata.get(
'kernelspec', {}).get('name', 'python')
self.kernel = RunningKernel(kernel_name, str(self.fspath.dirname))
self.setup_sanitize_files()
if getattr(self.parent.config.option, 'cov_source', None):
setup_coverage(self.parent.config, self.kernel, getattr(self, "fspath", None)) | Called by pytest to setup the collector cells in .
Here we start a kernel and setup the sanitize patterns. | Below is the the instruction that describes the task:
### Input:
Called by pytest to setup the collector cells in .
Here we start a kernel and setup the sanitize patterns.
### Response:
def setup(self):
"""
Called by pytest to setup the collector cells in .
Here we start a kernel and setup the sanitize patterns.
"""
if self.parent.config.option.current_env:
kernel_name = CURRENT_ENV_KERNEL_NAME
else:
kernel_name = self.nb.metadata.get(
'kernelspec', {}).get('name', 'python')
self.kernel = RunningKernel(kernel_name, str(self.fspath.dirname))
self.setup_sanitize_files()
if getattr(self.parent.config.option, 'cov_source', None):
setup_coverage(self.parent.config, self.kernel, getattr(self, "fspath", None)) |
def add_element(self, name, ns_uri=None, attributes=None,
text=None, before_this_element=False):
"""
Add a new child element to this element, with an optional namespace
definition. If no namespace is provided the child will be assigned
to the default namespace.
:param string name: a name for the child node. The name may be used
to apply a namespace to the child by including:
- a prefix component in the name of the form
``ns_prefix:element_name``, where the prefix has already been
defined for a namespace URI (such as via :meth:`set_ns_prefix`).
- a literal namespace URI value delimited by curly braces, of
the form ``{ns_uri}element_name``.
:param ns_uri: a URI specifying the new element's namespace. If the
``name`` parameter specifies a namespace this parameter is ignored.
:type ns_uri: string or None
:param attributes: collection of attributes to assign to the new child.
:type attributes: dict, list, tuple, or None
:param text: text value to assign to the new child.
:type text: string or None
:param bool before_this_element: if *True* the new element is
added as a sibling preceding this element, instead of as a child.
In other words, the new element will be a child of this element's
parent node, and will immediately precent this element in the DOM.
:return: the new child as a an :class:`Element` node.
"""
# Determine local name, namespace and prefix info from tag name
prefix, local_name, node_ns_uri = \
self.adapter.get_ns_info_from_node_name(name, self.impl_node)
if prefix:
qname = u'%s:%s' % (prefix, local_name)
else:
qname = local_name
# If no name-derived namespace, apply an alternate namespace
if node_ns_uri is None:
if ns_uri is None:
# Default document namespace
node_ns_uri = self.adapter.get_ns_uri_for_prefix(
self.impl_node, None)
else:
# keyword-parameter namespace
node_ns_uri = ns_uri
# Create element
child_elem = self.adapter.new_impl_element(
qname, node_ns_uri, parent=self.impl_node)
# If element's default namespace was defined by literal uri prefix,
# create corresponding xmlns attribute for element...
if not prefix and '}' in name:
self._set_element_attributes(child_elem,
{'xmlns': node_ns_uri}, ns_uri=self.XMLNS_URI)
# ...otherwise define keyword-defined namespace as the default, if any
elif ns_uri is not None:
self._set_element_attributes(child_elem,
{'xmlns': ns_uri}, ns_uri=self.XMLNS_URI)
# Create subordinate nodes
if attributes is not None:
self._set_element_attributes(child_elem, attr_obj=attributes)
if text is not None:
self._add_text(child_elem, text)
# Add new element to its parent before a given node...
if before_this_element:
self.adapter.add_node_child(
self.adapter.get_node_parent(self.impl_node),
child_elem, before_sibling=self.impl_node)
# ...or in the default position, appended after existing nodes
else:
self.adapter.add_node_child(self.impl_node, child_elem)
return self.adapter.wrap_node(
child_elem, self.adapter.impl_document, self.adapter) | Add a new child element to this element, with an optional namespace
definition. If no namespace is provided the child will be assigned
to the default namespace.
:param string name: a name for the child node. The name may be used
to apply a namespace to the child by including:
- a prefix component in the name of the form
``ns_prefix:element_name``, where the prefix has already been
defined for a namespace URI (such as via :meth:`set_ns_prefix`).
- a literal namespace URI value delimited by curly braces, of
the form ``{ns_uri}element_name``.
:param ns_uri: a URI specifying the new element's namespace. If the
``name`` parameter specifies a namespace this parameter is ignored.
:type ns_uri: string or None
:param attributes: collection of attributes to assign to the new child.
:type attributes: dict, list, tuple, or None
:param text: text value to assign to the new child.
:type text: string or None
:param bool before_this_element: if *True* the new element is
added as a sibling preceding this element, instead of as a child.
In other words, the new element will be a child of this element's
parent node, and will immediately precent this element in the DOM.
:return: the new child as a an :class:`Element` node. | Below is the the instruction that describes the task:
### Input:
Add a new child element to this element, with an optional namespace
definition. If no namespace is provided the child will be assigned
to the default namespace.
:param string name: a name for the child node. The name may be used
to apply a namespace to the child by including:
- a prefix component in the name of the form
``ns_prefix:element_name``, where the prefix has already been
defined for a namespace URI (such as via :meth:`set_ns_prefix`).
- a literal namespace URI value delimited by curly braces, of
the form ``{ns_uri}element_name``.
:param ns_uri: a URI specifying the new element's namespace. If the
``name`` parameter specifies a namespace this parameter is ignored.
:type ns_uri: string or None
:param attributes: collection of attributes to assign to the new child.
:type attributes: dict, list, tuple, or None
:param text: text value to assign to the new child.
:type text: string or None
:param bool before_this_element: if *True* the new element is
added as a sibling preceding this element, instead of as a child.
In other words, the new element will be a child of this element's
parent node, and will immediately precent this element in the DOM.
:return: the new child as a an :class:`Element` node.
### Response:
def add_element(self, name, ns_uri=None, attributes=None,
text=None, before_this_element=False):
"""
Add a new child element to this element, with an optional namespace
definition. If no namespace is provided the child will be assigned
to the default namespace.
:param string name: a name for the child node. The name may be used
to apply a namespace to the child by including:
- a prefix component in the name of the form
``ns_prefix:element_name``, where the prefix has already been
defined for a namespace URI (such as via :meth:`set_ns_prefix`).
- a literal namespace URI value delimited by curly braces, of
the form ``{ns_uri}element_name``.
:param ns_uri: a URI specifying the new element's namespace. If the
``name`` parameter specifies a namespace this parameter is ignored.
:type ns_uri: string or None
:param attributes: collection of attributes to assign to the new child.
:type attributes: dict, list, tuple, or None
:param text: text value to assign to the new child.
:type text: string or None
:param bool before_this_element: if *True* the new element is
added as a sibling preceding this element, instead of as a child.
In other words, the new element will be a child of this element's
parent node, and will immediately precent this element in the DOM.
:return: the new child as a an :class:`Element` node.
"""
# Determine local name, namespace and prefix info from tag name
prefix, local_name, node_ns_uri = \
self.adapter.get_ns_info_from_node_name(name, self.impl_node)
if prefix:
qname = u'%s:%s' % (prefix, local_name)
else:
qname = local_name
# If no name-derived namespace, apply an alternate namespace
if node_ns_uri is None:
if ns_uri is None:
# Default document namespace
node_ns_uri = self.adapter.get_ns_uri_for_prefix(
self.impl_node, None)
else:
# keyword-parameter namespace
node_ns_uri = ns_uri
# Create element
child_elem = self.adapter.new_impl_element(
qname, node_ns_uri, parent=self.impl_node)
# If element's default namespace was defined by literal uri prefix,
# create corresponding xmlns attribute for element...
if not prefix and '}' in name:
self._set_element_attributes(child_elem,
{'xmlns': node_ns_uri}, ns_uri=self.XMLNS_URI)
# ...otherwise define keyword-defined namespace as the default, if any
elif ns_uri is not None:
self._set_element_attributes(child_elem,
{'xmlns': ns_uri}, ns_uri=self.XMLNS_URI)
# Create subordinate nodes
if attributes is not None:
self._set_element_attributes(child_elem, attr_obj=attributes)
if text is not None:
self._add_text(child_elem, text)
# Add new element to its parent before a given node...
if before_this_element:
self.adapter.add_node_child(
self.adapter.get_node_parent(self.impl_node),
child_elem, before_sibling=self.impl_node)
# ...or in the default position, appended after existing nodes
else:
self.adapter.add_node_child(self.impl_node, child_elem)
return self.adapter.wrap_node(
child_elem, self.adapter.impl_document, self.adapter) |
def plain_text_iter(fname: str, text_type: str, data_side: str) -> Iterable[str]:
"""
Extract plain text from file as iterable. Also take steps to ensure that
whitespace characters (including unicode newlines) are normalized and
outputs are line-parallel with inputs considering ASCII newlines only.
:param fname: Path of possibly gzipped input file.
:param text_type: One of TEXT_*, indicating data format.
:param data_side: DATA_SRC or DATA_TRG.
"""
if text_type in (TEXT_UTF8_RAW, TEXT_UTF8_TOKENIZED):
with third_party.bin_open(fname) as inp:
for line in inp:
line = re.sub(r"\s", " ", line.decode("utf-8"))
yield line.strip()
elif text_type == TEXT_UTF8_RAW_SGML:
with third_party.bin_open(fname) as inp:
for line in inp:
line = re.sub(r"\s", " ", line.decode("utf-8"))
if line.startswith("<seg "):
# Extract segment text
text = re.sub(r"<seg.*?>(.*)</seg>.*?", "\\1", line)
text = re.sub(r"\s+", " ", text.strip())
# Unescape XML entities
text = text.replace(""", "\"")
text = text.replace("'", "'")
text = text.replace("<", "<")
text = text.replace(">", ">")
text = text.replace("&", "&")
yield text
elif text_type in (TEXT_UTF8_RAW_BITEXT, TEXT_UTF8_RAW_BITEXT_REVERSE):
# Select source or target field, reversing if needed
if text_type == TEXT_UTF8_RAW_BITEXT:
field_id = 0 if data_side == DATA_SRC else 1
else:
field_id = 1 if data_side == DATA_SRC else 0
with third_party.bin_open(fname) as inp:
for line in inp:
line = re.sub(r"\s", " ", line.decode("utf-8"))
fields = line.split("|||")
yield fields[field_id].strip()
else:
raise RuntimeError("Unknown text type: %s" % text_type) | Extract plain text from file as iterable. Also take steps to ensure that
whitespace characters (including unicode newlines) are normalized and
outputs are line-parallel with inputs considering ASCII newlines only.
:param fname: Path of possibly gzipped input file.
:param text_type: One of TEXT_*, indicating data format.
:param data_side: DATA_SRC or DATA_TRG. | Below is the the instruction that describes the task:
### Input:
Extract plain text from file as iterable. Also take steps to ensure that
whitespace characters (including unicode newlines) are normalized and
outputs are line-parallel with inputs considering ASCII newlines only.
:param fname: Path of possibly gzipped input file.
:param text_type: One of TEXT_*, indicating data format.
:param data_side: DATA_SRC or DATA_TRG.
### Response:
def plain_text_iter(fname: str, text_type: str, data_side: str) -> Iterable[str]:
"""
Extract plain text from file as iterable. Also take steps to ensure that
whitespace characters (including unicode newlines) are normalized and
outputs are line-parallel with inputs considering ASCII newlines only.
:param fname: Path of possibly gzipped input file.
:param text_type: One of TEXT_*, indicating data format.
:param data_side: DATA_SRC or DATA_TRG.
"""
if text_type in (TEXT_UTF8_RAW, TEXT_UTF8_TOKENIZED):
with third_party.bin_open(fname) as inp:
for line in inp:
line = re.sub(r"\s", " ", line.decode("utf-8"))
yield line.strip()
elif text_type == TEXT_UTF8_RAW_SGML:
with third_party.bin_open(fname) as inp:
for line in inp:
line = re.sub(r"\s", " ", line.decode("utf-8"))
if line.startswith("<seg "):
# Extract segment text
text = re.sub(r"<seg.*?>(.*)</seg>.*?", "\\1", line)
text = re.sub(r"\s+", " ", text.strip())
# Unescape XML entities
text = text.replace(""", "\"")
text = text.replace("'", "'")
text = text.replace("<", "<")
text = text.replace(">", ">")
text = text.replace("&", "&")
yield text
elif text_type in (TEXT_UTF8_RAW_BITEXT, TEXT_UTF8_RAW_BITEXT_REVERSE):
# Select source or target field, reversing if needed
if text_type == TEXT_UTF8_RAW_BITEXT:
field_id = 0 if data_side == DATA_SRC else 1
else:
field_id = 1 if data_side == DATA_SRC else 0
with third_party.bin_open(fname) as inp:
for line in inp:
line = re.sub(r"\s", " ", line.decode("utf-8"))
fields = line.split("|||")
yield fields[field_id].strip()
else:
raise RuntimeError("Unknown text type: %s" % text_type) |
def in_distance(self, distance, fields, points, annotate='_ed_distance'):
"""Filter rows inside a circunference of radius distance `distance`
:param distance: max distance to allow
:param fields: `tuple` with the fields to filter (latitude, longitude)
:param points: center of the circunference (latitude, longitude)
:param annotate: name where the distance will be annotated
"""
clone = self._clone()
return clone.annotate(
**{annotate: EarthDistance([
LlToEarth(fields), LlToEarth(points)])
}).filter(**{'{0}__lte'.format(annotate): distance}) | Filter rows inside a circunference of radius distance `distance`
:param distance: max distance to allow
:param fields: `tuple` with the fields to filter (latitude, longitude)
:param points: center of the circunference (latitude, longitude)
:param annotate: name where the distance will be annotated | Below is the the instruction that describes the task:
### Input:
Filter rows inside a circunference of radius distance `distance`
:param distance: max distance to allow
:param fields: `tuple` with the fields to filter (latitude, longitude)
:param points: center of the circunference (latitude, longitude)
:param annotate: name where the distance will be annotated
### Response:
def in_distance(self, distance, fields, points, annotate='_ed_distance'):
"""Filter rows inside a circunference of radius distance `distance`
:param distance: max distance to allow
:param fields: `tuple` with the fields to filter (latitude, longitude)
:param points: center of the circunference (latitude, longitude)
:param annotate: name where the distance will be annotated
"""
clone = self._clone()
return clone.annotate(
**{annotate: EarthDistance([
LlToEarth(fields), LlToEarth(points)])
}).filter(**{'{0}__lte'.format(annotate): distance}) |
def lzwdecode(data):
"""
>>> lzwdecode(b'\x80\x0b\x60\x50\x22\x0c\x0c\x85\x01')
'\x2d\x2d\x2d\x2d\x2d\x41\x2d\x2d\x2d\x42'
"""
fp = BytesIO(data)
return b''.join(LZWDecoder(fp).run()) | >>> lzwdecode(b'\x80\x0b\x60\x50\x22\x0c\x0c\x85\x01')
'\x2d\x2d\x2d\x2d\x2d\x41\x2d\x2d\x2d\x42' | Below is the the instruction that describes the task:
### Input:
>>> lzwdecode(b'\x80\x0b\x60\x50\x22\x0c\x0c\x85\x01')
'\x2d\x2d\x2d\x2d\x2d\x41\x2d\x2d\x2d\x42'
### Response:
def lzwdecode(data):
"""
>>> lzwdecode(b'\x80\x0b\x60\x50\x22\x0c\x0c\x85\x01')
'\x2d\x2d\x2d\x2d\x2d\x41\x2d\x2d\x2d\x42'
"""
fp = BytesIO(data)
return b''.join(LZWDecoder(fp).run()) |
def get_gallery_folder_list(self, scope='content/read'):
"""
Retrieve a list of the Mxit user's gallery folders
User authentication required with the following scope: 'content/read'
"""
folder_list = _get(
token=self.oauth.get_user_token(scope),
uri='/user/media'
)
try:
return json.loads(folder_list)
except:
raise MxitAPIException('Error parsing gallery folder list') | Retrieve a list of the Mxit user's gallery folders
User authentication required with the following scope: 'content/read' | Below is the the instruction that describes the task:
### Input:
Retrieve a list of the Mxit user's gallery folders
User authentication required with the following scope: 'content/read'
### Response:
def get_gallery_folder_list(self, scope='content/read'):
"""
Retrieve a list of the Mxit user's gallery folders
User authentication required with the following scope: 'content/read'
"""
folder_list = _get(
token=self.oauth.get_user_token(scope),
uri='/user/media'
)
try:
return json.loads(folder_list)
except:
raise MxitAPIException('Error parsing gallery folder list') |
def constraint_range_dict(self,*args,**kwargs):
"""
Creates a list of dictionaries which each give a constraint for a certain
section of the dimension.
bins arguments overwrites resolution
"""
bins = self.bins(*args,**kwargs)
return [{self.name+'__gte': a,self.name+'__lt': b} for a,b in zip(bins[:-1],bins[1:])]
space = self.space(*args,**kwargs)
resolution = space[1] - space[0]
return [{self.name+'__gte': s,self.name+'__lt': s+resolution} for s in space] | Creates a list of dictionaries which each give a constraint for a certain
section of the dimension.
bins arguments overwrites resolution | Below is the the instruction that describes the task:
### Input:
Creates a list of dictionaries which each give a constraint for a certain
section of the dimension.
bins arguments overwrites resolution
### Response:
def constraint_range_dict(self,*args,**kwargs):
"""
Creates a list of dictionaries which each give a constraint for a certain
section of the dimension.
bins arguments overwrites resolution
"""
bins = self.bins(*args,**kwargs)
return [{self.name+'__gte': a,self.name+'__lt': b} for a,b in zip(bins[:-1],bins[1:])]
space = self.space(*args,**kwargs)
resolution = space[1] - space[0]
return [{self.name+'__gte': s,self.name+'__lt': s+resolution} for s in space] |
def add_noise(data, sigma=1.0, noise_type='gauss'):
r"""Add noise to data
This method adds Gaussian or Poisson noise to the input data
Parameters
----------
data : np.ndarray, list or tuple
Input data array
sigma : float or list, optional
Standard deviation of the noise to be added ('gauss' only)
noise_type : str {'gauss', 'poisson'}
Type of noise to be added (default is 'gauss')
Returns
-------
np.ndarray input data with added noise
Raises
------
ValueError
If `noise_type` is not 'gauss' or 'poisson'
ValueError
If number of `sigma` values does not match the first dimension of the
input data
Examples
--------
>>> import numpy as np
>>> from modopt.signal.noise import add_noise
>>> x = np.arange(9).reshape(3, 3).astype(float)
>>> x
array([[ 0., 1., 2.],
[ 3., 4., 5.],
[ 6., 7., 8.]])
>>> np.random.seed(1)
>>> add_noise(x, noise_type='poisson')
array([[ 0., 2., 2.],
[ 4., 5., 10.],
[ 11., 15., 18.]])
>>> import numpy as np
>>> from modopt.signal.noise import add_noise
>>> x = np.zeros(5)
>>> x
array([ 0., 0., 0., 0., 0.])
>>> np.random.seed(1)
>>> add_noise(x, sigma=2.0)
array([ 3.24869073, -1.22351283, -1.0563435 , -2.14593724, 1.73081526])
"""
data = np.array(data)
if noise_type not in ('gauss', 'poisson'):
raise ValueError('Invalid noise type. Options are "gauss" or'
'"poisson"')
if isinstance(sigma, (list, tuple, np.ndarray)):
if len(sigma) != data.shape[0]:
raise ValueError('Number of sigma values must match first '
'dimension of input data')
if noise_type is 'gauss':
random = np.random.randn(*data.shape)
elif noise_type is 'poisson':
random = np.random.poisson(np.abs(data))
if isinstance(sigma, (int, float)):
return data + sigma * random
else:
return data + np.array([s * r for s, r in zip(sigma, random)]) | r"""Add noise to data
This method adds Gaussian or Poisson noise to the input data
Parameters
----------
data : np.ndarray, list or tuple
Input data array
sigma : float or list, optional
Standard deviation of the noise to be added ('gauss' only)
noise_type : str {'gauss', 'poisson'}
Type of noise to be added (default is 'gauss')
Returns
-------
np.ndarray input data with added noise
Raises
------
ValueError
If `noise_type` is not 'gauss' or 'poisson'
ValueError
If number of `sigma` values does not match the first dimension of the
input data
Examples
--------
>>> import numpy as np
>>> from modopt.signal.noise import add_noise
>>> x = np.arange(9).reshape(3, 3).astype(float)
>>> x
array([[ 0., 1., 2.],
[ 3., 4., 5.],
[ 6., 7., 8.]])
>>> np.random.seed(1)
>>> add_noise(x, noise_type='poisson')
array([[ 0., 2., 2.],
[ 4., 5., 10.],
[ 11., 15., 18.]])
>>> import numpy as np
>>> from modopt.signal.noise import add_noise
>>> x = np.zeros(5)
>>> x
array([ 0., 0., 0., 0., 0.])
>>> np.random.seed(1)
>>> add_noise(x, sigma=2.0)
array([ 3.24869073, -1.22351283, -1.0563435 , -2.14593724, 1.73081526]) | Below is the the instruction that describes the task:
### Input:
r"""Add noise to data
This method adds Gaussian or Poisson noise to the input data
Parameters
----------
data : np.ndarray, list or tuple
Input data array
sigma : float or list, optional
Standard deviation of the noise to be added ('gauss' only)
noise_type : str {'gauss', 'poisson'}
Type of noise to be added (default is 'gauss')
Returns
-------
np.ndarray input data with added noise
Raises
------
ValueError
If `noise_type` is not 'gauss' or 'poisson'
ValueError
If number of `sigma` values does not match the first dimension of the
input data
Examples
--------
>>> import numpy as np
>>> from modopt.signal.noise import add_noise
>>> x = np.arange(9).reshape(3, 3).astype(float)
>>> x
array([[ 0., 1., 2.],
[ 3., 4., 5.],
[ 6., 7., 8.]])
>>> np.random.seed(1)
>>> add_noise(x, noise_type='poisson')
array([[ 0., 2., 2.],
[ 4., 5., 10.],
[ 11., 15., 18.]])
>>> import numpy as np
>>> from modopt.signal.noise import add_noise
>>> x = np.zeros(5)
>>> x
array([ 0., 0., 0., 0., 0.])
>>> np.random.seed(1)
>>> add_noise(x, sigma=2.0)
array([ 3.24869073, -1.22351283, -1.0563435 , -2.14593724, 1.73081526])
### Response:
def add_noise(data, sigma=1.0, noise_type='gauss'):
r"""Add noise to data
This method adds Gaussian or Poisson noise to the input data
Parameters
----------
data : np.ndarray, list or tuple
Input data array
sigma : float or list, optional
Standard deviation of the noise to be added ('gauss' only)
noise_type : str {'gauss', 'poisson'}
Type of noise to be added (default is 'gauss')
Returns
-------
np.ndarray input data with added noise
Raises
------
ValueError
If `noise_type` is not 'gauss' or 'poisson'
ValueError
If number of `sigma` values does not match the first dimension of the
input data
Examples
--------
>>> import numpy as np
>>> from modopt.signal.noise import add_noise
>>> x = np.arange(9).reshape(3, 3).astype(float)
>>> x
array([[ 0., 1., 2.],
[ 3., 4., 5.],
[ 6., 7., 8.]])
>>> np.random.seed(1)
>>> add_noise(x, noise_type='poisson')
array([[ 0., 2., 2.],
[ 4., 5., 10.],
[ 11., 15., 18.]])
>>> import numpy as np
>>> from modopt.signal.noise import add_noise
>>> x = np.zeros(5)
>>> x
array([ 0., 0., 0., 0., 0.])
>>> np.random.seed(1)
>>> add_noise(x, sigma=2.0)
array([ 3.24869073, -1.22351283, -1.0563435 , -2.14593724, 1.73081526])
"""
data = np.array(data)
if noise_type not in ('gauss', 'poisson'):
raise ValueError('Invalid noise type. Options are "gauss" or'
'"poisson"')
if isinstance(sigma, (list, tuple, np.ndarray)):
if len(sigma) != data.shape[0]:
raise ValueError('Number of sigma values must match first '
'dimension of input data')
if noise_type is 'gauss':
random = np.random.randn(*data.shape)
elif noise_type is 'poisson':
random = np.random.poisson(np.abs(data))
if isinstance(sigma, (int, float)):
return data + sigma * random
else:
return data + np.array([s * r for s, r in zip(sigma, random)]) |
def dispatch_command(function, *args, **kwargs):
"""
A wrapper for :func:`dispatch` that creates a one-command parser.
Uses :attr:`PARSER_FORMATTER`.
This::
dispatch_command(foo)
...is a shortcut for::
parser = ArgumentParser()
set_default_command(parser, foo)
dispatch(parser)
This function can be also used as a decorator.
"""
parser = argparse.ArgumentParser(formatter_class=PARSER_FORMATTER)
set_default_command(parser, function)
dispatch(parser, *args, **kwargs) | A wrapper for :func:`dispatch` that creates a one-command parser.
Uses :attr:`PARSER_FORMATTER`.
This::
dispatch_command(foo)
...is a shortcut for::
parser = ArgumentParser()
set_default_command(parser, foo)
dispatch(parser)
This function can be also used as a decorator. | Below is the the instruction that describes the task:
### Input:
A wrapper for :func:`dispatch` that creates a one-command parser.
Uses :attr:`PARSER_FORMATTER`.
This::
dispatch_command(foo)
...is a shortcut for::
parser = ArgumentParser()
set_default_command(parser, foo)
dispatch(parser)
This function can be also used as a decorator.
### Response:
def dispatch_command(function, *args, **kwargs):
"""
A wrapper for :func:`dispatch` that creates a one-command parser.
Uses :attr:`PARSER_FORMATTER`.
This::
dispatch_command(foo)
...is a shortcut for::
parser = ArgumentParser()
set_default_command(parser, foo)
dispatch(parser)
This function can be also used as a decorator.
"""
parser = argparse.ArgumentParser(formatter_class=PARSER_FORMATTER)
set_default_command(parser, function)
dispatch(parser, *args, **kwargs) |
def set_ipv4(self, ip):
""" Sets the IP address (ifr_addr) of the device
parameter 'ip' should be string representation of IP address
This does the same as ifconfig.
"""
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
bin_ip = socket.inet_aton(ip)
ifreq = struct.pack('16sH2s4s8s', self.name, socket.AF_INET, '\x00'*2, bin_ip, '\x00'*8)
fcntl.ioctl(sock, self.SIOCSIFADDR, ifreq)
ifreq = struct.pack('16sH', self.name, self.IFF_UP|self.IFF_POINTOPOINT|self.IFF_RUNNING|self.IFF_MULTICAST)
fcntl.ioctl(sock, self.SIOCSIFFLAGS, ifreq) | Sets the IP address (ifr_addr) of the device
parameter 'ip' should be string representation of IP address
This does the same as ifconfig. | Below is the the instruction that describes the task:
### Input:
Sets the IP address (ifr_addr) of the device
parameter 'ip' should be string representation of IP address
This does the same as ifconfig.
### Response:
def set_ipv4(self, ip):
""" Sets the IP address (ifr_addr) of the device
parameter 'ip' should be string representation of IP address
This does the same as ifconfig.
"""
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
bin_ip = socket.inet_aton(ip)
ifreq = struct.pack('16sH2s4s8s', self.name, socket.AF_INET, '\x00'*2, bin_ip, '\x00'*8)
fcntl.ioctl(sock, self.SIOCSIFADDR, ifreq)
ifreq = struct.pack('16sH', self.name, self.IFF_UP|self.IFF_POINTOPOINT|self.IFF_RUNNING|self.IFF_MULTICAST)
fcntl.ioctl(sock, self.SIOCSIFFLAGS, ifreq) |
def masked_pick_probability(x, y, temp, cos_distance):
"""The pairwise sampling probabilities for the elements of x for neighbor
points which share labels.
:param x: a matrix
:param y: a list of labels for each element of x
:param temp: Temperature
:cos_distance: Boolean for using cosine or Euclidean distance
:returns: A tensor for the pairwise sampling probabilities.
"""
return SNNLCrossEntropy.pick_probability(x, temp, cos_distance) * \
SNNLCrossEntropy.same_label_mask(y, y) | The pairwise sampling probabilities for the elements of x for neighbor
points which share labels.
:param x: a matrix
:param y: a list of labels for each element of x
:param temp: Temperature
:cos_distance: Boolean for using cosine or Euclidean distance
:returns: A tensor for the pairwise sampling probabilities. | Below is the the instruction that describes the task:
### Input:
The pairwise sampling probabilities for the elements of x for neighbor
points which share labels.
:param x: a matrix
:param y: a list of labels for each element of x
:param temp: Temperature
:cos_distance: Boolean for using cosine or Euclidean distance
:returns: A tensor for the pairwise sampling probabilities.
### Response:
def masked_pick_probability(x, y, temp, cos_distance):
"""The pairwise sampling probabilities for the elements of x for neighbor
points which share labels.
:param x: a matrix
:param y: a list of labels for each element of x
:param temp: Temperature
:cos_distance: Boolean for using cosine or Euclidean distance
:returns: A tensor for the pairwise sampling probabilities.
"""
return SNNLCrossEntropy.pick_probability(x, temp, cos_distance) * \
SNNLCrossEntropy.same_label_mask(y, y) |
def register(
self,
app: 'Quart',
first_registration: bool,
*,
url_prefix: Optional[str]=None,
) -> None:
"""Register this blueprint on the app given."""
state = self.make_setup_state(app, first_registration, url_prefix=url_prefix)
if self.has_static_folder:
state.add_url_rule(
self.static_url_path + '/<path:filename>',
view_func=self.send_static_file, endpoint='static',
)
for func in self.deferred_functions:
func(state) | Register this blueprint on the app given. | Below is the the instruction that describes the task:
### Input:
Register this blueprint on the app given.
### Response:
def register(
self,
app: 'Quart',
first_registration: bool,
*,
url_prefix: Optional[str]=None,
) -> None:
"""Register this blueprint on the app given."""
state = self.make_setup_state(app, first_registration, url_prefix=url_prefix)
if self.has_static_folder:
state.add_url_rule(
self.static_url_path + '/<path:filename>',
view_func=self.send_static_file, endpoint='static',
)
for func in self.deferred_functions:
func(state) |
def to_iterable(value, allow_none=True):
"""
Tries to convert the given value to an iterable, if necessary.
If the given value is a list, a list is returned; if it is a string, a list
containing one string is returned, ...
:param value: Any object
:param allow_none: If True, the method returns None if value is None, else
it returns an empty list
:return: A list containing the given string, or the given value
"""
if value is None:
# None given
if allow_none:
return None
return []
elif isinstance(value, (list, tuple, set, frozenset)):
# Iterable given, return it as-is
return value
# Return a one-value list
return [value] | Tries to convert the given value to an iterable, if necessary.
If the given value is a list, a list is returned; if it is a string, a list
containing one string is returned, ...
:param value: Any object
:param allow_none: If True, the method returns None if value is None, else
it returns an empty list
:return: A list containing the given string, or the given value | Below is the the instruction that describes the task:
### Input:
Tries to convert the given value to an iterable, if necessary.
If the given value is a list, a list is returned; if it is a string, a list
containing one string is returned, ...
:param value: Any object
:param allow_none: If True, the method returns None if value is None, else
it returns an empty list
:return: A list containing the given string, or the given value
### Response:
def to_iterable(value, allow_none=True):
"""
Tries to convert the given value to an iterable, if necessary.
If the given value is a list, a list is returned; if it is a string, a list
containing one string is returned, ...
:param value: Any object
:param allow_none: If True, the method returns None if value is None, else
it returns an empty list
:return: A list containing the given string, or the given value
"""
if value is None:
# None given
if allow_none:
return None
return []
elif isinstance(value, (list, tuple, set, frozenset)):
# Iterable given, return it as-is
return value
# Return a one-value list
return [value] |
def create_cluster(cluster_dict, datacenter=None, cluster=None,
service_instance=None):
'''
Creates a cluster.
Note: cluster_dict['name'] will be overridden by the cluster param value
config_dict
Dictionary with the config values of the new cluster.
datacenter
Name of datacenter containing the cluster.
Ignored if already contained by proxy details.
Default value is None.
cluster
Name of cluster.
Ignored if already contained by proxy details.
Default value is None.
service_instance
Service instance (vim.ServiceInstance) of the vCenter.
Default is None.
.. code-block:: bash
# esxdatacenter proxy
salt '*' vsphere.create_cluster cluster_dict=$cluster_dict cluster=cl1
# esxcluster proxy
salt '*' vsphere.create_cluster cluster_dict=$cluster_dict
'''
# Validate cluster dictionary
schema = ESXClusterConfigSchema.serialize()
try:
jsonschema.validate(cluster_dict, schema)
except jsonschema.exceptions.ValidationError as exc:
raise InvalidConfigError(exc)
# Get required details from the proxy
proxy_type = get_proxy_type()
if proxy_type == 'esxdatacenter':
datacenter = __salt__['esxdatacenter.get_details']()['datacenter']
dc_ref = _get_proxy_target(service_instance)
if not cluster:
raise ArgumentValueError('\'cluster\' needs to be specified')
elif proxy_type == 'esxcluster':
datacenter = __salt__['esxcluster.get_details']()['datacenter']
dc_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter)
cluster = __salt__['esxcluster.get_details']()['cluster']
if cluster_dict.get('vsan') and not \
salt.utils.vsan.vsan_supported(service_instance):
raise VMwareApiError('VSAN operations are not supported')
si = service_instance
cluster_spec = vim.ClusterConfigSpecEx()
vsan_spec = None
ha_config = None
vsan_61 = None
if cluster_dict.get('vsan'):
# XXX The correct way of retrieving the VSAN data (on the if branch)
# is not supported before 60u2 vcenter
vcenter_info = salt.utils.vmware.get_service_info(si)
if float(vcenter_info.apiVersion) >= 6.0 and \
int(vcenter_info.build) >= 3634794: # 60u2
vsan_spec = vim.vsan.ReconfigSpec(modify=True)
vsan_61 = False
# We need to keep HA disabled and enable it afterwards
if cluster_dict.get('ha', {}).get('enabled'):
enable_ha = True
ha_config = cluster_dict['ha']
del cluster_dict['ha']
else:
vsan_61 = True
# If VSAN is 6.1 the configuration of VSAN happens when configuring the
# cluster via the regular endpoint
_apply_cluster_dict(cluster_spec, cluster_dict, vsan_spec, vsan_61)
salt.utils.vmware.create_cluster(dc_ref, cluster, cluster_spec)
if not vsan_61:
# Only available after VSAN 61
if vsan_spec:
cluster_ref = salt.utils.vmware.get_cluster(dc_ref, cluster)
salt.utils.vsan.reconfigure_cluster_vsan(cluster_ref, vsan_spec)
if enable_ha:
# Set HA after VSAN has been configured
_apply_cluster_dict(cluster_spec, {'ha': ha_config})
salt.utils.vmware.update_cluster(cluster_ref, cluster_spec)
# Set HA back on the object
cluster_dict['ha'] = ha_config
return {'create_cluster': True} | Creates a cluster.
Note: cluster_dict['name'] will be overridden by the cluster param value
config_dict
Dictionary with the config values of the new cluster.
datacenter
Name of datacenter containing the cluster.
Ignored if already contained by proxy details.
Default value is None.
cluster
Name of cluster.
Ignored if already contained by proxy details.
Default value is None.
service_instance
Service instance (vim.ServiceInstance) of the vCenter.
Default is None.
.. code-block:: bash
# esxdatacenter proxy
salt '*' vsphere.create_cluster cluster_dict=$cluster_dict cluster=cl1
# esxcluster proxy
salt '*' vsphere.create_cluster cluster_dict=$cluster_dict | Below is the the instruction that describes the task:
### Input:
Creates a cluster.
Note: cluster_dict['name'] will be overridden by the cluster param value
config_dict
Dictionary with the config values of the new cluster.
datacenter
Name of datacenter containing the cluster.
Ignored if already contained by proxy details.
Default value is None.
cluster
Name of cluster.
Ignored if already contained by proxy details.
Default value is None.
service_instance
Service instance (vim.ServiceInstance) of the vCenter.
Default is None.
.. code-block:: bash
# esxdatacenter proxy
salt '*' vsphere.create_cluster cluster_dict=$cluster_dict cluster=cl1
# esxcluster proxy
salt '*' vsphere.create_cluster cluster_dict=$cluster_dict
### Response:
def create_cluster(cluster_dict, datacenter=None, cluster=None,
service_instance=None):
'''
Creates a cluster.
Note: cluster_dict['name'] will be overridden by the cluster param value
config_dict
Dictionary with the config values of the new cluster.
datacenter
Name of datacenter containing the cluster.
Ignored if already contained by proxy details.
Default value is None.
cluster
Name of cluster.
Ignored if already contained by proxy details.
Default value is None.
service_instance
Service instance (vim.ServiceInstance) of the vCenter.
Default is None.
.. code-block:: bash
# esxdatacenter proxy
salt '*' vsphere.create_cluster cluster_dict=$cluster_dict cluster=cl1
# esxcluster proxy
salt '*' vsphere.create_cluster cluster_dict=$cluster_dict
'''
# Validate cluster dictionary
schema = ESXClusterConfigSchema.serialize()
try:
jsonschema.validate(cluster_dict, schema)
except jsonschema.exceptions.ValidationError as exc:
raise InvalidConfigError(exc)
# Get required details from the proxy
proxy_type = get_proxy_type()
if proxy_type == 'esxdatacenter':
datacenter = __salt__['esxdatacenter.get_details']()['datacenter']
dc_ref = _get_proxy_target(service_instance)
if not cluster:
raise ArgumentValueError('\'cluster\' needs to be specified')
elif proxy_type == 'esxcluster':
datacenter = __salt__['esxcluster.get_details']()['datacenter']
dc_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter)
cluster = __salt__['esxcluster.get_details']()['cluster']
if cluster_dict.get('vsan') and not \
salt.utils.vsan.vsan_supported(service_instance):
raise VMwareApiError('VSAN operations are not supported')
si = service_instance
cluster_spec = vim.ClusterConfigSpecEx()
vsan_spec = None
ha_config = None
vsan_61 = None
if cluster_dict.get('vsan'):
# XXX The correct way of retrieving the VSAN data (on the if branch)
# is not supported before 60u2 vcenter
vcenter_info = salt.utils.vmware.get_service_info(si)
if float(vcenter_info.apiVersion) >= 6.0 and \
int(vcenter_info.build) >= 3634794: # 60u2
vsan_spec = vim.vsan.ReconfigSpec(modify=True)
vsan_61 = False
# We need to keep HA disabled and enable it afterwards
if cluster_dict.get('ha', {}).get('enabled'):
enable_ha = True
ha_config = cluster_dict['ha']
del cluster_dict['ha']
else:
vsan_61 = True
# If VSAN is 6.1 the configuration of VSAN happens when configuring the
# cluster via the regular endpoint
_apply_cluster_dict(cluster_spec, cluster_dict, vsan_spec, vsan_61)
salt.utils.vmware.create_cluster(dc_ref, cluster, cluster_spec)
if not vsan_61:
# Only available after VSAN 61
if vsan_spec:
cluster_ref = salt.utils.vmware.get_cluster(dc_ref, cluster)
salt.utils.vsan.reconfigure_cluster_vsan(cluster_ref, vsan_spec)
if enable_ha:
# Set HA after VSAN has been configured
_apply_cluster_dict(cluster_spec, {'ha': ha_config})
salt.utils.vmware.update_cluster(cluster_ref, cluster_spec)
# Set HA back on the object
cluster_dict['ha'] = ha_config
return {'create_cluster': True} |
def delete_queue(self, queue_name):
"""
Delete a queue with the specified name.
:param queue_name:
:return:
"""
self.connect()
channel = self.connection.channel()
channel.queue_delete(queue=queue_name)
self.close() | Delete a queue with the specified name.
:param queue_name:
:return: | Below is the the instruction that describes the task:
### Input:
Delete a queue with the specified name.
:param queue_name:
:return:
### Response:
def delete_queue(self, queue_name):
"""
Delete a queue with the specified name.
:param queue_name:
:return:
"""
self.connect()
channel = self.connection.channel()
channel.queue_delete(queue=queue_name)
self.close() |
def MWColorMapping(maptype='jet', reverse=True):
"""Maps amino-acid molecular weights to colors. Otherwise, this
function is identical to *KyteDoolittleColorMapping*
"""
d = {'A':89,'R':174,'N':132,'D':133,'C':121,'Q':146,'E':147,\
'G':75,'H':155,'I':131,'L':131,'K':146,'M':149,'F':165,\
'P':115,'S':105,'T':119,'W':204,'Y':181,'V':117}
aas = sorted(AA_TO_INDEX.keys())
mws = [d[aa] for aa in aas]
if reverse:
mws = [-1 * x for x in mws]
mapper = pylab.cm.ScalarMappable(cmap=maptype)
mapper.set_clim(min(mws), max(mws))
mapping_d = {'*':'#000000'}
for (aa, h) in zip(aas, mws):
tup = mapper.to_rgba(h, bytes=True)
(red, green, blue, alpha) = tup
mapping_d[aa] = '#%02x%02x%02x' % (red, green, blue)
assert len(mapping_d[aa]) == 7
cmap = mapper.get_cmap()
return (cmap, mapping_d, mapper) | Maps amino-acid molecular weights to colors. Otherwise, this
function is identical to *KyteDoolittleColorMapping* | Below is the the instruction that describes the task:
### Input:
Maps amino-acid molecular weights to colors. Otherwise, this
function is identical to *KyteDoolittleColorMapping*
### Response:
def MWColorMapping(maptype='jet', reverse=True):
"""Maps amino-acid molecular weights to colors. Otherwise, this
function is identical to *KyteDoolittleColorMapping*
"""
d = {'A':89,'R':174,'N':132,'D':133,'C':121,'Q':146,'E':147,\
'G':75,'H':155,'I':131,'L':131,'K':146,'M':149,'F':165,\
'P':115,'S':105,'T':119,'W':204,'Y':181,'V':117}
aas = sorted(AA_TO_INDEX.keys())
mws = [d[aa] for aa in aas]
if reverse:
mws = [-1 * x for x in mws]
mapper = pylab.cm.ScalarMappable(cmap=maptype)
mapper.set_clim(min(mws), max(mws))
mapping_d = {'*':'#000000'}
for (aa, h) in zip(aas, mws):
tup = mapper.to_rgba(h, bytes=True)
(red, green, blue, alpha) = tup
mapping_d[aa] = '#%02x%02x%02x' % (red, green, blue)
assert len(mapping_d[aa]) == 7
cmap = mapper.get_cmap()
return (cmap, mapping_d, mapper) |
def to_html(self):
"""Render a Cell MessageElement as html
:returns: The html representation of the Cell MessageElement
:rtype: basestring
"""
# Apply bootstrap alignment classes first
if self.align is 'left':
if self.style_class is None:
self.style_class = 'text-left'
else:
self.style_class += ' text-left'
elif self.align is 'right':
if self.style_class is None:
self.style_class = 'text-right'
else:
self.style_class += ' text-right'
elif self.align is 'center':
if self.style_class is None:
self.style_class = 'text-center'
else:
self.style_class += ' text-center'
# Special case for when we want to put a nested table in a cell
# We don't use isinstance because of recursive imports with table
class_name = self.content.__class__.__name__
if class_name in ['BulletedList', 'Table', 'Image', 'Message']:
html = self.content.to_html()
else:
html = self.content.to_html(wrap_slash=self.wrap_slash)
# Check if we have a header or not then render
if self.header_flag is True:
return '<th%s colspan=%i>%s</th>\n' % (
self.html_attributes(), self.span, html)
else:
return '<td%s colspan=%i>%s</td>\n' % (
self.html_attributes(), self.span, html) | Render a Cell MessageElement as html
:returns: The html representation of the Cell MessageElement
:rtype: basestring | Below is the the instruction that describes the task:
### Input:
Render a Cell MessageElement as html
:returns: The html representation of the Cell MessageElement
:rtype: basestring
### Response:
def to_html(self):
"""Render a Cell MessageElement as html
:returns: The html representation of the Cell MessageElement
:rtype: basestring
"""
# Apply bootstrap alignment classes first
if self.align is 'left':
if self.style_class is None:
self.style_class = 'text-left'
else:
self.style_class += ' text-left'
elif self.align is 'right':
if self.style_class is None:
self.style_class = 'text-right'
else:
self.style_class += ' text-right'
elif self.align is 'center':
if self.style_class is None:
self.style_class = 'text-center'
else:
self.style_class += ' text-center'
# Special case for when we want to put a nested table in a cell
# We don't use isinstance because of recursive imports with table
class_name = self.content.__class__.__name__
if class_name in ['BulletedList', 'Table', 'Image', 'Message']:
html = self.content.to_html()
else:
html = self.content.to_html(wrap_slash=self.wrap_slash)
# Check if we have a header or not then render
if self.header_flag is True:
return '<th%s colspan=%i>%s</th>\n' % (
self.html_attributes(), self.span, html)
else:
return '<td%s colspan=%i>%s</td>\n' % (
self.html_attributes(), self.span, html) |
def eda_EventRelated(epoch, event_length, window_post=4):
"""
Extract event-related EDA and Skin Conductance Response (SCR).
Parameters
----------
epoch : pandas.DataFrame
An epoch contains in the epochs dict returned by :function:`neurokit.create_epochs()` on dataframe returned by :function:`neurokit.bio_process()`. Index must range from -4s to +4s (relatively to event onset and end).
event_length : int
Event's length in seconds.
window_post : float
Post-stimulus window size (in seconds) to include eventual responses (usually 3 or 4).
Returns
----------
EDA_Response : dict
Event-related EDA response features.
Example
----------
>>> import neurokit as nk
>>> bio = nk.bio_process(ecg=data["ECG"], rsp=data["RSP"], eda=data["EDA"], sampling_rate=1000, add=data["Photosensor"])
>>> df = bio["df"]
>>> events = nk.find_events(df["Photosensor"], cut="lower")
>>> epochs = nk.create_epochs(df, events["onsets"], duration=7, onset=-0.5)
>>> for epoch in epochs:
>>> bio_response = nk.bio_EventRelated(epoch, event_length=4, window_post=3)
Notes
----------
**Looking for help**: *Experimental*: respiration artifacts correction needs to be implemented.
*Details*
- **EDA_Peak**: Max of EDA (in a window starting 1s after the stim onset) minus baseline.
- **SCR_Amplitude**: Peak of SCR. If no SCR, returns NA.
- **SCR_Magnitude**: Peak of SCR. If no SCR, returns 0.
- **SCR_Amplitude_Log**: log of 1+amplitude.
- **SCR_Magnitude_Log**: log of 1+magnitude.
- **SCR_PeakTime**: Time of peak.
- **SCR_Latency**: Time between stim onset and SCR onset.
- **SCR_RiseTime**: Time between SCR onset and peak.
- **SCR_Strength**: *Experimental*: peak divided by latency. Angle of the line between peak and onset.
- **SCR_RecoveryTime**: Time between peak and recovery point (half of the amplitude).
*Authors*
- `Dominique Makowski <https://dominiquemakowski.github.io/>`_
*Dependencies*
- numpy
- pandas
*See Also*
- https://www.biopac.com/wp-content/uploads/EDA-SCR-Analysis.pdf
References
-----------
- Schneider, R., Schmidt, S., Binder, M., Schäfer, F., & Walach, H. (2003). Respiration-related artifacts in EDA recordings: introducing a standardized method to overcome multiple interpretations. Psychological reports, 93(3), 907-920.
- Leiner, D., Fahr, A., & Früh, H. (2012). EDA positive change: A simple algorithm for electrodermal activity to measure general audience arousal during media exposure. Communication Methods and Measures, 6(4), 237-250.
"""
# Initialization
# Dict of computed features, returned at the end.
EDA_Response = {}
# Last index label (in seconds, presumably -- confirm epoch index units) included in the post-stimulus response window.
window_end = event_length + window_post
# Sanity check
if epoch.index[-1]-event_length < 1:
print("NeuroKit Warning: eda_EventRelated(): your epoch only lasts for about %.2f s post stimulus. You might lose some SCRs." %(epoch.index[-1]-event_length))
# EDA Based
# =================
# This is a basic and bad model
# Baseline is taken over the first second; the peak is searched from 1 s post-onset.
if "EDA_Filtered" in epoch.columns:
baseline = epoch["EDA_Filtered"][0:1].min()
eda_peak = epoch["EDA_Filtered"][1:window_end].max()
EDA_Response["EDA_Peak"] = eda_peak - baseline
# SCR Based
# =================
if "SCR_Onsets" in epoch.columns:
# Computation
# Index label of the first SCR onset found in the window (NaN when absent).
peak_onset = epoch["SCR_Onsets"][1:window_end].idxmax()
if pd.notnull(peak_onset):
amplitude = epoch["SCR_Peaks"][peak_onset:window_end].max()
peak_time = epoch["SCR_Peaks"][peak_onset:window_end].idxmax()
# Magnitude counts a missing SCR peak as a zero-sized response; amplitude keeps NA.
if pd.isnull(amplitude):
magnitude = 0
else:
magnitude = amplitude
risetime = peak_time - peak_onset
if risetime > 0:
strength = magnitude/risetime
else:
strength = np.nan
# Recovery time is measured from the peak to the recovery point in "SCR_Recoveries".
if pd.isnull(peak_time) is False:
recovery = epoch["SCR_Recoveries"][peak_time:window_end].idxmax() - peak_time
else:
recovery = np.nan
# No SCR onset detected inside the window: fill the features accordingly.
else:
amplitude = np.nan
magnitude = 0
risetime = np.nan
strength = np.nan
peak_time = np.nan
recovery = np.nan
# Storage
EDA_Response["SCR_Amplitude"] = amplitude
EDA_Response["SCR_Magnitude"] = magnitude
EDA_Response["SCR_Amplitude_Log"] = np.log(1+amplitude)
EDA_Response["SCR_Magnitude_Log"] = np.log(1+magnitude)
EDA_Response["SCR_Latency"] = peak_onset
EDA_Response["SCR_PeakTime"] = peak_time
EDA_Response["SCR_RiseTime"] = risetime
EDA_Response["SCR_Strength"] = strength # Experimental
EDA_Response["SCR_RecoveryTime"] = recovery
# Artifact Correction
# ====================
# TODO !!
# Respiration artifacts
# if "RSP_Filtered" in epoch.columns:
# pass # I Dunno, maybe with granger causality or something?
return(EDA_Response) | Extract event-related EDA and Skin Conductance Response (SCR).
Parameters
----------
epoch : pandas.DataFrame
An epoch contains in the epochs dict returned by :function:`neurokit.create_epochs()` on dataframe returned by :function:`neurokit.bio_process()`. Index must range from -4s to +4s (relatively to event onset and end).
event_length : int
Event's length in seconds.
window_post : float
Post-stimulus window size (in seconds) to include eventual responses (usually 3 or 4).
Returns
----------
EDA_Response : dict
Event-related EDA response features.
Example
----------
>>> import neurokit as nk
>>> bio = nk.bio_process(ecg=data["ECG"], rsp=data["RSP"], eda=data["EDA"], sampling_rate=1000, add=data["Photosensor"])
>>> df = bio["df"]
>>> events = nk.find_events(df["Photosensor"], cut="lower")
>>> epochs = nk.create_epochs(df, events["onsets"], duration=7, onset=-0.5)
>>> for epoch in epochs:
>>> bio_response = nk.bio_EventRelated(epoch, event_length=4, window_post=3)
Notes
----------
**Looking for help**: *Experimental*: respiration artifacts correction needs to be implemented.
*Details*
- **EDA_Peak**: Max of EDA (in a window starting 1s after the stim onset) minus baseline.
- **SCR_Amplitude**: Peak of SCR. If no SCR, returns NA.
- **SCR_Magnitude**: Peak of SCR. If no SCR, returns 0.
- **SCR_Amplitude_Log**: log of 1+amplitude.
- **SCR_Magnitude_Log**: log of 1+magnitude.
- **SCR_PeakTime**: Time of peak.
- **SCR_Latency**: Time between stim onset and SCR onset.
- **SCR_RiseTime**: Time between SCR onset and peak.
- **SCR_Strength**: *Experimental*: peak divided by latency. Angle of the line between peak and onset.
- **SCR_RecoveryTime**: Time between peak and recovery point (half of the amplitude).
*Authors*
- `Dominique Makowski <https://dominiquemakowski.github.io/>`_
*Dependencies*
- numpy
- pandas
*See Also*
- https://www.biopac.com/wp-content/uploads/EDA-SCR-Analysis.pdf
References
-----------
- Schneider, R., Schmidt, S., Binder, M., Schäfer, F., & Walach, H. (2003). Respiration-related artifacts in EDA recordings: introducing a standardized method to overcome multiple interpretations. Psychological reports, 93(3), 907-920.
- Leiner, D., Fahr, A., & Früh, H. (2012). EDA positive change: A simple algorithm for electrodermal activity to measure general audience arousal during media exposure. Communication Methods and Measures, 6(4), 237-250. | Below is the instruction that describes the task:
### Input:
Extract event-related EDA and Skin Conductance Response (SCR).
Parameters
----------
epoch : pandas.DataFrame
An epoch contains in the epochs dict returned by :function:`neurokit.create_epochs()` on dataframe returned by :function:`neurokit.bio_process()`. Index must range from -4s to +4s (relatively to event onset and end).
event_length : int
Event's length in seconds.
window_post : float
Post-stimulus window size (in seconds) to include eventual responses (usually 3 or 4).
Returns
----------
EDA_Response : dict
Event-related EDA response features.
Example
----------
>>> import neurokit as nk
>>> bio = nk.bio_process(ecg=data["ECG"], rsp=data["RSP"], eda=data["EDA"], sampling_rate=1000, add=data["Photosensor"])
>>> df = bio["df"]
>>> events = nk.find_events(df["Photosensor"], cut="lower")
>>> epochs = nk.create_epochs(df, events["onsets"], duration=7, onset=-0.5)
>>> for epoch in epochs:
>>> bio_response = nk.bio_EventRelated(epoch, event_length=4, window_post=3)
Notes
----------
**Looking for help**: *Experimental*: respiration artifacts correction needs to be implemented.
*Details*
- **EDA_Peak**: Max of EDA (in a window starting 1s after the stim onset) minus baseline.
- **SCR_Amplitude**: Peak of SCR. If no SCR, returns NA.
- **SCR_Magnitude**: Peak of SCR. If no SCR, returns 0.
- **SCR_Amplitude_Log**: log of 1+amplitude.
- **SCR_Magnitude_Log**: log of 1+magnitude.
- **SCR_PeakTime**: Time of peak.
- **SCR_Latency**: Time between stim onset and SCR onset.
- **SCR_RiseTime**: Time between SCR onset and peak.
- **SCR_Strength**: *Experimental*: peak divided by latency. Angle of the line between peak and onset.
- **SCR_RecoveryTime**: Time between peak and recovery point (half of the amplitude).
*Authors*
- `Dominique Makowski <https://dominiquemakowski.github.io/>`_
*Dependencies*
- numpy
- pandas
*See Also*
- https://www.biopac.com/wp-content/uploads/EDA-SCR-Analysis.pdf
References
-----------
- Schneider, R., Schmidt, S., Binder, M., Schäfer, F., & Walach, H. (2003). Respiration-related artifacts in EDA recordings: introducing a standardized method to overcome multiple interpretations. Psychological reports, 93(3), 907-920.
- Leiner, D., Fahr, A., & Früh, H. (2012). EDA positive change: A simple algorithm for electrodermal activity to measure general audience arousal during media exposure. Communication Methods and Measures, 6(4), 237-250.
### Response:
def eda_EventRelated(epoch, event_length, window_post=4):
    """
    Extract event-related EDA and Skin Conductance Response (SCR).

    Parameters
    ----------
    epoch : pandas.DataFrame
        An epoch contained in the epochs dict returned by :function:`neurokit.create_epochs()` on a dataframe returned by :function:`neurokit.bio_process()`. Index must range from -4s to +4s (relatively to event onset and end).
    event_length : int
        Event's length in seconds.
    window_post : float
        Post-stimulus window size (in seconds) to include eventual responses (usually 3 or 4).

    Returns
    ----------
    EDA_Response : dict
        Event-related EDA response features.

    Example
    ----------
    >>> import neurokit as nk
    >>> bio = nk.bio_process(ecg=data["ECG"], rsp=data["RSP"], eda=data["EDA"], sampling_rate=1000, add=data["Photosensor"])
    >>> df = bio["df"]
    >>> events = nk.find_events(df["Photosensor"], cut="lower")
    >>> epochs = nk.create_epochs(df, events["onsets"], duration=7, onset=-0.5)
    >>> for epoch in epochs:
    >>>     bio_response = nk.eda_EventRelated(epoch, event_length=4, window_post=3)

    Notes
    ----------
    **Looking for help**: *Experimental*: respiration artifacts correction needs to be implemented.

    *Details*
    - **EDA_Peak**: Max of EDA (in a window starting 1s after the stim onset) minus baseline.
    - **SCR_Amplitude**: Peak of SCR. If no SCR, returns NA.
    - **SCR_Magnitude**: Peak of SCR. If no SCR, returns 0.
    - **SCR_Amplitude_Log**: log of 1+amplitude.
    - **SCR_Magnitude_Log**: log of 1+magnitude.
    - **SCR_PeakTime**: Time of peak.
    - **SCR_Latency**: Time between stim onset and SCR onset.
    - **SCR_RiseTime**: Time between SCR onset and peak.
    - **SCR_Strength**: *Experimental*: peak divided by latency. Angle of the line between peak and onset.
    - **SCR_RecoveryTime**: Time between peak and recovery point (half of the amplitude).

    *Authors*
    - `Dominique Makowski <https://dominiquemakowski.github.io/>`_

    *Dependencies*
    - numpy
    - pandas

    *See Also*
    - https://www.biopac.com/wp-content/uploads/EDA-SCR-Analysis.pdf

    References
    -----------
    - Schneider, R., Schmidt, S., Binder, M., Schäfer, F., & Walach, H. (2003). Respiration-related artifacts in EDA recordings: introducing a standardized method to overcome multiple interpretations. Psychological reports, 93(3), 907-920.
    - Leiner, D., Fahr, A., & Früh, H. (2012). EDA positive change: A simple algorithm for electrodermal activity to measure general audience arousal during media exposure. Communication Methods and Measures, 6(4), 237-250.
    """
    EDA_Response = {}
    # All slices below are label-based (the epoch index is in seconds), running
    # from 1 s after stimulus onset up to the end of the post-stimulus window.
    window_end = event_length + window_post

    # Sanity check: warn when the epoch barely extends past the event itself.
    if epoch.index[-1] - event_length < 1:
        print("NeuroKit Warning: eda_EventRelated(): your epoch only lasts for about %.2f s post stimulus. You might lose some SCRs." % (epoch.index[-1] - event_length))

    # EDA based features (basic model): peak in the response window minus a
    # baseline taken over the first second of the epoch.
    if "EDA_Filtered" in epoch.columns:
        baseline = epoch["EDA_Filtered"][0:1].min()
        eda_peak = epoch["EDA_Filtered"][1:window_end].max()
        EDA_Response["EDA_Peak"] = eda_peak - baseline

    # SCR based features.
    if "SCR_Onsets" in epoch.columns:
        # Index label of the first SCR onset inside the window (NaN when none).
        peak_onset = epoch["SCR_Onsets"][1:window_end].idxmax()
        if pd.notnull(peak_onset):
            amplitude = epoch["SCR_Peaks"][peak_onset:window_end].max()
            peak_time = epoch["SCR_Peaks"][peak_onset:window_end].idxmax()
            # Magnitude treats a missing SCR peak as a zero-sized response,
            # while amplitude keeps NaN (see docstring).
            magnitude = 0 if pd.isnull(amplitude) else amplitude
            risetime = peak_time - peak_onset
            # NaN risetime fails the > 0 test, yielding NaN strength.
            strength = magnitude / risetime if risetime > 0 else np.nan
            if pd.notnull(peak_time):
                recovery = epoch["SCR_Recoveries"][peak_time:window_end].idxmax() - peak_time
            else:
                recovery = np.nan
        else:
            # No SCR detected inside the window.
            amplitude = np.nan
            magnitude = 0
            risetime = np.nan
            strength = np.nan
            peak_time = np.nan
            recovery = np.nan
        EDA_Response["SCR_Amplitude"] = amplitude
        EDA_Response["SCR_Magnitude"] = magnitude
        EDA_Response["SCR_Amplitude_Log"] = np.log(1 + amplitude)
        EDA_Response["SCR_Magnitude_Log"] = np.log(1 + magnitude)
        EDA_Response["SCR_Latency"] = peak_onset
        EDA_Response["SCR_PeakTime"] = peak_time
        EDA_Response["SCR_RiseTime"] = risetime
        EDA_Response["SCR_Strength"] = strength  # Experimental
        EDA_Response["SCR_RecoveryTime"] = recovery

    # TODO: respiration artifact correction (e.g. using "RSP_Filtered"),
    # see Schneider et al. (2003) in the references.
    return EDA_Response
def _get_page_elements(self):
"""Return page elements and page objects of this page object
:returns: list of page elements and page objects
"""
page_elements = []
# Inspect both instance attributes and class attributes: elements may be
# declared either on the instance or directly on the class body.
for attribute, value in list(self.__dict__.items()) + list(self.__class__.__dict__.items()):
# Skip 'parent' to avoid walking back up the hierarchy; keep CommonObject
# instances (presumably the shared base of page elements/objects -- confirm in project).
if attribute != 'parent' and isinstance(value, CommonObject):
page_elements.append(value)
return page_elements | Return page elements and page objects of this page object
:returns: list of page elements and page objects | Below is the instruction that describes the task:
### Input:
Return page elements and page objects of this page object
:returns: list of page elements and page objects
### Response:
def _get_page_elements(self):
    """Return page elements and page objects of this page object.

    Both instance attributes and class attributes are inspected, because
    page elements may be declared either way on a page object. The special
    'parent' attribute is skipped to avoid walking back up the hierarchy.

    :returns: list of page elements and page objects
    """
    attributes = list(self.__dict__.items()) + list(self.__class__.__dict__.items())
    return [value for attribute, value in attributes
            if attribute != 'parent' and isinstance(value, CommonObject)]
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.