code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def register_tc_plugins(self, plugin_name, plugin_class):
"""
Loads a plugin as a dictionary and attaches needed parts to correct areas for testing
parts.
:param plugin_name: Name of the plugins
:param plugin_class: PluginBase
:return: Nothing
"""
if plugin_name in self.registered_plugins:
raise PluginException("Plugin {} already registered! Duplicate "
"plugins?".format(plugin_name))
self.logger.debug("Registering plugin %s", plugin_name)
plugin_class.init(bench=self.bench)
if plugin_class.get_bench_api() is not None:
register_func = self.plugin_types[PluginTypes.BENCH]
register_func(plugin_name, plugin_class)
if plugin_class.get_parsers() is not None:
register_func = self.plugin_types[PluginTypes.PARSER]
register_func(plugin_name, plugin_class)
if plugin_class.get_external_services() is not None:
register_func = self.plugin_types[PluginTypes.EXTSERVICE]
register_func(plugin_name, plugin_class)
self.registered_plugins.append(plugin_name) | Loads a plugin as a dictionary and attaches needed parts to correct areas for testing
parts.
:param plugin_name: Name of the plugins
:param plugin_class: PluginBase
:return: Nothing | Below is the instruction that describes the task:
### Input:
Loads a plugin as a dictionary and attaches needed parts to correct areas for testing
parts.
:param plugin_name: Name of the plugins
:param plugin_class: PluginBase
:return: Nothing
### Response:
def register_tc_plugins(self, plugin_name, plugin_class):
"""
Loads a plugin as a dictionary and attaches needed parts to correct areas for testing
parts.
:param plugin_name: Name of the plugins
:param plugin_class: PluginBase
:return: Nothing
"""
if plugin_name in self.registered_plugins:
raise PluginException("Plugin {} already registered! Duplicate "
"plugins?".format(plugin_name))
self.logger.debug("Registering plugin %s", plugin_name)
plugin_class.init(bench=self.bench)
if plugin_class.get_bench_api() is not None:
register_func = self.plugin_types[PluginTypes.BENCH]
register_func(plugin_name, plugin_class)
if plugin_class.get_parsers() is not None:
register_func = self.plugin_types[PluginTypes.PARSER]
register_func(plugin_name, plugin_class)
if plugin_class.get_external_services() is not None:
register_func = self.plugin_types[PluginTypes.EXTSERVICE]
register_func(plugin_name, plugin_class)
self.registered_plugins.append(plugin_name) |
def get_log_config(component, handlers, level='DEBUG', path='/var/log/vfine/'):
"""Return a log config for django project."""
config = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'standard': {
'format': '%(asctime)s [%(levelname)s][%(threadName)s]' +
'[%(name)s.%(funcName)s():%(lineno)d] %(message)s'
},
'color': {
'()': 'shaw.log.SplitColoredFormatter',
'format': "%(asctime)s " +
"%(log_color)s%(bold)s[%(levelname)s]%(reset)s" +
"[%(threadName)s][%(name)s.%(funcName)s():%(lineno)d] " +
"%(blue)s%(message)s"
}
},
'handlers': {
'debug': {
'level': 'DEBUG',
'class': 'logging.handlers.RotatingFileHandler',
'filename': path + component + '.debug.log',
'maxBytes': 1024 * 1024 * 1024,
'backupCount': 5,
'formatter': 'standard',
},
'color': {
'level': 'DEBUG',
'class': 'logging.handlers.RotatingFileHandler',
'filename': path + component + '.color.log',
'maxBytes': 1024 * 1024 * 1024,
'backupCount': 5,
'formatter': 'color',
},
'info': {
'level': 'INFO',
'class': 'logging.handlers.RotatingFileHandler',
'filename': path + component + '.info.log',
'maxBytes': 1024 * 1024 * 1024,
'backupCount': 5,
'formatter': 'standard',
},
'error': {
'level': 'ERROR',
'class': 'logging.handlers.RotatingFileHandler',
'filename': path + component + '.error.log',
'maxBytes': 1024 * 1024 * 100,
'backupCount': 5,
'formatter': 'standard',
},
'console': {
'level': level,
'class': 'logging.StreamHandler',
'formatter': 'standard'
},
},
'loggers': {
'django': {
'handlers': handlers,
'level': 'INFO',
'propagate': False
},
'django.request': {
'handlers': handlers,
'level': 'INFO',
'propagate': False,
},
'': {
'handlers': handlers,
'level': level,
'propagate': False
},
}
}
    return config | Return a log config for django project. | Below is the instruction that describes the task:
### Input:
Return a log config for django project.
### Response:
def get_log_config(component, handlers, level='DEBUG', path='/var/log/vfine/'):
"""Return a log config for django project."""
config = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'standard': {
'format': '%(asctime)s [%(levelname)s][%(threadName)s]' +
'[%(name)s.%(funcName)s():%(lineno)d] %(message)s'
},
'color': {
'()': 'shaw.log.SplitColoredFormatter',
'format': "%(asctime)s " +
"%(log_color)s%(bold)s[%(levelname)s]%(reset)s" +
"[%(threadName)s][%(name)s.%(funcName)s():%(lineno)d] " +
"%(blue)s%(message)s"
}
},
'handlers': {
'debug': {
'level': 'DEBUG',
'class': 'logging.handlers.RotatingFileHandler',
'filename': path + component + '.debug.log',
'maxBytes': 1024 * 1024 * 1024,
'backupCount': 5,
'formatter': 'standard',
},
'color': {
'level': 'DEBUG',
'class': 'logging.handlers.RotatingFileHandler',
'filename': path + component + '.color.log',
'maxBytes': 1024 * 1024 * 1024,
'backupCount': 5,
'formatter': 'color',
},
'info': {
'level': 'INFO',
'class': 'logging.handlers.RotatingFileHandler',
'filename': path + component + '.info.log',
'maxBytes': 1024 * 1024 * 1024,
'backupCount': 5,
'formatter': 'standard',
},
'error': {
'level': 'ERROR',
'class': 'logging.handlers.RotatingFileHandler',
'filename': path + component + '.error.log',
'maxBytes': 1024 * 1024 * 100,
'backupCount': 5,
'formatter': 'standard',
},
'console': {
'level': level,
'class': 'logging.StreamHandler',
'formatter': 'standard'
},
},
'loggers': {
'django': {
'handlers': handlers,
'level': 'INFO',
'propagate': False
},
'django.request': {
'handlers': handlers,
'level': 'INFO',
'propagate': False,
},
'': {
'handlers': handlers,
'level': level,
'propagate': False
},
}
}
return config |
def _temp_dew_dirint(temp_dew, times):
"""
Calculate precipitable water from surface dew point temp (Perez eqn 4),
or return a default value for use with :py:func:`_dirint_bins`.
"""
if temp_dew is not None:
# Perez eqn 4
w = pd.Series(np.exp(0.07 * temp_dew - 0.075), index=times)
else:
# do not change unless also modifying _dirint_bins
w = pd.Series(-1, index=times)
return w | Calculate precipitable water from surface dew point temp (Perez eqn 4),
or return a default value for use with :py:func:`_dirint_bins`. | Below is the instruction that describes the task:
### Input:
Calculate precipitable water from surface dew point temp (Perez eqn 4),
or return a default value for use with :py:func:`_dirint_bins`.
### Response:
def _temp_dew_dirint(temp_dew, times):
"""
Calculate precipitable water from surface dew point temp (Perez eqn 4),
or return a default value for use with :py:func:`_dirint_bins`.
"""
if temp_dew is not None:
# Perez eqn 4
w = pd.Series(np.exp(0.07 * temp_dew - 0.075), index=times)
else:
# do not change unless also modifying _dirint_bins
w = pd.Series(-1, index=times)
return w |
def get_alias_table():
"""
Get the current alias table.
"""
try:
alias_table = get_config_parser()
alias_table.read(azext_alias.alias.GLOBAL_ALIAS_PATH)
return alias_table
except Exception: # pylint: disable=broad-except
        return get_config_parser() | Get the current alias table. | Below is the instruction that describes the task:
### Input:
Get the current alias table.
### Response:
def get_alias_table():
"""
Get the current alias table.
"""
try:
alias_table = get_config_parser()
alias_table.read(azext_alias.alias.GLOBAL_ALIAS_PATH)
return alias_table
except Exception: # pylint: disable=broad-except
return get_config_parser() |
def show_minimum_needs_configuration(self):
"""Show the minimum needs dialog."""
# import here only so that it is AFTER i18n set up
from safe.gui.tools.minimum_needs.needs_manager_dialog import (
NeedsManagerDialog)
dialog = NeedsManagerDialog(
parent=self.iface.mainWindow(),
dock=self.dock_widget)
    dialog.exec_() | Show the minimum needs dialog. | Below is the instruction that describes the task:
### Input:
Show the minimum needs dialog.
### Response:
def show_minimum_needs_configuration(self):
"""Show the minimum needs dialog."""
# import here only so that it is AFTER i18n set up
from safe.gui.tools.minimum_needs.needs_manager_dialog import (
NeedsManagerDialog)
dialog = NeedsManagerDialog(
parent=self.iface.mainWindow(),
dock=self.dock_widget)
dialog.exec_() |
def delete(self, path, auth=None, **kwargs):
"""
Manually make a DELETE request.
:param str path: relative url of the request (e.g. `/users/username`)
:param auth.Authentication auth: authentication object
:param kwargs dict: Extra arguments for the request, as supported by the
`requests <http://docs.python-requests.org/>`_ library.
:raises NetworkFailure: if there is an error communicating with the server
:raises ApiFailure: if the request cannot be serviced
"""
return self._check_ok(self._delete(path, auth=auth, **kwargs)) | Manually make a DELETE request.
:param str path: relative url of the request (e.g. `/users/username`)
:param auth.Authentication auth: authentication object
:param kwargs dict: Extra arguments for the request, as supported by the
`requests <http://docs.python-requests.org/>`_ library.
:raises NetworkFailure: if there is an error communicating with the server
:raises ApiFailure: if the request cannot be serviced | Below is the instruction that describes the task:
### Input:
Manually make a DELETE request.
:param str path: relative url of the request (e.g. `/users/username`)
:param auth.Authentication auth: authentication object
:param kwargs dict: Extra arguments for the request, as supported by the
`requests <http://docs.python-requests.org/>`_ library.
:raises NetworkFailure: if there is an error communicating with the server
:raises ApiFailure: if the request cannot be serviced
### Response:
def delete(self, path, auth=None, **kwargs):
"""
Manually make a DELETE request.
:param str path: relative url of the request (e.g. `/users/username`)
:param auth.Authentication auth: authentication object
:param kwargs dict: Extra arguments for the request, as supported by the
`requests <http://docs.python-requests.org/>`_ library.
:raises NetworkFailure: if there is an error communicating with the server
:raises ApiFailure: if the request cannot be serviced
"""
return self._check_ok(self._delete(path, auth=auth, **kwargs)) |
def skip_first(pipe, items=1):
''' this is an alias for skip to parallel the dedicated skip_last function
to provide a little more readability to the code. the action of actually
skipping does not occur until the first iteration is done
'''
pipe = iter(pipe)
for i in skip(pipe, items):
yield i | this is an alias for skip to parallel the dedicated skip_last function
to provide a little more readability to the code. the action of actually
skipping does not occur until the first iteration is done | Below is the instruction that describes the task:
### Input:
this is an alias for skip to parallel the dedicated skip_last function
to provide a little more readability to the code. the action of actually
skipping does not occur until the first iteration is done
### Response:
def skip_first(pipe, items=1):
''' this is an alias for skip to parallel the dedicated skip_last function
to provide a little more readability to the code. the action of actually
skipping does not occur until the first iteration is done
'''
pipe = iter(pipe)
for i in skip(pipe, items):
yield i |
def to_networkx(self):
"""
Return this network in NetworkX graph object.
:return: Network as NetworkX graph object
"""
return nx_util.to_networkx(self.session.get(self.__url).json()) | Return this network in NetworkX graph object.
:return: Network as NetworkX graph object | Below is the instruction that describes the task:
### Input:
Return this network in NetworkX graph object.
:return: Network as NetworkX graph object
### Response:
def to_networkx(self):
"""
Return this network in NetworkX graph object.
:return: Network as NetworkX graph object
"""
return nx_util.to_networkx(self.session.get(self.__url).json()) |
def spec(self):
"""Return a SourceSpec to describe this source"""
from ambry_sources.sources import SourceSpec
d = self.dict
d['url'] = self.url
# Will get the URL twice; once as ref and once as URL, but the ref is ignored
    return SourceSpec(**d) | Return a SourceSpec to describe this source | Below is the instruction that describes the task:
### Input:
Return a SourceSpec to describe this source
### Response:
def spec(self):
"""Return a SourceSpec to describe this source"""
from ambry_sources.sources import SourceSpec
d = self.dict
d['url'] = self.url
# Will get the URL twice; once as ref and once as URL, but the ref is ignored
return SourceSpec(**d) |
def consume_token(self, tokens, index, tokens_len):
"""Consume a token.
Returns a tuple of (tokens, tokens_len, index) when consumption is
completed and tokens have been merged together.
"""
if _is_really_comment(tokens, index):
self.last_line_with_comment = tokens[index].line
finished = False
if (not _is_in_comment_type(tokens[index].type) and
self.last_line_with_comment != tokens[index].line):
finished = True
end = index
elif index == (tokens_len - 1):
finished = True
end = index + 1
if finished:
return _paste_tokens_line_by_line(tokens,
TokenType.RST,
self.begin,
end) | Consume a token.
Returns a tuple of (tokens, tokens_len, index) when consumption is
completed and tokens have been merged together. | Below is the instruction that describes the task:
### Input:
Consume a token.
Returns a tuple of (tokens, tokens_len, index) when consumption is
completed and tokens have been merged together.
### Response:
def consume_token(self, tokens, index, tokens_len):
"""Consume a token.
Returns a tuple of (tokens, tokens_len, index) when consumption is
completed and tokens have been merged together.
"""
if _is_really_comment(tokens, index):
self.last_line_with_comment = tokens[index].line
finished = False
if (not _is_in_comment_type(tokens[index].type) and
self.last_line_with_comment != tokens[index].line):
finished = True
end = index
elif index == (tokens_len - 1):
finished = True
end = index + 1
if finished:
return _paste_tokens_line_by_line(tokens,
TokenType.RST,
self.begin,
end) |
def get_validation_data(doc):
"""
Validate the docstring.
Parameters
----------
doc : Docstring
A Docstring object with the given function name.
Returns
-------
tuple
errors : list of tuple
Errors occurred during validation.
warnings : list of tuple
Warnings occurred during validation.
examples_errs : str
Examples usage displayed along the error, otherwise empty string.
Notes
-----
The errors codes are defined as:
- First two characters: Section where the error happens:
* GL: Global (no section, like section ordering errors)
* SS: Short summary
* ES: Extended summary
* PR: Parameters
* RT: Returns
* YD: Yields
* RS: Raises
* WN: Warns
* SA: See Also
* NT: Notes
* RF: References
* EX: Examples
- Last two characters: Numeric error code inside the section
For example, EX02 is the second codified error in the Examples section
(which in this case is assigned to examples that do not pass the tests).
The error codes, their corresponding error messages, and the details on how
they are validated, are not documented more than in the source code of this
function.
"""
errs = []
wrns = []
if not doc.raw_doc:
errs.append(error('GL08'))
return errs, wrns, ''
if doc.start_blank_lines != 1:
errs.append(error('GL01'))
if doc.end_blank_lines != 1:
errs.append(error('GL02'))
if doc.double_blank_lines:
errs.append(error('GL03'))
mentioned_errs = doc.mentioned_private_classes
if mentioned_errs:
errs.append(error('GL04',
mentioned_private_classes=', '.join(mentioned_errs)))
for line in doc.raw_doc.splitlines():
if re.match("^ *\t", line):
errs.append(error('GL05', line_with_tabs=line.lstrip()))
unexpected_sections = [section for section in doc.section_titles
if section not in ALLOWED_SECTIONS]
for section in unexpected_sections:
errs.append(error('GL06',
section=section,
allowed_sections=', '.join(ALLOWED_SECTIONS)))
correct_order = [section for section in ALLOWED_SECTIONS
if section in doc.section_titles]
if correct_order != doc.section_titles:
errs.append(error('GL07',
correct_sections=', '.join(correct_order)))
if (doc.deprecated_with_directive
and not doc.extended_summary.startswith('.. deprecated:: ')):
errs.append(error('GL09'))
if not doc.summary:
errs.append(error('SS01'))
else:
if not doc.summary[0].isupper():
errs.append(error('SS02'))
if doc.summary[-1] != '.':
errs.append(error('SS03'))
if doc.summary != doc.summary.lstrip():
errs.append(error('SS04'))
elif (doc.is_function_or_method
and doc.summary.split(' ')[0][-1] == 's'):
errs.append(error('SS05'))
if doc.num_summary_lines > 1:
errs.append(error('SS06'))
if not doc.extended_summary:
wrns.append(('ES01', 'No extended summary found'))
# PR01: Parameters not documented
# PR02: Unknown parameters
# PR03: Wrong parameters order
errs += doc.parameter_mismatches
for param in doc.doc_parameters:
if not param.startswith("*"): # Check can ignore var / kwargs
if not doc.parameter_type(param):
if ':' in param:
errs.append(error('PR10',
param_name=param.split(':')[0]))
else:
errs.append(error('PR04', param_name=param))
else:
if doc.parameter_type(param)[-1] == '.':
errs.append(error('PR05', param_name=param))
common_type_errors = [('integer', 'int'),
('boolean', 'bool'),
('string', 'str')]
for wrong_type, right_type in common_type_errors:
if wrong_type in doc.parameter_type(param):
errs.append(error('PR06',
param_name=param,
right_type=right_type,
wrong_type=wrong_type))
if not doc.parameter_desc(param):
errs.append(error('PR07', param_name=param))
else:
if not doc.parameter_desc(param)[0].isupper():
errs.append(error('PR08', param_name=param))
if doc.parameter_desc(param)[-1] != '.':
errs.append(error('PR09', param_name=param))
if doc.is_function_or_method:
if not doc.returns:
if doc.method_returns_something:
errs.append(error('RT01'))
else:
if len(doc.returns) == 1 and doc.returns[0].name:
errs.append(error('RT02'))
for name_or_type, type_, desc in doc.returns:
if not desc:
errs.append(error('RT03'))
else:
desc = ' '.join(desc)
if not desc[0].isupper():
errs.append(error('RT04'))
if not desc.endswith('.'):
errs.append(error('RT05'))
if not doc.yields and 'yield' in doc.method_source:
errs.append(error('YD01'))
if not doc.see_also:
wrns.append(error('SA01'))
else:
for rel_name, rel_desc in doc.see_also.items():
if rel_desc:
if not rel_desc.endswith('.'):
errs.append(error('SA02', reference_name=rel_name))
if not rel_desc[0].isupper():
errs.append(error('SA03', reference_name=rel_name))
else:
errs.append(error('SA04', reference_name=rel_name))
if rel_name.startswith('pandas.'):
errs.append(error('SA05',
reference_name=rel_name,
right_reference=rel_name[len('pandas.'):]))
examples_errs = ''
if not doc.examples:
wrns.append(error('EX01'))
else:
examples_errs = doc.examples_errors
if examples_errs:
errs.append(error('EX02', doctest_log=examples_errs))
for err in doc.validate_pep8():
errs.append(error('EX03',
error_code=err.error_code,
error_message=err.message,
times_happening=' ({} times)'.format(err.count)
if err.count > 1 else ''))
examples_source_code = ''.join(doc.examples_source_code)
for wrong_import in ('numpy', 'pandas'):
if 'import {}'.format(wrong_import) in examples_source_code:
errs.append(error('EX04', imported_library=wrong_import))
return errs, wrns, examples_errs | Validate the docstring.
Parameters
----------
doc : Docstring
A Docstring object with the given function name.
Returns
-------
tuple
errors : list of tuple
Errors occurred during validation.
warnings : list of tuple
Warnings occurred during validation.
examples_errs : str
Examples usage displayed along the error, otherwise empty string.
Notes
-----
The errors codes are defined as:
- First two characters: Section where the error happens:
* GL: Global (no section, like section ordering errors)
* SS: Short summary
* ES: Extended summary
* PR: Parameters
* RT: Returns
* YD: Yields
* RS: Raises
* WN: Warns
* SA: See Also
* NT: Notes
* RF: References
* EX: Examples
- Last two characters: Numeric error code inside the section
For example, EX02 is the second codified error in the Examples section
(which in this case is assigned to examples that do not pass the tests).
The error codes, their corresponding error messages, and the details on how
they are validated, are not documented more than in the source code of this
function. | Below is the instruction that describes the task:
### Input:
Validate the docstring.
Parameters
----------
doc : Docstring
A Docstring object with the given function name.
Returns
-------
tuple
errors : list of tuple
Errors occurred during validation.
warnings : list of tuple
Warnings occurred during validation.
examples_errs : str
Examples usage displayed along the error, otherwise empty string.
Notes
-----
The errors codes are defined as:
- First two characters: Section where the error happens:
* GL: Global (no section, like section ordering errors)
* SS: Short summary
* ES: Extended summary
* PR: Parameters
* RT: Returns
* YD: Yields
* RS: Raises
* WN: Warns
* SA: See Also
* NT: Notes
* RF: References
* EX: Examples
- Last two characters: Numeric error code inside the section
For example, EX02 is the second codified error in the Examples section
(which in this case is assigned to examples that do not pass the tests).
The error codes, their corresponding error messages, and the details on how
they are validated, are not documented more than in the source code of this
function.
### Response:
def get_validation_data(doc):
"""
Validate the docstring.
Parameters
----------
doc : Docstring
A Docstring object with the given function name.
Returns
-------
tuple
errors : list of tuple
Errors occurred during validation.
warnings : list of tuple
Warnings occurred during validation.
examples_errs : str
Examples usage displayed along the error, otherwise empty string.
Notes
-----
The errors codes are defined as:
- First two characters: Section where the error happens:
* GL: Global (no section, like section ordering errors)
* SS: Short summary
* ES: Extended summary
* PR: Parameters
* RT: Returns
* YD: Yields
* RS: Raises
* WN: Warns
* SA: See Also
* NT: Notes
* RF: References
* EX: Examples
- Last two characters: Numeric error code inside the section
For example, EX02 is the second codified error in the Examples section
(which in this case is assigned to examples that do not pass the tests).
The error codes, their corresponding error messages, and the details on how
they are validated, are not documented more than in the source code of this
function.
"""
errs = []
wrns = []
if not doc.raw_doc:
errs.append(error('GL08'))
return errs, wrns, ''
if doc.start_blank_lines != 1:
errs.append(error('GL01'))
if doc.end_blank_lines != 1:
errs.append(error('GL02'))
if doc.double_blank_lines:
errs.append(error('GL03'))
mentioned_errs = doc.mentioned_private_classes
if mentioned_errs:
errs.append(error('GL04',
mentioned_private_classes=', '.join(mentioned_errs)))
for line in doc.raw_doc.splitlines():
if re.match("^ *\t", line):
errs.append(error('GL05', line_with_tabs=line.lstrip()))
unexpected_sections = [section for section in doc.section_titles
if section not in ALLOWED_SECTIONS]
for section in unexpected_sections:
errs.append(error('GL06',
section=section,
allowed_sections=', '.join(ALLOWED_SECTIONS)))
correct_order = [section for section in ALLOWED_SECTIONS
if section in doc.section_titles]
if correct_order != doc.section_titles:
errs.append(error('GL07',
correct_sections=', '.join(correct_order)))
if (doc.deprecated_with_directive
and not doc.extended_summary.startswith('.. deprecated:: ')):
errs.append(error('GL09'))
if not doc.summary:
errs.append(error('SS01'))
else:
if not doc.summary[0].isupper():
errs.append(error('SS02'))
if doc.summary[-1] != '.':
errs.append(error('SS03'))
if doc.summary != doc.summary.lstrip():
errs.append(error('SS04'))
elif (doc.is_function_or_method
and doc.summary.split(' ')[0][-1] == 's'):
errs.append(error('SS05'))
if doc.num_summary_lines > 1:
errs.append(error('SS06'))
if not doc.extended_summary:
wrns.append(('ES01', 'No extended summary found'))
# PR01: Parameters not documented
# PR02: Unknown parameters
# PR03: Wrong parameters order
errs += doc.parameter_mismatches
for param in doc.doc_parameters:
if not param.startswith("*"): # Check can ignore var / kwargs
if not doc.parameter_type(param):
if ':' in param:
errs.append(error('PR10',
param_name=param.split(':')[0]))
else:
errs.append(error('PR04', param_name=param))
else:
if doc.parameter_type(param)[-1] == '.':
errs.append(error('PR05', param_name=param))
common_type_errors = [('integer', 'int'),
('boolean', 'bool'),
('string', 'str')]
for wrong_type, right_type in common_type_errors:
if wrong_type in doc.parameter_type(param):
errs.append(error('PR06',
param_name=param,
right_type=right_type,
wrong_type=wrong_type))
if not doc.parameter_desc(param):
errs.append(error('PR07', param_name=param))
else:
if not doc.parameter_desc(param)[0].isupper():
errs.append(error('PR08', param_name=param))
if doc.parameter_desc(param)[-1] != '.':
errs.append(error('PR09', param_name=param))
if doc.is_function_or_method:
if not doc.returns:
if doc.method_returns_something:
errs.append(error('RT01'))
else:
if len(doc.returns) == 1 and doc.returns[0].name:
errs.append(error('RT02'))
for name_or_type, type_, desc in doc.returns:
if not desc:
errs.append(error('RT03'))
else:
desc = ' '.join(desc)
if not desc[0].isupper():
errs.append(error('RT04'))
if not desc.endswith('.'):
errs.append(error('RT05'))
if not doc.yields and 'yield' in doc.method_source:
errs.append(error('YD01'))
if not doc.see_also:
wrns.append(error('SA01'))
else:
for rel_name, rel_desc in doc.see_also.items():
if rel_desc:
if not rel_desc.endswith('.'):
errs.append(error('SA02', reference_name=rel_name))
if not rel_desc[0].isupper():
errs.append(error('SA03', reference_name=rel_name))
else:
errs.append(error('SA04', reference_name=rel_name))
if rel_name.startswith('pandas.'):
errs.append(error('SA05',
reference_name=rel_name,
right_reference=rel_name[len('pandas.'):]))
examples_errs = ''
if not doc.examples:
wrns.append(error('EX01'))
else:
examples_errs = doc.examples_errors
if examples_errs:
errs.append(error('EX02', doctest_log=examples_errs))
for err in doc.validate_pep8():
errs.append(error('EX03',
error_code=err.error_code,
error_message=err.message,
times_happening=' ({} times)'.format(err.count)
if err.count > 1 else ''))
examples_source_code = ''.join(doc.examples_source_code)
for wrong_import in ('numpy', 'pandas'):
if 'import {}'.format(wrong_import) in examples_source_code:
errs.append(error('EX04', imported_library=wrong_import))
return errs, wrns, examples_errs |
def compute_near_isotropic_downsampling_scales(size,
voxel_size,
dimensions_to_downsample,
max_scales=DEFAULT_MAX_DOWNSAMPLING_SCALES,
max_downsampling=DEFAULT_MAX_DOWNSAMPLING,
max_downsampled_size=DEFAULT_MAX_DOWNSAMPLED_SIZE):
"""Compute a list of successive downsampling factors."""
num_dims = len(voxel_size)
cur_scale = np.ones((num_dims, ), dtype=int)
scales = [tuple(cur_scale)]
while (len(scales) < max_scales and (np.prod(cur_scale) < max_downsampling) and
(size / cur_scale).max() > max_downsampled_size):
# Find dimension with smallest voxelsize.
cur_voxel_size = cur_scale * voxel_size
smallest_cur_voxel_size_dim = dimensions_to_downsample[np.argmin(cur_voxel_size[
dimensions_to_downsample])]
cur_scale[smallest_cur_voxel_size_dim] *= 2
target_voxel_size = cur_voxel_size[smallest_cur_voxel_size_dim] * 2
for d in dimensions_to_downsample:
if d == smallest_cur_voxel_size_dim:
continue
d_voxel_size = cur_voxel_size[d]
if abs(d_voxel_size - target_voxel_size) > abs(d_voxel_size * 2 - target_voxel_size):
cur_scale[d] *= 2
scales.append(tuple(cur_scale))
    return scales | Compute a list of successive downsampling factors. | Below is the instruction that describes the task:
### Input:
Compute a list of successive downsampling factors.
### Response:
def compute_near_isotropic_downsampling_scales(size,
voxel_size,
dimensions_to_downsample,
max_scales=DEFAULT_MAX_DOWNSAMPLING_SCALES,
max_downsampling=DEFAULT_MAX_DOWNSAMPLING,
max_downsampled_size=DEFAULT_MAX_DOWNSAMPLED_SIZE):
"""Compute a list of successive downsampling factors."""
num_dims = len(voxel_size)
cur_scale = np.ones((num_dims, ), dtype=int)
scales = [tuple(cur_scale)]
while (len(scales) < max_scales and (np.prod(cur_scale) < max_downsampling) and
(size / cur_scale).max() > max_downsampled_size):
# Find dimension with smallest voxelsize.
cur_voxel_size = cur_scale * voxel_size
smallest_cur_voxel_size_dim = dimensions_to_downsample[np.argmin(cur_voxel_size[
dimensions_to_downsample])]
cur_scale[smallest_cur_voxel_size_dim] *= 2
target_voxel_size = cur_voxel_size[smallest_cur_voxel_size_dim] * 2
for d in dimensions_to_downsample:
if d == smallest_cur_voxel_size_dim:
continue
d_voxel_size = cur_voxel_size[d]
if abs(d_voxel_size - target_voxel_size) > abs(d_voxel_size * 2 - target_voxel_size):
cur_scale[d] *= 2
scales.append(tuple(cur_scale))
return scales |
def cacheOnSameArgs(timeout=None):
""" Caches the return of the function until the the specified time has
elapsed or the arguments change. If timeout is None it will not
be considered. """
if isinstance(timeout, int):
timeout = datetime.timedelta(0, timeout)
def decorator(f):
_cache = [None]
def wrapper(*args, **kwargs):
if _cache[0] is not None:
cached_ret, dt, cached_args, cached_kwargs = _cache[0]
if (timeout is not None and
dt + timeout <= datetime.datetime.now()):
_cache[0] = None
if (cached_args, cached_kwargs) != (args, kwargs):
_cache[0] = None
if _cache[0] is None:
ret = f(*args, **kwargs)
_cache[0] = (ret, datetime.datetime.now(), args, kwargs)
return _cache[0][0]
return wrapper
return decorator | Caches the return of the function until the the specified time has
elapsed or the arguments change. If timeout is None it will not
be considered. | Below is the instruction that describes the task:
### Input:
Caches the return of the function until the the specified time has
elapsed or the arguments change. If timeout is None it will not
be considered.
### Response:
def cacheOnSameArgs(timeout=None):
""" Caches the return of the function until the the specified time has
elapsed or the arguments change. If timeout is None it will not
be considered. """
if isinstance(timeout, int):
timeout = datetime.timedelta(0, timeout)
def decorator(f):
_cache = [None]
def wrapper(*args, **kwargs):
if _cache[0] is not None:
cached_ret, dt, cached_args, cached_kwargs = _cache[0]
if (timeout is not None and
dt + timeout <= datetime.datetime.now()):
_cache[0] = None
if (cached_args, cached_kwargs) != (args, kwargs):
_cache[0] = None
if _cache[0] is None:
ret = f(*args, **kwargs)
_cache[0] = (ret, datetime.datetime.now(), args, kwargs)
return _cache[0][0]
return wrapper
return decorator |
def validate_code(self, client_id, code, client, request, *args, **kwargs):
"""Ensure the grant code is valid."""
client = client or self._clientgetter(client_id)
log.debug(
'Validate code for client %r and code %r', client.client_id, code
)
grant = self._grantgetter(client_id=client.client_id, code=code)
if not grant:
log.debug('Grant not found.')
return False
if hasattr(grant, 'expires') and \
datetime.datetime.utcnow() > grant.expires:
log.debug('Grant is expired.')
return False
request.state = kwargs.get('state')
request.user = grant.user
request.scopes = grant.scopes
    return True | Ensure the grant code is valid. | Below is the instruction that describes the task:
### Input:
Ensure the grant code is valid.
### Response:
def validate_code(self, client_id, code, client, request, *args, **kwargs):
    """Check that an authorization grant code exists and has not expired.

    On success the grant's state, user and scopes are copied onto the
    request object and ``True`` is returned; otherwise ``False``.
    """
    # Fall back to the registered client getter when no client was supplied.
    if not client:
        client = self._clientgetter(client_id)
    log.debug('Validate code for client %r and code %r', client.client_id, code)
    grant = self._grantgetter(client_id=client.client_id, code=code)
    if not grant:
        log.debug('Grant not found.')
        return False
    # Grants without an ``expires`` attribute never expire.
    expired = (hasattr(grant, 'expires')
               and datetime.datetime.utcnow() > grant.expires)
    if expired:
        log.debug('Grant is expired.')
        return False
    request.state = kwargs.get('state')
    request.user = grant.user
    request.scopes = grant.scopes
    return True
def time_wait(
predicate,
timeout_seconds=120,
sleep_seconds=1,
ignore_exceptions=True,
inverse_predicate=False,
noisy=True,
required_consecutive_success_count=1):
""" waits or spins for a predicate and returns the time of the wait.
An exception in the function will be returned.
A timeout will throw a TimeoutExpired Exception.
"""
start = time_module.time()
wait_for(predicate, timeout_seconds, sleep_seconds, ignore_exceptions, inverse_predicate, noisy, required_consecutive_success_count)
return elapse_time(start) | waits or spins for a predicate and returns the time of the wait.
An exception in the function will be returned.
A timeout will throw a TimeoutExpired Exception. | Below is the the instruction that describes the task:
### Input:
waits or spins for a predicate and returns the time of the wait.
An exception in the function will be returned.
A timeout will throw a TimeoutExpired Exception.
### Response:
def time_wait(
        predicate,
        timeout_seconds=120,
        sleep_seconds=1,
        ignore_exceptions=True,
        inverse_predicate=False,
        noisy=True,
        required_consecutive_success_count=1):
    """Block until *predicate* is satisfied and report how long it took.

    Delegates the polling to ``wait_for``; any exception raised there
    (including the TimeoutExpired raised on timeout) propagates unchanged.

    :return: the elapsed wall-clock time of the wait.
    """
    started_at = time_module.time()
    wait_for(
        predicate,
        timeout_seconds,
        sleep_seconds,
        ignore_exceptions,
        inverse_predicate,
        noisy,
        required_consecutive_success_count,
    )
    return elapse_time(started_at)
def activate(self, event):
"""Change the value."""
self._index += 1
if self._index >= len(self._values):
self._index = 0
self._selection = self._values[self._index]
self.ao2.speak(self._selection) | Change the value. | Below is the the instruction that describes the task:
### Input:
Change the value.
### Response:
def activate(self, event):
    """Advance the selection to the next value (wrapping) and speak it."""
    next_index = self._index + 1
    # Wrap around to the first value once the end of the list is passed.
    if next_index >= len(self._values):
        next_index = 0
    self._index = next_index
    self._selection = self._values[next_index]
    self.ao2.speak(self._selection)
def loads(s, **kwargs):
'''
.. versionadded:: 2018.3.0
Wraps json.loads and prevents a traceback in the event that a bytestring is
passed to the function. (Python < 3.6 cannot load bytestrings)
You can pass an alternate json module (loaded via import_json() above)
using the _json_module argument)
'''
json_module = kwargs.pop('_json_module', json)
try:
return json_module.loads(s, **kwargs)
except TypeError as exc:
# json.loads cannot load bytestrings in Python < 3.6
if six.PY3 and isinstance(s, bytes):
return json_module.loads(salt.utils.stringutils.to_unicode(s), **kwargs)
else:
raise exc | .. versionadded:: 2018.3.0
Wraps json.loads and prevents a traceback in the event that a bytestring is
passed to the function. (Python < 3.6 cannot load bytestrings)
You can pass an alternate json module (loaded via import_json() above)
using the _json_module argument) | Below is the the instruction that describes the task:
### Input:
.. versionadded:: 2018.3.0
Wraps json.loads and prevents a traceback in the event that a bytestring is
passed to the function. (Python < 3.6 cannot load bytestrings)
You can pass an alternate json module (loaded via import_json() above)
using the _json_module argument)
### Response:
def loads(s, **kwargs):
    '''
    .. versionadded:: 2018.3.0
    Wraps json.loads and prevents a traceback in the event that a bytestring is
    passed to the function. (Python < 3.6 cannot load bytestrings)
    You can pass an alternate json module (loaded via import_json() above)
    using the _json_module argument)
    '''
    json_module = kwargs.pop('_json_module', json)
    try:
        return json_module.loads(s, **kwargs)
    except TypeError:
        # json.loads cannot load bytestrings in Python < 3.6, so decode and
        # retry in that case.
        if six.PY3 and isinstance(s, bytes):
            return json_module.loads(salt.utils.stringutils.to_unicode(s), **kwargs)
        # Any other TypeError is re-raised unchanged; a bare ``raise``
        # preserves the original traceback (``raise exc`` did not).
        raise
def blob_data_to_dict(stat_names, blobs):
"""Converts list of "blobs" to a dictionary of model stats.
Samplers like ``emcee`` store the extra tuple returned by ``CallModel`` to
a list called blobs. This is a list of lists of tuples with shape
niterations x nwalkers x nstats, where nstats is the number of stats
returned by the model's ``default_stats``. This converts that list to a
dictionary of arrays keyed by the stat names.
Parameters
----------
stat_names : list of str
The list of the stat names.
blobs : list of list of tuples
The data to convert.
Returns
-------
dict :
A dictionary mapping the model's ``default_stats`` to arrays of values.
Each array will have shape ``nwalkers x niterations``.
"""
# get the dtypes of each of the stats; we'll just take this from the
# first iteration and walker
dtypes = [type(val) for val in blobs[0][0]]
assert len(stat_names) == len(dtypes), (
"number of stat names must match length of tuples in the blobs")
# convert to an array; to ensure that we get the dtypes correct, we'll
# cast to a structured array
raw_stats = numpy.array(blobs, dtype=zip(stat_names, dtypes))
# transpose so that it has shape nwalkers x niterations
raw_stats = raw_stats.transpose()
# now return as a dictionary
return {stat: raw_stats[stat] for stat in stat_names} | Converts list of "blobs" to a dictionary of model stats.
Samplers like ``emcee`` store the extra tuple returned by ``CallModel`` to
a list called blobs. This is a list of lists of tuples with shape
niterations x nwalkers x nstats, where nstats is the number of stats
returned by the model's ``default_stats``. This converts that list to a
dictionary of arrays keyed by the stat names.
Parameters
----------
stat_names : list of str
The list of the stat names.
blobs : list of list of tuples
The data to convert.
Returns
-------
dict :
A dictionary mapping the model's ``default_stats`` to arrays of values.
Each array will have shape ``nwalkers x niterations``. | Below is the the instruction that describes the task:
### Input:
Converts list of "blobs" to a dictionary of model stats.
Samplers like ``emcee`` store the extra tuple returned by ``CallModel`` to
a list called blobs. This is a list of lists of tuples with shape
niterations x nwalkers x nstats, where nstats is the number of stats
returned by the model's ``default_stats``. This converts that list to a
dictionary of arrays keyed by the stat names.
Parameters
----------
stat_names : list of str
The list of the stat names.
blobs : list of list of tuples
The data to convert.
Returns
-------
dict :
A dictionary mapping the model's ``default_stats`` to arrays of values.
Each array will have shape ``nwalkers x niterations``.
### Response:
def blob_data_to_dict(stat_names, blobs):
    """Converts list of "blobs" to a dictionary of model stats.
    Samplers like ``emcee`` store the extra tuple returned by ``CallModel`` to
    a list called blobs. This is a list of lists of tuples with shape
    niterations x nwalkers x nstats, where nstats is the number of stats
    returned by the model's ``default_stats``. This converts that list to a
    dictionary of arrays keyed by the stat names.
    Parameters
    ----------
    stat_names : list of str
        The list of the stat names.
    blobs : list of list of tuples
        The data to convert.
    Returns
    -------
    dict :
        A dictionary mapping the model's ``default_stats`` to arrays of values.
        Each array will have shape ``nwalkers x niterations``.
    """
    # get the dtypes of each of the stats; we'll just take this from the
    # first iteration and walker
    dtypes = [type(val) for val in blobs[0][0]]
    assert len(stat_names) == len(dtypes), (
        "number of stat names must match length of tuples in the blobs")
    # convert to an array; to ensure that we get the dtypes correct, we'll
    # cast to a structured array. NOTE: numpy requires a concrete list of
    # (name, dtype) pairs -- on Python 3 ``zip`` returns an iterator, so it
    # must be materialised with ``list`` first (bug fix).
    raw_stats = numpy.array(blobs, dtype=list(zip(stat_names, dtypes)))
    # transpose so that it has shape nwalkers x niterations
    raw_stats = raw_stats.transpose()
    # now return as a dictionary
    return {stat: raw_stats[stat] for stat in stat_names}
def dedupe_all_lists(obj, exclude_keys=()):
"""Recursively remove duplucates from all lists.
Args:
obj: collection to deduplicate
exclude_keys (Container[str]): key names to ignore for deduplication
"""
squared_dedupe_len = 10
if isinstance(obj, dict):
new_obj = {}
for key, value in obj.items():
if key in exclude_keys:
new_obj[key] = value
else:
new_obj[key] = dedupe_all_lists(value)
return new_obj
elif isinstance(obj, (list, tuple, set)):
new_elements = [dedupe_all_lists(v) for v in obj]
if len(new_elements) < squared_dedupe_len:
new_obj = dedupe_list(new_elements)
else:
new_obj = dedupe_list_of_dicts(new_elements)
return type(obj)(new_obj)
else:
return obj | Recursively remove duplucates from all lists.
Args:
obj: collection to deduplicate
exclude_keys (Container[str]): key names to ignore for deduplication | Below is the the instruction that describes the task:
### Input:
Recursively remove duplucates from all lists.
Args:
obj: collection to deduplicate
exclude_keys (Container[str]): key names to ignore for deduplication
### Response:
def dedupe_all_lists(obj, exclude_keys=()):
    """Recursively remove duplicates from all lists.
    Args:
        obj: collection to deduplicate
        exclude_keys (Container[str]): key names to ignore for deduplication
    """
    # Below this length a quadratic order-preserving dedupe is cheap enough.
    squared_dedupe_len = 10
    if isinstance(obj, dict):
        new_obj = {}
        for key, value in obj.items():
            if key in exclude_keys:
                # Excluded keys keep their value verbatim, no recursion.
                new_obj[key] = value
            else:
                # Bug fix: propagate exclude_keys so excluded key names are
                # honoured at every nesting level, not just the top one.
                new_obj[key] = dedupe_all_lists(value, exclude_keys=exclude_keys)
        return new_obj
    elif isinstance(obj, (list, tuple, set)):
        new_elements = [dedupe_all_lists(v, exclude_keys=exclude_keys)
                        for v in obj]
        if len(new_elements) < squared_dedupe_len:
            new_obj = dedupe_list(new_elements)
        else:
            new_obj = dedupe_list_of_dicts(new_elements)
        return type(obj)(new_obj)
    else:
        return obj
def get_unique_schema_id(schema):
# type: (GraphQLSchema) -> str
"""Get a unique id given a GraphQLSchema"""
assert isinstance(schema, GraphQLSchema), (
"Must receive a GraphQLSchema as schema. Received {}"
).format(repr(schema))
if schema not in _cached_schemas:
_cached_schemas[schema] = sha1(str(schema).encode("utf-8")).hexdigest()
return _cached_schemas[schema] | Get a unique id given a GraphQLSchema | Below is the the instruction that describes the task:
### Input:
Get a unique id given a GraphQLSchema
### Response:
def get_unique_schema_id(schema):
    # type: (GraphQLSchema) -> str
    """Return a stable unique id (SHA-1 of the printed schema) for the given
    GraphQLSchema, memoised in the module-level cache."""
    assert isinstance(schema, GraphQLSchema), (
        "Must receive a GraphQLSchema as schema. Received {}"
    ).format(repr(schema))
    try:
        return _cached_schemas[schema]
    except KeyError:
        digest = sha1(str(schema).encode("utf-8")).hexdigest()
        _cached_schemas[schema] = digest
        return digest
def is_not_equal_to(self, other):
"""Asserts that val is not equal to other."""
if self.val == other:
self._err('Expected <%s> to be not equal to <%s>, but was.' % (self.val, other))
return self | Asserts that val is not equal to other. | Below is the the instruction that describes the task:
### Input:
Asserts that val is not equal to other.
### Response:
def is_not_equal_to(self, other):
    """Assert that the wrapped value differs from *other*.

    Reports a failure via ``self._err`` when the values compare equal;
    returns ``self`` to allow fluent chaining.
    """
    if self.val == other:
        message = 'Expected <%s> to be not equal to <%s>, but was.' % (self.val, other)
        self._err(message)
    return self
def _adjust_probability_vec_best(population, fitnesses, probability_vec,
adjust_rate):
"""Shift probabilities towards the best solution."""
best_solution = max(zip(fitnesses, population))[1]
# Shift probabilities towards best solution
return _adjust(probability_vec, best_solution, adjust_rate) | Shift probabilities towards the best solution. | Below is the the instruction that describes the task:
### Input:
Shift probabilities towards the best solution.
### Response:
def _adjust_probability_vec_best(population, fitnesses, probability_vec,
                                 adjust_rate):
    """Shift probabilities towards the best solution.

    The best solution is selected by fitness only. (Bug fix: the previous
    ``max(zip(fitnesses, population))`` compared solutions themselves to
    break fitness ties, which raises TypeError for non-orderable solution
    types such as dicts or numpy arrays.)
    """
    best_solution = max(zip(fitnesses, population), key=lambda pair: pair[0])[1]
    # Shift probabilities towards best solution
    return _adjust(probability_vec, best_solution, adjust_rate)
def same_dynamic_shape(a, b):
"""Returns whether a and b have the same dynamic shape.
Args:
a: `Tensor`
b: `Tensor`
Returns:
`bool` `Tensor` representing if both tensors have the same shape.
"""
a = tf.convert_to_tensor(value=a, name="a")
b = tf.convert_to_tensor(value=b, name="b")
# Here we can't just do tf.equal(a.shape, b.shape), since
# static shape inference may break the equality comparison between
# shape(a) and shape(b) in tf.equal.
def all_shapes_equal():
return tf.reduce_all(
input_tensor=tf.equal(
tf.concat([tf.shape(input=a), tf.shape(input=b)], 0),
tf.concat([tf.shape(input=b), tf.shape(input=a)], 0)))
# One of the shapes isn't fully defined, so we need to use the dynamic
# shape.
return tf.cond(
pred=tf.equal(tf.rank(a), tf.rank(b)),
true_fn=all_shapes_equal,
false_fn=lambda: tf.constant(False)) | Returns whether a and b have the same dynamic shape.
Args:
a: `Tensor`
b: `Tensor`
Returns:
`bool` `Tensor` representing if both tensors have the same shape. | Below is the the instruction that describes the task:
### Input:
Returns whether a and b have the same dynamic shape.
Args:
a: `Tensor`
b: `Tensor`
Returns:
`bool` `Tensor` representing if both tensors have the same shape.
### Response:
def same_dynamic_shape(a, b):
    """Returns whether a and b have the same dynamic shape.
    Args:
      a: `Tensor`
      b: `Tensor`
    Returns:
      `bool` `Tensor` representing if both tensors have the same shape.
    """
    a = tf.convert_to_tensor(value=a, name="a")
    b = tf.convert_to_tensor(value=b, name="b")
    # Here we can't just do tf.equal(a.shape, b.shape), since
    # static shape inference may break the equality comparison between
    # shape(a) and shape(b) in tf.equal.
    def all_shapes_equal():
        # Compare shape(a)++shape(b) against shape(b)++shape(a): the two
        # concatenations are elementwise-equal iff the shapes themselves are
        # equal. Requires equal ranks, which the tf.cond guard below ensures.
        return tf.reduce_all(
            input_tensor=tf.equal(
                tf.concat([tf.shape(input=a), tf.shape(input=b)], 0),
                tf.concat([tf.shape(input=b), tf.shape(input=a)], 0)))
    # One of the shapes isn't fully defined, so we need to use the dynamic
    # shape.
    # Differing ranks short-circuit to False without running the concat
    # comparison (which would fail on unequal-length shape vectors).
    return tf.cond(
        pred=tf.equal(tf.rank(a), tf.rank(b)),
        true_fn=all_shapes_equal,
        false_fn=lambda: tf.constant(False))
def _next(self, request, application, roles, next_config):
""" Continue the state machine at given state. """
# we only support state changes for POST requests
if request.method == "POST":
key = None
# If next state is a transition, process it
while True:
# We do not expect to get a direct state transition here.
assert next_config['type'] in ['goto', 'transition']
while next_config['type'] == 'goto':
key = next_config['key']
next_config = self._config[key]
instance = load_instance(next_config)
if not isinstance(instance, Transition):
break
next_config = instance.get_next_config(request, application, roles)
# lookup next state
assert key is not None
state_key = key
# enter that state
instance.enter_state(request, application)
application.state = state_key
application.save()
# log details
log.change(application.application_ptr, "state: %s" % instance.name)
# redirect to this new state
url = get_url(request, application, roles)
return HttpResponseRedirect(url)
else:
return HttpResponseBadRequest("<h1>Bad Request</h1>") | Continue the state machine at given state. | Below is the the instruction that describes the task:
### Input:
Continue the state machine at given state.
### Response:
def _next(self, request, application, roles, next_config):
    """ Continue the state machine at given state.

    Resolves ``next_config`` through any chain of 'goto' redirections and
    Transition objects until a concrete state is reached, enters that
    state, persists it on the application, logs the change and redirects
    the client to the new state's URL.

    Only POST requests may change state; any other method is answered
    with HTTP 400.
    """
    # we only support state changes for POST requests
    if request.method == "POST":
        key = None
        # If next state is a transition, process it
        while True:
            # We do not expect to get a direct state transition here.
            assert next_config['type'] in ['goto', 'transition']
            # Follow 'goto' links until a non-goto config is reached; the
            # last key followed identifies the state we end up in.
            while next_config['type'] == 'goto':
                key = next_config['key']
                next_config = self._config[key]
            instance = load_instance(next_config)
            if not isinstance(instance, Transition):
                break
            # Transitions resolve to a further config; loop to process it.
            next_config = instance.get_next_config(request, application, roles)
        # lookup next state
        # key must have been set by at least one 'goto' hop above.
        assert key is not None
        state_key = key
        # enter that state
        instance.enter_state(request, application)
        application.state = state_key
        application.save()
        # log details
        log.change(application.application_ptr, "state: %s" % instance.name)
        # redirect to this new state
        url = get_url(request, application, roles)
        return HttpResponseRedirect(url)
    else:
        return HttpResponseBadRequest("<h1>Bad Request</h1>")
def __format_row(self, row: AssetAllocationViewModel):
""" display-format one row
Formats one Asset Class record """
output = ""
index = 0
# Name
value = row.name
# Indent according to depth.
for _ in range(0, row.depth):
value = f" {value}"
output += self.append_text_column(value, index)
# Set Allocation
value = ""
index += 1
if row.set_allocation > 0:
value = f"{row.set_allocation:.2f}"
output += self.append_num_column(value, index)
# Current Allocation
value = ""
index += 1
if row.curr_allocation > Decimal(0):
value = f"{row.curr_allocation:.2f}"
output += self.append_num_column(value, index)
# Allocation difference, percentage
value = ""
index += 1
if row.alloc_diff_perc.copy_abs() > Decimal(0):
value = f"{row.alloc_diff_perc:.0f} %"
output += self.append_num_column(value, index)
# Allocated value
index += 1
value = ""
if row.set_value:
value = f"{row.set_value:,.0f}"
output += self.append_num_column(value, index)
# Current Value
index += 1
value = f"{row.curr_value:,.0f}"
output += self.append_num_column(value, index)
# Value in security's currency. Show only if displaying full model, with stocks.
index += 1
if self.full:
value = ""
if row.curr_value_own_currency:
value = f"({row.curr_value_own_currency:,.0f}"
value += f" {row.own_currency}"
value += ")"
output += self.append_num_column(value, index)
# https://en.wikipedia.org/wiki/ANSI_escape_code
# CSI="\x1B["
# red = 31, green = 32
# output += CSI+"31;40m" + "Colored Text" + CSI + "0m"
# Value diff
index += 1
value = ""
if row.diff_value:
value = f"{row.diff_value:,.0f}"
# Color the output
# value = f"{CSI};40m{value}{CSI};40m"
output += self.append_num_column(value, index)
return output | display-format one row
Formats one Asset Class record | Below is the the instruction that describes the task:
### Input:
display-format one row
Formats one Asset Class record
### Response:
def __format_row(self, row: AssetAllocationViewModel) -> str:
    """ display-format one row
    Formats one Asset Class record.

    Columns, in order: name (indented by tree depth), set allocation,
    current allocation, allocation difference (%), allocated value,
    current value, value in the security's own currency (full mode only)
    and value difference. Empty strings are emitted for zero/missing
    numeric fields so column alignment is preserved.
    """
    output = ""
    index = 0
    # Name
    value = row.name
    # Indent according to depth.
    for _ in range(0, row.depth):
        value = f"    {value}"
    output += self.append_text_column(value, index)
    # Set Allocation
    value = ""
    index += 1
    if row.set_allocation > 0:
        value = f"{row.set_allocation:.2f}"
    output += self.append_num_column(value, index)
    # Current Allocation
    value = ""
    index += 1
    if row.curr_allocation > Decimal(0):
        value = f"{row.curr_allocation:.2f}"
    output += self.append_num_column(value, index)
    # Allocation difference, percentage
    value = ""
    index += 1
    if row.alloc_diff_perc.copy_abs() > Decimal(0):
        value = f"{row.alloc_diff_perc:.0f} %"
    output += self.append_num_column(value, index)
    # Allocated value
    index += 1
    value = ""
    if row.set_value:
        value = f"{row.set_value:,.0f}"
    output += self.append_num_column(value, index)
    # Current Value
    index += 1
    value = f"{row.curr_value:,.0f}"
    output += self.append_num_column(value, index)
    # Value in security's currency. Show only if displaying full model, with stocks.
    index += 1
    if self.full:
        value = ""
        if row.curr_value_own_currency:
            value = f"({row.curr_value_own_currency:,.0f}"
            value += f" {row.own_currency}"
            value += ")"
        output += self.append_num_column(value, index)
    # https://en.wikipedia.org/wiki/ANSI_escape_code
    # CSI="\x1B["
    # red = 31, green = 32
    # output += CSI+"31;40m" + "Colored Text" + CSI + "0m"
    # Value diff
    index += 1
    value = ""
    if row.diff_value:
        value = f"{row.diff_value:,.0f}"
        # Color the output
        # value = f"{CSI};40m{value}{CSI};40m"
    output += self.append_num_column(value, index)
    return output
def confsponsor(self):
"""Sponsor(s) of the conference the abstract belongs to."""
sponsors = chained_get(self._confevent, ['confsponsors', 'confsponsor'], [])
if len(sponsors) == 0:
return None
if isinstance(sponsors, list):
return [s['$'] for s in sponsors]
return sponsors | Sponsor(s) of the conference the abstract belongs to. | Below is the the instruction that describes the task:
### Input:
Sponsor(s) of the conference the abstract belongs to.
### Response:
def confsponsor(self):
    """Sponsor(s) of the conference the abstract belongs to."""
    path = ['confsponsors', 'confsponsor']
    sponsors = chained_get(self._confevent, path, [])
    if len(sponsors) == 0:
        return None
    # Multiple sponsors arrive as a list of wrapper dicts; unwrap each.
    if isinstance(sponsors, list):
        return [entry['$'] for entry in sponsors]
    return sponsors
def run(app=None,
server='wsgiref',
host='127.0.0.1',
port=8080,
interval=1,
reloader=False,
quiet=False,
plugins=None,
debug=None, **kargs):
""" Start a server instance. This method blocks until the server terminates.
:param app: WSGI application or target string supported by
:func:`load_app`. (default: :func:`default_app`)
:param server: Server adapter to use. See :data:`server_names` keys
for valid names or pass a :class:`ServerAdapter` subclass.
(default: `wsgiref`)
:param host: Server address to bind to. Pass ``0.0.0.0`` to listens on
all interfaces including the external one. (default: 127.0.0.1)
:param port: Server port to bind to. Values below 1024 require root
privileges. (default: 8080)
:param reloader: Start auto-reloading server? (default: False)
:param interval: Auto-reloader interval in seconds (default: 1)
:param quiet: Suppress output to stdout and stderr? (default: False)
:param options: Options passed to the server adapter.
"""
if NORUN: return
if reloader and not os.environ.get('BOTTLE_CHILD'):
import subprocess
lockfile = None
try:
fd, lockfile = tempfile.mkstemp(prefix='bottle.', suffix='.lock')
os.close(fd) # We only need this file to exist. We never write to it
while os.path.exists(lockfile):
args = [sys.executable] + sys.argv
environ = os.environ.copy()
environ['BOTTLE_CHILD'] = 'true'
environ['BOTTLE_LOCKFILE'] = lockfile
p = subprocess.Popen(args, env=environ)
while p.poll() is None: # Busy wait...
os.utime(lockfile, None) # I am alive!
time.sleep(interval)
if p.poll() != 3:
if os.path.exists(lockfile): os.unlink(lockfile)
sys.exit(p.poll())
except KeyboardInterrupt:
pass
finally:
if os.path.exists(lockfile):
os.unlink(lockfile)
return
try:
if debug is not None: _debug(debug)
app = app or default_app()
if isinstance(app, basestring):
app = load_app(app)
if not callable(app):
raise ValueError("Application is not callable: %r" % app)
for plugin in plugins or []:
if isinstance(plugin, basestring):
plugin = load(plugin)
app.install(plugin)
if server in server_names:
server = server_names.get(server)
if isinstance(server, basestring):
server = load(server)
if isinstance(server, type):
server = server(host=host, port=port, **kargs)
if not isinstance(server, ServerAdapter):
raise ValueError("Unknown or unsupported server: %r" % server)
server.quiet = server.quiet or quiet
if not server.quiet:
_stderr("Bottle v%s server starting up (using %s)...\n" %
(__version__, repr(server)))
_stderr("Listening on http://%s:%d/\n" %
(server.host, server.port))
_stderr("Hit Ctrl-C to quit.\n\n")
if reloader:
lockfile = os.environ.get('BOTTLE_LOCKFILE')
bgcheck = FileCheckerThread(lockfile, interval)
with bgcheck:
server.run(app)
if bgcheck.status == 'reload':
sys.exit(3)
else:
server.run(app)
except KeyboardInterrupt:
pass
except (SystemExit, MemoryError):
raise
except:
if not reloader: raise
if not getattr(server, 'quiet', quiet):
print_exc()
time.sleep(interval)
sys.exit(3) | Start a server instance. This method blocks until the server terminates.
:param app: WSGI application or target string supported by
:func:`load_app`. (default: :func:`default_app`)
:param server: Server adapter to use. See :data:`server_names` keys
for valid names or pass a :class:`ServerAdapter` subclass.
(default: `wsgiref`)
:param host: Server address to bind to. Pass ``0.0.0.0`` to listens on
all interfaces including the external one. (default: 127.0.0.1)
:param port: Server port to bind to. Values below 1024 require root
privileges. (default: 8080)
:param reloader: Start auto-reloading server? (default: False)
:param interval: Auto-reloader interval in seconds (default: 1)
:param quiet: Suppress output to stdout and stderr? (default: False)
:param options: Options passed to the server adapter. | Below is the the instruction that describes the task:
### Input:
Start a server instance. This method blocks until the server terminates.
:param app: WSGI application or target string supported by
:func:`load_app`. (default: :func:`default_app`)
:param server: Server adapter to use. See :data:`server_names` keys
for valid names or pass a :class:`ServerAdapter` subclass.
(default: `wsgiref`)
:param host: Server address to bind to. Pass ``0.0.0.0`` to listens on
all interfaces including the external one. (default: 127.0.0.1)
:param port: Server port to bind to. Values below 1024 require root
privileges. (default: 8080)
:param reloader: Start auto-reloading server? (default: False)
:param interval: Auto-reloader interval in seconds (default: 1)
:param quiet: Suppress output to stdout and stderr? (default: False)
:param options: Options passed to the server adapter.
### Response:
def run(app=None,
        server='wsgiref',
        host='127.0.0.1',
        port=8080,
        interval=1,
        reloader=False,
        quiet=False,
        plugins=None,
        debug=None, **kargs):
    """ Start a server instance. This method blocks until the server terminates.
    :param app: WSGI application or target string supported by
           :func:`load_app`. (default: :func:`default_app`)
    :param server: Server adapter to use. See :data:`server_names` keys
           for valid names or pass a :class:`ServerAdapter` subclass.
           (default: `wsgiref`)
    :param host: Server address to bind to. Pass ``0.0.0.0`` to listens on
           all interfaces including the external one. (default: 127.0.0.1)
    :param port: Server port to bind to. Values below 1024 require root
           privileges. (default: 8080)
    :param reloader: Start auto-reloading server? (default: False)
    :param interval: Auto-reloader interval in seconds (default: 1)
    :param quiet: Suppress output to stdout and stderr? (default: False)
    :param options: Options passed to the server adapter.
    """
    if NORUN: return
    # Reloader parent process: spawn a child copy of this script (marked via
    # the BOTTLE_CHILD env var) and keep the lockfile's mtime fresh as a
    # heartbeat. A child exit code of 3 means "reload requested".
    if reloader and not os.environ.get('BOTTLE_CHILD'):
        import subprocess
        lockfile = None
        try:
            fd, lockfile = tempfile.mkstemp(prefix='bottle.', suffix='.lock')
            os.close(fd) # We only need this file to exist. We never write to it
            while os.path.exists(lockfile):
                args = [sys.executable] + sys.argv
                environ = os.environ.copy()
                environ['BOTTLE_CHILD'] = 'true'
                environ['BOTTLE_LOCKFILE'] = lockfile
                p = subprocess.Popen(args, env=environ)
                while p.poll() is None: # Busy wait...
                    os.utime(lockfile, None) # I am alive!
                    time.sleep(interval)
                # Any exit code other than 3 means the child terminated for
                # real; propagate it instead of restarting.
                if p.poll() != 3:
                    if os.path.exists(lockfile): os.unlink(lockfile)
                    sys.exit(p.poll())
        except KeyboardInterrupt:
            pass
        finally:
            if os.path.exists(lockfile):
                os.unlink(lockfile)
        return
    # Normal startup path (also runs inside the reloader child).
    try:
        if debug is not None: _debug(debug)
        app = app or default_app()
        # Strings are treated as "module:target" specs and loaded lazily.
        if isinstance(app, basestring):
            app = load_app(app)
        if not callable(app):
            raise ValueError("Application is not callable: %r" % app)
        for plugin in plugins or []:
            if isinstance(plugin, basestring):
                plugin = load(plugin)
            app.install(plugin)
        # Resolve the server argument: name -> registry entry -> import
        # string -> adapter class -> adapter instance.
        if server in server_names:
            server = server_names.get(server)
        if isinstance(server, basestring):
            server = load(server)
        if isinstance(server, type):
            server = server(host=host, port=port, **kargs)
        if not isinstance(server, ServerAdapter):
            raise ValueError("Unknown or unsupported server: %r" % server)
        server.quiet = server.quiet or quiet
        if not server.quiet:
            _stderr("Bottle v%s server starting up (using %s)...\n" %
                    (__version__, repr(server)))
            _stderr("Listening on http://%s:%d/\n" %
                    (server.host, server.port))
            _stderr("Hit Ctrl-C to quit.\n\n")
        if reloader:
            # Child side of the reloader: a background thread watches module
            # files and the parent's lockfile; exiting with code 3 asks the
            # parent to restart us.
            lockfile = os.environ.get('BOTTLE_LOCKFILE')
            bgcheck = FileCheckerThread(lockfile, interval)
            with bgcheck:
                server.run(app)
            if bgcheck.status == 'reload':
                sys.exit(3)
        else:
            server.run(app)
    except KeyboardInterrupt:
        pass
    except (SystemExit, MemoryError):
        raise
    except:
        if not reloader: raise
        if not getattr(server, 'quiet', quiet):
            print_exc()
        time.sleep(interval)
        sys.exit(3)
def retrieve_agent_profile_ids(self, agent, since=None):
"""Retrieve agent profile id(s) with the specified parameters
:param agent: Agent object of desired agent profiles
:type agent: :class:`tincan.agent.Agent`
:param since: Retrieve agent profile id's since this time
:type since: str | unicode
:return: LRS Response object with list of retrieved agent profile id's as content
:rtype: :class:`tincan.lrs_response.LRSResponse`
"""
if not isinstance(agent, Agent):
agent = Agent(agent)
request = HTTPRequest(
method="GET",
resource="agents/profile"
)
request.query_params["agent"] = agent.to_json(self.version)
if since is not None:
request.query_params["since"] = since
lrs_response = self._send_request(request)
if lrs_response.success:
lrs_response.content = json.loads(lrs_response.data)
return lrs_response | Retrieve agent profile id(s) with the specified parameters
:param agent: Agent object of desired agent profiles
:type agent: :class:`tincan.agent.Agent`
:param since: Retrieve agent profile id's since this time
:type since: str | unicode
:return: LRS Response object with list of retrieved agent profile id's as content
:rtype: :class:`tincan.lrs_response.LRSResponse` | Below is the the instruction that describes the task:
### Input:
Retrieve agent profile id(s) with the specified parameters
:param agent: Agent object of desired agent profiles
:type agent: :class:`tincan.agent.Agent`
:param since: Retrieve agent profile id's since this time
:type since: str | unicode
:return: LRS Response object with list of retrieved agent profile id's as content
:rtype: :class:`tincan.lrs_response.LRSResponse`
### Response:
def retrieve_agent_profile_ids(self, agent, since=None):
    """Fetch the profile id(s) stored for *agent*.

    :param agent: Agent object (or mapping coerced into one) whose profile
        ids are requested.
    :param since: only return ids stored since this time, when given.
    :return: LRSResponse whose ``content`` holds the decoded id list on
        success.
    """
    # Coerce plain mappings into a proper Agent instance.
    if not isinstance(agent, Agent):
        agent = Agent(agent)
    request = HTTPRequest(method="GET", resource="agents/profile")
    request.query_params["agent"] = agent.to_json(self.version)
    if since is not None:
        request.query_params["since"] = since
    lrs_response = self._send_request(request)
    if lrs_response.success:
        # The LRS returns a JSON array of profile ids; decode it for callers.
        lrs_response.content = json.loads(lrs_response.data)
    return lrs_response
def asr_breaking(self, tol_eigendisplacements=1e-5):
"""
Returns the breaking of the acoustic sum rule for the three acoustic modes,
if Gamma is present. None otherwise.
If eigendisplacements are available they are used to determine the acoustic
modes: selects the bands corresponding to the eigendisplacements that
represent to a translation within tol_eigendisplacements. If these are not
identified or eigendisplacements are missing the first 3 modes will be used
(indices [0:3]).
"""
for i in range(self.nb_qpoints):
if np.allclose(self.qpoints[i].frac_coords, (0, 0, 0)):
if self.has_eigendisplacements:
acoustic_modes_index = []
for j in range(self.nb_bands):
eig = self.eigendisplacements[j][i]
if np.max(np.abs(eig[1:] - eig[:1])) < tol_eigendisplacements:
acoustic_modes_index.append(j)
# if acoustic modes are not correctly identified return use
# the first three modes
if len(acoustic_modes_index) != 3:
acoustic_modes_index = [0, 1, 2]
return self.bands[acoustic_modes_index, i]
else:
return self.bands[:3, i]
return None | Returns the breaking of the acoustic sum rule for the three acoustic modes,
if Gamma is present. None otherwise.
If eigendisplacements are available they are used to determine the acoustic
modes: selects the bands corresponding to the eigendisplacements that
represent to a translation within tol_eigendisplacements. If these are not
identified or eigendisplacements are missing the first 3 modes will be used
(indices [0:3]). | Below is the the instruction that describes the task:
### Input:
Returns the breaking of the acoustic sum rule for the three acoustic modes,
if Gamma is present. None otherwise.
If eigendisplacements are available they are used to determine the acoustic
modes: selects the bands corresponding to the eigendisplacements that
represent to a translation within tol_eigendisplacements. If these are not
identified or eigendisplacements are missing the first 3 modes will be used
(indices [0:3]).
### Response:
def asr_breaking(self, tol_eigendisplacements=1e-5):
"""
Returns the breaking of the acoustic sum rule for the three acoustic modes,
if Gamma is present. None otherwise.
If eigendisplacements are available they are used to determine the acoustic
modes: selects the bands corresponding to the eigendisplacements that
represent to a translation within tol_eigendisplacements. If these are not
identified or eigendisplacements are missing the first 3 modes will be used
(indices [0:3]).
"""
for i in range(self.nb_qpoints):
if np.allclose(self.qpoints[i].frac_coords, (0, 0, 0)):
if self.has_eigendisplacements:
acoustic_modes_index = []
for j in range(self.nb_bands):
eig = self.eigendisplacements[j][i]
if np.max(np.abs(eig[1:] - eig[:1])) < tol_eigendisplacements:
acoustic_modes_index.append(j)
# if acoustic modes are not correctly identified return use
# the first three modes
if len(acoustic_modes_index) != 3:
acoustic_modes_index = [0, 1, 2]
return self.bands[acoustic_modes_index, i]
else:
return self.bands[:3, i]
return None |
def handle_error(self, error, req, schema, error_status_code, error_headers):
    """Handles errors during parsing. Aborts the current HTTP request and
    responds with a 422 error.
    """
    # Fall back to the parser's default validation status when no explicit
    # status code was supplied.
    status_code = error_status_code or self.DEFAULT_VALIDATION_STATUS
    abort(
        status_code,
        exc=error,
        messages=error.messages,
        schema=schema,
        headers=error_headers,
) | Handles errors during parsing. Aborts the current HTTP request and
    responds with a 422 error. | Below is the instruction that describes the task:
### Input:
Handles errors during parsing. Aborts the current HTTP request and
responds with a 422 error.
### Response:
def handle_error(self, error, req, schema, error_status_code, error_headers):
"""Handles errors during parsing. Aborts the current HTTP request and
responds with a 422 error.
"""
status_code = error_status_code or self.DEFAULT_VALIDATION_STATUS
abort(
status_code,
exc=error,
messages=error.messages,
schema=schema,
headers=error_headers,
) |
def getTheta(k, nTrials=100000):
    """
    Estimate a reasonable value of theta for this k.
    """
    theDots = np.zeros(nTrials)
    w1 = getSparseTensor(k, k, nTrials, fixedRange=1.0/k)
    # Self dot product of each random sparse vector.
    for i in range(nTrials):
        theDots[i] = w1[i].dot(w1[i])
    dotMean = theDots.mean()
    print("k=", k, "min/mean/max diag of w dot products",
          theDots.min(), dotMean, theDots.max())
    # Heuristic: set the threshold to half the mean self overlap.
    theta = dotMean / 2.0
    print("Using theta as mean / 2.0 = ", theta)
    return theta, theDots | Estimate a reasonable value of theta for this k. | Below is the instruction that describes the task:
### Input:
Estimate a reasonable value of theta for this k.
### Response:
def getTheta(k, nTrials=100000):
"""
Estimate a reasonable value of theta for this k.
"""
theDots = np.zeros(nTrials)
w1 = getSparseTensor(k, k, nTrials, fixedRange=1.0/k)
for i in range(nTrials):
theDots[i] = w1[i].dot(w1[i])
dotMean = theDots.mean()
print("k=", k, "min/mean/max diag of w dot products",
theDots.min(), dotMean, theDots.max())
theta = dotMean / 2.0
print("Using theta as mean / 2.0 = ", theta)
return theta, theDots |
def get_cap_files(self, *ports):
    """
    :param ports: list of ports to get capture files names for.
    :return: dictionary (port, capture file)
    """
    cap_files = {}
    for port in ports:
        if port.cap_file_name:
            # Read the capture file and split it into individual lines.
            with open(port.cap_file_name) as f:
                cap_files[port] = f.read().splitlines()
        else:
            # No capture file was recorded for this port.
            cap_files[port] = None
return cap_files | :param ports: list of ports to get capture files names for.
    :return: dictionary (port, capture file) | Below is the instruction that describes the task:
### Input:
:param ports: list of ports to get capture files names for.
:return: dictionary (port, capture file)
### Response:
def get_cap_files(self, *ports):
"""
:param ports: list of ports to get capture files names for.
:return: dictionary (port, capture file)
"""
cap_files = {}
for port in ports:
if port.cap_file_name:
with open(port.cap_file_name) as f:
cap_files[port] = f.read().splitlines()
else:
cap_files[port] = None
return cap_files |
def _variable_on_cpu(name, shape, initializer):
    """Helper to create a Variable stored on CPU memory.
    Args:
      name: name of the variable
      shape: list of ints
      initializer: initializer for Variable
    Returns:
      Variable Tensor
    """
    # Explicitly pin the variable to the CPU device.
    with tf.device('/cpu:0'):
        # Use half precision only when the use_fp16 flag is set.
        dtype = tf.float16 if FLAGS.use_fp16 else tf.float32
        var = tf.get_variable(name, shape, initializer=initializer, dtype=dtype)
return var | Helper to create a Variable stored on CPU memory.
Args:
name: name of the variable
shape: list of ints
initializer: initializer for Variable
Returns:
      Variable Tensor | Below is the instruction that describes the task:
### Input:
Helper to create a Variable stored on CPU memory.
Args:
name: name of the variable
shape: list of ints
initializer: initializer for Variable
Returns:
Variable Tensor
### Response:
def _variable_on_cpu(name, shape, initializer):
"""Helper to create a Variable stored on CPU memory.
Args:
name: name of the variable
shape: list of ints
initializer: initializer for Variable
Returns:
Variable Tensor
"""
with tf.device('/cpu:0'):
dtype = tf.float16 if FLAGS.use_fp16 else tf.float32
var = tf.get_variable(name, shape, initializer=initializer, dtype=dtype)
return var |
def OnAttributesToolbarToggle(self, event):
    """Format toolbar toggle event handler"""
    self.main_window.attributes_toolbar.SetGripperVisible(True)
    # Fetch the AUI pane info for the attributes toolbar and toggle it
    # via the common helper.
    attributes_toolbar_info = \
        self.main_window._mgr.GetPane("attributes_toolbar")
    self._toggle_pane(attributes_toolbar_info)
    event.Skip() | Format toolbar toggle event handler | Below is the instruction that describes the task:
### Input:
Format toolbar toggle event handler
### Response:
def OnAttributesToolbarToggle(self, event):
"""Format toolbar toggle event handler"""
self.main_window.attributes_toolbar.SetGripperVisible(True)
attributes_toolbar_info = \
self.main_window._mgr.GetPane("attributes_toolbar")
self._toggle_pane(attributes_toolbar_info)
event.Skip() |
def GetPackages(classification,visibility):
    """Gets a list of Blueprint Packages filtered by classification and visibility.
    https://t3n.zendesk.com/entries/20411357-Get-Packages
    :param classification: package type filter (System, Script, Software)
    :param visibility: package visibility filter (Public, Private, Shared)
    """
    # The *_stoi maps translate the string filter names into the integer
    # codes the v1 API expects.
    r = clc.v1.API.Call('post','Blueprint/GetPackages',
        {'Classification': Blueprint.classification_stoi[classification],'Visibility': Blueprint.visibility_stoi[visibility]})
if int(r['StatusCode']) == 0: return(r['Packages']) | Gets a list of Blueprint Packages filtered by classification and visibility.
https://t3n.zendesk.com/entries/20411357-Get-Packages
:param classification: package type filter (System, Script, Software)
    :param visibility: package visibility filter (Public, Private, Shared) | Below is the instruction that describes the task:
### Input:
Gets a list of Blueprint Packages filtered by classification and visibility.
https://t3n.zendesk.com/entries/20411357-Get-Packages
:param classification: package type filter (System, Script, Software)
:param visibility: package visibility filter (Public, Private, Shared)
### Response:
def GetPackages(classification,visibility):
"""Gets a list of Blueprint Packages filtered by classification and visibility.
https://t3n.zendesk.com/entries/20411357-Get-Packages
:param classification: package type filter (System, Script, Software)
:param visibility: package visibility filter (Public, Private, Shared)
"""
r = clc.v1.API.Call('post','Blueprint/GetPackages',
{'Classification': Blueprint.classification_stoi[classification],'Visibility': Blueprint.visibility_stoi[visibility]})
if int(r['StatusCode']) == 0: return(r['Packages']) |
def site_url(self, url):
"""URL setter and validator for site_url property.
Parameters:
url (str): URL of on Moebooru/Danbooru based sites.
Raises:
PybooruError: When URL scheme or URL are invalid.
"""
# Regular expression to URL validate
regex = re.compile(
r'^(?:http|https)://' # Scheme only HTTP/HTTPS
r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?| \
[A-Z0-9-]{2,}(?<!-)\.?)|' # Domain
r'localhost|' # localhost...
r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}|' # or ipv4
r'\[?[A-F0-9]*:[A-F0-9:]+\]?)' # or ipv6
r'(?::\d+)?' # Port
r'(?:/?|[/?]\S+)$', re.IGNORECASE)
# Validate URL
if re.match('^(?:http|https)://', url):
if re.search(regex, url):
self.__site_url = url
else:
raise PybooruError("Invalid URL: {0}".format(url))
else:
raise PybooruError(
"Invalid URL scheme, use HTTP or HTTPS: {0}".format(url)) | URL setter and validator for site_url property.
Parameters:
url (str): URL of on Moebooru/Danbooru based sites.
Raises:
        PybooruError: When URL scheme or URL are invalid. | Below is the instruction that describes the task:
### Input:
URL setter and validator for site_url property.
Parameters:
url (str): URL of on Moebooru/Danbooru based sites.
Raises:
PybooruError: When URL scheme or URL are invalid.
### Response:
def site_url(self, url):
"""URL setter and validator for site_url property.
Parameters:
url (str): URL of on Moebooru/Danbooru based sites.
Raises:
PybooruError: When URL scheme or URL are invalid.
"""
# Regular expression to URL validate
regex = re.compile(
r'^(?:http|https)://' # Scheme only HTTP/HTTPS
r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?| \
[A-Z0-9-]{2,}(?<!-)\.?)|' # Domain
r'localhost|' # localhost...
r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}|' # or ipv4
r'\[?[A-F0-9]*:[A-F0-9:]+\]?)' # or ipv6
r'(?::\d+)?' # Port
r'(?:/?|[/?]\S+)$', re.IGNORECASE)
# Validate URL
if re.match('^(?:http|https)://', url):
if re.search(regex, url):
self.__site_url = url
else:
raise PybooruError("Invalid URL: {0}".format(url))
else:
raise PybooruError(
"Invalid URL scheme, use HTTP or HTTPS: {0}".format(url)) |
def _serialize(self, include_run_logs=False, strict_json=False):
    """ Serialize a representation of this Task to a Python dict. """
    result = {'command': self.command,
              'name': self.name,
              'started_at': self.started_at,
              'completed_at': self.completed_at,
              'success': self.successful,
              'soft_timeout': self.soft_timeout,
              'hard_timeout': self.hard_timeout,
              'hostname': self.hostname}
    if include_run_logs:
        # Attach this task's section of the most recent run log, if any.
        last_run = self.backend.get_latest_run_log(self.parent_job.job_id,
                                                   self.name)
        if last_run:
            run_log = last_run.get('tasks', {}).get(self.name, {})
            if run_log:
                result['run_log'] = run_log
    if strict_json:
        # Round-trip through the strict encoder so non-JSON-native values
        # are coerced into JSON-safe equivalents.
        result = json.loads(json.dumps(result, cls=StrictJSONEncoder))
    return result | Serialize a representation of this Task to a Python dict. | Below is the instruction that describes the task:
### Input:
Serialize a representation of this Task to a Python dict.
### Response:
def _serialize(self, include_run_logs=False, strict_json=False):
""" Serialize a representation of this Task to a Python dict. """
result = {'command': self.command,
'name': self.name,
'started_at': self.started_at,
'completed_at': self.completed_at,
'success': self.successful,
'soft_timeout': self.soft_timeout,
'hard_timeout': self.hard_timeout,
'hostname': self.hostname}
if include_run_logs:
last_run = self.backend.get_latest_run_log(self.parent_job.job_id,
self.name)
if last_run:
run_log = last_run.get('tasks', {}).get(self.name, {})
if run_log:
result['run_log'] = run_log
if strict_json:
result = json.loads(json.dumps(result, cls=StrictJSONEncoder))
return result |
def download_file_from_google_drive(file_id, root, filename=None, md5=None):
    """Download a Google Drive file from and place it in root.
    Args:
        file_id (str): id of file to be downloaded
        root (str): Directory to place downloaded file in
        filename (str, optional): Name to save the file under. If None, use the id of the file.
        md5 (str, optional): MD5 checksum of the download. If None, do not check
    """
    # Based on https://stackoverflow.com/questions/38511444/python-download-files-from-google-drive-using-url
    import requests
    url = "https://docs.google.com/uc?export=download"
    root = os.path.expanduser(root)
    if not filename:
        filename = file_id
    fpath = os.path.join(root, filename)
    makedir_exist_ok(root)
    # Skip the download when a verified copy already exists on disk.
    if os.path.isfile(fpath) and check_integrity(fpath, md5):
        print('Using downloaded and verified file: ' + fpath)
    else:
        session = requests.Session()
        response = session.get(url, params={'id': file_id}, stream=True)
        token = _get_confirm_token(response)
        # A token in the first response means Drive requires an extra
        # confirmation step; retry the request with the token attached.
        if token:
            params = {'id': file_id, 'confirm': token}
            response = session.get(url, params=params, stream=True)
_save_response_content(response, fpath) | Download a Google Drive file from and place it in root.
Args:
file_id (str): id of file to be downloaded
root (str): Directory to place downloaded file in
filename (str, optional): Name to save the file under. If None, use the id of the file.
        md5 (str, optional): MD5 checksum of the download. If None, do not check | Below is the instruction that describes the task:
### Input:
Download a Google Drive file from and place it in root.
Args:
file_id (str): id of file to be downloaded
root (str): Directory to place downloaded file in
filename (str, optional): Name to save the file under. If None, use the id of the file.
md5 (str, optional): MD5 checksum of the download. If None, do not check
### Response:
def download_file_from_google_drive(file_id, root, filename=None, md5=None):
"""Download a Google Drive file from and place it in root.
Args:
file_id (str): id of file to be downloaded
root (str): Directory to place downloaded file in
filename (str, optional): Name to save the file under. If None, use the id of the file.
md5 (str, optional): MD5 checksum of the download. If None, do not check
"""
# Based on https://stackoverflow.com/questions/38511444/python-download-files-from-google-drive-using-url
import requests
url = "https://docs.google.com/uc?export=download"
root = os.path.expanduser(root)
if not filename:
filename = file_id
fpath = os.path.join(root, filename)
makedir_exist_ok(root)
if os.path.isfile(fpath) and check_integrity(fpath, md5):
print('Using downloaded and verified file: ' + fpath)
else:
session = requests.Session()
response = session.get(url, params={'id': file_id}, stream=True)
token = _get_confirm_token(response)
if token:
params = {'id': file_id, 'confirm': token}
response = session.get(url, params=params, stream=True)
_save_response_content(response, fpath) |
def execute(self):
    """Run Checkstyle on all found non-synthetic source files."""
    python_tgts = self.context.targets(
        lambda tgt: isinstance(tgt, (PythonTarget))
    )
    # Nothing to check without Python targets.
    if not python_tgts:
        return 0
    interpreter_cache = PythonInterpreterCache.global_instance()
    with self.invalidated(self.get_targets(self._is_checked)) as invalidation_check:
        failure_count = 0
        # Batch the invalid targets by interpreter compatibility so each
        # batch can be checked with a single suitable interpreter.
        tgts_by_compatibility, _ = interpreter_cache.partition_targets_by_compatibility(
            [vt.target for vt in invalidation_check.invalid_vts]
        )
        for filters, targets in tgts_by_compatibility.items():
            sources = self.calculate_sources([tgt for tgt in targets])
            if sources:
                allowed_interpreters = set(interpreter_cache.setup(filters=filters))
                if not allowed_interpreters:
                    raise TaskError('No valid interpreters found for targets: {}\n(filters: {})'
                                    .format(targets, filters))
                interpreter = min(allowed_interpreters)
                failure_count += self.checkstyle(interpreter, sources)
        if failure_count > 0 and self.get_options().fail:
            raise TaskError('{} Python Style issues found. You may try `./pants fmt <targets>`'
                            .format(failure_count))
    return failure_count | Run Checkstyle on all found non-synthetic source files. | Below is the instruction that describes the task:
### Input:
Run Checkstyle on all found non-synthetic source files.
### Response:
def execute(self):
""""Run Checkstyle on all found non-synthetic source files."""
python_tgts = self.context.targets(
lambda tgt: isinstance(tgt, (PythonTarget))
)
if not python_tgts:
return 0
interpreter_cache = PythonInterpreterCache.global_instance()
with self.invalidated(self.get_targets(self._is_checked)) as invalidation_check:
failure_count = 0
tgts_by_compatibility, _ = interpreter_cache.partition_targets_by_compatibility(
[vt.target for vt in invalidation_check.invalid_vts]
)
for filters, targets in tgts_by_compatibility.items():
sources = self.calculate_sources([tgt for tgt in targets])
if sources:
allowed_interpreters = set(interpreter_cache.setup(filters=filters))
if not allowed_interpreters:
raise TaskError('No valid interpreters found for targets: {}\n(filters: {})'
.format(targets, filters))
interpreter = min(allowed_interpreters)
failure_count += self.checkstyle(interpreter, sources)
if failure_count > 0 and self.get_options().fail:
raise TaskError('{} Python Style issues found. You may try `./pants fmt <targets>`'
.format(failure_count))
return failure_count |
def save(self):
    """Save this object to the database. Behaves very similarly to
    whatever collection.save(document) would, ie. does upserts on _id
    presence. If methods ``pre_save`` or ``post_save`` are defined, those
    are called. If there is a spec document, then the document is
    validated against it after the ``pre_save`` hook but before the save."""
    # Optional user hook that runs before validation and the write.
    if hasattr(self, 'pre_save'):
        self.pre_save()
    # _collection_key has the form "<database>.<collection>".
    database, collection = self._collection_key.split('.')
    self.validate()
    _id = current()[database][collection].save(dict(self))
    # Remember the upserted _id so later saves update the same document.
    if _id: self._id = _id
    if hasattr(self, 'post_save'):
self.post_save() | Save this object to the database. Behaves very similarly to
whatever collection.save(document) would, ie. does upserts on _id
presence. If methods ``pre_save`` or ``post_save`` are defined, those
are called. If there is a spec document, then the document is
    validated against it after the ``pre_save`` hook but before the save. | Below is the instruction that describes the task:
### Input:
Save this object to the database. Behaves very similarly to
whatever collection.save(document) would, ie. does upserts on _id
presence. If methods ``pre_save`` or ``post_save`` are defined, those
are called. If there is a spec document, then the document is
validated against it after the ``pre_save`` hook but before the save.
### Response:
def save(self):
"""Save this object to the database. Behaves very similarly to
whatever collection.save(document) would, ie. does upserts on _id
presence. If methods ``pre_save`` or ``post_save`` are defined, those
are called. If there is a spec document, then the document is
validated against it after the ``pre_save`` hook but before the save."""
if hasattr(self, 'pre_save'):
self.pre_save()
database, collection = self._collection_key.split('.')
self.validate()
_id = current()[database][collection].save(dict(self))
if _id: self._id = _id
if hasattr(self, 'post_save'):
self.post_save() |
def _construct_callbacks(self):
    """
    Initializes any callbacks for streams which have defined
    the plotted object as a source.
    """
    cb_classes = set()
    registry = list(Stream.registry.items())
    callbacks = Stream._callbacks['bokeh']
    for source in self.link_sources:
        # Streams registered against this source, or against any object
        # that shares its plot id.
        streams = [
            s for src, streams in registry for s in streams
            if src is source or (src._plot_id is not None and
                                 src._plot_id == source._plot_id)]
        cb_classes |= {(callbacks[type(stream)], stream) for stream in streams
                       if type(stream) in callbacks and stream.linked
                       and stream.source is not None}
    cbs = []
    sorted_cbs = sorted(cb_classes, key=lambda x: id(x[0]))
    # One callback instance per callback class, fed all of its streams.
    for cb, group in groupby(sorted_cbs, lambda x: x[0]):
        cb_streams = [s for _, s in group]
        cbs.append(cb(self, cb_streams, source))
return cbs | Initializes any callbacks for streams which have defined
    the plotted object as a source. | Below is the instruction that describes the task:
### Input:
Initializes any callbacks for streams which have defined
the plotted object as a source.
### Response:
def _construct_callbacks(self):
"""
Initializes any callbacks for streams which have defined
the plotted object as a source.
"""
cb_classes = set()
registry = list(Stream.registry.items())
callbacks = Stream._callbacks['bokeh']
for source in self.link_sources:
streams = [
s for src, streams in registry for s in streams
if src is source or (src._plot_id is not None and
src._plot_id == source._plot_id)]
cb_classes |= {(callbacks[type(stream)], stream) for stream in streams
if type(stream) in callbacks and stream.linked
and stream.source is not None}
cbs = []
sorted_cbs = sorted(cb_classes, key=lambda x: id(x[0]))
for cb, group in groupby(sorted_cbs, lambda x: x[0]):
cb_streams = [s for _, s in group]
cbs.append(cb(self, cb_streams, source))
return cbs |
def has_comic(name):
    """Check if comic name already exists."""
    # Candidate keys are restricted to the Creators and GoComics
    # namespaces; matching is case-insensitive.
    names = [
        ("Creators/%s" % name).lower(),
        ("GoComics/%s" % name).lower(),
    ]
    for scraperclass in get_scraperclasses():
        lname = scraperclass.getName().lower()
        if lname in names:
            return True
    return False | Check if comic name already exists. | Below is the instruction that describes the task:
### Input:
Check if comic name already exists.
### Response:
def has_comic(name):
"""Check if comic name already exists."""
names = [
("Creators/%s" % name).lower(),
("GoComics/%s" % name).lower(),
]
for scraperclass in get_scraperclasses():
lname = scraperclass.getName().lower()
if lname in names:
return True
return False |
def _admx_policy_parent_walk(path,
                             policy_namespace,
                             parent_category,
                             policy_nsmap,
                             return_full_policy_names,
                             adml_language):
    '''
    helper function to recursively walk up the ADMX namespaces and build the
    hierarchy for the policy
    '''
    admx_policy_definitions = _get_policy_definitions(language=adml_language)
    category_xpath_string = '/policyDefinitions/categories/{0}:category[@name="{1}"]'
    using_xpath_string = '/policyDefinitions/policyNamespaces/{0}:using'
    if parent_category.find(':') >= 0:
        # the parent is in another namespace
        policy_namespace = parent_category.split(':')[0]
        parent_category = parent_category.split(':')[1]
        using_xpath_string = using_xpath_string.format(policy_namespace)
        # merge the referenced namespace's prefix mappings into ours so the
        # xpath lookups below can resolve the prefix
        policy_nsmap = dictupdate.update(policy_nsmap,
                                         _buildElementNsmap(admx_policy_definitions.xpath(using_xpath_string,
                                                                                          namespaces=policy_nsmap)))
    category_xpath_string = category_xpath_string.format(policy_namespace, parent_category)
    if admx_policy_definitions.xpath(category_xpath_string, namespaces=policy_nsmap):
        tparent_category = admx_policy_definitions.xpath(category_xpath_string,
                                                         namespaces=policy_nsmap)[0]
        this_parent_name = _getFullPolicyName(
            policy_item=tparent_category,
            policy_name=tparent_category.attrib['name'],
            return_full_policy_names=return_full_policy_names,
            adml_language=adml_language)
        # record this ancestor, then keep climbing while a parentCategory
        # reference exists
        path.append(this_parent_name)
        if tparent_category.xpath('{0}:parentCategory/@ref'.format(policy_namespace), namespaces=policy_nsmap):
            # parent has a parent
            path = _admx_policy_parent_walk(
                path=path,
                policy_namespace=policy_namespace,
                parent_category=tparent_category.xpath('{0}:parentCategory/@ref'.format(policy_namespace), namespaces=policy_nsmap)[0],
                policy_nsmap=policy_nsmap,
                return_full_policy_names=return_full_policy_names,
                adml_language=adml_language)
return path | helper function to recursively walk up the ADMX namespaces and build the
    hierarchy for the policy | Below is the instruction that describes the task:
### Input:
helper function to recursively walk up the ADMX namespaces and build the
hierarchy for the policy
### Response:
def _admx_policy_parent_walk(path,
policy_namespace,
parent_category,
policy_nsmap,
return_full_policy_names,
adml_language):
'''
helper function to recursively walk up the ADMX namespaces and build the
hierarchy for the policy
'''
admx_policy_definitions = _get_policy_definitions(language=adml_language)
category_xpath_string = '/policyDefinitions/categories/{0}:category[@name="{1}"]'
using_xpath_string = '/policyDefinitions/policyNamespaces/{0}:using'
if parent_category.find(':') >= 0:
# the parent is in another namespace
policy_namespace = parent_category.split(':')[0]
parent_category = parent_category.split(':')[1]
using_xpath_string = using_xpath_string.format(policy_namespace)
policy_nsmap = dictupdate.update(policy_nsmap,
_buildElementNsmap(admx_policy_definitions.xpath(using_xpath_string,
namespaces=policy_nsmap)))
category_xpath_string = category_xpath_string.format(policy_namespace, parent_category)
if admx_policy_definitions.xpath(category_xpath_string, namespaces=policy_nsmap):
tparent_category = admx_policy_definitions.xpath(category_xpath_string,
namespaces=policy_nsmap)[0]
this_parent_name = _getFullPolicyName(
policy_item=tparent_category,
policy_name=tparent_category.attrib['name'],
return_full_policy_names=return_full_policy_names,
adml_language=adml_language)
path.append(this_parent_name)
if tparent_category.xpath('{0}:parentCategory/@ref'.format(policy_namespace), namespaces=policy_nsmap):
# parent has a parent
path = _admx_policy_parent_walk(
path=path,
policy_namespace=policy_namespace,
parent_category=tparent_category.xpath('{0}:parentCategory/@ref'.format(policy_namespace), namespaces=policy_nsmap)[0],
policy_nsmap=policy_nsmap,
return_full_policy_names=return_full_policy_names,
adml_language=adml_language)
return path |
def _add_sync_queues_and_barrier(self, name, dependencies):
    """Adds ops to enqueue on all worker queues.
    Args:
      name: prefixed for the shared_name of ops.
      dependencies: control dependency from ops.
    Returns:
      an op that should be used as control dependency before starting next step.
    """
    # Rotate through the sync-queue devices to spread the queues evenly.
    self._sync_queue_counter += 1
    with tf.device(self.sync_queue_devices[self._sync_queue_counter % len(self.sync_queue_devices)]):
        # One FIFO queue per worker; the shared_name lets every worker
        # open the same underlying queue.
        sync_queues = [
            tf.FIFOQueue(self.num_worker, [tf.bool], shapes=[[]],
                         shared_name='%s%s' % (name, i))
            for i in range(self.num_worker)]
        queue_ops = []
        # For each other worker, add an entry in a queue, signaling that it can finish this step.
        token = tf.constant(False)
        with tf.control_dependencies(dependencies):
            for i, q in enumerate(sync_queues):
                if i != self.task_index:
                    queue_ops.append(q.enqueue(token))
        # Drain tokens off queue for this worker, one for each other worker.
        queue_ops.append(
            sync_queues[self.task_index].dequeue_many(len(sync_queues) - 1))
return tf.group(*queue_ops, name=name) | Adds ops to enqueue on all worker queues.
Args:
name: prefixed for the shared_name of ops.
dependencies: control dependency from ops.
Returns:
      an op that should be used as control dependency before starting next step. | Below is the instruction that describes the task:
### Input:
Adds ops to enqueue on all worker queues.
Args:
name: prefixed for the shared_name of ops.
dependencies: control dependency from ops.
Returns:
an op that should be used as control dependency before starting next step.
### Response:
def _add_sync_queues_and_barrier(self, name, dependencies):
"""Adds ops to enqueue on all worker queues.
Args:
name: prefixed for the shared_name of ops.
dependencies: control dependency from ops.
Returns:
an op that should be used as control dependency before starting next step.
"""
self._sync_queue_counter += 1
with tf.device(self.sync_queue_devices[self._sync_queue_counter % len(self.sync_queue_devices)]):
sync_queues = [
tf.FIFOQueue(self.num_worker, [tf.bool], shapes=[[]],
shared_name='%s%s' % (name, i))
for i in range(self.num_worker)]
queue_ops = []
# For each other worker, add an entry in a queue, signaling that it can finish this step.
token = tf.constant(False)
with tf.control_dependencies(dependencies):
for i, q in enumerate(sync_queues):
if i != self.task_index:
queue_ops.append(q.enqueue(token))
# Drain tokens off queue for this worker, one for each other worker.
queue_ops.append(
sync_queues[self.task_index].dequeue_many(len(sync_queues) - 1))
return tf.group(*queue_ops, name=name) |
def add_edge(self, edge):
    """
    Add edge (u, v) to the graph. Raises InvariantError if adding the edge
    would form a cycle.
    """
    u, v = edge
    # Reject cycle-forming edges and self-loops before mutating any state.
    both_exist = u in self.vertices and v in self.vertices
    # Using `is` because if they belong to the same component, they MUST
    # share the same set object!
    if both_exist and self.components[u] is self.components[v]:
        # Both vertices are part of the same connected component.
        raise InvariantError('Adding %r would form a cycle' % (edge,))
    if u == v:
        raise InvariantError('Cannot add loop: %r' % (edge,))
    # Ensure the vertices exist in the graph.
    self.add_vertex(u)
    self.add_vertex(v)
    # Add the edges to each other.
    self._vertices[u].add(v)
    self._vertices[v].add(u)
    # Add all of the smaller components to the bigger one.
    smaller_component, bigger_component = self.sort_components(u, v)
    for vertex in smaller_component:
        bigger_component.add(vertex)
        # And with this assignment, say bye-bye to the smaller component.
self.components[vertex] = bigger_component | Add edge (u, v) to the graph. Raises InvariantError if adding the edge
    would form a cycle. | Below is the instruction that describes the task:
### Input:
Add edge (u, v) to the graph. Raises InvariantError if adding the edge
would form a cycle.
### Response:
def add_edge(self, edge):
"""
Add edge (u, v) to the graph. Raises InvariantError if adding the edge
would form a cycle.
"""
u, v = edge
both_exist = u in self.vertices and v in self.vertices
# Using `is` because if they belong to the same component, they MUST
# share the same set object!
if both_exist and self.components[u] is self.components[v]:
# Both vertices are part of the same connected component.
raise InvariantError('Adding %r would form a cycle' % (edge,))
if u == v:
raise InvariantError('Cannot add loop: %r' % (edge,))
# Ensure the vertices exist in the graph.
self.add_vertex(u)
self.add_vertex(v)
# Add the edges to each other.
self._vertices[u].add(v)
self._vertices[v].add(u)
# Add all of the smaller components to the bigger one.
smaller_component, bigger_component = self.sort_components(u, v)
for vertex in smaller_component:
bigger_component.add(vertex)
# And with this assignment, say bye-bye to the smaller component.
self.components[vertex] = bigger_component |
def parse_params(self,
                 y_target=None,
                 image_target=None,
                 initial_num_evals=100,
                 max_num_evals=10000,
                 stepsize_search='grid_search',
                 num_iterations=64,
                 gamma=0.01,
                 constraint='l2',
                 batch_size=128,
                 verbose=True,
                 clip_min=0,
                 clip_max=1):
    """
    :param y: A tensor of shape (1, nb_classes) for true labels.
    :param y_target: A tensor of shape (1, nb_classes) for target labels.
                     Required for targeted attack.
    :param image_target: A tensor of shape (1, **image shape) for initial
                         target images. Required for targeted attack.
    :param initial_num_evals: initial number of evaluations for
                              gradient estimation.
    :param max_num_evals: maximum number of evaluations for gradient estimation.
    :param stepsize_search: How to search for stepsize; choices are
                            'geometric_progression', 'grid_search'.
                            'geometric progression' initializes the stepsize
                            by ||x_t - x||_p / sqrt(iteration), and keep
                            decreasing by half until reaching the target
                            side of the boundary. 'grid_search' chooses the
                            optimal epsilon over a grid, in the scale of
                            ||x_t - x||_p.
    :param num_iterations: The number of iterations.
    :param gamma: The binary search threshold theta is gamma / sqrt(d) for
                  l2 attack and gamma / d for linf attack.
    :param constraint: The distance to optimize; choices are 'l2', 'linf'.
    :param batch_size: batch_size for model prediction.
    :param verbose: (boolean) Whether distance at each step is printed.
    :param clip_min: (optional float) Minimum input component value
    :param clip_max: (optional float) Maximum input component value
    """
    # ``y`` is not a parameter here; simply store every supplied attack
    # setting on the instance for later use.
    self.y_target = y_target
    self.image_target = image_target
    self.initial_num_evals = initial_num_evals
    self.max_num_evals = max_num_evals
    self.stepsize_search = stepsize_search
    self.num_iterations = num_iterations
    self.gamma = gamma
    self.constraint = constraint
    self.batch_size = batch_size
    self.clip_min = clip_min
    self.clip_max = clip_max
self.verbose = verbose | :param y: A tensor of shape (1, nb_classes) for true labels.
:param y_target: A tensor of shape (1, nb_classes) for target labels.
Required for targeted attack.
:param image_target: A tensor of shape (1, **image shape) for initial
target images. Required for targeted attack.
:param initial_num_evals: initial number of evaluations for
gradient estimation.
:param max_num_evals: maximum number of evaluations for gradient estimation.
:param stepsize_search: How to search for stepsize; choices are
'geometric_progression', 'grid_search'.
'geometric progression' initializes the stepsize
by ||x_t - x||_p / sqrt(iteration), and keep
decreasing by half until reaching the target
side of the boundary. 'grid_search' chooses the
optimal epsilon over a grid, in the scale of
||x_t - x||_p.
:param num_iterations: The number of iterations.
:param gamma: The binary search threshold theta is gamma / sqrt(d) for
l2 attack and gamma / d for linf attack.
:param constraint: The distance to optimize; choices are 'l2', 'linf'.
:param batch_size: batch_size for model prediction.
:param verbose: (boolean) Whether distance at each step is printed.
:param clip_min: (optional float) Minimum input component value
    :param clip_max: (optional float) Maximum input component value | Below is the instruction that describes the task:
### Input:
:param y: A tensor of shape (1, nb_classes) for true labels.
:param y_target: A tensor of shape (1, nb_classes) for target labels.
Required for targeted attack.
:param image_target: A tensor of shape (1, **image shape) for initial
target images. Required for targeted attack.
:param initial_num_evals: initial number of evaluations for
gradient estimation.
:param max_num_evals: maximum number of evaluations for gradient estimation.
:param stepsize_search: How to search for stepsize; choices are
'geometric_progression', 'grid_search'.
'geometric progression' initializes the stepsize
by ||x_t - x||_p / sqrt(iteration), and keep
decreasing by half until reaching the target
side of the boundary. 'grid_search' chooses the
optimal epsilon over a grid, in the scale of
||x_t - x||_p.
:param num_iterations: The number of iterations.
:param gamma: The binary search threshold theta is gamma / sqrt(d) for
l2 attack and gamma / d for linf attack.
:param constraint: The distance to optimize; choices are 'l2', 'linf'.
:param batch_size: batch_size for model prediction.
:param verbose: (boolean) Whether distance at each step is printed.
:param clip_min: (optional float) Minimum input component value
:param clip_max: (optional float) Maximum input component value
### Response:
def parse_params(self,
                 y_target=None,
                 image_target=None,
                 initial_num_evals=100,
                 max_num_evals=10000,
                 stepsize_search='grid_search',
                 num_iterations=64,
                 gamma=0.01,
                 constraint='l2',
                 batch_size=128,
                 verbose=True,
                 clip_min=0,
                 clip_max=1):
    """
    Record the attack's hyperparameters as attributes on ``self``.

    :param y_target: A tensor of shape (1, nb_classes) for target labels.
                     Required for targeted attack.
    :param image_target: A tensor of shape (1, **image shape) for initial
                         target images. Required for targeted attack.
    :param initial_num_evals: initial number of evaluations for
                              gradient estimation.
    :param max_num_evals: maximum number of evaluations for gradient
                          estimation.
    :param stepsize_search: How to search for stepsize; choices are
                            'geometric_progression', 'grid_search'.
    :param num_iterations: The number of iterations.
    :param gamma: The binary search threshold theta is gamma / sqrt(d) for
                  l2 attack and gamma / d for linf attack.
    :param constraint: The distance to optimize; choices are 'l2', 'linf'.
    :param batch_size: batch_size for model prediction.
    :param verbose: (boolean) Whether distance at each step is printed.
    :param clip_min: (optional float) Minimum input component value
    :param clip_max: (optional float) Maximum input component value
    """
    # NOTE: the true-label argument ``y`` is deliberately ignored by this
    # attack, which is why it does not appear in the signature.
    settings = (
        ('y_target', y_target),
        ('image_target', image_target),
        ('initial_num_evals', initial_num_evals),
        ('max_num_evals', max_num_evals),
        ('stepsize_search', stepsize_search),
        ('num_iterations', num_iterations),
        ('gamma', gamma),
        ('constraint', constraint),
        ('batch_size', batch_size),
        ('clip_min', clip_min),
        ('clip_max', clip_max),
        ('verbose', verbose),
    )
    for attr_name, attr_value in settings:
        setattr(self, attr_name, attr_value)
def set_variables(self, data):
"""
Set variables for the network.
Parameters
----------
data: dict
dict for variable in the form of example as shown.
Examples
--------
>>> from pgmpy.readwrite.XMLBeliefNetwork import XBNWriter
>>> writer = XBNWriter()
>>> writer.set_variables({'a': {'TYPE': 'discrete', 'XPOS': '13495',
... 'YPOS': '10465', 'DESCRIPTION': '(a) Metastatic Cancer',
... 'STATES': ['Present', 'Absent']}
... 'b': {'TYPE': 'discrete', 'XPOS': '11290',
... 'YPOS': '11965', 'DESCRIPTION': '(b) Serum Calcium Increase',
... 'STATES': ['Present', 'Absent']}})
"""
variables = etree.SubElement(self.bnmodel, "VARIABLES")
for var in sorted(data):
variable = etree.SubElement(variables, 'VAR', attrib={'NAME': var, 'TYPE': data[var]['TYPE'],
'XPOS': data[var]['XPOS'], 'YPOS': data[var]['YPOS']})
etree.SubElement(variable, 'DESCRIPTION', attrib={'DESCRIPTION': data[var]['DESCRIPTION']})
for state in data[var]['STATES']:
etree.SubElement(variable, 'STATENAME').text = state | Set variables for the network.
Parameters
----------
data: dict
dict for variable in the form of example as shown.
Examples
--------
>>> from pgmpy.readwrite.XMLBeliefNetwork import XBNWriter
>>> writer = XBNWriter()
>>> writer.set_variables({'a': {'TYPE': 'discrete', 'XPOS': '13495',
... 'YPOS': '10465', 'DESCRIPTION': '(a) Metastatic Cancer',
... 'STATES': ['Present', 'Absent']}
... 'b': {'TYPE': 'discrete', 'XPOS': '11290',
... 'YPOS': '11965', 'DESCRIPTION': '(b) Serum Calcium Increase',
... 'STATES': ['Present', 'Absent']}}) | Below is the instruction that describes the task:
### Input:
Set variables for the network.
Parameters
----------
data: dict
dict for variable in the form of example as shown.
Examples
--------
>>> from pgmpy.readwrite.XMLBeliefNetwork import XBNWriter
>>> writer = XBNWriter()
>>> writer.set_variables({'a': {'TYPE': 'discrete', 'XPOS': '13495',
... 'YPOS': '10465', 'DESCRIPTION': '(a) Metastatic Cancer',
... 'STATES': ['Present', 'Absent']}
... 'b': {'TYPE': 'discrete', 'XPOS': '11290',
... 'YPOS': '11965', 'DESCRIPTION': '(b) Serum Calcium Increase',
... 'STATES': ['Present', 'Absent']}})
### Response:
def set_variables(self, data):
    """
    Set variables for the network.

    Parameters
    ----------
    data: dict
        dict for variable in the form of example as shown.

    Examples
    --------
    >>> from pgmpy.readwrite.XMLBeliefNetwork import XBNWriter
    >>> writer = XBNWriter()
    >>> writer.set_variables({'a': {'TYPE': 'discrete', 'XPOS': '13495',
    ...                             'YPOS': '10465', 'DESCRIPTION': '(a) Metastatic Cancer',
    ...                             'STATES': ['Present', 'Absent']},
    ...                       'b': {'TYPE': 'discrete', 'XPOS': '11290',
    ...                             'YPOS': '11965', 'DESCRIPTION': '(b) Serum Calcium Increase',
    ...                             'STATES': ['Present', 'Absent']}})
    """
    container = etree.SubElement(self.bnmodel, "VARIABLES")
    # Deterministic output: variables are emitted in sorted-name order.
    for name in sorted(data):
        spec = data[name]
        attributes = {'NAME': name, 'TYPE': spec['TYPE'],
                      'XPOS': spec['XPOS'], 'YPOS': spec['YPOS']}
        var_element = etree.SubElement(container, 'VAR', attrib=attributes)
        etree.SubElement(var_element, 'DESCRIPTION',
                         attrib={'DESCRIPTION': spec['DESCRIPTION']})
        for state_name in spec['STATES']:
            etree.SubElement(var_element, 'STATENAME').text = state_name
def parse_hs2015(heilman_filepath):
"""convert the output of the Heilman and Sagae (2015) discourse parser
into a nltk.ParentedTree instance.
Parameters
----------
heilman_filepath : str
path to a file containing the output of Heilman and Sagae's 2015
discourse parser
Returns
-------
parented_tree : nltk.ParentedTree
nltk.ParentedTree representation of the given Heilman/Sagae RST tree
edus : list(list(unicode))
a list of EDUs, where each EDU is represented as
a list of tokens
"""
with open(heilman_filepath, 'r') as parsed_file:
heilman_json = json.load(parsed_file)
edus = heilman_json['edu_tokens']
# the Heilman/Sagae parser can theoretically produce more than one parse,
# but I've never seen more than one, so we'll just take the first one.
scored_rst_tree = heilman_json['scored_rst_trees'][0]
tree_str = scored_rst_tree['tree']
parented_tree = nltk.ParentedTree.fromstring(tree_str)
_add_edus_to_tree(parented_tree, edus)
return parented_tree, edus | convert the output of the Heilman and Sagae (2015) discourse parser
into a nltk.ParentedTree instance.
Parameters
----------
heilman_filepath : str
path to a file containing the output of Heilman and Sagae's 2015
discourse parser
Returns
-------
parented_tree : nltk.ParentedTree
nltk.ParentedTree representation of the given Heilman/Sagae RST tree
edus : list(list(unicode))
a list of EDUs, where each EDU is represented as
a list of tokens | Below is the instruction that describes the task:
### Input:
convert the output of the Heilman and Sagae (2015) discourse parser
into a nltk.ParentedTree instance.
Parameters
----------
heilman_filepath : str
path to a file containing the output of Heilman and Sagae's 2015
discourse parser
Returns
-------
parented_tree : nltk.ParentedTree
nltk.ParentedTree representation of the given Heilman/Sagae RST tree
edus : list(list(unicode))
a list of EDUs, where each EDU is represented as
a list of tokens
### Response:
def parse_hs2015(heilman_filepath):
    """convert the output of the Heilman and Sagae (2015) discourse parser
    into a nltk.ParentedTree instance.

    Parameters
    ----------
    heilman_filepath : str
        path to a file containing the output of Heilman and Sagae's 2015
        discourse parser

    Returns
    -------
    parented_tree : nltk.ParentedTree
        nltk.ParentedTree representation of the given Heilman/Sagae RST tree
    edus : list(list(unicode))
        a list of EDUs, where each EDU is represented as a list of tokens
    """
    with open(heilman_filepath, 'r') as parsed_file:
        parser_output = json.load(parsed_file)

    edus = parser_output['edu_tokens']

    # The Heilman/Sagae parser can theoretically produce more than one
    # parse, but in practice only one is emitted, so we take the first.
    best_parse = parser_output['scored_rst_trees'][0]
    parented_tree = nltk.ParentedTree.fromstring(best_parse['tree'])

    _add_edus_to_tree(parented_tree, edus)
    return parented_tree, edus
def search(self, remote_path, keyword, recurrent='0', **kwargs):
"""按文件名搜索文件(不支持查找目录).
:param remote_path: 需要检索的目录路径,路径必须以 /apps/ 开头。
.. warning::
* 路径长度限制为1000;
* 径中不能包含以下字符:``\\\\ ? | " > < : *``;
* 文件名或路径名开头结尾不能是 ``.``
或空白字符,空白字符包括:
``\\r, \\n, \\t, 空格, \\0, \\x0B`` 。
:type remote_path: str
:param keyword: 关键词
:type keyword: str
:param recurrent: 是否递归。
* "0"表示不递归
* "1"表示递归
:type recurrent: str
:return: Response 对象
"""
params = {
'path': remote_path,
'wd': keyword,
're': recurrent,
}
return self._request('file', 'search', extra_params=params, **kwargs) | 按文件名搜索文件(不支持查找目录).
:param remote_path: 需要检索的目录路径,路径必须以 /apps/ 开头。
.. warning::
* 路径长度限制为1000;
* 径中不能包含以下字符:``\\\\ ? | " > < : *``;
* 文件名或路径名开头结尾不能是 ``.``
或空白字符,空白字符包括:
``\\r, \\n, \\t, 空格, \\0, \\x0B`` 。
:type remote_path: str
:param keyword: 关键词
:type keyword: str
:param recurrent: 是否递归。
* "0"表示不递归
* "1"表示递归
:type recurrent: str
:return: Response 对象 | Below is the the instruction that describes the task:
### Input:
按文件名搜索文件(不支持查找目录).
:param remote_path: 需要检索的目录路径,路径必须以 /apps/ 开头。
.. warning::
* 路径长度限制为1000;
* 径中不能包含以下字符:``\\\\ ? | " > < : *``;
* 文件名或路径名开头结尾不能是 ``.``
或空白字符,空白字符包括:
``\\r, \\n, \\t, 空格, \\0, \\x0B`` 。
:type remote_path: str
:param keyword: 关键词
:type keyword: str
:param recurrent: 是否递归。
* "0"表示不递归
* "1"表示递归
:type recurrent: str
:return: Response 对象
### Response:
def search(self, remote_path, keyword, recurrent='0', **kwargs):
    """Search files by name (searching for directories is not supported).

    :param remote_path: directory path to search; must start with /apps/.

        .. warning::
            * the path length is limited to 1000 characters;
            * the path must not contain the characters
              backslash, ``?``, ``|``, ``"``, ``>``, ``<``, ``:``, ``*``;
            * a file or path name must not begin or end with ``.`` or
              whitespace (CR, LF, TAB, space, NUL, vertical tab).
    :type remote_path: str
    :param keyword: search keyword
    :type keyword: str
    :param recurrent: whether to search recursively.
        * "0" means non-recursive
        * "1" means recursive
    :type recurrent: str
    :return: Response object
    """
    query = {
        'path': remote_path,
        'wd': keyword,
        're': recurrent,
    }
    return self._request('file', 'search', extra_params=query, **kwargs)
def arp_access_list_permit_permit_list_ip_type(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
arp = ET.SubElement(config, "arp", xmlns="urn:brocade.com:mgmt:brocade-dai")
access_list = ET.SubElement(arp, "access-list")
acl_name_key = ET.SubElement(access_list, "acl-name")
acl_name_key.text = kwargs.pop('acl_name')
permit = ET.SubElement(access_list, "permit")
permit_list = ET.SubElement(permit, "permit-list")
host_ip_key = ET.SubElement(permit_list, "host-ip")
host_ip_key.text = kwargs.pop('host_ip')
mac_type_key = ET.SubElement(permit_list, "mac-type")
mac_type_key.text = kwargs.pop('mac_type')
host_mac_key = ET.SubElement(permit_list, "host-mac")
host_mac_key.text = kwargs.pop('host_mac')
ip_type = ET.SubElement(permit_list, "ip-type")
ip_type.text = kwargs.pop('ip_type')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code | Below is the the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def arp_access_list_permit_permit_list_ip_type(self, **kwargs):
    """Auto Generated Code

    Build the NETCONF ``config`` payload for an ARP access-list permit
    entry's ``ip-type`` leaf and hand it to the callback.
    """
    config = ET.Element("config")
    arp = ET.SubElement(config, "arp", xmlns="urn:brocade.com:mgmt:brocade-dai")
    access_list = ET.SubElement(arp, "access-list")
    # List-key leaves identifying the access-list entry.
    ET.SubElement(access_list, "acl-name").text = kwargs.pop('acl_name')
    permit = ET.SubElement(access_list, "permit")
    permit_list = ET.SubElement(permit, "permit-list")
    ET.SubElement(permit_list, "host-ip").text = kwargs.pop('host_ip')
    ET.SubElement(permit_list, "mac-type").text = kwargs.pop('mac_type')
    ET.SubElement(permit_list, "host-mac").text = kwargs.pop('host_mac')
    # Target leaf of this generated setter.
    ET.SubElement(permit_list, "ip-type").text = kwargs.pop('ip_type')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def _set_vlag_commit_mode(self, v, load=False):
"""
Setter method for vlag_commit_mode, mapped from YANG variable /vlag_commit_mode (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_vlag_commit_mode is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_vlag_commit_mode() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=vlag_commit_mode.vlag_commit_mode, is_container='container', presence=False, yang_name="vlag-commit-mode", rest_name="vlag-commit-mode", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'vLAG commit mode', u'cli-incomplete-no': None, u'callpoint': u'lacp_systempriority', u'sort-priority': u'55', u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-lacp', defining_module='brocade-lacp', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """vlag_commit_mode must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=vlag_commit_mode.vlag_commit_mode, is_container='container', presence=False, yang_name="vlag-commit-mode", rest_name="vlag-commit-mode", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'vLAG commit mode', u'cli-incomplete-no': None, u'callpoint': u'lacp_systempriority', u'sort-priority': u'55', u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-lacp', defining_module='brocade-lacp', yang_type='container', is_config=True)""",
})
self.__vlag_commit_mode = t
if hasattr(self, '_set'):
self._set() | Setter method for vlag_commit_mode, mapped from YANG variable /vlag_commit_mode (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_vlag_commit_mode is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_vlag_commit_mode() directly. | Below is the the instruction that describes the task:
### Input:
Setter method for vlag_commit_mode, mapped from YANG variable /vlag_commit_mode (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_vlag_commit_mode is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_vlag_commit_mode() directly.
### Response:
def _set_vlag_commit_mode(self, v, load=False):
    """
    Setter method for vlag_commit_mode, mapped from YANG variable /vlag_commit_mode (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_vlag_commit_mode is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_vlag_commit_mode() directly.
    """
    # Unwrap values that carry their original pybind type so the raw value
    # can be re-validated below.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Re-wrap the value in the generated YANG container class; this
        # validates it against the brocade-lacp model.
        t = YANGDynClass(v,base=vlag_commit_mode.vlag_commit_mode, is_container='container', presence=False, yang_name="vlag-commit-mode", rest_name="vlag-commit-mode", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'vLAG commit mode', u'cli-incomplete-no': None, u'callpoint': u'lacp_systempriority', u'sort-priority': u'55', u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-lacp', defining_module='brocade-lacp', yang_type='container', is_config=True)
    except (TypeError, ValueError):
        # Surface a structured error describing the expected YANG type.
        raise ValueError({
            'error-string': """vlag_commit_mode must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=vlag_commit_mode.vlag_commit_mode, is_container='container', presence=False, yang_name="vlag-commit-mode", rest_name="vlag-commit-mode", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'vLAG commit mode', u'cli-incomplete-no': None, u'callpoint': u'lacp_systempriority', u'sort-priority': u'55', u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-lacp', defining_module='brocade-lacp', yang_type='container', is_config=True)""",
        })
    self.__vlag_commit_mode = t
    # Notify the registered change hook, if one exists.
    if hasattr(self, '_set'):
        self._set()
def per_event_source_id(event_space):
"""
:return:
a seeder function that returns an event's source id only if that event's
source space equals to ``event_space``.
"""
def f(event):
if is_event(event):
v = peel(event)
if v['source']['space'] == event_space:
return v['source']['id']
else:
return None
else:
return None
return _wrap_none(f) | :return:
a seeder function that returns an event's source id only if that event's
source space equals to ``event_space``. | Below is the the instruction that describes the task:
### Input:
:return:
a seeder function that returns an event's source id only if that event's
source space equals to ``event_space``.
### Response:
def per_event_source_id(event_space):
    """
    :return:
        a seeder function that returns an event's source id only if that event's
        source space equals to ``event_space``.
    """
    def seeder(event):
        # Non-events never yield a source id.
        if not is_event(event):
            return None
        payload = peel(event)
        if payload['source']['space'] != event_space:
            return None
        return payload['source']['id']
    return _wrap_none(seeder)
def get_template(template_name, using=None):
"""
Loads and returns a template for the given name.
Raises TemplateDoesNotExist if no such template exists.
"""
engines = _engine_list(using)
for engine in engines:
try:
return engine.get_template(template_name)
except TemplateDoesNotExist as e:
pass
raise TemplateDoesNotExist(template_name) | Loads and returns a template for the given name.
Raises TemplateDoesNotExist if no such template exists. | Below is the the instruction that describes the task:
### Input:
Loads and returns a template for the given name.
Raises TemplateDoesNotExist if no such template exists.
### Response:
def get_template(template_name, using=None):
    """
    Loads and returns a template for the given name.

    Raises TemplateDoesNotExist if no such template exists.

    :param template_name: name of the template to load.
    :param using: optional engine alias restricting which engines are tried.
    """
    engines = _engine_list(using)
    for engine in engines:
        try:
            return engine.get_template(template_name)
        except TemplateDoesNotExist:
            # This engine cannot resolve the name; fall through to the next.
            pass
    raise TemplateDoesNotExist(template_name)
def get_cached_response(cls, key):
"""
Retrieves a CachedResponse for the provided key.
Args:
key (string)
Returns:
A CachedResponse with is_found status and value.
"""
request_cached_response = DEFAULT_REQUEST_CACHE.get_cached_response(key)
if not request_cached_response.is_found:
django_cached_response = cls._get_cached_response_from_django_cache(key)
cls._set_request_cache_if_django_cache_hit(key, django_cached_response)
return django_cached_response
return request_cached_response | Retrieves a CachedResponse for the provided key.
Args:
key (string)
Returns:
A CachedResponse with is_found status and value. | Below is the the instruction that describes the task:
### Input:
Retrieves a CachedResponse for the provided key.
Args:
key (string)
Returns:
A CachedResponse with is_found status and value.
### Response:
def get_cached_response(cls, key):
    """
    Retrieves a CachedResponse for the provided key.

    Args:
        key (string)

    Returns:
        A CachedResponse with is_found status and value.
    """
    request_level = DEFAULT_REQUEST_CACHE.get_cached_response(key)
    if request_level.is_found:
        return request_level
    # Miss in the request-scoped cache: fall back to the Django cache and
    # promote any hit into the request cache for subsequent lookups.
    django_level = cls._get_cached_response_from_django_cache(key)
    cls._set_request_cache_if_django_cache_hit(key, django_level)
    return django_level
def parts(self):
"""
Get the parts directly below this element.
"""
for item in self.__parts_and_divisions:
if item.tag == 'part':
yield item
else:
# Divisions shouldn't be beneath a part, but here's a fallback
# for if this does happen
for part in item.parts:
yield part | Get the parts directly below this element. | Below is the the instruction that describes the task:
### Input:
Get the parts directly below this element.
### Response:
def parts(self):
    """
    Get the parts directly below this element.
    """
    for child in self.__parts_and_divisions:
        if child.tag == 'part':
            yield child
            continue
        # Divisions shouldn't be beneath a part, but if one does appear,
        # flatten its parts into this iteration as a fallback.
        for nested_part in child.parts:
            yield nested_part
def printConfidence(self, aState, maxCols = 20):
"""
Print a floating point array that is the same shape as activeState.
:param aState: TODO: document
:param maxCols: TODO: document
"""
def formatFPRow(var, i):
s = ''
for c in range(min(maxCols, self.numberOfCols)):
if c > 0 and c % 10 == 0:
s += ' '
s += ' %5.3f' % var[c, i]
s += ' '
return s
for i in xrange(self.cellsPerColumn):
print formatFPRow(aState, i) | Print a floating point array that is the same shape as activeState.
:param aState: TODO: document
:param maxCols: TODO: document | Below is the the instruction that describes the task:
### Input:
Print a floating point array that is the same shape as activeState.
:param aState: TODO: document
:param maxCols: TODO: document
### Response:
def printConfidence(self, aState, maxCols = 20):
    """
    Print a floating point array that is the same shape as activeState.

    :param aState: 2-D float array indexed as ``aState[column, cell]``;
        each value is printed with three decimal places.
    :param maxCols: maximum number of columns to print per row.
    """
    def formatFPRow(var, i):
        # Build one text row for cell index ``i`` across the first
        # ``maxCols`` columns, inserting an extra gap every 10 columns.
        s = ''
        for c in range(min(maxCols, self.numberOfCols)):
            if c > 0 and c % 10 == 0:
                s += ' '
            s += ' %5.3f' % var[c, i]
        s += ' '
        return s
    # One printed row per cell index.
    # NOTE(review): Python 2 print statement / xrange — this module
    # targets Python 2.
    for i in xrange(self.cellsPerColumn):
        print formatFPRow(aState, i)
def run(self):
"""The thread's main activity. Call start() instead."""
self._create_socket()
self._running = True
self._beating = True
while self._running:
if self._pause:
# just sleep, and skip the rest of the loop
time.sleep(self.time_to_dead)
continue
since_last_heartbeat = 0.0
# io.rprint('Ping from HB channel') # dbg
# no need to catch EFSM here, because the previous event was
# either a recv or connect, which cannot be followed by EFSM
self.socket.send(b'ping')
request_time = time.time()
ready = self._poll(request_time)
if ready:
self._beating = True
# the poll above guarantees we have something to recv
self.socket.recv()
# sleep the remainder of the cycle
remainder = self.time_to_dead - (time.time() - request_time)
if remainder > 0:
time.sleep(remainder)
continue
else:
# nothing was received within the time limit, signal heart failure
self._beating = False
since_last_heartbeat = time.time() - request_time
self.call_handlers(since_last_heartbeat)
# and close/reopen the socket, because the REQ/REP cycle has been broken
self._create_socket()
continue
try:
self.socket.close()
except:
pass | The thread's main activity. Call start() instead. | Below is the the instruction that describes the task:
### Input:
The thread's main activity. Call start() instead.
### Response:
def run(self):
    """The thread's main activity. Call start() instead.

    Repeatedly pings the heartbeat socket; a missed reply within
    ``self.time_to_dead`` marks the heart as not beating, notifies the
    registered handlers, and recreates the socket.
    """
    self._create_socket()
    self._running = True
    self._beating = True
    while self._running:
        if self._pause:
            # just sleep, and skip the rest of the loop
            time.sleep(self.time_to_dead)
            continue
        since_last_heartbeat = 0.0
        # io.rprint('Ping from HB channel') # dbg
        # no need to catch EFSM here, because the previous event was
        # either a recv or connect, which cannot be followed by EFSM
        self.socket.send(b'ping')
        request_time = time.time()
        ready = self._poll(request_time)
        if ready:
            self._beating = True
            # the poll above guarantees we have something to recv
            self.socket.recv()
            # sleep the remainder of the cycle
            remainder = self.time_to_dead - (time.time() - request_time)
            if remainder > 0:
                time.sleep(remainder)
            continue
        else:
            # nothing was received within the time limit, signal heart failure
            self._beating = False
            since_last_heartbeat = time.time() - request_time
            self.call_handlers(since_last_heartbeat)
            # and close/reopen the socket, because the REQ/REP cycle has been broken
            self._create_socket()
            continue
    # Loop exited (self._running cleared): best-effort close of the socket.
    try:
        self.socket.close()
    except:
        pass
def get_server(server_class, serial_port):
""" Return instance of :param:`server_class` with :param:`request_handler`
bound to it.
This method also binds a :func:`route` method to the server instance.
>>> server = get_server(TcpServer, ('localhost', 502), RequestHandler)
>>> server.serve_forever()
:param server_class: (sub)Class of :class:`socketserver.BaseServer`.
:param request_handler_class: (sub)Class of
:class:`umodbus.server.RequestHandler`.
:return: Instance of :param:`server_class`.
"""
s = server_class()
s.serial_port = serial_port
s.route_map = Map()
s.route = MethodType(route, s)
return s | Return instance of :param:`server_class` with :param:`request_handler`
bound to it.
This method also binds a :func:`route` method to the server instance.
>>> server = get_server(TcpServer, ('localhost', 502), RequestHandler)
>>> server.serve_forever()
:param server_class: (sub)Class of :class:`socketserver.BaseServer`.
:param request_handler_class: (sub)Class of
:class:`umodbus.server.RequestHandler`.
:return: Instance of :param:`server_class`. | Below is the the instruction that describes the task:
### Input:
Return instance of :param:`server_class` with :param:`request_handler`
bound to it.
This method also binds a :func:`route` method to the server instance.
>>> server = get_server(TcpServer, ('localhost', 502), RequestHandler)
>>> server.serve_forever()
:param server_class: (sub)Class of :class:`socketserver.BaseServer`.
:param request_handler_class: (sub)Class of
:class:`umodbus.server.RequestHandler`.
:return: Instance of :param:`server_class`.
### Response:
def get_server(server_class, serial_port):
    """ Return instance of ``server_class`` bound to ``serial_port``.

    This method also binds a :func:`route` method to the server instance.

        >>> server = get_server(RTUServer, serial_port)
        >>> server.serve_forever()

    NOTE(review): the original docstring showed a TCP example taking a
    request-handler argument, which this signature does not accept —
    confirm the intended usage against the callers.

    :param server_class: (sub)Class of :class:`socketserver.BaseServer`.
    :param serial_port: serial port object the server communicates over.
    :return: Instance of ``server_class``.
    """
    s = server_class()
    s.serial_port = serial_port
    # Fresh routing table plus a bound ``route`` decorator for registering
    # endpoints on this particular server instance.
    s.route_map = Map()
    s.route = MethodType(route, s)
    return s
def print_help(filename, table, dest=sys.stdout):
"""
Print help to the given destination file object.
"""
cmds = '|'.join(sorted(table.keys()))
print >> dest, "Syntax: %s %s [args]" % (path.basename(filename), cmds) | Print help to the given destination file object. | Below is the the instruction that describes the task:
### Input:
Print help to the given destination file object.
### Response:
def print_help(filename, table, dest=sys.stdout):
    """
    Print help to the given destination file object.

    :param filename: path of the executable; only its basename is shown.
    :param table: mapping of command name -> handler; the sorted keys
        become the ``cmd1|cmd2|...`` alternatives in the syntax line.
    :param dest: writable file object the help line is printed to.
    """
    cmds = '|'.join(sorted(table.keys()))
    # NOTE(review): Python 2 ``print >> dest`` syntax — this module
    # targets Python 2.
    print >> dest, "Syntax: %s %s [args]" % (path.basename(filename), cmds)
def get_userloved(self):
"""Whether the user loved this track"""
if not self.username:
return
params = self._get_params()
params["username"] = self.username
doc = self._request(self.ws_prefix + ".getInfo", True, params)
loved = _number(_extract(doc, "userloved"))
return bool(loved) | Whether the user loved this track | Below is the the instruction that describes the task:
### Input:
Whether the user loved this track
### Response:
def get_userloved(self):
    """Whether the user loved this track.

    Returns None (implicit) when ``self.username`` is not set; otherwise a
    bool parsed from the web service's ``userloved`` field.
    """
    if not self.username:
        return
    params = self._get_params()
    params["username"] = self.username
    # Second argument True presumably enables caching/signing in
    # _request — TODO confirm against the client implementation.
    doc = self._request(self.ws_prefix + ".getInfo", True, params)
    loved = _number(_extract(doc, "userloved"))
    return bool(loved)
def on_rulebook(self, *args):
"""Make sure to update when the rulebook changes"""
if self.rulebook is None:
return
self.rulebook.connect(self._trigger_redata, weak=False)
self.redata() | Make sure to update when the rulebook changes | Below is the the instruction that describes the task:
### Input:
Make sure to update when the rulebook changes
### Response:
def on_rulebook(self, *args):
    """Make sure to update when the rulebook changes"""
    rulebook = self.rulebook
    if rulebook is not None:
        # Refresh data now and again whenever the rulebook fires.
        rulebook.connect(self._trigger_redata, weak=False)
        self.redata()
def clean(self, value):
"""Validates that the input can be converted to a list of decimals."""
if not value:
return None
# if any value exists, then add "0" as a placeholder to the remaining
# values.
if isinstance(value, list) and any(value):
for i, item in enumerate(value):
if not item:
value[i] = '0'
return super(MultipleDecimalField, self).clean(value) | Validates that the input can be converted to a list of decimals. | Below is the the instruction that describes the task:
### Input:
Validates that the input can be converted to a list of decimals.
### Response:
def clean(self, value):
    """Validates that the input can be converted to a list of decimals."""
    if not value:
        return None
    # When at least one entry is non-empty, substitute "0" for the empty
    # slots (in place) so every position still parses as a decimal.
    if isinstance(value, list) and any(value):
        value[:] = [entry if entry else '0' for entry in value]
    return super(MultipleDecimalField, self).clean(value)
def getProcessOwner(pid):
'''
getProcessOwner - Get the process owner of a pid
@param pid <int> - process id
@return - None if process not found or can't be determined. Otherwise, a dict:
{
uid - Owner UID
name - Owner name, or None if one cannot be determined
}
'''
try:
ownerUid = os.stat('/proc/' + str(pid)).st_uid
except:
return None
try:
ownerName = pwd.getpwuid(ownerUid).pw_name
except:
ownerName = None
return {
'uid' : ownerUid,
'name' : ownerName
} | getProcessOwner - Get the process owner of a pid
@param pid <int> - process id
@return - None if process not found or can't be determined. Otherwise, a dict:
{
uid - Owner UID
name - Owner name, or None if one cannot be determined
} | Below is the the instruction that describes the task:
### Input:
getProcessOwner - Get the process owner of a pid
@param pid <int> - process id
@return - None if process not found or can't be determined. Otherwise, a dict:
{
uid - Owner UID
name - Owner name, or None if one cannot be determined
}
### Response:
def getProcessOwner(pid):
    '''
    getProcessOwner - Get the process owner of a pid

    @param pid <int> - process id

    @return - None if process not found or can't be determined. Otherwise, a dict:
        {
            uid  - Owner UID
            name - Owner name, or None if one cannot be determined
        }
    '''
    try:
        ownerUid = os.stat('/proc/' + str(pid)).st_uid
    except OSError:
        # Process does not exist, or /proc is unavailable/unreadable.
        # (Narrowed from a bare except, which also swallowed
        # KeyboardInterrupt/SystemExit.)
        return None
    try:
        ownerName = pwd.getpwuid(ownerUid).pw_name
    except KeyError:
        # UID has no passwd entry (e.g. deleted user or container UID).
        ownerName = None
    return {
        'uid': ownerUid,
        'name': ownerName,
    }
def buffered(self):
""" Whether write operations should be buffered, i.e. run against a
local graph before being stored to the main data store. """
if 'buffered' not in self.config:
return not isinstance(self.store, (Memory, IOMemory))
return self.config.get('buffered') | Whether write operations should be buffered, i.e. run against a
local graph before being stored to the main data store. | Below is the the instruction that describes the task:
### Input:
Whether write operations should be buffered, i.e. run against a
local graph before being stored to the main data store.
### Response:
def buffered(self):
    """ Whether write operations should be buffered, i.e. run against a
    local graph before being stored to the main data store. """
    if 'buffered' in self.config:
        # An explicit configuration value always wins.
        return self.config.get('buffered')
    # No explicit setting: purely in-memory stores need no buffering.
    return not isinstance(self.store, (Memory, IOMemory))
def filter(self, criteria, applyto='measurement', ID=None):
"""
Filter measurements according to given criteria.
Retain only Measurements for which criteria returns True.
TODO: add support for multiple criteria
Parameters
----------
criteria : callable
Returns bool.
applyto : 'measurement' | 'keys' | 'data' | mapping
'measurement' : criteria is applied to Measurement objects
'keys' : criteria is applied to the keys.
'data' : criteria is applied to the Measurement objects' data.
mapping : for each key criteria is applied to mapping value with same key.
ID : str
ID of the filtered collection.
If None is given, append '.filterd' to the current sample ID.
Returns
-------
Filtered Collection.
"""
fil = criteria
new = self.copy()
if isinstance(applyto, collections.Mapping):
remove = (k for k, v in self.items() if not fil(applyto[k]))
elif applyto == 'measurement':
remove = (k for k, v in self.items() if not fil(v))
elif applyto == 'keys':
remove = (k for k, v in self.items() if not fil(k))
elif applyto == 'data':
remove = (k for k, v in self.items() if not fil(v.get_data()))
else:
raise ValueError('Unsupported value "%s" for applyto parameter.' % applyto)
for r in remove:
del new[r]
if ID is None:
ID = self.ID
new.ID = ID
return new | Filter measurements according to given criteria.
Retain only Measurements for which criteria returns True.
TODO: add support for multiple criteria
Parameters
----------
criteria : callable
Returns bool.
applyto : 'measurement' | 'keys' | 'data' | mapping
'measurement' : criteria is applied to Measurement objects
'keys' : criteria is applied to the keys.
'data' : criteria is applied to the Measurement objects' data.
mapping : for each key criteria is applied to mapping value with same key.
ID : str
ID of the filtered collection.
If None is given, append '.filterd' to the current sample ID.
Returns
-------
Filtered Collection. | Below is the the instruction that describes the task:
### Input:
Filter measurements according to given criteria.
Retain only Measurements for which criteria returns True.
TODO: add support for multiple criteria
Parameters
----------
criteria : callable
Returns bool.
applyto : 'measurement' | 'keys' | 'data' | mapping
'measurement' : criteria is applied to Measurement objects
'keys' : criteria is applied to the keys.
'data' : criteria is applied to the Measurement objects' data.
mapping : for each key criteria is applied to mapping value with same key.
ID : str
ID of the filtered collection.
If None is given, append '.filterd' to the current sample ID.
Returns
-------
Filtered Collection.
### Response:
def filter(self, criteria, applyto='measurement', ID=None):
"""
Filter measurements according to given criteria.
Retain only Measurements for which criteria returns True.
TODO: add support for multiple criteria
Parameters
----------
criteria : callable
Returns bool.
applyto : 'measurement' | 'keys' | 'data' | mapping
'measurement' : criteria is applied to Measurement objects
'keys' : criteria is applied to the keys.
'data' : criteria is applied to the Measurement objects' data.
mapping : for each key criteria is applied to mapping value with same key.
ID : str
ID of the filtered collection.
If None is given, append '.filterd' to the current sample ID.
Returns
-------
Filtered Collection.
"""
fil = criteria
new = self.copy()
if isinstance(applyto, collections.Mapping):
remove = (k for k, v in self.items() if not fil(applyto[k]))
elif applyto == 'measurement':
remove = (k for k, v in self.items() if not fil(v))
elif applyto == 'keys':
remove = (k for k, v in self.items() if not fil(k))
elif applyto == 'data':
remove = (k for k, v in self.items() if not fil(v.get_data()))
else:
raise ValueError('Unsupported value "%s" for applyto parameter.' % applyto)
for r in remove:
del new[r]
if ID is None:
ID = self.ID
new.ID = ID
return new |
def _proc_polygon(self, tokens, filled):
""" Returns the components of a polygon. """
pts = [(p["x"], p["y"]) for p in tokens["points"]]
component = Polygon(pen=self.pen, points=pts, filled=filled)
return component | Returns the components of a polygon. | Below is the the instruction that describes the task:
### Input:
Returns the components of a polygon.
### Response:
def _proc_polygon(self, tokens, filled):
""" Returns the components of a polygon. """
pts = [(p["x"], p["y"]) for p in tokens["points"]]
component = Polygon(pen=self.pen, points=pts, filled=filled)
return component |
def _find_cf_standard_name_table(self, ds):
'''
Parse out the `standard_name_vocabulary` attribute and download that
version of the cf standard name table. If the standard name table has
already been downloaded, use the cached version. Modifies `_std_names`
attribute to store standard names. Returns True if the file exists and
False if it fails to download.
:param netCDF4.Dataset ds: An open netCDF dataset
:rtype: bool
'''
# Get the standard name vocab
standard_name_vocabulary = getattr(ds, 'standard_name_vocabulary', '')
# Try to parse this attribute to get version
version = None
try:
if 'cf standard name table' in standard_name_vocabulary.lower():
version = [s.strip('(').strip(')').strip('v').strip(',') for s in standard_name_vocabulary.split()]
# This assumes that table version number won't start with 0.
version = [s for s in version if s.isdigit() and len(s) <= 2 and not s.startswith('0')]
if len(version) > 1:
return False
else:
version = version[0]
else:
# Can't parse the attribute, use the packaged version
return False
# usually raised from .lower() with an incompatible (non-string)
# data type
except AttributeError:
warn("Cannot convert standard name table to lowercase. This can "
"occur if a non-string standard_name_vocabulary global "
"attribute is supplied")
return False
if version.startswith('v'): # i.e 'v34' -> '34' drop the v
version = version[1:]
# If the packaged version is what we're after, then we're good
if version == self._std_names._version:
print("Using packaged standard name table v{0}".format(version), file=sys.stderr)
return False
# Try to download the version specified
try:
data_directory = util.create_cached_data_dir()
location = os.path.join(data_directory, 'cf-standard-name-table-test-{0}.xml'.format(version))
# Did we already download this before?
if not os.path.isfile(location):
util.download_cf_standard_name_table(version, location)
print("Using downloaded standard name table v{0}".format(version), file=sys.stderr)
else:
print("Using cached standard name table v{0} from {1}".format(version, location), file=sys.stderr)
self._std_names = util.StandardNameTable(location)
return True
except Exception as e:
# There was an error downloading the CF table. That's ok, we'll just use the packaged version
warn("Problem fetching standard name table:\n{0}\n"
"Using packaged v{1}".format(e, self._std_names._version))
return False | Parse out the `standard_name_vocabulary` attribute and download that
version of the cf standard name table. If the standard name table has
already been downloaded, use the cached version. Modifies `_std_names`
attribute to store standard names. Returns True if the file exists and
False if it fails to download.
:param netCDF4.Dataset ds: An open netCDF dataset
:rtype: bool | Below is the the instruction that describes the task:
### Input:
Parse out the `standard_name_vocabulary` attribute and download that
version of the cf standard name table. If the standard name table has
already been downloaded, use the cached version. Modifies `_std_names`
attribute to store standard names. Returns True if the file exists and
False if it fails to download.
:param netCDF4.Dataset ds: An open netCDF dataset
:rtype: bool
### Response:
def _find_cf_standard_name_table(self, ds):
'''
Parse out the `standard_name_vocabulary` attribute and download that
version of the cf standard name table. If the standard name table has
already been downloaded, use the cached version. Modifies `_std_names`
attribute to store standard names. Returns True if the file exists and
False if it fails to download.
:param netCDF4.Dataset ds: An open netCDF dataset
:rtype: bool
'''
# Get the standard name vocab
standard_name_vocabulary = getattr(ds, 'standard_name_vocabulary', '')
# Try to parse this attribute to get version
version = None
try:
if 'cf standard name table' in standard_name_vocabulary.lower():
version = [s.strip('(').strip(')').strip('v').strip(',') for s in standard_name_vocabulary.split()]
# This assumes that table version number won't start with 0.
version = [s for s in version if s.isdigit() and len(s) <= 2 and not s.startswith('0')]
if len(version) > 1:
return False
else:
version = version[0]
else:
# Can't parse the attribute, use the packaged version
return False
# usually raised from .lower() with an incompatible (non-string)
# data type
except AttributeError:
warn("Cannot convert standard name table to lowercase. This can "
"occur if a non-string standard_name_vocabulary global "
"attribute is supplied")
return False
if version.startswith('v'): # i.e 'v34' -> '34' drop the v
version = version[1:]
# If the packaged version is what we're after, then we're good
if version == self._std_names._version:
print("Using packaged standard name table v{0}".format(version), file=sys.stderr)
return False
# Try to download the version specified
try:
data_directory = util.create_cached_data_dir()
location = os.path.join(data_directory, 'cf-standard-name-table-test-{0}.xml'.format(version))
# Did we already download this before?
if not os.path.isfile(location):
util.download_cf_standard_name_table(version, location)
print("Using downloaded standard name table v{0}".format(version), file=sys.stderr)
else:
print("Using cached standard name table v{0} from {1}".format(version, location), file=sys.stderr)
self._std_names = util.StandardNameTable(location)
return True
except Exception as e:
# There was an error downloading the CF table. That's ok, we'll just use the packaged version
warn("Problem fetching standard name table:\n{0}\n"
"Using packaged v{1}".format(e, self._std_names._version))
return False |
def parse_resource_extended(self, session, resource_name):
"""Parse a resource string to get extended interface information.
Corresponds to viParseRsrcEx function of the VISA library.
:param session: Resource Manager session (should always be the Default Resource Manager for VISA
returned from open_default_resource_manager()).
:param resource_name: Unique symbolic name of a resource.
:return: Resource information, return value of the library call.
:rtype: :class:`pyvisa.highlevel.ResourceInfo`, :class:`pyvisa.constants.StatusCode`
"""
try:
parsed = rname.parse_resource_name(resource_name)
return (ResourceInfo(parsed.interface_type_const,
parsed.board,
parsed.resource_class,
str(parsed), None),
constants.StatusCode.success)
except ValueError:
return 0, constants.StatusCode.error_invalid_resource_name | Parse a resource string to get extended interface information.
Corresponds to viParseRsrcEx function of the VISA library.
:param session: Resource Manager session (should always be the Default Resource Manager for VISA
returned from open_default_resource_manager()).
:param resource_name: Unique symbolic name of a resource.
:return: Resource information, return value of the library call.
:rtype: :class:`pyvisa.highlevel.ResourceInfo`, :class:`pyvisa.constants.StatusCode` | Below is the the instruction that describes the task:
### Input:
Parse a resource string to get extended interface information.
Corresponds to viParseRsrcEx function of the VISA library.
:param session: Resource Manager session (should always be the Default Resource Manager for VISA
returned from open_default_resource_manager()).
:param resource_name: Unique symbolic name of a resource.
:return: Resource information, return value of the library call.
:rtype: :class:`pyvisa.highlevel.ResourceInfo`, :class:`pyvisa.constants.StatusCode`
### Response:
def parse_resource_extended(self, session, resource_name):
"""Parse a resource string to get extended interface information.
Corresponds to viParseRsrcEx function of the VISA library.
:param session: Resource Manager session (should always be the Default Resource Manager for VISA
returned from open_default_resource_manager()).
:param resource_name: Unique symbolic name of a resource.
:return: Resource information, return value of the library call.
:rtype: :class:`pyvisa.highlevel.ResourceInfo`, :class:`pyvisa.constants.StatusCode`
"""
try:
parsed = rname.parse_resource_name(resource_name)
return (ResourceInfo(parsed.interface_type_const,
parsed.board,
parsed.resource_class,
str(parsed), None),
constants.StatusCode.success)
except ValueError:
return 0, constants.StatusCode.error_invalid_resource_name |
def check_results(tmp_):
"""Return a 3 tuple for something."""
# TODO: Fix this to work with more meaningful names
if tmp_['t'] > 0:
if tmp_['l'] > 0:
if tmp_['rr'] > 0 or tmp_['ra'] > 1:
print 1, 3, tmp_
return 3
elif tmp_['cr'] > 0 or tmp_['ca'] > 1:
print 2, 3, tmp_
return 3
elif tmp_['mr'] > 0 or tmp_['ma'] > 1:
print 3, 2, tmp_
return 2
if tmp_['cr'] > 1 or tmp_['ca'] > 2:
print 4, 2, tmp_
return 2
if tmp_['mr'] > 0 or tmp_['ma'] > 1:
if tmp_['cr'] > 0 or tmp_['ca'] > 1:
print 6, 0, tmp_
return 0
if tmp_['rr'] > 1 or tmp_['ra'] > 2:
print 7, 0, tmp_
return 0
if tmp_['sr'] > 1 or tmp_['sa'] > 2:
print 8, 0, tmp_
return 0
if tmp_['l'] > 0:
if tmp_['rr'] > 0 or tmp_['ra'] > 1:
print 9, 2, tmp_
return 2
if tmp_['cr'] > 0 or tmp_['ca'] > 1:
print 10, 0, tmp_
return 0
return -1 | Return a 3 tuple for something. | Below is the the instruction that describes the task:
### Input:
Return a 3 tuple for something.
### Response:
def check_results(tmp_):
"""Return a 3 tuple for something."""
# TODO: Fix this to work with more meaningful names
if tmp_['t'] > 0:
if tmp_['l'] > 0:
if tmp_['rr'] > 0 or tmp_['ra'] > 1:
print 1, 3, tmp_
return 3
elif tmp_['cr'] > 0 or tmp_['ca'] > 1:
print 2, 3, tmp_
return 3
elif tmp_['mr'] > 0 or tmp_['ma'] > 1:
print 3, 2, tmp_
return 2
if tmp_['cr'] > 1 or tmp_['ca'] > 2:
print 4, 2, tmp_
return 2
if tmp_['mr'] > 0 or tmp_['ma'] > 1:
if tmp_['cr'] > 0 or tmp_['ca'] > 1:
print 6, 0, tmp_
return 0
if tmp_['rr'] > 1 or tmp_['ra'] > 2:
print 7, 0, tmp_
return 0
if tmp_['sr'] > 1 or tmp_['sa'] > 2:
print 8, 0, tmp_
return 0
if tmp_['l'] > 0:
if tmp_['rr'] > 0 or tmp_['ra'] > 1:
print 9, 2, tmp_
return 2
if tmp_['cr'] > 0 or tmp_['ca'] > 1:
print 10, 0, tmp_
return 0
return -1 |
def authenticate(self, username, password, service='login', encoding='utf-8', resetcreds=True):
"""username and password authentication for the given service.
Returns True for success, or False for failure.
self.code (integer) and self.reason (string) are always stored and may
be referenced for the reason why authentication failed. 0/'Success' will
be stored for success.
Python3 expects bytes() for ctypes inputs. This function will make
necessary conversions using the supplied encoding.
Inputs:
username: username to authenticate
password: password in plain text
service: PAM service to authenticate against, defaults to 'login'
Returns:
success: True
failure: False
"""
@conv_func
def my_conv(n_messages, messages, p_response, app_data):
"""Simple conversation function that responds to any
prompt where the echo is off with the supplied password"""
# Create an array of n_messages response objects
addr = calloc(n_messages, sizeof(PamResponse))
response = cast(addr, POINTER(PamResponse))
p_response[0] = response
for i in range(n_messages):
if messages[i].contents.msg_style == PAM_PROMPT_ECHO_OFF:
dst = calloc(len(password)+1, sizeof(c_char))
memmove(dst, cpassword, len(password))
response[i].resp = dst
response[i].resp_retcode = 0
return 0
# python3 ctypes prefers bytes
if sys.version_info >= (3,):
if isinstance(username, str): username = username.encode(encoding)
if isinstance(password, str): password = password.encode(encoding)
if isinstance(service, str): service = service.encode(encoding)
else:
if isinstance(username, unicode):
username = username.encode(encoding)
if isinstance(password, unicode):
password = password.encode(encoding)
if isinstance(service, unicode):
service = service.encode(encoding)
if b'\x00' in username or b'\x00' in password or b'\x00' in service:
self.code = 4 # PAM_SYSTEM_ERR in Linux-PAM
self.reason = 'strings may not contain NUL'
return False
# do this up front so we can safely throw an exception if there's
# anything wrong with it
cpassword = c_char_p(password)
handle = PamHandle()
conv = PamConv(my_conv, 0)
retval = pam_start(service, username, byref(conv), byref(handle))
if retval != 0:
# This is not an authentication error, something has gone wrong starting up PAM
self.code = retval
self.reason = "pam_start() failed"
return False
retval = pam_authenticate(handle, 0)
auth_success = retval == 0
if auth_success and resetcreds:
retval = pam_setcred(handle, PAM_REINITIALIZE_CRED);
# store information to inform the caller why we failed
self.code = retval
self.reason = pam_strerror(handle, retval)
if sys.version_info >= (3,):
self.reason = self.reason.decode(encoding)
if hasattr(libpam, 'pam_end'):
pam_end(handle, retval)
return auth_success | username and password authentication for the given service.
Returns True for success, or False for failure.
self.code (integer) and self.reason (string) are always stored and may
be referenced for the reason why authentication failed. 0/'Success' will
be stored for success.
Python3 expects bytes() for ctypes inputs. This function will make
necessary conversions using the supplied encoding.
Inputs:
username: username to authenticate
password: password in plain text
service: PAM service to authenticate against, defaults to 'login'
Returns:
success: True
failure: False | Below is the the instruction that describes the task:
### Input:
username and password authentication for the given service.
Returns True for success, or False for failure.
self.code (integer) and self.reason (string) are always stored and may
be referenced for the reason why authentication failed. 0/'Success' will
be stored for success.
Python3 expects bytes() for ctypes inputs. This function will make
necessary conversions using the supplied encoding.
Inputs:
username: username to authenticate
password: password in plain text
service: PAM service to authenticate against, defaults to 'login'
Returns:
success: True
failure: False
### Response:
def authenticate(self, username, password, service='login', encoding='utf-8', resetcreds=True):
"""username and password authentication for the given service.
Returns True for success, or False for failure.
self.code (integer) and self.reason (string) are always stored and may
be referenced for the reason why authentication failed. 0/'Success' will
be stored for success.
Python3 expects bytes() for ctypes inputs. This function will make
necessary conversions using the supplied encoding.
Inputs:
username: username to authenticate
password: password in plain text
service: PAM service to authenticate against, defaults to 'login'
Returns:
success: True
failure: False
"""
@conv_func
def my_conv(n_messages, messages, p_response, app_data):
"""Simple conversation function that responds to any
prompt where the echo is off with the supplied password"""
# Create an array of n_messages response objects
addr = calloc(n_messages, sizeof(PamResponse))
response = cast(addr, POINTER(PamResponse))
p_response[0] = response
for i in range(n_messages):
if messages[i].contents.msg_style == PAM_PROMPT_ECHO_OFF:
dst = calloc(len(password)+1, sizeof(c_char))
memmove(dst, cpassword, len(password))
response[i].resp = dst
response[i].resp_retcode = 0
return 0
# python3 ctypes prefers bytes
if sys.version_info >= (3,):
if isinstance(username, str): username = username.encode(encoding)
if isinstance(password, str): password = password.encode(encoding)
if isinstance(service, str): service = service.encode(encoding)
else:
if isinstance(username, unicode):
username = username.encode(encoding)
if isinstance(password, unicode):
password = password.encode(encoding)
if isinstance(service, unicode):
service = service.encode(encoding)
if b'\x00' in username or b'\x00' in password or b'\x00' in service:
self.code = 4 # PAM_SYSTEM_ERR in Linux-PAM
self.reason = 'strings may not contain NUL'
return False
# do this up front so we can safely throw an exception if there's
# anything wrong with it
cpassword = c_char_p(password)
handle = PamHandle()
conv = PamConv(my_conv, 0)
retval = pam_start(service, username, byref(conv), byref(handle))
if retval != 0:
# This is not an authentication error, something has gone wrong starting up PAM
self.code = retval
self.reason = "pam_start() failed"
return False
retval = pam_authenticate(handle, 0)
auth_success = retval == 0
if auth_success and resetcreds:
retval = pam_setcred(handle, PAM_REINITIALIZE_CRED);
# store information to inform the caller why we failed
self.code = retval
self.reason = pam_strerror(handle, retval)
if sys.version_info >= (3,):
self.reason = self.reason.decode(encoding)
if hasattr(libpam, 'pam_end'):
pam_end(handle, retval)
return auth_success |
def feedparser_render(url, *args, **kwargs):
"""
Render a feed and return its builded html
Usage: ::
{% feedparser_render 'http://localhost/sample.xml' %}
Or with all accepted arguments: ::
{% feedparser_render 'http://localhost/sample.xml' renderer='CustomRenderer' template='foo/custom.html' expiration=3600 %}
"""
renderer_name = kwargs.get('renderer', settings.FEED_DEFAULT_RENDERER_ENGINE)
renderer_template = kwargs.get('template', None)
expiration = kwargs.get('expiration', 0)
renderer = get_feed_renderer(settings.FEED_RENDER_ENGINES, renderer_name)
return renderer().render(url, template=renderer_template, expiration=expiration) | Render a feed and return its builded html
Usage: ::
{% feedparser_render 'http://localhost/sample.xml' %}
Or with all accepted arguments: ::
{% feedparser_render 'http://localhost/sample.xml' renderer='CustomRenderer' template='foo/custom.html' expiration=3600 %} | Below is the the instruction that describes the task:
### Input:
Render a feed and return its builded html
Usage: ::
{% feedparser_render 'http://localhost/sample.xml' %}
Or with all accepted arguments: ::
{% feedparser_render 'http://localhost/sample.xml' renderer='CustomRenderer' template='foo/custom.html' expiration=3600 %}
### Response:
def feedparser_render(url, *args, **kwargs):
"""
Render a feed and return its builded html
Usage: ::
{% feedparser_render 'http://localhost/sample.xml' %}
Or with all accepted arguments: ::
{% feedparser_render 'http://localhost/sample.xml' renderer='CustomRenderer' template='foo/custom.html' expiration=3600 %}
"""
renderer_name = kwargs.get('renderer', settings.FEED_DEFAULT_RENDERER_ENGINE)
renderer_template = kwargs.get('template', None)
expiration = kwargs.get('expiration', 0)
renderer = get_feed_renderer(settings.FEED_RENDER_ENGINES, renderer_name)
return renderer().render(url, template=renderer_template, expiration=expiration) |
def frameify(self, state, data):
"""Split data into a sequence of frames."""
# Pull in any partially-processed data
data = state.recv_buf + data
# Loop over the data
while data:
if state.frame_len is None:
# Try to grab a frame length from the data
if len(data) < self.fmt.size:
# Not enough data; try back later
break
# Extract the length
state.frame_len = self.fmt.unpack(data[:self.fmt.size])[0]
data = data[self.fmt.size:]
# Now that we have the frame length, extract the frame
if len(data) < state.frame_len:
# Not enough data; try back later
break
# OK, we have a full frame...
frame = data[:state.frame_len]
data = data[state.frame_len:]
state.frame_len = None
# Yield the frame
try:
yield frame
except FrameSwitch:
break
# Put any remaining data back into the buffer
state.recv_buf = data | Split data into a sequence of frames. | Below is the the instruction that describes the task:
### Input:
Split data into a sequence of frames.
### Response:
def frameify(self, state, data):
"""Split data into a sequence of frames."""
# Pull in any partially-processed data
data = state.recv_buf + data
# Loop over the data
while data:
if state.frame_len is None:
# Try to grab a frame length from the data
if len(data) < self.fmt.size:
# Not enough data; try back later
break
# Extract the length
state.frame_len = self.fmt.unpack(data[:self.fmt.size])[0]
data = data[self.fmt.size:]
# Now that we have the frame length, extract the frame
if len(data) < state.frame_len:
# Not enough data; try back later
break
# OK, we have a full frame...
frame = data[:state.frame_len]
data = data[state.frame_len:]
state.frame_len = None
# Yield the frame
try:
yield frame
except FrameSwitch:
break
# Put any remaining data back into the buffer
state.recv_buf = data |
def DOMDebugger_removeDOMBreakpoint(self, nodeId, type):
"""
Function path: DOMDebugger.removeDOMBreakpoint
Domain: DOMDebugger
Method name: removeDOMBreakpoint
Parameters:
Required arguments:
'nodeId' (type: DOM.NodeId) -> Identifier of the node to remove breakpoint from.
'type' (type: DOMBreakpointType) -> Type of the breakpoint to remove.
No return value.
Description: Removes DOM breakpoint that was set using <code>setDOMBreakpoint</code>.
"""
subdom_funcs = self.synchronous_command('DOMDebugger.removeDOMBreakpoint',
nodeId=nodeId, type=type)
return subdom_funcs | Function path: DOMDebugger.removeDOMBreakpoint
Domain: DOMDebugger
Method name: removeDOMBreakpoint
Parameters:
Required arguments:
'nodeId' (type: DOM.NodeId) -> Identifier of the node to remove breakpoint from.
'type' (type: DOMBreakpointType) -> Type of the breakpoint to remove.
No return value.
Description: Removes DOM breakpoint that was set using <code>setDOMBreakpoint</code>. | Below is the the instruction that describes the task:
### Input:
Function path: DOMDebugger.removeDOMBreakpoint
Domain: DOMDebugger
Method name: removeDOMBreakpoint
Parameters:
Required arguments:
'nodeId' (type: DOM.NodeId) -> Identifier of the node to remove breakpoint from.
'type' (type: DOMBreakpointType) -> Type of the breakpoint to remove.
No return value.
Description: Removes DOM breakpoint that was set using <code>setDOMBreakpoint</code>.
### Response:
def DOMDebugger_removeDOMBreakpoint(self, nodeId, type):
"""
Function path: DOMDebugger.removeDOMBreakpoint
Domain: DOMDebugger
Method name: removeDOMBreakpoint
Parameters:
Required arguments:
'nodeId' (type: DOM.NodeId) -> Identifier of the node to remove breakpoint from.
'type' (type: DOMBreakpointType) -> Type of the breakpoint to remove.
No return value.
Description: Removes DOM breakpoint that was set using <code>setDOMBreakpoint</code>.
"""
subdom_funcs = self.synchronous_command('DOMDebugger.removeDOMBreakpoint',
nodeId=nodeId, type=type)
return subdom_funcs |
def findOrDie(s):
"""
Look up an amino acid.
@param s: A C{str} amino acid specifier. This may be a full name,
a 3-letter abbreviation or a 1-letter abbreviation. Case is ignored.
@return: An C{AminoAcid} instance, if one can be found. Else exit.
"""
aa = find(s)
if aa:
return aa
else:
print('Unknown amino acid or codon: %s' % s, file=sys.stderr)
print('Valid arguments are: %s.' % list(CODONS.keys()),
file=sys.stderr)
sys.exit(1) | Look up an amino acid.
@param s: A C{str} amino acid specifier. This may be a full name,
a 3-letter abbreviation or a 1-letter abbreviation. Case is ignored.
@return: An C{AminoAcid} instance, if one can be found. Else exit. | Below is the the instruction that describes the task:
### Input:
Look up an amino acid.
@param s: A C{str} amino acid specifier. This may be a full name,
a 3-letter abbreviation or a 1-letter abbreviation. Case is ignored.
@return: An C{AminoAcid} instance, if one can be found. Else exit.
### Response:
def findOrDie(s):
"""
Look up an amino acid.
@param s: A C{str} amino acid specifier. This may be a full name,
a 3-letter abbreviation or a 1-letter abbreviation. Case is ignored.
@return: An C{AminoAcid} instance, if one can be found. Else exit.
"""
aa = find(s)
if aa:
return aa
else:
print('Unknown amino acid or codon: %s' % s, file=sys.stderr)
print('Valid arguments are: %s.' % list(CODONS.keys()),
file=sys.stderr)
sys.exit(1) |
def is_enabled():
"""Returns ``True`` if bcrypt should be used."""
enabled = getattr(settings, "BCRYPT_ENABLED", True)
if not enabled:
return False
# Are we under a test?
if hasattr(mail, 'outbox'):
return getattr(settings, "BCRYPT_ENABLED_UNDER_TEST", False)
return True | Returns ``True`` if bcrypt should be used. | Below is the the instruction that describes the task:
### Input:
Returns ``True`` if bcrypt should be used.
### Response:
def is_enabled():
"""Returns ``True`` if bcrypt should be used."""
enabled = getattr(settings, "BCRYPT_ENABLED", True)
if not enabled:
return False
# Are we under a test?
if hasattr(mail, 'outbox'):
return getattr(settings, "BCRYPT_ENABLED_UNDER_TEST", False)
return True |
def filesByCell(fnames,cells):
"""given files and cells, return a dict of files grouped by cell."""
byCell={}
fnames=smartSort(fnames)
days = list(set([elem[:5] for elem in fnames if elem.endswith(".abf")])) # so pythonic!
for day in smartSort(days):
parent=None
for i,fname in enumerate([elem for elem in fnames if elem.startswith(day) and elem.endswith(".abf")]):
ID=os.path.splitext(fname)[0]
if len([x for x in fnames if x.startswith(ID)])-1:
parent=ID
if not parent in byCell:
byCell[parent]=[]
byCell[parent]=byCell[parent]+[fname]
return byCell | given files and cells, return a dict of files grouped by cell. | Below is the the instruction that describes the task:
### Input:
given files and cells, return a dict of files grouped by cell.
### Response:
def filesByCell(fnames,cells):
"""given files and cells, return a dict of files grouped by cell."""
byCell={}
fnames=smartSort(fnames)
days = list(set([elem[:5] for elem in fnames if elem.endswith(".abf")])) # so pythonic!
for day in smartSort(days):
parent=None
for i,fname in enumerate([elem for elem in fnames if elem.startswith(day) and elem.endswith(".abf")]):
ID=os.path.splitext(fname)[0]
if len([x for x in fnames if x.startswith(ID)])-1:
parent=ID
if not parent in byCell:
byCell[parent]=[]
byCell[parent]=byCell[parent]+[fname]
return byCell |
def load_logs(optimizer, logs):
"""Load previous ...
"""
import json
if isinstance(logs, str):
logs = [logs]
for log in logs:
with open(log, "r") as j:
while True:
try:
iteration = next(j)
except StopIteration:
break
iteration = json.loads(iteration)
try:
optimizer.register(
params=iteration["params"],
target=iteration["target"],
)
except KeyError:
pass
return optimizer | Load previous ... | Below is the the instruction that describes the task:
### Input:
Load previous ...
### Response:
def load_logs(optimizer, logs):
"""Load previous ...
"""
import json
if isinstance(logs, str):
logs = [logs]
for log in logs:
with open(log, "r") as j:
while True:
try:
iteration = next(j)
except StopIteration:
break
iteration = json.loads(iteration)
try:
optimizer.register(
params=iteration["params"],
target=iteration["target"],
)
except KeyError:
pass
return optimizer |
def get_revoked(self):
"""
Return the revocations in this certificate revocation list.
These revocations will be provided by value, not by reference.
That means it's okay to mutate them: it won't affect this CRL.
:return: The revocations in this CRL.
:rtype: :class:`tuple` of :class:`Revocation`
"""
results = []
revoked_stack = _lib.X509_CRL_get_REVOKED(self._crl)
for i in range(_lib.sk_X509_REVOKED_num(revoked_stack)):
revoked = _lib.sk_X509_REVOKED_value(revoked_stack, i)
revoked_copy = _lib.Cryptography_X509_REVOKED_dup(revoked)
pyrev = Revoked.__new__(Revoked)
pyrev._revoked = _ffi.gc(revoked_copy, _lib.X509_REVOKED_free)
results.append(pyrev)
if results:
return tuple(results) | Return the revocations in this certificate revocation list.
These revocations will be provided by value, not by reference.
That means it's okay to mutate them: it won't affect this CRL.
:return: The revocations in this CRL.
:rtype: :class:`tuple` of :class:`Revocation` | Below is the the instruction that describes the task:
### Input:
Return the revocations in this certificate revocation list.
These revocations will be provided by value, not by reference.
That means it's okay to mutate them: it won't affect this CRL.
:return: The revocations in this CRL.
:rtype: :class:`tuple` of :class:`Revocation`
### Response:
def get_revoked(self):
"""
Return the revocations in this certificate revocation list.
These revocations will be provided by value, not by reference.
That means it's okay to mutate them: it won't affect this CRL.
:return: The revocations in this CRL.
:rtype: :class:`tuple` of :class:`Revocation`
"""
results = []
revoked_stack = _lib.X509_CRL_get_REVOKED(self._crl)
for i in range(_lib.sk_X509_REVOKED_num(revoked_stack)):
revoked = _lib.sk_X509_REVOKED_value(revoked_stack, i)
revoked_copy = _lib.Cryptography_X509_REVOKED_dup(revoked)
pyrev = Revoked.__new__(Revoked)
pyrev._revoked = _ffi.gc(revoked_copy, _lib.X509_REVOKED_free)
results.append(pyrev)
if results:
return tuple(results) |
def _InternalUnpackAny(msg):
"""Unpacks Any message and returns the unpacked message.
This internal method is different from public Any Unpack method which takes
the target message as argument. _InternalUnpackAny method does not have
target message type and need to find the message type in descriptor pool.
Args:
msg: An Any message to be unpacked.
Returns:
The unpacked message.
"""
# TODO(amauryfa): Don't use the factory of generated messages.
# To make Any work with custom factories, use the message factory of the
# parent message.
# pylint: disable=g-import-not-at-top
from google.protobuf import symbol_database
factory = symbol_database.Default()
type_url = msg.type_url
if not type_url:
return None
# TODO(haberman): For now we just strip the hostname. Better logic will be
# required.
type_name = type_url.split('/')[-1]
descriptor = factory.pool.FindMessageTypeByName(type_name)
if descriptor is None:
return None
message_class = factory.GetPrototype(descriptor)
message = message_class()
message.ParseFromString(msg.value)
return message | Unpacks Any message and returns the unpacked message.
This internal method is different from public Any Unpack method which takes
the target message as argument. _InternalUnpackAny method does not have
target message type and need to find the message type in descriptor pool.
Args:
msg: An Any message to be unpacked.
Returns:
The unpacked message. | Below is the instruction that describes the task:
### Input:
Unpacks Any message and returns the unpacked message.
This internal method is different from public Any Unpack method which takes
the target message as argument. _InternalUnpackAny method does not have
target message type and need to find the message type in descriptor pool.
Args:
msg: An Any message to be unpacked.
Returns:
The unpacked message.
### Response:
def _InternalUnpackAny(msg):
"""Unpacks Any message and returns the unpacked message.
This internal method is different from public Any Unpack method which takes
the target message as argument. _InternalUnpackAny method does not have
target message type and need to find the message type in descriptor pool.
Args:
msg: An Any message to be unpacked.
Returns:
The unpacked message.
"""
# TODO(amauryfa): Don't use the factory of generated messages.
# To make Any work with custom factories, use the message factory of the
# parent message.
# pylint: disable=g-import-not-at-top
from google.protobuf import symbol_database
factory = symbol_database.Default()
type_url = msg.type_url
if not type_url:
return None
# TODO(haberman): For now we just strip the hostname. Better logic will be
# required.
type_name = type_url.split('/')[-1]
descriptor = factory.pool.FindMessageTypeByName(type_name)
if descriptor is None:
return None
message_class = factory.GetPrototype(descriptor)
message = message_class()
message.ParseFromString(msg.value)
return message |
def fit_and_score_estimator(estimator, parameters, cv, X, y=None, scoring=None,
iid=True, n_jobs=1, verbose=1,
pre_dispatch='2*n_jobs'):
"""Fit and score an estimator with cross-validation
This function is basically a copy of sklearn's
model_selection._BaseSearchCV._fit(), which is the core of the GridSearchCV
fit() method. Unfortunately, that class does _not_ return the training
set scores, which we want to save in the database, and because of the
way it's written, you can't change it by subclassing or monkeypatching.
This function uses some undocumented internal sklearn APIs (non-public).
It was written against sklearn version 0.16.1. Prior Versions are likely
to fail due to changes in the design of cross_validation module.
Returns
-------
out : dict, with keys 'mean_test_score' 'test_scores', 'train_scores'
The scores on the training and test sets, as well as the mean test set
score.
"""
scorer = check_scoring(estimator, scoring=scoring)
n_samples = num_samples(X)
X, y = check_arrays(X, y, allow_lists=True, sparse_format='csr',
allow_nans=True)
if y is not None:
if len(y) != n_samples:
raise ValueError('Target variable (y) has a different number '
'of samples (%i) than data (X: %i samples)'
% (len(y), n_samples))
cv = check_cv(cv=cv, y=y, classifier=is_classifier(estimator))
out = Parallel(
n_jobs=n_jobs, verbose=verbose, pre_dispatch=pre_dispatch
)(
delayed(_fit_and_score)(clone(estimator), X, y, scorer,
train, test, verbose, parameters,
fit_params=None)
for train, test in cv.split(X, y))
assert len(out) == cv.n_splits
train_scores, test_scores = [], []
n_train_samples, n_test_samples = [], []
for test_score, n_test, train_score, n_train, _ in out:
train_scores.append(train_score)
test_scores.append(test_score)
n_test_samples.append(n_test)
n_train_samples.append(n_train)
train_scores, test_scores = map(list, check_arrays(train_scores,
test_scores,
warn_nans=True,
replace_nans=True))
if iid:
if verbose > 0 and is_msmbuilder_estimator(estimator):
print('[CV] Using MSMBuilder API n_samples averaging')
print('[CV] n_train_samples: %s' % str(n_train_samples))
print('[CV] n_test_samples: %s' % str(n_test_samples))
mean_test_score = np.average(test_scores, weights=n_test_samples)
mean_train_score = np.average(train_scores, weights=n_train_samples)
else:
mean_test_score = np.average(test_scores)
mean_train_score = np.average(train_scores)
grid_scores = {
'mean_test_score': mean_test_score, 'test_scores': test_scores,
'mean_train_score': mean_train_score, 'train_scores': train_scores,
'n_test_samples': n_test_samples, 'n_train_samples': n_train_samples}
return grid_scores | Fit and score an estimator with cross-validation
This function is basically a copy of sklearn's
model_selection._BaseSearchCV._fit(), which is the core of the GridSearchCV
fit() method. Unfortunately, that class does _not_ return the training
set scores, which we want to save in the database, and because of the
way it's written, you can't change it by subclassing or monkeypatching.
This function uses some undocumented internal sklearn APIs (non-public).
It was written against sklearn version 0.16.1. Prior Versions are likely
to fail due to changes in the design of cross_validation module.
Returns
-------
out : dict, with keys 'mean_test_score' 'test_scores', 'train_scores'
The scores on the training and test sets, as well as the mean test set
score. | Below is the instruction that describes the task:
### Input:
Fit and score an estimator with cross-validation
This function is basically a copy of sklearn's
model_selection._BaseSearchCV._fit(), which is the core of the GridSearchCV
fit() method. Unfortunately, that class does _not_ return the training
set scores, which we want to save in the database, and because of the
way it's written, you can't change it by subclassing or monkeypatching.
This function uses some undocumented internal sklearn APIs (non-public).
It was written against sklearn version 0.16.1. Prior Versions are likely
to fail due to changes in the design of cross_validation module.
Returns
-------
out : dict, with keys 'mean_test_score' 'test_scores', 'train_scores'
The scores on the training and test sets, as well as the mean test set
score.
### Response:
def fit_and_score_estimator(estimator, parameters, cv, X, y=None, scoring=None,
iid=True, n_jobs=1, verbose=1,
pre_dispatch='2*n_jobs'):
"""Fit and score an estimator with cross-validation
This function is basically a copy of sklearn's
model_selection._BaseSearchCV._fit(), which is the core of the GridSearchCV
fit() method. Unfortunately, that class does _not_ return the training
set scores, which we want to save in the database, and because of the
way it's written, you can't change it by subclassing or monkeypatching.
This function uses some undocumented internal sklearn APIs (non-public).
It was written against sklearn version 0.16.1. Prior Versions are likely
to fail due to changes in the design of cross_validation module.
Returns
-------
out : dict, with keys 'mean_test_score' 'test_scores', 'train_scores'
The scores on the training and test sets, as well as the mean test set
score.
"""
scorer = check_scoring(estimator, scoring=scoring)
n_samples = num_samples(X)
X, y = check_arrays(X, y, allow_lists=True, sparse_format='csr',
allow_nans=True)
if y is not None:
if len(y) != n_samples:
raise ValueError('Target variable (y) has a different number '
'of samples (%i) than data (X: %i samples)'
% (len(y), n_samples))
cv = check_cv(cv=cv, y=y, classifier=is_classifier(estimator))
out = Parallel(
n_jobs=n_jobs, verbose=verbose, pre_dispatch=pre_dispatch
)(
delayed(_fit_and_score)(clone(estimator), X, y, scorer,
train, test, verbose, parameters,
fit_params=None)
for train, test in cv.split(X, y))
assert len(out) == cv.n_splits
train_scores, test_scores = [], []
n_train_samples, n_test_samples = [], []
for test_score, n_test, train_score, n_train, _ in out:
train_scores.append(train_score)
test_scores.append(test_score)
n_test_samples.append(n_test)
n_train_samples.append(n_train)
train_scores, test_scores = map(list, check_arrays(train_scores,
test_scores,
warn_nans=True,
replace_nans=True))
if iid:
if verbose > 0 and is_msmbuilder_estimator(estimator):
print('[CV] Using MSMBuilder API n_samples averaging')
print('[CV] n_train_samples: %s' % str(n_train_samples))
print('[CV] n_test_samples: %s' % str(n_test_samples))
mean_test_score = np.average(test_scores, weights=n_test_samples)
mean_train_score = np.average(train_scores, weights=n_train_samples)
else:
mean_test_score = np.average(test_scores)
mean_train_score = np.average(train_scores)
grid_scores = {
'mean_test_score': mean_test_score, 'test_scores': test_scores,
'mean_train_score': mean_train_score, 'train_scores': train_scores,
'n_test_samples': n_test_samples, 'n_train_samples': n_train_samples}
return grid_scores |
def available():
'''
Return a list of all available kernel modules
CLI Example:
.. code-block:: bash
salt '*' kmod.available
'''
ret = []
for path in __salt__['file.find']('/boot/kernel', name='*.ko$'):
bpath = os.path.basename(path)
comps = bpath.split('.')
if 'ko' in comps:
# This is a kernel module, return it without the .ko extension
ret.append('.'.join(comps[:comps.index('ko')]))
return ret | Return a list of all available kernel modules
CLI Example:
.. code-block:: bash
salt '*' kmod.available | Below is the instruction that describes the task:
### Input:
Return a list of all available kernel modules
CLI Example:
.. code-block:: bash
salt '*' kmod.available
### Response:
def available():
'''
Return a list of all available kernel modules
CLI Example:
.. code-block:: bash
salt '*' kmod.available
'''
ret = []
for path in __salt__['file.find']('/boot/kernel', name='*.ko$'):
bpath = os.path.basename(path)
comps = bpath.split('.')
if 'ko' in comps:
# This is a kernel module, return it without the .ko extension
ret.append('.'.join(comps[:comps.index('ko')]))
return ret |
def set_metric(slug, value, category=None, expire=None, date=None):
"""Create/Increment a metric."""
get_r().set_metric(slug, value, category=category, expire=expire, date=date) | Create/Increment a metric. | Below is the the instruction that describes the task:
### Input:
Create/Increment a metric.
### Response:
def set_metric(slug, value, category=None, expire=None, date=None):
"""Create/Increment a metric."""
get_r().set_metric(slug, value, category=category, expire=expire, date=date) |
def _double_as_bytes(dval):
"Use struct.unpack to decode a double precision float into eight bytes"
tmp = list(struct.unpack('8B',struct.pack('d', dval)))
if not _big_endian:
tmp.reverse()
return tmp | Use struct.unpack to decode a double precision float into eight bytes | Below is the the instruction that describes the task:
### Input:
Use struct.unpack to decode a double precision float into eight bytes
### Response:
def _double_as_bytes(dval):
"Use struct.unpack to decode a double precision float into eight bytes"
tmp = list(struct.unpack('8B',struct.pack('d', dval)))
if not _big_endian:
tmp.reverse()
return tmp |
def routing(name, **kwargs):
"""
Show information about the requested routing group.
"""
ctx = Context(**kwargs)
ctx.execute_action('routing', **{
'name': name,
'locator': ctx.locator,
Show information about the requested routing group. | Below is the instruction that describes the task:
### Input:
Show information about the requested routing group.
### Response:
def routing(name, **kwargs):
"""
Show information about the requested routing group.
"""
ctx = Context(**kwargs)
ctx.execute_action('routing', **{
'name': name,
'locator': ctx.locator,
}) |
def ascending_sort_key(self):
"""
Sort protein sequences lexicographically by three criteria:
- number of unique supporting reads
- minimum mismatch versus a supporting reference transcript before variant
- minimum mismatch versus a supporting reference transcript after variant
- number of supporting reference transcripts
TODO: Add sort criterion:
- min number of reads covering each nucleotide of
the protein sequence >= 2
"""
return (
len(self.alt_reads_supporting_protein_sequence),
min(t.number_mismatches_before_variant for t in self.translations),
min(t.number_mismatches_after_variant for t in self.translations),
len(self.transcripts_supporting_protein_sequence)
) | Sort protein sequences lexicographically by three criteria:
- number of unique supporting reads
- minimum mismatch versus a supporting reference transcript before variant
- minimum mismatch versus a supporting reference transcript after variant
- number of supporting reference transcripts
TODO: Add sort criterion:
- min number of reads covering each nucleotide of
the protein sequence >= 2 | Below is the instruction that describes the task:
### Input:
Sort protein sequences lexicographically by three criteria:
- number of unique supporting reads
- minimum mismatch versus a supporting reference transcript before variant
- minimum mismatch versus a supporting reference transcript after variant
- number of supporting reference transcripts
TODO: Add sort criterion:
- min number of reads covering each nucleotide of
the protein sequence >= 2
### Response:
def ascending_sort_key(self):
"""
Sort protein sequences lexicographically by three criteria:
- number of unique supporting reads
- minimum mismatch versus a supporting reference transcript before variant
- minimum mismatch versus a supporting reference transcript after variant
- number of supporting reference transcripts
TODO: Add sort criterion:
- min number of reads covering each nucleotide of
the protein sequence >= 2
"""
return (
len(self.alt_reads_supporting_protein_sequence),
min(t.number_mismatches_before_variant for t in self.translations),
min(t.number_mismatches_after_variant for t in self.translations),
len(self.transcripts_supporting_protein_sequence)
) |
def requires(self):
"""
Returns the default workflow requirements in an ordered dictionary, which is updated with
the return value of the task's *workflow_requires* method.
"""
reqs = OrderedDict()
reqs.update(self.task.workflow_requires())
return reqs | Returns the default workflow requirements in an ordered dictionary, which is updated with
the return value of the task's *workflow_requires* method. | Below is the instruction that describes the task:
### Input:
Returns the default workflow requirements in an ordered dictionary, which is updated with
the return value of the task's *workflow_requires* method.
### Response:
def requires(self):
"""
Returns the default workflow requirements in an ordered dictionary, which is updated with
the return value of the task's *workflow_requires* method.
"""
reqs = OrderedDict()
reqs.update(self.task.workflow_requires())
return reqs |
def show(self):
"""Show the overfitting PDF summary."""
try:
if platform.system().lower().startswith('darwin'):
subprocess.call(['open', self.pdf])
elif os.name == 'nt':
os.startfile(self.pdf)
elif os.name == 'posix':
subprocess.call(['xdg-open', self.pdf])
else:
raise IOError("")
except IOError:
log.info("Unable to open the pdf. Try opening it manually:")
log.info(self.pdf) | Show the overfitting PDF summary. | Below is the the instruction that describes the task:
### Input:
Show the overfitting PDF summary.
### Response:
def show(self):
"""Show the overfitting PDF summary."""
try:
if platform.system().lower().startswith('darwin'):
subprocess.call(['open', self.pdf])
elif os.name == 'nt':
os.startfile(self.pdf)
elif os.name == 'posix':
subprocess.call(['xdg-open', self.pdf])
else:
raise IOError("")
except IOError:
log.info("Unable to open the pdf. Try opening it manually:")
log.info(self.pdf) |
async def run_asgi(self):
"""
Wrapper around the ASGI callable, handling exceptions and unexpected
termination states.
"""
try:
result = await self.app(self.scope, self.asgi_receive, self.asgi_send)
except BaseException as exc:
self.closed_event.set()
msg = "Exception in ASGI application\n"
self.logger.error(msg, exc_info=exc)
if not self.handshake_started_event.is_set():
self.send_500_response()
else:
await self.handshake_completed_event.wait()
self.transport.close()
else:
self.closed_event.set()
if not self.handshake_started_event.is_set():
msg = "ASGI callable returned without sending handshake."
self.logger.error(msg)
self.send_500_response()
self.transport.close()
elif result is not None:
msg = "ASGI callable should return None, but returned '%s'."
self.logger.error(msg, result)
await self.handshake_completed_event.wait()
self.transport.close() | Wrapper around the ASGI callable, handling exceptions and unexpected
termination states. | Below is the instruction that describes the task:
### Input:
Wrapper around the ASGI callable, handling exceptions and unexpected
termination states.
### Response:
async def run_asgi(self):
"""
Wrapper around the ASGI callable, handling exceptions and unexpected
termination states.
"""
try:
result = await self.app(self.scope, self.asgi_receive, self.asgi_send)
except BaseException as exc:
self.closed_event.set()
msg = "Exception in ASGI application\n"
self.logger.error(msg, exc_info=exc)
if not self.handshake_started_event.is_set():
self.send_500_response()
else:
await self.handshake_completed_event.wait()
self.transport.close()
else:
self.closed_event.set()
if not self.handshake_started_event.is_set():
msg = "ASGI callable returned without sending handshake."
self.logger.error(msg)
self.send_500_response()
self.transport.close()
elif result is not None:
msg = "ASGI callable should return None, but returned '%s'."
self.logger.error(msg, result)
await self.handshake_completed_event.wait()
self.transport.close() |
def fill_categorical_na(df, nan_cat='NA'):
"""Fill categoricals with 'NA', possibly creating a new category,
and fill other NaNa with blanks """
for col in df.columns[df.isna().any()].tolist():
if df[col].dtype.name != 'category':
# If not categorical, fill with a blank, which creates and
# empty cell in CSV.
df[col] = df[col].fillna('')
else:
try:
df[col].cat.add_categories([nan_cat], inplace=True)
except ValueError:
pass
df[col] = df[col].fillna(nan_cat)
return df | Fill categoricals with 'NA', possibly creating a new category,
and fill other NaNa with blanks | Below is the instruction that describes the task:
### Input:
Fill categoricals with 'NA', possibly creating a new category,
and fill other NaNa with blanks
### Response:
def fill_categorical_na(df, nan_cat='NA'):
"""Fill categoricals with 'NA', possibly creating a new category,
and fill other NaNa with blanks """
for col in df.columns[df.isna().any()].tolist():
if df[col].dtype.name != 'category':
# If not categorical, fill with a blank, which creates and
# empty cell in CSV.
df[col] = df[col].fillna('')
else:
try:
df[col].cat.add_categories([nan_cat], inplace=True)
except ValueError:
pass
df[col] = df[col].fillna(nan_cat)
return df |
def tags(self):
""" Yields a list of all the token tags as they appeared when the word was parsed.
For example: ["was", "VBD", "B-VP", "O", "VP-1", "A1", "be"]
"""
# See also. Sentence.__repr__().
ch, I,O,B = self.chunk, INSIDE+"-", OUTSIDE, BEGIN+"-"
tags = [OUTSIDE for i in range(len(self.sentence.token))]
for i, tag in enumerate(self.sentence.token): # Default: [WORD, POS, CHUNK, PNP, RELATION, ANCHOR, LEMMA]
if tag == WORD:
tags[i] = encode_entities(self.string)
elif tag == POS and self.type:
tags[i] = self.type
elif tag == CHUNK and ch and ch.type:
tags[i] = (self == ch[0] and B or I) + ch.type
elif tag == PNP and self.pnp:
tags[i] = (self == self.pnp[0] and B or I) + "PNP"
elif tag == REL and ch and len(ch.relations) > 0:
tags[i] = ["-".join([str(x) for x in [ch.type]+list(reversed(r)) if x]) for r in ch.relations]
tags[i] = "*".join(tags[i])
elif tag == ANCHOR and ch:
tags[i] = ch.anchor_id or OUTSIDE
elif tag == LEMMA:
tags[i] = encode_entities(self.lemma or "")
elif tag in self.custom_tags:
tags[i] = self.custom_tags.get(tag) or OUTSIDE
return tags | Yields a list of all the token tags as they appeared when the word was parsed.
For example: ["was", "VBD", "B-VP", "O", "VP-1", "A1", "be"] | Below is the instruction that describes the task:
### Input:
Yields a list of all the token tags as they appeared when the word was parsed.
For example: ["was", "VBD", "B-VP", "O", "VP-1", "A1", "be"]
### Response:
def tags(self):
""" Yields a list of all the token tags as they appeared when the word was parsed.
For example: ["was", "VBD", "B-VP", "O", "VP-1", "A1", "be"]
"""
# See also. Sentence.__repr__().
ch, I,O,B = self.chunk, INSIDE+"-", OUTSIDE, BEGIN+"-"
tags = [OUTSIDE for i in range(len(self.sentence.token))]
for i, tag in enumerate(self.sentence.token): # Default: [WORD, POS, CHUNK, PNP, RELATION, ANCHOR, LEMMA]
if tag == WORD:
tags[i] = encode_entities(self.string)
elif tag == POS and self.type:
tags[i] = self.type
elif tag == CHUNK and ch and ch.type:
tags[i] = (self == ch[0] and B or I) + ch.type
elif tag == PNP and self.pnp:
tags[i] = (self == self.pnp[0] and B or I) + "PNP"
elif tag == REL and ch and len(ch.relations) > 0:
tags[i] = ["-".join([str(x) for x in [ch.type]+list(reversed(r)) if x]) for r in ch.relations]
tags[i] = "*".join(tags[i])
elif tag == ANCHOR and ch:
tags[i] = ch.anchor_id or OUTSIDE
elif tag == LEMMA:
tags[i] = encode_entities(self.lemma or "")
elif tag in self.custom_tags:
tags[i] = self.custom_tags.get(tag) or OUTSIDE
return tags |
def _get_jacobian_hessian_strategy(self):
"""
Figure out how to calculate the jacobian and hessian. Will return a
tuple describing how best to calculate the jacobian and hessian,
repectively. If None, it should be calculated using the available
analytical method.
:return: tuple of jacobian_method, hessian_method
"""
if self.jacobian is not None and self.hessian is None:
jacobian = None
hessian = 'cs'
elif self.jacobian is None and self.hessian is None:
jacobian = 'cs'
hessian = soBFGS(exception_strategy='damp_update')
else:
jacobian = None
hessian = None
return jacobian, hessian | Figure out how to calculate the jacobian and hessian. Will return a
tuple describing how best to calculate the jacobian and hessian,
repectively. If None, it should be calculated using the available
analytical method.
:return: tuple of jacobian_method, hessian_method | Below is the instruction that describes the task:
### Input:
Figure out how to calculate the jacobian and hessian. Will return a
tuple describing how best to calculate the jacobian and hessian,
repectively. If None, it should be calculated using the available
analytical method.
:return: tuple of jacobian_method, hessian_method
### Response:
def _get_jacobian_hessian_strategy(self):
"""
Figure out how to calculate the jacobian and hessian. Will return a
tuple describing how best to calculate the jacobian and hessian,
repectively. If None, it should be calculated using the available
analytical method.
:return: tuple of jacobian_method, hessian_method
"""
if self.jacobian is not None and self.hessian is None:
jacobian = None
hessian = 'cs'
elif self.jacobian is None and self.hessian is None:
jacobian = 'cs'
hessian = soBFGS(exception_strategy='damp_update')
else:
jacobian = None
hessian = None
return jacobian, hessian |
def format(self, **kwargs):
"""Apply formatting."""
attrs = self._attrs.copy()
attrs.update({'width': self._width, 'height': self._height})
attrs.update(kwargs)
return Map(self._features, **attrs) | Apply formatting. | Below is the the instruction that describes the task:
### Input:
Apply formatting.
### Response:
def format(self, **kwargs):
"""Apply formatting."""
attrs = self._attrs.copy()
attrs.update({'width': self._width, 'height': self._height})
attrs.update(kwargs)
return Map(self._features, **attrs) |
def reflect_runtime_member(self, name):
"""Reflect 'name' using ONLY runtime reflection.
You most likely want to use ScopeStack.reflect instead.
Returns:
Type of 'name', or protocol.AnyType.
"""
for scope in reversed(self.scopes):
try:
return structured.reflect_runtime_member(scope, name)
except (NotImplementedError, KeyError, AttributeError):
continue
return protocol.AnyType | Reflect 'name' using ONLY runtime reflection.
You most likely want to use ScopeStack.reflect instead.
Returns:
Type of 'name', or protocol.AnyType. | Below is the instruction that describes the task:
### Input:
Reflect 'name' using ONLY runtime reflection.
You most likely want to use ScopeStack.reflect instead.
Returns:
Type of 'name', or protocol.AnyType.
### Response:
def reflect_runtime_member(self, name):
"""Reflect 'name' using ONLY runtime reflection.
You most likely want to use ScopeStack.reflect instead.
Returns:
Type of 'name', or protocol.AnyType.
"""
for scope in reversed(self.scopes):
try:
return structured.reflect_runtime_member(scope, name)
except (NotImplementedError, KeyError, AttributeError):
continue
return protocol.AnyType |
def install(self, install_options, global_options=()):
"""Install everything in this set (after having downloaded and unpacked the packages)"""
to_install = [r for r in self.requirements.values()
if not r.satisfied_by]
if to_install:
logger.notify('Installing collected packages: %s' % ', '.join([req.name for req in to_install]))
logger.indent += 2
try:
for requirement in to_install:
if requirement.conflicts_with:
logger.notify('Found existing installation: %s'
% requirement.conflicts_with)
logger.indent += 2
try:
requirement.uninstall(auto_confirm=True)
finally:
logger.indent -= 2
try:
requirement.install(install_options, global_options)
except:
# if install did not succeed, rollback previous uninstall
if requirement.conflicts_with and not requirement.install_succeeded:
requirement.rollback_uninstall()
raise
else:
if requirement.conflicts_with and requirement.install_succeeded:
requirement.commit_uninstall()
requirement.remove_temporary_source()
finally:
logger.indent -= 2
self.successfully_installed = to_install | Install everything in this set (after having downloaded and unpacked the packages) | Below is the the instruction that describes the task:
### Input:
Install everything in this set (after having downloaded and unpacked the packages)
### Response:
def install(self, install_options, global_options=()):
"""Install everything in this set (after having downloaded and unpacked the packages)"""
to_install = [r for r in self.requirements.values()
if not r.satisfied_by]
if to_install:
logger.notify('Installing collected packages: %s' % ', '.join([req.name for req in to_install]))
logger.indent += 2
try:
for requirement in to_install:
if requirement.conflicts_with:
logger.notify('Found existing installation: %s'
% requirement.conflicts_with)
logger.indent += 2
try:
requirement.uninstall(auto_confirm=True)
finally:
logger.indent -= 2
try:
requirement.install(install_options, global_options)
except:
# if install did not succeed, rollback previous uninstall
if requirement.conflicts_with and not requirement.install_succeeded:
requirement.rollback_uninstall()
raise
else:
if requirement.conflicts_with and requirement.install_succeeded:
requirement.commit_uninstall()
requirement.remove_temporary_source()
finally:
logger.indent -= 2
self.successfully_installed = to_install |
def sys_save_screenshot(name: Optional[str] = None) -> None:
"""Save a screenshot to a file.
By default this will automatically save screenshots in the working
directory.
The automatic names are formatted as screenshotNNN.png. For example:
screenshot000.png, screenshot001.png, etc. Whichever is available first.
Args:
file Optional[AnyStr]: File path to save screenshot.
"""
lib.TCOD_sys_save_screenshot(
_bytes(name) if name is not None else ffi.NULL
) | Save a screenshot to a file.
By default this will automatically save screenshots in the working
directory.
The automatic names are formatted as screenshotNNN.png. For example:
screenshot000.png, screenshot001.png, etc. Whichever is available first.
Args:
file Optional[AnyStr]: File path to save screenshot. | Below is the instruction that describes the task:
### Input:
Save a screenshot to a file.
By default this will automatically save screenshots in the working
directory.
The automatic names are formatted as screenshotNNN.png. For example:
screenshot000.png, screenshot001.png, etc. Whichever is available first.
Args:
file Optional[AnyStr]: File path to save screenshot.
### Response:
def sys_save_screenshot(name: Optional[str] = None) -> None:
"""Save a screenshot to a file.
By default this will automatically save screenshots in the working
directory.
The automatic names are formatted as screenshotNNN.png. For example:
screenshot000.png, screenshot001.png, etc. Whichever is available first.
Args:
file Optional[AnyStr]: File path to save screenshot.
"""
lib.TCOD_sys_save_screenshot(
_bytes(name) if name is not None else ffi.NULL
) |
def makePalette(color1, color2, N, hsv=True):
"""
Generate N colors starting from `color1` to `color2`
by linear interpolation HSV in or RGB spaces.
:param int N: number of output colors.
:param color1: first rgb color.
:param color2: second rgb color.
:param bool hsv: if `False`, interpolation is calculated in RGB space.
.. hint:: Example: |colorpalette.py|_
"""
if hsv:
color1 = rgb2hsv(color1)
color2 = rgb2hsv(color2)
c1 = np.array(getColor(color1))
c2 = np.array(getColor(color2))
cols = []
for f in np.linspace(0, 1, N - 1, endpoint=True):
c = c1 * (1 - f) + c2 * f
if hsv:
c = np.array(hsv2rgb(c))
cols.append(c)
return cols | Generate N colors starting from `color1` to `color2`
by linear interpolation HSV in or RGB spaces.
:param int N: number of output colors.
:param color1: first rgb color.
:param color2: second rgb color.
:param bool hsv: if `False`, interpolation is calculated in RGB space.
.. hint:: Example: |colorpalette.py|_ | Below is the instruction that describes the task:
### Input:
Generate N colors starting from `color1` to `color2`
by linear interpolation HSV in or RGB spaces.
:param int N: number of output colors.
:param color1: first rgb color.
:param color2: second rgb color.
:param bool hsv: if `False`, interpolation is calculated in RGB space.
.. hint:: Example: |colorpalette.py|_
### Response:
def makePalette(color1, color2, N, hsv=True):
"""
Generate N colors starting from `color1` to `color2`
by linear interpolation HSV in or RGB spaces.
:param int N: number of output colors.
:param color1: first rgb color.
:param color2: second rgb color.
:param bool hsv: if `False`, interpolation is calculated in RGB space.
.. hint:: Example: |colorpalette.py|_
"""
if hsv:
color1 = rgb2hsv(color1)
color2 = rgb2hsv(color2)
c1 = np.array(getColor(color1))
c2 = np.array(getColor(color2))
cols = []
for f in np.linspace(0, 1, N - 1, endpoint=True):
c = c1 * (1 - f) + c2 * f
if hsv:
c = np.array(hsv2rgb(c))
cols.append(c)
return cols |
def get_hdu(uri, cutout=None):
"""Get a at the given uri from VOSpace, possibly doing a cutout.
If the cutout is flips the image then we also must flip the datasec keywords. Also, we must offset the
datasec to reflect the cutout area being used.
@param uri: The URI in VOSpace of the image to HDU to retrieve.
@param cutout: A CADC data service CUTOUT paramter to be used when retrieving the observation.
@return: fits.HDU
"""
try:
# the filename is based on the Simple FITS images file.
filename = os.path.basename(uri)
if os.access(filename, os.F_OK) and cutout is None:
logger.debug("File already on disk: {}".format(filename))
hdu_list = fits.open(filename, scale_back=True)
hdu_list.verify('silentfix+ignore')
else:
logger.debug("Pulling: {}{} from VOSpace".format(uri, cutout))
fpt = tempfile.NamedTemporaryFile(suffix='.fits')
cutout = cutout is not None and cutout or ""
copy(uri+cutout, fpt.name)
fpt.seek(0, 2)
fpt.seek(0)
logger.debug("Read from vospace completed. Building fits object.")
hdu_list = fits.open(fpt, scale_back=False)
hdu_list.verify('silentfix+ignore')
logger.debug("Got image from vospace")
try:
hdu_list[0].header['DATASEC'] = reset_datasec(cutout, hdu_list[0].header['DATASEC'],
hdu_list[0].header['NAXIS1'],
hdu_list[0].header['NAXIS2'])
except Exception as e:
logging.debug("error converting datasec: {}".format(str(e)))
for hdu in hdu_list:
logging.debug("Adding converter to {}".format(hdu))
hdu.converter = CoordinateConverter(0, 0)
try:
hdu.wcs = WCS(hdu.header)
except Exception as ex:
logger.error("Failed trying to initialize the WCS: {}".format(ex))
except Exception as ex:
raise ex
return hdu_list | Get a at the given uri from VOSpace, possibly doing a cutout.
If the cutout is flips the image then we also must flip the datasec keywords. Also, we must offset the
datasec to reflect the cutout area being used.
@param uri: The URI in VOSpace of the image to HDU to retrieve.
@param cutout: A CADC data service CUTOUT paramter to be used when retrieving the observation.
@return: fits.HDU | Below is the the instruction that describes the task:
### Input:
Get a at the given uri from VOSpace, possibly doing a cutout.
If the cutout is flips the image then we also must flip the datasec keywords. Also, we must offset the
datasec to reflect the cutout area being used.
@param uri: The URI in VOSpace of the image to HDU to retrieve.
@param cutout: A CADC data service CUTOUT paramter to be used when retrieving the observation.
@return: fits.HDU
### Response:
def get_hdu(uri, cutout=None):
"""Get a at the given uri from VOSpace, possibly doing a cutout.
If the cutout is flips the image then we also must flip the datasec keywords. Also, we must offset the
datasec to reflect the cutout area being used.
@param uri: The URI in VOSpace of the image to HDU to retrieve.
@param cutout: A CADC data service CUTOUT paramter to be used when retrieving the observation.
@return: fits.HDU
"""
try:
# the filename is based on the Simple FITS images file.
filename = os.path.basename(uri)
if os.access(filename, os.F_OK) and cutout is None:
logger.debug("File already on disk: {}".format(filename))
hdu_list = fits.open(filename, scale_back=True)
hdu_list.verify('silentfix+ignore')
else:
logger.debug("Pulling: {}{} from VOSpace".format(uri, cutout))
fpt = tempfile.NamedTemporaryFile(suffix='.fits')
cutout = cutout is not None and cutout or ""
copy(uri+cutout, fpt.name)
fpt.seek(0, 2)
fpt.seek(0)
logger.debug("Read from vospace completed. Building fits object.")
hdu_list = fits.open(fpt, scale_back=False)
hdu_list.verify('silentfix+ignore')
logger.debug("Got image from vospace")
try:
hdu_list[0].header['DATASEC'] = reset_datasec(cutout, hdu_list[0].header['DATASEC'],
hdu_list[0].header['NAXIS1'],
hdu_list[0].header['NAXIS2'])
except Exception as e:
logging.debug("error converting datasec: {}".format(str(e)))
for hdu in hdu_list:
logging.debug("Adding converter to {}".format(hdu))
hdu.converter = CoordinateConverter(0, 0)
try:
hdu.wcs = WCS(hdu.header)
except Exception as ex:
logger.error("Failed trying to initialize the WCS: {}".format(ex))
except Exception as ex:
raise ex
return hdu_list |
def delete(self, deviceId, measurementId):
"""
Deletes a stored measurement.
:param deviceId: the device to measure.
:param measurementId: the name of the measurement.
:return: 200 if it was deleted, 400 if no such measurement (or device).
"""
record = self.measurements.get(deviceId)
if record is not None:
popped = record.pop(measurementId, None)
return popped, 200 if popped else 400
return None, 400 | Deletes a stored measurement.
:param deviceId: the device to measure.
:param measurementId: the name of the measurement.
:return: 200 if it was deleted, 400 if no such measurement (or device). | Below is the the instruction that describes the task:
### Input:
Deletes a stored measurement.
:param deviceId: the device to measure.
:param measurementId: the name of the measurement.
:return: 200 if it was deleted, 400 if no such measurement (or device).
### Response:
def delete(self, deviceId, measurementId):
"""
Deletes a stored measurement.
:param deviceId: the device to measure.
:param measurementId: the name of the measurement.
:return: 200 if it was deleted, 400 if no such measurement (or device).
"""
record = self.measurements.get(deviceId)
if record is not None:
popped = record.pop(measurementId, None)
return popped, 200 if popped else 400
return None, 400 |
def xmltreefromfile(filename):
"""Internal function to read an XML file"""
try:
return ElementTree.parse(filename, ElementTree.XMLParser(collect_ids=False))
except TypeError:
return ElementTree.parse(filename, ElementTree.XMLParser()) | Internal function to read an XML file | Below is the the instruction that describes the task:
### Input:
Internal function to read an XML file
### Response:
def xmltreefromfile(filename):
"""Internal function to read an XML file"""
try:
return ElementTree.parse(filename, ElementTree.XMLParser(collect_ids=False))
except TypeError:
return ElementTree.parse(filename, ElementTree.XMLParser()) |
def to_netcdf(self, path=None, mode='w', format=None, group=None,
engine=None, encoding=None, unlimited_dims=None,
compute=True):
"""Write dataset contents to a netCDF file.
Parameters
----------
path : str, Path or file-like object, optional
Path to which to save this dataset. File-like objects are only
supported by the scipy engine. If no path is provided, this
function returns the resulting netCDF file as bytes; in this case,
we need to use scipy, which does not support netCDF version 4 (the
default format becomes NETCDF3_64BIT).
mode : {'w', 'a'}, optional
Write ('w') or append ('a') mode. If mode='w', any existing file at
this location will be overwritten. If mode='a', existing variables
will be overwritten.
format : {'NETCDF4', 'NETCDF4_CLASSIC', 'NETCDF3_64BIT',
'NETCDF3_CLASSIC'}, optional
File format for the resulting netCDF file:
* NETCDF4: Data is stored in an HDF5 file, using netCDF4 API
features.
* NETCDF4_CLASSIC: Data is stored in an HDF5 file, using only
netCDF 3 compatible API features.
* NETCDF3_64BIT: 64-bit offset version of the netCDF 3 file format,
which fully supports 2+ GB files, but is only compatible with
clients linked against netCDF version 3.6.0 or later.
* NETCDF3_CLASSIC: The classic netCDF 3 file format. It does not
handle 2+ GB files very well.
All formats are supported by the netCDF4-python library.
scipy.io.netcdf only supports the last two formats.
The default format is NETCDF4 if you are saving a file to disk and
have the netCDF4-python library available. Otherwise, xarray falls
back to using scipy to write netCDF files and defaults to the
NETCDF3_64BIT format (scipy does not support netCDF4).
group : str, optional
Path to the netCDF4 group in the given file to open (only works for
format='NETCDF4'). The group(s) will be created if necessary.
engine : {'netcdf4', 'scipy', 'h5netcdf'}, optional
Engine to use when writing netCDF files. If not provided, the
default engine is chosen based on available dependencies, with a
preference for 'netcdf4' if writing to a file on disk.
encoding : dict, optional
Nested dictionary with variable names as keys and dictionaries of
variable specific encodings as values, e.g.,
``{'my_variable': {'dtype': 'int16', 'scale_factor': 0.1,
'zlib': True}, ...}``
The `h5netcdf` engine supports both the NetCDF4-style compression
encoding parameters ``{'zlib': True, 'complevel': 9}`` and the h5py
ones ``{'compression': 'gzip', 'compression_opts': 9}``.
This allows using any compression plugin installed in the HDF5
library, e.g. LZF.
unlimited_dims : sequence of str, optional
Dimension(s) that should be serialized as unlimited dimensions.
By default, no dimensions are treated as unlimited dimensions.
Note that unlimited_dims may also be set via
``dataset.encoding['unlimited_dims']``.
compute: boolean
If true compute immediately, otherwise return a
``dask.delayed.Delayed`` object that can be computed later.
"""
if encoding is None:
encoding = {}
from ..backends.api import to_netcdf
return to_netcdf(self, path, mode, format=format, group=group,
engine=engine, encoding=encoding,
unlimited_dims=unlimited_dims,
compute=compute) | Write dataset contents to a netCDF file.
Parameters
----------
path : str, Path or file-like object, optional
Path to which to save this dataset. File-like objects are only
supported by the scipy engine. If no path is provided, this
function returns the resulting netCDF file as bytes; in this case,
we need to use scipy, which does not support netCDF version 4 (the
default format becomes NETCDF3_64BIT).
mode : {'w', 'a'}, optional
Write ('w') or append ('a') mode. If mode='w', any existing file at
this location will be overwritten. If mode='a', existing variables
will be overwritten.
format : {'NETCDF4', 'NETCDF4_CLASSIC', 'NETCDF3_64BIT',
'NETCDF3_CLASSIC'}, optional
File format for the resulting netCDF file:
* NETCDF4: Data is stored in an HDF5 file, using netCDF4 API
features.
* NETCDF4_CLASSIC: Data is stored in an HDF5 file, using only
netCDF 3 compatible API features.
* NETCDF3_64BIT: 64-bit offset version of the netCDF 3 file format,
which fully supports 2+ GB files, but is only compatible with
clients linked against netCDF version 3.6.0 or later.
* NETCDF3_CLASSIC: The classic netCDF 3 file format. It does not
handle 2+ GB files very well.
All formats are supported by the netCDF4-python library.
scipy.io.netcdf only supports the last two formats.
The default format is NETCDF4 if you are saving a file to disk and
have the netCDF4-python library available. Otherwise, xarray falls
back to using scipy to write netCDF files and defaults to the
NETCDF3_64BIT format (scipy does not support netCDF4).
group : str, optional
Path to the netCDF4 group in the given file to open (only works for
format='NETCDF4'). The group(s) will be created if necessary.
engine : {'netcdf4', 'scipy', 'h5netcdf'}, optional
Engine to use when writing netCDF files. If not provided, the
default engine is chosen based on available dependencies, with a
preference for 'netcdf4' if writing to a file on disk.
encoding : dict, optional
Nested dictionary with variable names as keys and dictionaries of
variable specific encodings as values, e.g.,
``{'my_variable': {'dtype': 'int16', 'scale_factor': 0.1,
'zlib': True}, ...}``
The `h5netcdf` engine supports both the NetCDF4-style compression
encoding parameters ``{'zlib': True, 'complevel': 9}`` and the h5py
ones ``{'compression': 'gzip', 'compression_opts': 9}``.
This allows using any compression plugin installed in the HDF5
library, e.g. LZF.
unlimited_dims : sequence of str, optional
Dimension(s) that should be serialized as unlimited dimensions.
By default, no dimensions are treated as unlimited dimensions.
Note that unlimited_dims may also be set via
``dataset.encoding['unlimited_dims']``.
compute: boolean
If true compute immediately, otherwise return a
``dask.delayed.Delayed`` object that can be computed later. | Below is the the instruction that describes the task:
### Input:
Write dataset contents to a netCDF file.
Parameters
----------
path : str, Path or file-like object, optional
Path to which to save this dataset. File-like objects are only
supported by the scipy engine. If no path is provided, this
function returns the resulting netCDF file as bytes; in this case,
we need to use scipy, which does not support netCDF version 4 (the
default format becomes NETCDF3_64BIT).
mode : {'w', 'a'}, optional
Write ('w') or append ('a') mode. If mode='w', any existing file at
this location will be overwritten. If mode='a', existing variables
will be overwritten.
format : {'NETCDF4', 'NETCDF4_CLASSIC', 'NETCDF3_64BIT',
'NETCDF3_CLASSIC'}, optional
File format for the resulting netCDF file:
* NETCDF4: Data is stored in an HDF5 file, using netCDF4 API
features.
* NETCDF4_CLASSIC: Data is stored in an HDF5 file, using only
netCDF 3 compatible API features.
* NETCDF3_64BIT: 64-bit offset version of the netCDF 3 file format,
which fully supports 2+ GB files, but is only compatible with
clients linked against netCDF version 3.6.0 or later.
* NETCDF3_CLASSIC: The classic netCDF 3 file format. It does not
handle 2+ GB files very well.
All formats are supported by the netCDF4-python library.
scipy.io.netcdf only supports the last two formats.
The default format is NETCDF4 if you are saving a file to disk and
have the netCDF4-python library available. Otherwise, xarray falls
back to using scipy to write netCDF files and defaults to the
NETCDF3_64BIT format (scipy does not support netCDF4).
group : str, optional
Path to the netCDF4 group in the given file to open (only works for
format='NETCDF4'). The group(s) will be created if necessary.
engine : {'netcdf4', 'scipy', 'h5netcdf'}, optional
Engine to use when writing netCDF files. If not provided, the
default engine is chosen based on available dependencies, with a
preference for 'netcdf4' if writing to a file on disk.
encoding : dict, optional
Nested dictionary with variable names as keys and dictionaries of
variable specific encodings as values, e.g.,
``{'my_variable': {'dtype': 'int16', 'scale_factor': 0.1,
'zlib': True}, ...}``
The `h5netcdf` engine supports both the NetCDF4-style compression
encoding parameters ``{'zlib': True, 'complevel': 9}`` and the h5py
ones ``{'compression': 'gzip', 'compression_opts': 9}``.
This allows using any compression plugin installed in the HDF5
library, e.g. LZF.
unlimited_dims : sequence of str, optional
Dimension(s) that should be serialized as unlimited dimensions.
By default, no dimensions are treated as unlimited dimensions.
Note that unlimited_dims may also be set via
``dataset.encoding['unlimited_dims']``.
compute: boolean
If true compute immediately, otherwise return a
``dask.delayed.Delayed`` object that can be computed later.
### Response:
def to_netcdf(self, path=None, mode='w', format=None, group=None,
engine=None, encoding=None, unlimited_dims=None,
compute=True):
"""Write dataset contents to a netCDF file.
Parameters
----------
path : str, Path or file-like object, optional
Path to which to save this dataset. File-like objects are only
supported by the scipy engine. If no path is provided, this
function returns the resulting netCDF file as bytes; in this case,
we need to use scipy, which does not support netCDF version 4 (the
default format becomes NETCDF3_64BIT).
mode : {'w', 'a'}, optional
Write ('w') or append ('a') mode. If mode='w', any existing file at
this location will be overwritten. If mode='a', existing variables
will be overwritten.
format : {'NETCDF4', 'NETCDF4_CLASSIC', 'NETCDF3_64BIT',
'NETCDF3_CLASSIC'}, optional
File format for the resulting netCDF file:
* NETCDF4: Data is stored in an HDF5 file, using netCDF4 API
features.
* NETCDF4_CLASSIC: Data is stored in an HDF5 file, using only
netCDF 3 compatible API features.
* NETCDF3_64BIT: 64-bit offset version of the netCDF 3 file format,
which fully supports 2+ GB files, but is only compatible with
clients linked against netCDF version 3.6.0 or later.
* NETCDF3_CLASSIC: The classic netCDF 3 file format. It does not
handle 2+ GB files very well.
All formats are supported by the netCDF4-python library.
scipy.io.netcdf only supports the last two formats.
The default format is NETCDF4 if you are saving a file to disk and
have the netCDF4-python library available. Otherwise, xarray falls
back to using scipy to write netCDF files and defaults to the
NETCDF3_64BIT format (scipy does not support netCDF4).
group : str, optional
Path to the netCDF4 group in the given file to open (only works for
format='NETCDF4'). The group(s) will be created if necessary.
engine : {'netcdf4', 'scipy', 'h5netcdf'}, optional
Engine to use when writing netCDF files. If not provided, the
default engine is chosen based on available dependencies, with a
preference for 'netcdf4' if writing to a file on disk.
encoding : dict, optional
Nested dictionary with variable names as keys and dictionaries of
variable specific encodings as values, e.g.,
``{'my_variable': {'dtype': 'int16', 'scale_factor': 0.1,
'zlib': True}, ...}``
The `h5netcdf` engine supports both the NetCDF4-style compression
encoding parameters ``{'zlib': True, 'complevel': 9}`` and the h5py
ones ``{'compression': 'gzip', 'compression_opts': 9}``.
This allows using any compression plugin installed in the HDF5
library, e.g. LZF.
unlimited_dims : sequence of str, optional
Dimension(s) that should be serialized as unlimited dimensions.
By default, no dimensions are treated as unlimited dimensions.
Note that unlimited_dims may also be set via
``dataset.encoding['unlimited_dims']``.
compute: boolean
If true compute immediately, otherwise return a
``dask.delayed.Delayed`` object that can be computed later.
"""
if encoding is None:
encoding = {}
from ..backends.api import to_netcdf
return to_netcdf(self, path, mode, format=format, group=group,
engine=engine, encoding=encoding,
unlimited_dims=unlimited_dims,
compute=compute) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.