code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def _itodq(self, n):
"""Convert long to dotquad or hextet."""
if self.v == 4:
return '.'.join(map(str, [
(n >> 24) & 0xff,
(n >> 16) & 0xff,
(n >> 8) & 0xff,
n & 0xff,
]))
else:
n = '%032x' % n
return ':'.join(n[4 * x:4 * x + 4] for x in range(0, 8)) | Convert long to dotquad or hextet. | Below is the the instruction that describes the task:
### Input:
Convert long to dotquad or hextet.
### Response:
def _itodq(self, n):
"""Convert long to dotquad or hextet."""
if self.v == 4:
return '.'.join(map(str, [
(n >> 24) & 0xff,
(n >> 16) & 0xff,
(n >> 8) & 0xff,
n & 0xff,
]))
else:
n = '%032x' % n
return ':'.join(n[4 * x:4 * x + 4] for x in range(0, 8)) |
def start_output(self):
    """Open the sitemap XML output: emit the XML preamble and the <urlset> root tag."""
    super(SitemapXmlLogger, self).start_output()
    self.xml_start_output()
    # the namespace is mandated by the sitemaps.org 0.9 schema
    self.xml_starttag(u'urlset',
                      {u"xmlns": u"http://www.sitemaps.org/schemas/sitemap/0.9"})
    self.flush()
### Input:
Write start of checking info as xml comment.
### Response:
def start_output (self):
"""Write start of checking info as xml comment."""
super(SitemapXmlLogger, self).start_output()
self.xml_start_output()
attrs = {u"xmlns": u"http://www.sitemaps.org/schemas/sitemap/0.9"}
self.xml_starttag(u'urlset', attrs)
self.flush() |
def merging_cli(debug=False):
"""
simple commandline interface of the merging module.
This function is called when you use the ``discoursegraphs`` application
directly on the command line.
"""
parser = argparse.ArgumentParser()
parser.add_argument('-t', '--tiger-file',
help='TigerXML (syntax) file to be merged')
parser.add_argument('-r', '--rst-file',
help='RS3 (rhetorical structure) file to be merged')
parser.add_argument('-a', '--anaphoricity-file',
help='anaphoricity file to be merged')
parser.add_argument('-c', '--conano-file',
help='conano file to be merged')
parser.add_argument('-m', '--mmax-file',
help='MMAX2 file to be merged')
parser.add_argument(
'-o', '--output-format', default='dot',
help=('output format: brackets, brat, dot, pickle, geoff, gexf, graphml, '
'neo4j, exmaralda, conll, paula, no-output'))
parser.add_argument('output_file', nargs='?', default=sys.stdout)
args = parser.parse_args(sys.argv[1:])
for filepath in (args.tiger_file, args.rst_file, args.anaphoricity_file,
args.conano_file):
if filepath: # if it was specified on the command line
assert os.path.isfile(filepath), \
"File '{}' doesn't exist".format(filepath)
# create an empty document graph. merge it with other graphs later on.
discourse_docgraph = DiscourseDocumentGraph()
if args.tiger_file:
from discoursegraphs.readwrite.tiger import TigerDocumentGraph
tiger_docgraph = TigerDocumentGraph(args.tiger_file)
discourse_docgraph.merge_graphs(tiger_docgraph)
if args.rst_file:
rst_graph = dg.read_rs3(args.rst_file)
discourse_docgraph.merge_graphs(rst_graph)
if args.anaphoricity_file:
from discoursegraphs.readwrite import AnaphoraDocumentGraph
anaphora_graph = AnaphoraDocumentGraph(args.anaphoricity_file)
discourse_docgraph.merge_graphs(anaphora_graph)
# the anaphora doc graph only contains trivial edges from its root
# node.
try:
discourse_docgraph.remove_node('anaphoricity:root_node')
except networkx.NetworkXError as e: # ignore if the node doesn't exist
pass
if args.conano_file:
from discoursegraphs.readwrite import ConanoDocumentGraph
conano_graph = ConanoDocumentGraph(args.conano_file)
discourse_docgraph.merge_graphs(conano_graph)
if args.mmax_file:
from discoursegraphs.readwrite import MMAXDocumentGraph
mmax_graph = MMAXDocumentGraph(args.mmax_file)
discourse_docgraph.merge_graphs(mmax_graph)
if isinstance(args.output_file, str): # if we're not piping to stdout ...
# we need abspath to handle files in the current directory
path_to_output_file = \
os.path.dirname(os.path.abspath(args.output_file))
if not os.path.isdir(path_to_output_file):
create_dir(path_to_output_file)
if args.output_format == 'dot':
write_dot(discourse_docgraph, args.output_file)
elif args.output_format == 'brat':
dg.write_brat(discourse_docgraph, args.output_file)
elif args.output_format == 'brackets':
dg.write_brackets(discourse_docgraph, args.output_file)
elif args.output_format == 'pickle':
import cPickle as pickle
with open(args.output_file, 'wb') as pickle_file:
pickle.dump(discourse_docgraph, pickle_file)
elif args.output_format in ('geoff', 'neo4j'):
from discoursegraphs.readwrite.neo4j import write_geoff
write_geoff(discourse_docgraph, args.output_file)
print '' # this is just cosmetic for stdout
elif args.output_format == 'gexf':
dg.write_gexf(discourse_docgraph, args.output_file)
elif args.output_format == 'graphml':
dg.write_graphml(discourse_docgraph, args.output_file)
elif args.output_format == 'exmaralda':
from discoursegraphs.readwrite.exmaralda import write_exb
write_exb(discourse_docgraph, args.output_file)
elif args.output_format == 'conll':
from discoursegraphs.readwrite.conll import write_conll
write_conll(discourse_docgraph, args.output_file)
elif args.output_format == 'paula':
from discoursegraphs.readwrite.paulaxml.paula import write_paula
write_paula(discourse_docgraph, args.output_file)
elif args.output_format == 'no-output':
pass # just testing if the merging works
else:
raise ValueError(
"Unsupported output format: {}".format(args.output_format))
if debug:
print "Merged successfully: ", args.tiger_file | simple commandline interface of the merging module.
This function is called when you use the ``discoursegraphs`` application
directly on the command line. | Below is the instruction that describes the task:
### Input:
simple commandline interface of the merging module.
This function is called when you use the ``discoursegraphs`` application
directly on the command line.
### Response:
def merging_cli(debug=False):
"""
simple commandline interface of the merging module.
This function is called when you use the ``discoursegraphs`` application
directly on the command line.
"""
parser = argparse.ArgumentParser()
parser.add_argument('-t', '--tiger-file',
help='TigerXML (syntax) file to be merged')
parser.add_argument('-r', '--rst-file',
help='RS3 (rhetorical structure) file to be merged')
parser.add_argument('-a', '--anaphoricity-file',
help='anaphoricity file to be merged')
parser.add_argument('-c', '--conano-file',
help='conano file to be merged')
parser.add_argument('-m', '--mmax-file',
help='MMAX2 file to be merged')
parser.add_argument(
'-o', '--output-format', default='dot',
help=('output format: brackets, brat, dot, pickle, geoff, gexf, graphml, '
'neo4j, exmaralda, conll, paula, no-output'))
parser.add_argument('output_file', nargs='?', default=sys.stdout)
args = parser.parse_args(sys.argv[1:])
for filepath in (args.tiger_file, args.rst_file, args.anaphoricity_file,
args.conano_file):
if filepath: # if it was specified on the command line
assert os.path.isfile(filepath), \
"File '{}' doesn't exist".format(filepath)
# create an empty document graph. merge it with other graphs later on.
discourse_docgraph = DiscourseDocumentGraph()
if args.tiger_file:
from discoursegraphs.readwrite.tiger import TigerDocumentGraph
tiger_docgraph = TigerDocumentGraph(args.tiger_file)
discourse_docgraph.merge_graphs(tiger_docgraph)
if args.rst_file:
rst_graph = dg.read_rs3(args.rst_file)
discourse_docgraph.merge_graphs(rst_graph)
if args.anaphoricity_file:
from discoursegraphs.readwrite import AnaphoraDocumentGraph
anaphora_graph = AnaphoraDocumentGraph(args.anaphoricity_file)
discourse_docgraph.merge_graphs(anaphora_graph)
# the anaphora doc graph only contains trivial edges from its root
# node.
try:
discourse_docgraph.remove_node('anaphoricity:root_node')
except networkx.NetworkXError as e: # ignore if the node doesn't exist
pass
if args.conano_file:
from discoursegraphs.readwrite import ConanoDocumentGraph
conano_graph = ConanoDocumentGraph(args.conano_file)
discourse_docgraph.merge_graphs(conano_graph)
if args.mmax_file:
from discoursegraphs.readwrite import MMAXDocumentGraph
mmax_graph = MMAXDocumentGraph(args.mmax_file)
discourse_docgraph.merge_graphs(mmax_graph)
if isinstance(args.output_file, str): # if we're not piping to stdout ...
# we need abspath to handle files in the current directory
path_to_output_file = \
os.path.dirname(os.path.abspath(args.output_file))
if not os.path.isdir(path_to_output_file):
create_dir(path_to_output_file)
if args.output_format == 'dot':
write_dot(discourse_docgraph, args.output_file)
elif args.output_format == 'brat':
dg.write_brat(discourse_docgraph, args.output_file)
elif args.output_format == 'brackets':
dg.write_brackets(discourse_docgraph, args.output_file)
elif args.output_format == 'pickle':
import cPickle as pickle
with open(args.output_file, 'wb') as pickle_file:
pickle.dump(discourse_docgraph, pickle_file)
elif args.output_format in ('geoff', 'neo4j'):
from discoursegraphs.readwrite.neo4j import write_geoff
write_geoff(discourse_docgraph, args.output_file)
print '' # this is just cosmetic for stdout
elif args.output_format == 'gexf':
dg.write_gexf(discourse_docgraph, args.output_file)
elif args.output_format == 'graphml':
dg.write_graphml(discourse_docgraph, args.output_file)
elif args.output_format == 'exmaralda':
from discoursegraphs.readwrite.exmaralda import write_exb
write_exb(discourse_docgraph, args.output_file)
elif args.output_format == 'conll':
from discoursegraphs.readwrite.conll import write_conll
write_conll(discourse_docgraph, args.output_file)
elif args.output_format == 'paula':
from discoursegraphs.readwrite.paulaxml.paula import write_paula
write_paula(discourse_docgraph, args.output_file)
elif args.output_format == 'no-output':
pass # just testing if the merging works
else:
raise ValueError(
"Unsupported output format: {}".format(args.output_format))
if debug:
print "Merged successfully: ", args.tiger_file |
def perform_exit():
    """perform_exit
    Handling at-the-exit events
    ---------------------------
    Shut down every registered worker instance at interpreter exit.
    Workers may be mid-request, sleeping or blocked; each shutdown is
    attempted independently so one failure does not stop the others.
    Tested on python 3 with Celery and with single processes.
    """
    if SPLUNK_DEBUG:
        print('{} -------------------------------'.format(rnow()))
        print('{} splunkpub: atexit.register - start'.format(rnow()))
    all_ok = True
    for instance in instances:
        try:
            if SPLUNK_DEBUG:
                print('{} - shutting down instance={} - start'.format(
                    rnow(), instance))
            instance.shutdown()
            if SPLUNK_DEBUG:
                print('{} - shutting down instance={} - done'.format(
                    rnow(), instance))
        except Exception as e:
            all_ok = False
            if SPLUNK_DEBUG:
                print(
                    '{} - shutting down instance={} '
                    '- hit ex={} during shutdown'.format(
                        rnow(), instance, e))
    if not all_ok and SPLUNK_DEBUG:
        print('{} Failed exiting'.format(rnow()))
    if SPLUNK_DEBUG:
        print('{} splunkpub: atexit.register - done'.format(rnow()))
        print('{} -------------------------------'.format(rnow()))
Handling at-the-exit events
---------------------------
This will cleanup each worker process which
could be in the middle of a request/sleep/block
action. This has been tested on python 3 with
Celery and single processes. | Below is the instruction that describes the task:
### Input:
perform_exit
Handling at-the-exit events
---------------------------
This will cleanup each worker process which
could be in the middle of a request/sleep/block
action. This has been tested on python 3 with
Celery and single processes.
### Response:
def perform_exit():
"""perform_exit
Handling at-the-exit events
---------------------------
This will cleanup each worker process which
could be in the middle of a request/sleep/block
action. This has been tested on python 3 with
Celery and single processes.
"""
if SPLUNK_DEBUG:
print('{} -------------------------------'.format(
rnow()))
print('{} splunkpub: atexit.register - start'.format(
rnow()))
worked = True
for instance in instances:
try:
if SPLUNK_DEBUG:
print('{} - shutting down instance={} - start'.format(
rnow(),
instance))
instance.shutdown()
if SPLUNK_DEBUG:
print('{} - shutting down instance={} - done'.format(
rnow(),
instance))
except Exception as e:
worked = False
if SPLUNK_DEBUG:
print(
'{} - shutting down instance={} '
'- hit ex={} during shutdown'.format(
rnow(),
instance,
e))
# end of try/ex
if not worked:
if SPLUNK_DEBUG:
print('{} Failed exiting'.format(
rnow()))
if SPLUNK_DEBUG:
print('{} splunkpub: atexit.register - done'.format(
rnow()))
print('{} -------------------------------'.format(
rnow())) |
def _populate_profile_flags_from_dn_regex(self, profile):
"""
Populate the given profile object flags from AUTH_LDAP_PROFILE_FLAGS_BY_DN_REGEX.
Returns True if the profile was modified
"""
save_profile = True
for field, regex in self.settings.PROFILE_FLAGS_BY_DN_REGEX.items():
field_value = False
if re.search(regex, self._get_user_dn(), re.IGNORECASE):
field_value = True
setattr(profile, field, field_value)
save_profile = True
return save_profile | Populate the given profile object flags from AUTH_LDAP_PROFILE_FLAGS_BY_DN_REGEX.
Returns True if the profile was modified | Below is the instruction that describes the task:
### Input:
Populate the given profile object flags from AUTH_LDAP_PROFILE_FLAGS_BY_DN_REGEX.
Returns True if the profile was modified
### Response:
def _populate_profile_flags_from_dn_regex(self, profile):
"""
Populate the given profile object flags from AUTH_LDAP_PROFILE_FLAGS_BY_DN_REGEX.
Returns True if the profile was modified
"""
save_profile = True
for field, regex in self.settings.PROFILE_FLAGS_BY_DN_REGEX.items():
field_value = False
if re.search(regex, self._get_user_dn(), re.IGNORECASE):
field_value = True
setattr(profile, field, field_value)
save_profile = True
return save_profile |
def set_expire(name, expire):
    '''
    Sets the time at which the account expires (in seconds since the UNIX
    epoch). See ``man 8 usermod`` on NetBSD and OpenBSD or ``man 8 pw`` on
    FreeBSD.
    A value of ``0`` sets the account to never expire.
    CLI Example:
    .. code-block:: bash
        salt '*' shadow.set_expire username 1419980400
    '''
    pre_info = info(name)
    # nothing to do if the account already expires at the requested time
    if expire == pre_info['expire']:
        return True
    if __grains__['kernel'] == 'FreeBSD':
        cmd = ['pw', 'user', 'mod', name, '-e', expire]
    else:
        cmd = ['usermod', '-e', expire, name]
    __salt__['cmd.run'](cmd, python_shell=False)
    post_info = info(name)
    if post_info['expire'] != pre_info['expire']:
        return post_info['expire'] == expire
    # fix: previously fell through and returned None when the underlying
    # command failed to change the expiry; report the failure explicitly
    return False
epoch). See ``man 8 usermod`` on NetBSD and OpenBSD or ``man 8 pw`` on
FreeBSD.
A value of ``0`` sets the account to never expire.
CLI Example:
.. code-block:: bash
salt '*' shadow.set_expire username 1419980400 | Below is the instruction that describes the task:
### Input:
Sets the time at which the account expires (in seconds since the UNIX
epoch). See ``man 8 usermod`` on NetBSD and OpenBSD or ``man 8 pw`` on
FreeBSD.
A value of ``0`` sets the account to never expire.
CLI Example:
.. code-block:: bash
salt '*' shadow.set_expire username 1419980400
### Response:
def set_expire(name, expire):
'''
Sets the time at which the account expires (in seconds since the UNIX
epoch). See ``man 8 usermod`` on NetBSD and OpenBSD or ``man 8 pw`` on
FreeBSD.
A value of ``0`` sets the account to never expire.
CLI Example:
.. code-block:: bash
salt '*' shadow.set_expire username 1419980400
'''
pre_info = info(name)
if expire == pre_info['expire']:
return True
if __grains__['kernel'] == 'FreeBSD':
cmd = ['pw', 'user', 'mod', name, '-e', expire]
else:
cmd = ['usermod', '-e', expire, name]
__salt__['cmd.run'](cmd, python_shell=False)
post_info = info(name)
if post_info['expire'] != pre_info['expire']:
return post_info['expire'] == expire |
def disable(self, msgid, scope="package", line=None, ignore_unknown=False):
    """Suppress future output of the message with the given id.

    The status change is recorded, and the message is tracked as a
    managed message for the given line.
    """
    self._set_msg_status(
        msgid,
        enable=False,
        scope=scope,
        line=line,
        ignore_unknown=ignore_unknown,
    )
    self._register_by_id_managed_msg(msgid, line)
### Input:
don't output message of the given id
### Response:
def disable(self, msgid, scope="package", line=None, ignore_unknown=False):
"""don't output message of the given id"""
self._set_msg_status(
msgid, enable=False, scope=scope, line=line, ignore_unknown=ignore_unknown
)
self._register_by_id_managed_msg(msgid, line) |
def brew_recipe(recipe_name):
    """Returns a pipeline string from a recipe name.
    Parameters
    ----------
    recipe_name : str
        Name of the recipe. Must match the name attribute in one of the classes
        defined in :mod:`flowcraft.generator.recipes`
    Returns
    -------
    str
        Pipeline string ready for parsing and processing by flowcraft engine
    """
    # walk every module in the recipes subpackage, importing each one with
    # its fully qualified (prefixed) name
    prefix = "{}.".format(recipes.__name__)
    for importer, module_name, _ in pkgutil.iter_modules(recipes.__path__, prefix):
        module = importer.find_module(module_name).load_module(module_name)
        # only class objects defined/exposed in the module are recipe candidates
        candidates = (obj for obj in module.__dict__.values()
                      if isinstance(obj, type))
        for candidate in candidates:
            # instantiate so the name attribute (possibly set in __init__)
            # can be inspected
            recipe = candidate()
            if getattr(recipe, "name", None) == recipe_name:
                return recipe.brew()
    # no recipe matched anywhere: report and bail out
    logger.error(
        colored_print("Recipe name '{}' does not exist.".format(recipe_name))
    )
    sys.exit(1)
Parameters
----------
recipe_name : str
Name of the recipe. Must match the name attribute in one of the classes
defined in :mod:`flowcraft.generator.recipes`
Returns
-------
str
Pipeline string ready for parsing and processing by flowcraft engine | Below is the instruction that describes the task:
### Input:
Returns a pipeline string from a recipe name.
Parameters
----------
recipe_name : str
Name of the recipe. Must match the name attribute in one of the classes
defined in :mod:`flowcraft.generator.recipes`
Returns
-------
str
Pipeline string ready for parsing and processing by flowcraft engine
### Response:
def brew_recipe(recipe_name):
"""Returns a pipeline string from a recipe name.
Parameters
----------
recipe_name : str
Name of the recipe. Must match the name attribute in one of the classes
defined in :mod:`flowcraft.generator.recipes`
Returns
-------
str
Pipeline string ready for parsing and processing by flowcraft engine
"""
# This will iterate over all modules included in the recipes subpackage
# It will return the import class and the module name, algon with the
# correct prefix
prefix = "{}.".format(recipes.__name__)
for importer, modname, _ in pkgutil.iter_modules(recipes.__path__, prefix):
# Import the current module
_module = importer.find_module(modname).load_module(modname)
# Fetch all available classes in module
_recipe_classes = [cls for cls in _module.__dict__.values() if
isinstance(cls, type)]
# Iterate over each Recipe class, and check for a match with the
# provided recipe name.
for cls in _recipe_classes:
# Create instance of class to allow fetching the name attribute
recipe_cls = cls()
if getattr(recipe_cls, "name", None) == recipe_name:
return recipe_cls.brew()
logger.error(
colored_print("Recipe name '{}' does not exist.".format(recipe_name))
)
sys.exit(1) |
def getBuyerInfo(self, auction_id, buyer_id):
    """Fetch contact details for one buyer of the given auction."""
    # TODO: add price from getBids
    response = self.__ask__(
        'doGetPostBuyData',
        itemsArray=self.ArrayOfLong([auction_id]),
        buyerFilterArray=self.ArrayOfLong([buyer_id]))
    user = response[0]['usersPostBuyData']['item'][0]['userData']
    # ids and phone are passed through verbatim
    buyer = {'allegro_aid': auction_id,
             'allegro_uid': user['userId'],
             'phone': user['userPhone']}
    # the remaining text fields go through magicDecode
    decoded_fields = (('allegro_login', 'userLogin'),
                      ('name', 'userFirstName'),
                      ('surname', 'userLastName'),
                      ('company', 'userCompany'),
                      ('postcode', 'userPostcode'),
                      ('city', 'userCity'),
                      ('address', 'userAddress'),
                      ('email', 'userEmail'))
    for target_key, source_key in decoded_fields:
        buyer[target_key] = magicDecode(user[source_key])
    return buyer
### Input:
Return buyer info.
### Response:
def getBuyerInfo(self, auction_id, buyer_id):
"""Return buyer info."""
# TODO: add price from getBids
rc = self.__ask__('doGetPostBuyData', itemsArray=self.ArrayOfLong([auction_id]), buyerFilterArray=self.ArrayOfLong([buyer_id]))
rc = rc[0]['usersPostBuyData']['item'][0]['userData']
return {'allegro_aid': auction_id,
'allegro_uid': rc['userId'],
'allegro_login': magicDecode(rc['userLogin']),
'name': magicDecode(rc['userFirstName']),
'surname': magicDecode(rc['userLastName']),
'company': magicDecode(rc['userCompany']),
'postcode': magicDecode(rc['userPostcode']),
'city': magicDecode(rc['userCity']),
'address': magicDecode(rc['userAddress']),
'email': magicDecode(rc['userEmail']),
'phone': rc['userPhone']} |
def reset_script(self):
    """Discard any partially received script and reset the bridge state."""
    bridge = self.remote_bridge
    bridge.status = BRIDGE_STATUS.IDLE
    bridge.error = 0
    bridge.parsed_script = None
    # drop whatever script bytes were buffered so far
    self._device.script = bytearray()
    return [0]
### Input:
Clear any partially received script.
### Response:
def reset_script(self):
"""Clear any partially received script."""
self.remote_bridge.status = BRIDGE_STATUS.IDLE
self.remote_bridge.error = 0
self.remote_bridge.parsed_script = None
self._device.script = bytearray()
return [0] |
def rate(s=switchpoint, e=early_mean, l=late_mean):
    '''Concatenate Poisson means: the early mean before the switchpoint,
    the late mean from the switchpoint onwards.'''
    means = empty(len(disasters_array))
    means[:s] = e
    means[s:] = l
    return means
### Input:
Concatenate Poisson means
### Response:
def rate(s=switchpoint, e=early_mean, l=late_mean):
''' Concatenate Poisson means '''
out = empty(len(disasters_array))
out[:s] = e
out[s:] = l
return out |
def change_dict_keys(self, data_dict, prefix):
    """
    Prefixes 'L_'/'R_' to the collection keys
    :param data_dict: dictionary which is to be altered
    :type data_dict: dict
    :param prefix: prefix to be attached before every key
    :type prefix: string
    :return dict_: dict
    """
    # deep-copy first so the caller's dictionary (and its values) are
    # left untouched by the returned, re-keyed dictionary
    copied = copy.deepcopy(data_dict)
    return {prefix + str(key): copied.pop(key) for key in list(copied)}
:param data_dict: dictionary which is to be altered
:type data_dict: dict
:param prefix: prefix to be attached before every key
:type prefix: string
:return dict_: dict | Below is the instruction that describes the task:
### Input:
Prefixes 'L_'/'R_' to the collection keys
:param data_dict: dictionary which is to be altered
:type data_dict: dict
:param prefix: prefix to be attached before every key
:type prefix: string
:return dict_: dict
### Response:
def change_dict_keys(self, data_dict, prefix):
"""
Prefixes 'L_'/'R_' to the collection keys
:param data_dict: dictionary which is to be altered
:type data_dict: dict
:param prefix: prefix to be attached before every key
:type prefix: string
:return dict_: dict
"""
keys = data_dict.keys()
dummy_dict = copy.deepcopy(data_dict)
changed_dict = {}
for key in keys:
changed_dict[prefix + str(key)] = dummy_dict.pop(key)
return changed_dict |
def _get_total_read_size(self):
    """Return how many bytes of event data to consume in one read.

    ``read_size`` (when set) is interpreted as a multiple of EVENT_SIZE;
    otherwise a single event's worth of bytes is read.
    """
    multiplier = self.read_size if self.read_size else 1
    return EVENT_SIZE * multiplier
### Input:
How much event data to process at once.
### Response:
def _get_total_read_size(self):
"""How much event data to process at once."""
if self.read_size:
read_size = EVENT_SIZE * self.read_size
else:
read_size = EVENT_SIZE
return read_size |
def _start_managers(self):
    """
    (internal) starts input and output pool queue manager threads.
    """
    self._task_queue = _Weave(self._tasks, self.stride)
    # here we determine the size of the maximum memory consumption
    self._semaphore_value = (self.buffer or (len(self._tasks) * self.stride))
    self._pool_semaphore = Semaphore(self._semaphore_value)
    # start the pool getter thread
    self._pool_getter = Thread(
        target=self._pool_get,
        args=(self._getout, self._task_results, self._next_available,
              self._task_next_lock, self._next_skipped, len(self._tasks),
              len(self.pool), id(self)))
    # fix: "deamon" was a typo that only created an unused attribute; the
    # manager threads are meant to be daemonic so they don't block exit
    self._pool_getter.daemon = True
    self._pool_getter.start()
    # start the pool putter thread
    self._pool_putter = Thread(
        target=self._pool_put,
        args=(self._pool_semaphore, self._task_queue, self._putin,
              len(self.pool), id(self), self._stopping.isSet))
    self._pool_putter.daemon = True
    self._pool_putter.start()
### Input:
(internal) starts input and output pool queue manager threads.
### Response:
def _start_managers(self):
"""
(internal) starts input and output pool queue manager threads.
"""
self._task_queue = _Weave(self._tasks, self.stride)
# here we determine the size of the maximum memory consumption
self._semaphore_value = (self.buffer or (len(self._tasks) * self.stride))
self._pool_semaphore = Semaphore(self._semaphore_value)
# start the pool getter thread
self._pool_getter = Thread(target=self._pool_get, args=(self._getout, \
self._task_results, self._next_available, \
self._task_next_lock, self._next_skipped, len(self._tasks), \
len(self.pool), id(self)))
self._pool_getter.deamon = True
self._pool_getter.start()
# start the pool putter thread
self._pool_putter = Thread(target=self._pool_put, args=\
(self._pool_semaphore, self._task_queue, self._putin, \
len(self.pool), id(self), self._stopping.isSet))
self._pool_putter.deamon = True
self._pool_putter.start() |
def almost_eq(arr1, arr2, thresh=1E-11, ret_error=False):
    """Elementwise check that two values/arrays agree to within *thresh*.

    Returns a boolean array (or scalar); when *ret_error* is True, the
    absolute error is returned alongside it.
    """
    abs_error = np.abs(arr1 - arr2)
    within_thresh = abs_error < thresh
    return (within_thresh, abs_error) if ret_error else within_thresh
### Input:
checks if floating point number are equal to a threshold
### Response:
def almost_eq(arr1, arr2, thresh=1E-11, ret_error=False):
""" checks if floating point number are equal to a threshold
"""
error = np.abs(arr1 - arr2)
passed = error < thresh
if ret_error:
return passed, error
return passed |
async def unset_lock(self, resource, lock_identifier):
    """
    Unlock this instance
    :param resource: redis key to set
    :param lock_identifier: unique id of lock
    :raises: LockError if the lock resource acquired with different lock_identifier
    """
    try:
        with await self.connect() as redis:
            await redis.eval(
                self.unset_lock_script,
                keys=[resource],
                args=[lock_identifier]
            )
    except aioredis.errors.ReplyError as exc:  # script fault
        self.log.debug('Can not unset lock "%s" on %s',
                       resource, repr(self))
        raise LockError('Can not unset lock') from exc
    except (aioredis.errors.RedisError, OSError) as exc:
        self.log.error('Can not unset lock "%s" on %s: %s',
                       resource, repr(self), repr(exc))
        # fix: the re-raised message previously said "Can not set lock",
        # a copy-paste from the set-lock path
        raise LockError('Can not unset lock') from exc
    except asyncio.CancelledError:
        # cancellation must propagate; just note it for debugging
        self.log.debug('Lock "%s" unset is cancelled on %s',
                       resource, repr(self))
        raise
    except Exception:
        # unexpected failure: log with traceback and re-raise unchanged
        self.log.exception('Can not unset lock "%s" on %s',
                           resource, repr(self))
        raise
    else:
        self.log.debug('Lock "%s" is unset on %s', resource, repr(self))
:param resource: redis key to set
:param lock_identifier: uniquie id of lock
:raises: LockError if the lock resource acquired with different lock_identifier | Below is the instruction that describes the task:
### Input:
Unlock this instance
:param resource: redis key to set
:param lock_identifier: uniquie id of lock
:raises: LockError if the lock resource acquired with different lock_identifier
### Response:
async def unset_lock(self, resource, lock_identifier):
"""
Unlock this instance
:param resource: redis key to set
:param lock_identifier: uniquie id of lock
:raises: LockError if the lock resource acquired with different lock_identifier
"""
try:
with await self.connect() as redis:
await redis.eval(
self.unset_lock_script,
keys=[resource],
args=[lock_identifier]
)
except aioredis.errors.ReplyError as exc: # script fault
self.log.debug('Can not unset lock "%s" on %s',
resource, repr(self))
raise LockError('Can not unset lock') from exc
except (aioredis.errors.RedisError, OSError) as exc:
self.log.error('Can not unset lock "%s" on %s: %s',
resource, repr(self), repr(exc))
raise LockError('Can not set lock') from exc
except asyncio.CancelledError:
self.log.debug('Lock "%s" unset is cancelled on %s',
resource, repr(self))
raise
except Exception as exc:
self.log.exception('Can not unset lock "%s" on %s',
resource, repr(self))
raise
else:
self.log.debug('Lock "%s" is unset on %s', resource, repr(self)) |
def credentials(login=None):
    """
    Find user credentials. We should have parsed the command line for a
    ``--login`` option. Credentials are looked up in environment variables
    next, and the user is prompted interactively as a last resort.
    """
    login = login or environ.get("PROF_LOGIN")
    password = environ.get("PROF_PASSWORD")
    try:
        if not login:
            login = input("login? ")
            print("\t\tDon't get prompted everytime. Store your login in the ``~/.profrc`` config file")
        if not password:
            password = getpass.getpass("pass for {0} ? ".format(login))
    except KeyboardInterrupt:
        # a Ctrl-C at either prompt aborts cleanly
        exit(0)
    return (login, password)
We will try to find credentials in environment variables.
We will ask user if we cannot find any in arguments nor environment | Below is the instruction that describes the task:
### Input:
Find user credentials. We should have parsed the command line for a ``--login`` option.
We will try to find credentials in environment variables.
We will ask user if we cannot find any in arguments nor environment
### Response:
def credentials(login=None):
"""
Find user credentials. We should have parsed the command line for a ``--login`` option.
We will try to find credentials in environment variables.
We will ask user if we cannot find any in arguments nor environment
"""
if not login:
login = environ.get("PROF_LOGIN")
password = environ.get("PROF_PASSWORD")
if not login:
try:
login = input("login? ")
print("\t\tDon't get prompted everytime. Store your login in the ``~/.profrc`` config file")
except KeyboardInterrupt:
exit(0)
if not password:
try:
password = getpass.getpass("pass for {0} ? ".format(login))
except KeyboardInterrupt:
exit(0)
return (login, password) |
def _read_audio_data(self, file_path):
    """
    Read audio data from file.
    :rtype: tuple (True, (duration, sample_rate, codec, data)) or (False, None) on exception
    """
    try:
        self.log(u"Reading audio data...")
        # the TTS output is known to be PCM16 mono WAVE at the correct
        # sample rate, so the samples can be read straight from the file
        # without an intermediate conversion through ffmpeg
        audio = AudioFile(
            file_path=file_path,
            file_format=self.OUTPUT_AUDIO_FORMAT,
            rconf=self.rconf,
            logger=self.logger
        )
        audio.read_samples_from_file()
        self.log([u"Duration of '%s': %f", file_path, audio.audio_length])
        self.log(u"Reading audio data... done")
        payload = (audio.audio_length,
                   audio.audio_sample_rate,
                   audio.audio_format,
                   audio.audio_samples)
        return (True, payload)
    except (AudioFileUnsupportedFormatError, OSError) as exc:
        self.log_exc(u"An unexpected error occurred while reading audio data", exc, True, None)
        return (False, None)
:rtype: tuple (True, (duration, sample_rate, codec, data)) or (False, None) on exception | Below is the instruction that describes the task:
### Input:
Read audio data from file.
:rtype: tuple (True, (duration, sample_rate, codec, data)) or (False, None) on exception
### Response:
def _read_audio_data(self, file_path):
"""
Read audio data from file.
:rtype: tuple (True, (duration, sample_rate, codec, data)) or (False, None) on exception
"""
try:
self.log(u"Reading audio data...")
# if we know the TTS outputs to PCM16 mono WAVE
# with the correct sample rate,
# we can read samples directly from it,
# without an intermediate conversion through ffmpeg
audio_file = AudioFile(
file_path=file_path,
file_format=self.OUTPUT_AUDIO_FORMAT,
rconf=self.rconf,
logger=self.logger
)
audio_file.read_samples_from_file()
self.log([u"Duration of '%s': %f", file_path, audio_file.audio_length])
self.log(u"Reading audio data... done")
return (True, (
audio_file.audio_length,
audio_file.audio_sample_rate,
audio_file.audio_format,
audio_file.audio_samples
))
except (AudioFileUnsupportedFormatError, OSError) as exc:
self.log_exc(u"An unexpected error occurred while reading audio data", exc, True, None)
return (False, None) |
def upload(self, pkg_path):
    """
    Upload a python script (or kecpkg) to the service.

    .. versionadded:: 1.13

    :param pkg_path: path to the python script or kecpkg to upload.
    :type pkg_path: basestring
    :raises APIError: if the python package could not be uploaded.
    :raises OSError: if the python package could not be located on disk.
    """
    # guard clause: refuse early if the package is not on disk
    if not os.path.exists(pkg_path):
        raise OSError("Could not locate python package to upload in '{}'".format(pkg_path))
    self._upload(pkg_path=pkg_path)
.. versionadded:: 1.13
:param pkg_path: path to the python script or kecpkg to upload.
:type pkg_path: basestring
:raises APIError: if the python package could not be uploaded.
:raises OSError: if the python package could not be located on disk. | Below is the the instruction that describes the task:
### Input:
Upload a python script (or kecpkg) to the service.
.. versionadded:: 1.13
:param pkg_path: path to the python script or kecpkg to upload.
:type pkg_path: basestring
:raises APIError: if the python package could not be uploaded.
:raises OSError: if the python package could not be located on disk.
### Response:
def upload(self, pkg_path):
"""
Upload a python script (or kecpkg) to the service.
.. versionadded:: 1.13
:param pkg_path: path to the python script or kecpkg to upload.
:type pkg_path: basestring
:raises APIError: if the python package could not be uploaded.
:raises OSError: if the python package could not be located on disk.
"""
if os.path.exists(pkg_path):
self._upload(pkg_path=pkg_path)
else:
raise OSError("Could not locate python package to upload in '{}'".format(pkg_path)) |
def get(cls, expression):
    """
    Retrieve the model instance matching the given expression.

    If the number of matching results is not equal to one, then
    a ``ValueError`` will be raised.

    :param expression: A boolean expression to filter by.
    :returns: The matching :py:class:`Model` instance.
    :raises: ``ValueError`` if result set size is not 1.
    """
    matches = Executor(cls.__database__).execute(expression)
    n_found = len(matches)
    if n_found != 1:
        raise ValueError('Got %s results, expected 1.' % n_found)
    return cls.load(matches._first_or_any(), convert_key=False)
If the number of matching results is not equal to one, then
a ``ValueError`` will be raised.
:param expression: A boolean expression to filter by.
:returns: The matching :py:class:`Model` instance.
:raises: ``ValueError`` if result set size is not 1. | Below is the the instruction that describes the task:
### Input:
Retrieve the model instance matching the given expression.
If the number of matching results is not equal to one, then
a ``ValueError`` will be raised.
:param expression: A boolean expression to filter by.
:returns: The matching :py:class:`Model` instance.
:raises: ``ValueError`` if result set size is not 1.
### Response:
def get(cls, expression):
"""
Retrieve the model instance matching the given expression.
If the number of matching results is not equal to one, then
a ``ValueError`` will be raised.
:param expression: A boolean expression to filter by.
:returns: The matching :py:class:`Model` instance.
:raises: ``ValueError`` if result set size is not 1.
"""
executor = Executor(cls.__database__)
result = executor.execute(expression)
if len(result) != 1:
raise ValueError('Got %s results, expected 1.' % len(result))
return cls.load(result._first_or_any(), convert_key=False) |
def rule_command_cmdlist_interface_u_interface_fe_leaf_interface_fortygigabitethernet_leaf(self, **kwargs):
    """Auto Generated Code.

    Build the XML payload for a Brocade AAA command rule scoped to a
    FortyGigabitEthernet interface and pass it to the transport callback.

    :param index: rule index (required; popped from ``kwargs``).
    :param fortygigabitethernet_leaf: interface identifier (required).
    :param callback: optional override for ``self._callback``.
    :return: whatever the callback returns for the assembled config tree.
    :raises KeyError: if a required kwarg is missing.
    """
    config = ET.Element("config")
    rule = ET.SubElement(config, "rule", xmlns="urn:brocade.com:mgmt:brocade-aaa")
    index_key = ET.SubElement(rule, "index")
    index_key.text = kwargs.pop('index')
    command = ET.SubElement(rule, "command")
    cmdlist = ET.SubElement(command, "cmdlist")
    interface_u = ET.SubElement(cmdlist, "interface-u")
    interface_fe_leaf = ET.SubElement(interface_u, "interface-fe-leaf")
    interface = ET.SubElement(interface_fe_leaf, "interface")
    fortygigabitethernet_leaf = ET.SubElement(interface, "fortygigabitethernet-leaf")
    fortygigabitethernet_leaf.text = kwargs.pop('fortygigabitethernet_leaf')
    # allow the caller to substitute the transport for this single request
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
### Input:
Auto Generated Code
### Response:
def rule_command_cmdlist_interface_u_interface_fe_leaf_interface_fortygigabitethernet_leaf(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
rule = ET.SubElement(config, "rule", xmlns="urn:brocade.com:mgmt:brocade-aaa")
index_key = ET.SubElement(rule, "index")
index_key.text = kwargs.pop('index')
command = ET.SubElement(rule, "command")
cmdlist = ET.SubElement(command, "cmdlist")
interface_u = ET.SubElement(cmdlist, "interface-u")
interface_fe_leaf = ET.SubElement(interface_u, "interface-fe-leaf")
interface = ET.SubElement(interface_fe_leaf, "interface")
fortygigabitethernet_leaf = ET.SubElement(interface, "fortygigabitethernet-leaf")
fortygigabitethernet_leaf.text = kwargs.pop('fortygigabitethernet_leaf')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def _get_option_value(self, key):
    """
    Get option value(s), or use default value if no override value.

    Two output values for 'p' type values (parameters), else one.
    Returns a single value or tuple of two values
    """
    setter_name = "set_{}_value".format(key)
    # precedence: explicit override > computed setter > static default
    try:
        value = self.overrides[key]
    except KeyError:
        if hasattr(self, setter_name):
            value = getattr(self, setter_name)()
        else:
            value = self.defaults[key]["d"]
    # parameter-type entries encode two values separated by '|'
    if self.defaults[key]["t"] == constants.FT_PARAM:
        return value.split("|")
    return value
Two output values for 'p' type values (parameters), else one.
Returns a single value or tuple of two values | Below is the the instruction that describes the task:
### Input:
Get option value(s), or use default value if no override value.
Two output values for 'p' type values (parameters), else one.
Returns a single value or tuple of two values
### Response:
def _get_option_value(self, key):
"""
Get option value(s), or use default value if no override value.
Two output values for 'p' type values (parameters), else one.
Returns a single value or tuple of two values
"""
fn_name = "set_{}_value".format(key)
if key in self.overrides:
val = self.overrides[key]
elif hasattr(self, fn_name):
val = getattr(self, fn_name)()
else:
val = self.defaults[key]["d"]
if self.defaults[key]["t"] == constants.FT_PARAM:
return val.split("|")
else:
return val |
def get_provides_by_kind(self, kind):
    """Return the list of provides whose ``kind`` attribute matches *kind*."""
    return [prov for prov in self.provides if prov.kind == kind]
### Input:
Returns an array of provides of a certain kind
### Response:
def get_provides_by_kind(self, kind):
""" Returns an array of provides of a certain kind """
provs = []
for p in self.provides:
if p.kind == kind:
provs.append(p)
return provs |
def parse_from_import_statement(self):
    """Parse a 'from x import y' statement.

    The purpose is to find __future__ statements.
    """
    self.log.debug("parsing from/import statement.")
    # the source part ("from x") tells us whether the imported names
    # should be treated as __future__ features
    is_future_import = self._parse_from_import_source()
    self._parse_from_import_names(is_future_import)
The purpose is to find __future__ statements. | Below is the the instruction that describes the task:
### Input:
Parse a 'from x import y' statement.
The purpose is to find __future__ statements.
### Response:
def parse_from_import_statement(self):
"""Parse a 'from x import y' statement.
The purpose is to find __future__ statements.
"""
self.log.debug("parsing from/import statement.")
is_future_import = self._parse_from_import_source()
self._parse_from_import_names(is_future_import) |
def from_inline(cls: Type[MembershipType], version: int, currency: str, membership_type: str,
                inline: str) -> MembershipType:
    """
    Return Membership instance from inline format

    :param version: Version of the document
    :param currency: Name of the currency
    :param membership_type: "IN" or "OUT" to enter or exit membership
    :param inline: Inline string format
    :return: parsed Membership document
    :raises MalformedDocumentError: if the line does not match the inline pattern
    """
    match = Membership.re_inline.match(inline)
    if match is None:
        raise MalformedDocumentError("Inline membership ({0})".format(inline))
    # capture groups, in order: issuer, signature, two block UIDs, uid
    issuer = match.group(1)
    signature = match.group(2)
    membership_ts = BlockUID.from_str(match.group(3))
    identity_ts = BlockUID.from_str(match.group(4))
    uid = match.group(5)
    return cls(version, currency, issuer, membership_ts, membership_type, uid, identity_ts, signature)
return cls(version, currency, issuer, membership_ts, membership_type, uid, identity_ts, signature) | Return Membership instance from inline format
:param version: Version of the document
:param currency: Name of the currency
:param membership_type: "IN" or "OUT" to enter or exit membership
:param inline: Inline string format
:return: | Below is the the instruction that describes the task:
### Input:
Return Membership instance from inline format
:param version: Version of the document
:param currency: Name of the currency
:param membership_type: "IN" or "OUT" to enter or exit membership
:param inline: Inline string format
:return:
### Response:
def from_inline(cls: Type[MembershipType], version: int, currency: str, membership_type: str,
inline: str) -> MembershipType:
"""
Return Membership instance from inline format
:param version: Version of the document
:param currency: Name of the currency
:param membership_type: "IN" or "OUT" to enter or exit membership
:param inline: Inline string format
:return:
"""
data = Membership.re_inline.match(inline)
if data is None:
raise MalformedDocumentError("Inline membership ({0})".format(inline))
issuer = data.group(1)
signature = data.group(2)
membership_ts = BlockUID.from_str(data.group(3))
identity_ts = BlockUID.from_str(data.group(4))
uid = data.group(5)
return cls(version, currency, issuer, membership_ts, membership_type, uid, identity_ts, signature) |
def show_keypair(kwargs=None, call=None):
    '''
    Show the details of an SSH keypair
    '''
    # salt-cloud convention: this must be invoked as a function (-f/--function)
    if call != 'function':
        log.error(
            'The show_keypair function must be called with -f or --function.'
        )
        return False
    kwargs = kwargs or {}
    if 'keyname' not in kwargs:
        log.error('A keyname is required.')
        return False
    # resolve the human-readable key name to the provider's key id
    keyid = list_keypairs(call='function')[kwargs['keyname']]['id']
    log.debug('Key ID is %s', keyid)
    return query(method='account/keys', command=keyid)
### Input:
Show the details of an SSH keypair
### Response:
def show_keypair(kwargs=None, call=None):
'''
Show the details of an SSH keypair
'''
if call != 'function':
log.error(
'The show_keypair function must be called with -f or --function.'
)
return False
if not kwargs:
kwargs = {}
if 'keyname' not in kwargs:
log.error('A keyname is required.')
return False
keypairs = list_keypairs(call='function')
keyid = keypairs[kwargs['keyname']]['id']
log.debug('Key ID is %s', keyid)
details = query(method='account/keys', command=keyid)
return details |
async def _deferred_init(self):
    """
    Run those things in a separate task as they are not required for the
    bot to work and they take a lot of time to run.
    """
    # each step awaits a remote platform call, sequentially
    await self._check_subscriptions()
    await self._set_whitelist()
    await self._set_get_started()
    await self._set_greeting_text()
    await self._set_persistent_menu()
bot to work and they take a lot of time to run. | Below is the the instruction that describes the task:
### Input:
Run those things in a sepearate tasks as they are not required for the
bot to work and they take a lot of time to run.
### Response:
async def _deferred_init(self):
"""
Run those things in a sepearate tasks as they are not required for the
bot to work and they take a lot of time to run.
"""
await self._check_subscriptions()
await self._set_whitelist()
await self._set_get_started()
await self._set_greeting_text()
await self._set_persistent_menu() |
def taper_timeseries(tsdata, tapermethod=None, return_lal=False):
    """
    Taper either or both ends of a time series using wrapped
    LALSimulation functions

    Parameters
    ----------
    tsdata : TimeSeries
        Series to be tapered, dtype must be either float32 or float64
    tapermethod : string
        Should be one of ('TAPER_NONE', 'TAPER_START', 'TAPER_END',
        'TAPER_STARTEND', 'start', 'end', 'startend') - NB 'TAPER_NONE' will
        not change the series!
    return_lal : Boolean
        If True, return a wrapped LAL time series object, else return a
        PyCBC time series.
    """
    if tapermethod is None:
        # fixed the implicit string concatenation that produced "calledwith"
        raise ValueError("Must specify a tapering method (function was called "
                         "with tapermethod=None)")
    if tapermethod not in taper_map:
        raise ValueError("Unknown tapering method %s, valid methods are %s" %
                         (tapermethod, ", ".join(taper_map)))
    if tsdata.dtype not in (float32, float64):
        raise TypeError("Strain dtype must be float32 or float64, not "
                        + str(tsdata.dtype))
    taper_func = taper_func_map[tsdata.dtype]
    # make a LAL TimeSeries to pass to the LALSim function
    ts_lal = tsdata.astype(tsdata.dtype).lal()
    # TAPER_NONE maps to None and deliberately leaves the data untouched
    if taper_map[tapermethod] is not None:
        taper_func(ts_lal.data, taper_map[tapermethod])
    if return_lal:
        return ts_lal
    # copy the samples back into a PyCBC TimeSeries, preserving timing metadata
    return TimeSeries(ts_lal.data.data[:], delta_t=ts_lal.deltaT,
                      epoch=ts_lal.epoch)
LALSimulation functions
Parameters
----------
tsdata : TimeSeries
Series to be tapered, dtype must be either float32 or float64
tapermethod : string
Should be one of ('TAPER_NONE', 'TAPER_START', 'TAPER_END',
'TAPER_STARTEND', 'start', 'end', 'startend') - NB 'TAPER_NONE' will
not change the series!
return_lal : Boolean
If True, return a wrapped LAL time series object, else return a
PyCBC time series. | Below is the the instruction that describes the task:
### Input:
Taper either or both ends of a time series using wrapped
LALSimulation functions
Parameters
----------
tsdata : TimeSeries
Series to be tapered, dtype must be either float32 or float64
tapermethod : string
Should be one of ('TAPER_NONE', 'TAPER_START', 'TAPER_END',
'TAPER_STARTEND', 'start', 'end', 'startend') - NB 'TAPER_NONE' will
not change the series!
return_lal : Boolean
If True, return a wrapped LAL time series object, else return a
PyCBC time series.
### Response:
def taper_timeseries(tsdata, tapermethod=None, return_lal=False):
"""
Taper either or both ends of a time series using wrapped
LALSimulation functions
Parameters
----------
tsdata : TimeSeries
Series to be tapered, dtype must be either float32 or float64
tapermethod : string
Should be one of ('TAPER_NONE', 'TAPER_START', 'TAPER_END',
'TAPER_STARTEND', 'start', 'end', 'startend') - NB 'TAPER_NONE' will
not change the series!
return_lal : Boolean
If True, return a wrapped LAL time series object, else return a
PyCBC time series.
"""
if tapermethod is None:
raise ValueError("Must specify a tapering method (function was called"
"with tapermethod=None)")
if tapermethod not in taper_map.keys():
raise ValueError("Unknown tapering method %s, valid methods are %s" % \
(tapermethod, ", ".join(taper_map.keys())))
if tsdata.dtype not in (float32, float64):
raise TypeError("Strain dtype must be float32 or float64, not "
+ str(tsdata.dtype))
taper_func = taper_func_map[tsdata.dtype]
# make a LAL TimeSeries to pass to the LALSim function
ts_lal = tsdata.astype(tsdata.dtype).lal()
if taper_map[tapermethod] is not None:
taper_func(ts_lal.data, taper_map[tapermethod])
if return_lal:
return ts_lal
else:
return TimeSeries(ts_lal.data.data[:], delta_t=ts_lal.deltaT,
epoch=ts_lal.epoch) |
def update(self, user, **kwargs):
    """If parent resource is not an editable state, should not be able to update

    :param user: acting user, forwarded to the parent class's update.
    :raises exceptions.Unauthorized: when the parent resource is not editable.
    """
    # generator-style coroutine: yield resolves the async parent lookup
    yield self.get_parent()
    if not self.parent.editable:
        err = 'Cannot update child of {} resource'.format(self.parent.state.name)
        raise exceptions.Unauthorized(err)
    yield super(SubResource, self).update(user, **kwargs)
### Input:
If parent resource is not an editable state, should not be able to update
### Response:
def update(self, user, **kwargs):
"""If parent resource is not an editable state, should not be able to update"""
yield self.get_parent()
if not self.parent.editable:
err = 'Cannot update child of {} resource'.format(self.parent.state.name)
raise exceptions.Unauthorized(err)
yield super(SubResource, self).update(user, **kwargs) |
def _get_event_and_context(self, event, arg_type):
    """Return an INDRA Event based on an event entry."""
    entry = self.concept_dict[_choose_id(event, arg_type)]
    concept, _metadata = self._make_concept(entry)
    delta = {
        'adjectives': [],
        'states': get_states(entry),
        'polarity': get_polarity(entry),
    }
    return Event(concept, delta=delta, context=self._make_context(entry))
### Input:
Return an INDRA Event based on an event entry.
### Response:
def _get_event_and_context(self, event, arg_type):
"""Return an INDRA Event based on an event entry."""
eid = _choose_id(event, arg_type)
ev = self.concept_dict[eid]
concept, metadata = self._make_concept(ev)
ev_delta = {'adjectives': [],
'states': get_states(ev),
'polarity': get_polarity(ev)}
context = self._make_context(ev)
event_obj = Event(concept, delta=ev_delta, context=context)
return event_obj |
def list_templates(self, offset=0, count=20):
    """Fetch all message templates registered under this account.

    Reference:
    https://open.weixin.qq.com/cgi-bin/showdocument?action=dir_list&id=open1500465446_j4CgR

    :param offset: pagination start offset, minimum 0
    :type offset: int
    :param count: pagination page size, maximum 20
    :type count: int
    :return: list of templates
    :rtype: list[dict]
    """
    def _extract(result):
        # the API wraps the templates under a "list" key
        return result['list']

    return self._post(
        'cgi-bin/wxopen/template/list',
        data={'offset': offset, 'count': count},
        result_processor=_extract,
    )
详情请参考
https://open.weixin.qq.com/cgi-bin/showdocument?action=dir_list&id=open1500465446_j4CgR
:param offset: 用于分页,表示起始量,最小值为0
:type offset: int
:param count: 用于分页,表示拉取数量,最大值为20
:type count: int
:return: 模板列表
:rtype: list[dict] | Below is the the instruction that describes the task:
### Input:
获取本账号内所有模板
详情请参考
https://open.weixin.qq.com/cgi-bin/showdocument?action=dir_list&id=open1500465446_j4CgR
:param offset: 用于分页,表示起始量,最小值为0
:type offset: int
:param count: 用于分页,表示拉取数量,最大值为20
:type count: int
:return: 模板列表
:rtype: list[dict]
### Response:
def list_templates(self, offset=0, count=20):
"""
获取本账号内所有模板
详情请参考
https://open.weixin.qq.com/cgi-bin/showdocument?action=dir_list&id=open1500465446_j4CgR
:param offset: 用于分页,表示起始量,最小值为0
:type offset: int
:param count: 用于分页,表示拉取数量,最大值为20
:type count: int
:return: 模板列表
:rtype: list[dict]
"""
return self._post(
'cgi-bin/wxopen/template/list',
data={
'offset': offset,
'count': count,
},
result_processor=lambda x: x['list'],
) |
def mousePressEvent(self, event):
    """
    Starts the selection process for this widget and snapshot area.

    :param event | <QMousePressEvent>
    """
    # anchor the selection region at the press position
    point = event.pos()
    self._region.setX(point.x())
    self._region.setY(point.y())
    super(XSnapshotWidget, self).mousePressEvent(event)
:param event | <QMousePressEvent> | Below is the the instruction that describes the task:
### Input:
Starts the selection process for this widget and snapshot area.
:param event | <QMousePressEvent>
### Response:
def mousePressEvent(self, event):
"""
Starts the selection process for this widget and snapshot area.
:param event | <QMousePressEvent>
"""
self._region.setX(event.pos().x())
self._region.setY(event.pos().y())
super(XSnapshotWidget, self).mousePressEvent(event) |
def table_api_get(self, *paths, **kparams):
    """Helper to make GET /api/now/v1/table requests and decode the JSON body."""
    url = self.flattened_params_url("/api/now/v1/table", *paths, **kparams)
    return json.loads(self.req("get", url).text)
### Input:
helper to make GET /api/now/v1/table requests
### Response:
def table_api_get(self, *paths, **kparams):
""" helper to make GET /api/now/v1/table requests """
url = self.flattened_params_url("/api/now/v1/table", *paths, **kparams)
rjson = self.req("get", url).text
return json.loads(rjson) |
def _sf2(args):
    """
    A shallow wrapper for sigma_filter.

    Parameters
    ----------
    args : list
        A list of arguments for sigma_filter

    Returns
    -------
    None
    """
    # an easier to debug traceback when multiprocessing
    # thanks to https://stackoverflow.com/a/16618842/1710603
    try:
        return sigma_filter(*args)
    except Exception:
        # was a bare "except:", which also swallowed SystemExit and
        # KeyboardInterrupt; only wrap genuine errors
        import traceback
        raise Exception("".join(traceback.format_exception(*sys.exc_info())))
Parameters
----------
args : list
A list of arguments for sigma_filter
Returns
-------
None | Below is the the instruction that describes the task:
### Input:
A shallow wrapper for sigma_filter.
Parameters
----------
args : list
A list of arguments for sigma_filter
Returns
-------
None
### Response:
def _sf2(args):
"""
A shallow wrapper for sigma_filter.
Parameters
----------
args : list
A list of arguments for sigma_filter
Returns
-------
None
"""
# an easier to debug traceback when multiprocessing
# thanks to https://stackoverflow.com/a/16618842/1710603
try:
return sigma_filter(*args)
except:
import traceback
raise Exception("".join(traceback.format_exception(*sys.exc_info()))) |
def do_direct_authorization(self, session):
    """ Direct Authorization, more info: https://vk.com/dev/auth_direct

    :param session: requests-like session used to POST the credentials.
    :return: dict parsed from the JSON response (contains ``access_token``
        on success), or the result of the 2FA / captcha handler.
    :raises VkAuthError: on a non-JSON response or a VK-reported error that
        is not a 2FA/captcha challenge.
    """
    logger.info('Doing direct authorization, app_id=%s', self.app_id)
    auth_data = {
        'client_id': self.app_id,
        'client_secret': self._client_secret,
        'username': self._login,
        'password': self._password,
        'grant_type': 'password',
        '2fa_supported': self._two_fa_supported,
        'scope': self.scope,
        'v': self.api_version
    }
    response = session.post(url=self.DIRECT_AUTHORIZE_URL,
                            data=stringify_values(auth_data))
    try:
        response_json = response.json()
    except ValueError:  # not JSON in response
        error_message = 'OAuth2 grant access error'
        logger.error(response.text)
        raise VkAuthError(error_message)
    else:
        if 'access_token' in response_json:
            return response_json
        # no token: VK is either challenging us (2FA/captcha) or rejecting
        if response_json['error'] == 'need_validation':
            return self.direct_auth_require_2fa(session, auth_data)
        elif response_json['error'] == 'need_captcha':
            return self.direct_auth_require_captcha(session, response_json, auth_data)
        else:
            error_message = 'VK error: [{}] {}'.format(
                response_json['error'], response_json['error_description'])
            raise VkAuthError(error_message)
### Input:
Direct Authorization, more info: https://vk.com/dev/auth_direct
### Response:
def do_direct_authorization(self, session):
""" Direct Authorization, more info: https://vk.com/dev/auth_direct """
logger.info('Doing direct authorization, app_id=%s', self.app_id)
auth_data = {
'client_id': self.app_id,
'client_secret': self._client_secret,
'username': self._login,
'password': self._password,
'grant_type': 'password',
'2fa_supported': self._two_fa_supported,
'scope': self.scope,
'v': self.api_version
}
response = session.post(url=self.DIRECT_AUTHORIZE_URL,
data=stringify_values(auth_data))
try:
response_json = response.json()
except ValueError: # not JSON in response
error_message = 'OAuth2 grant access error'
logger.error(response.text)
raise VkAuthError(error_message)
else:
if 'access_token' in response_json:
return response_json
if response_json['error'] == 'need_validation':
return self.direct_auth_require_2fa(session, auth_data)
elif response_json['error'] == 'need_captcha':
return self.direct_auth_require_captcha(session, response_json, auth_data)
else:
error_message = 'VK error: [{}] {}'.format(
response_json['error'], response_json['error_description'])
raise VkAuthError(error_message) |
def _load_lsm_data(self, data_var,
                   conversion_factor=1,
                   calc_4d_method=None,
                   calc_4d_dim=None,
                   time_step=None):
    """
    This extracts the LSM data from a folder of netcdf files

    :param data_var: name of the variable to pull via the ``lsm`` accessor.
    :param conversion_factor: multiplier applied in place to the values.
    :param calc_4d_method: aggregation method forwarded to ``lsm.getvar``
        for 4D variables — semantics defined by the lsm accessor.
    :param calc_4d_dim: dimension the 4D aggregation operates over.
    :param time_step: ``datetime`` for label-based selection, an integer
        for positional selection, or ``None`` to keep all time steps.
    """
    data = self.xd.lsm.getvar(data_var,
                              yslice=self.yslice,
                              xslice=self.xslice,
                              calc_4d_method=calc_4d_method,
                              calc_4d_dim=calc_4d_dim)
    if isinstance(time_step, datetime):
        # label-based selection needs a pandas timestamp
        data = data.loc[{self.lsm_time_dim: [pd.to_datetime(time_step)]}]
    elif time_step is not None:
        # positional (integer index) selection
        data = data[{self.lsm_time_dim: [time_step]}]
    # missing values become 0 before scaling
    data = data.fillna(0)
    data.values *= conversion_factor
    return data
### Input:
This extracts the LSM data from a folder of netcdf files
### Response:
def _load_lsm_data(self, data_var,
conversion_factor=1,
calc_4d_method=None,
calc_4d_dim=None,
time_step=None):
"""
This extracts the LSM data from a folder of netcdf files
"""
data = self.xd.lsm.getvar(data_var,
yslice=self.yslice,
xslice=self.xslice,
calc_4d_method=calc_4d_method,
calc_4d_dim=calc_4d_dim)
if isinstance(time_step, datetime):
data = data.loc[{self.lsm_time_dim: [pd.to_datetime(time_step)]}]
elif time_step is not None:
data = data[{self.lsm_time_dim: [time_step]}]
data = data.fillna(0)
data.values *= conversion_factor
return data |
def fields_metadata(self):
    """
    Returns fields metadata as a dataframe.
    """
    # one column per field, then flip so each field becomes a row
    per_field = [f.metadata() for f in self.fields]
    meta = pd.concat(per_field, axis=1).transpose()
    return meta.sort_values(["step_num", "frame", "label", "position"])
### Input:
Returns fields metadata as a dataframe.
### Response:
def fields_metadata(self):
"""
Returns fields metadata as a dataframe.
"""
return (pd.concat([f.metadata() for f in self.fields], axis = 1)
.transpose()
.sort_values(["step_num", "frame", "label", "position"])) |
def parsewarn(self, msg, line=None):
    """Emit parse warning, defaulting to the current source line."""
    where = self.sline if line is None else line
    self.dowarn('warning: ' + msg + ' on line {}'.format(where))
### Input:
Emit parse warning.
### Response:
def parsewarn(self, msg, line=None):
"""Emit parse warning."""
if line is None:
line = self.sline
self.dowarn('warning: ' + msg + ' on line {}'.format(line)) |
def norm(field, vmin=0, vmax=255):
    """Clip *field* to [0, 1], then scale to uint8 values in [vmin, vmax].

    The original implementation ignored ``vmin``/``vmax`` and always scaled
    to [0, 255]; the defaults preserve that behaviour exactly.

    :param field: array-like of floats, nominally in [0, 1].
    :param vmin: output value that 0.0 maps to.
    :param vmax: output value that 1.0 maps to.
    :return: ``uint8`` numpy array.
    """
    scaled = vmin + (vmax - vmin) * np.clip(field, 0, 1)
    return scaled.astype('uint8')
### Input:
Truncates field to 0,1; then normalizes to a uin8 on [0,255]
### Response:
def norm(field, vmin=0, vmax=255):
"""Truncates field to 0,1; then normalizes to a uin8 on [0,255]"""
field = 255*np.clip(field, 0, 1)
field = field.astype('uint8')
return field |
def active_network_addresses(hypervisor):
    """Query libvirt for the already reserved addresses."""
    reserved = []
    for name in hypervisor.listNetworks():
        try:
            desc = hypervisor.networkLookupByName(name).XMLDesc(0)
        except libvirt.libvirtError:  # network has been destroyed meanwhile
            continue
        ip_node = etree.fromstring(desc).find('.//ip')
        cidr = u'/'.join((ip_node.get('address'), ip_node.get('netmask')))
        reserved.append(ipaddress.IPv4Network(cidr, strict=False))
    return reserved
### Input:
Query libvirt for the already reserved addresses.
### Response:
def active_network_addresses(hypervisor):
"""Query libvirt for the already reserved addresses."""
active = []
for network in hypervisor.listNetworks():
try:
xml = hypervisor.networkLookupByName(network).XMLDesc(0)
except libvirt.libvirtError: # network has been destroyed meanwhile
continue
else:
ip_element = etree.fromstring(xml).find('.//ip')
address = ip_element.get('address')
netmask = ip_element.get('netmask')
active.append(ipaddress.IPv4Network(u'/'.join((address, netmask)),
strict=False))
return active |
def transfer(self, data):
    """Transfers data over SPI.

    Arguments:
        data: The data to transfer.

    Returns:
        The data returned by the SPI device.
    """
    # tell the device how many bytes the whole transaction carries
    settings = self.transfer_settings
    settings.spi_tx_size = len(data)
    self.transfer_settings = settings
    # accumulate chunks in a list and join once; the original built the
    # response with string += (quadratic) and ended with a redundant
    # ''.join(<str>) no-op
    chunks = []
    received = 0
    for i in range(0, len(data), 60):
        # the command payload is limited to 60 bytes per transfer
        part = self.sendCommand(commands.SPITransferCommand(data[i:i + 60])).data
        chunks.append(part)
        received += len(part)
        time.sleep(0.01)
    while received < len(data):
        # keep clocking empty transfers until one byte has been returned
        # for every byte sent
        part = self.sendCommand(commands.SPITransferCommand('')).data
        chunks.append(part)
        received += len(part)
    return ''.join(chunks)
Arguments:
data: The data to transfer.
Returns:
The data returned by the SPI device. | Below is the the instruction that describes the task:
### Input:
Transfers data over SPI.
Arguments:
data: The data to transfer.
Returns:
The data returned by the SPI device.
### Response:
def transfer(self, data):
"""Transfers data over SPI.
Arguments:
data: The data to transfer.
Returns:
The data returned by the SPI device.
"""
settings = self.transfer_settings
settings.spi_tx_size = len(data)
self.transfer_settings = settings
response = ''
for i in range(0, len(data), 60):
response += self.sendCommand(commands.SPITransferCommand(data[i:i + 60])).data
time.sleep(0.01)
while len(response) < len(data):
response += self.sendCommand(commands.SPITransferCommand('')).data
return ''.join(response) |
def eth_getCode(self, address, default_block=BLOCK_TAG_LATEST):
    """Return the code stored at the given address.

    https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_getcode

    :param address: account/contract address to query.
    :param default_block: block number, or one of the ``BLOCK_TAGS`` strings.
    :raises ValueError: if ``default_block`` is a string but not a valid tag.
    """
    if isinstance(default_block, str) and default_block not in BLOCK_TAGS:
        # previously raised a bare ValueError with no context for the caller
        raise ValueError(
            'invalid block tag {!r}, expected one of {}'.format(
                default_block, sorted(BLOCK_TAGS)))
    return self._call("eth_getCode", [address, default_block])
https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_getcode
NEEDS TESTING | Below is the the instruction that describes the task:
### Input:
TODO: documentation
https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_getcode
NEEDS TESTING
### Response:
def eth_getCode(self, address, default_block=BLOCK_TAG_LATEST):
"""TODO: documentation
https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_getcode
NEEDS TESTING
"""
if isinstance(default_block, str):
if default_block not in BLOCK_TAGS:
raise ValueError
return self._call("eth_getCode", [address, default_block]) |
def run_commands(*commands, **kwargs):
'''
Sends the commands over the transport to the device.
This function sends the commands to the device using the nodes
transport. This is a lower layer function that shouldn't normally
need to be used, preferring instead to use ``config()`` or ``enable()``.
transport: ``https``
Specifies the type of connection transport to use. Valid values for the
connection are ``socket``, ``http_local``, ``http``, and ``https``.
.. note::
This argument does not need to be specified when running in a
:mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.
host: ``localhost``
The IP address or DNS host name of the connection device.
.. note::
This argument does not need to be specified when running in a
:mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.
username: ``admin``
The username to pass to the device to authenticate the eAPI connection.
.. note::
This argument does not need to be specified when running in a
:mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.
password
The password to pass to the device to authenticate the eAPI connection.
.. note::
This argument does not need to be specified when running in a
:mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.
port
The TCP port of the endpoint for the eAPI connection. If this keyword is
not specified, the default value is automatically determined by the
transport type (``80`` for ``http``, or ``443`` for ``https``).
.. note::
This argument does not need to be specified when running in a
:mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.
enablepwd
The enable mode password if required by the destination node.
.. note::
This argument does not need to be specified when running in a
:mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.
CLI Example:
.. code-block:: bash
salt '*' pyeapi.run_commands 'show version'
salt '*' pyeapi.run_commands 'show version' encoding=text
salt '*' pyeapi.run_commands 'show version' encoding=text host=cr1.thn.lon username=example password=weak
Output example:
.. code-block:: text
veos1:
|_
----------
architecture:
i386
bootupTimestamp:
1527541728.53
hardwareRevision:
internalBuildId:
63d2e89a-220d-4b8a-a9b3-0524fa8f9c5f
internalVersion:
4.18.1F-4591672.4181F
isIntlVersion:
False
memFree:
501468
memTotal:
1893316
modelName:
vEOS
serialNumber:
systemMacAddress:
52:54:00:3f:e6:d0
version:
4.18.1F
'''
encoding = kwargs.pop('encoding', 'json')
send_enable = kwargs.pop('send_enable', True)
output = call('run_commands',
commands,
encoding=encoding,
send_enable=send_enable,
**kwargs)
if encoding == 'text':
ret = []
for res in output:
ret.append(res['output'])
return ret
return output | Sends the commands over the transport to the device.
This function sends the commands to the device using the nodes
transport. This is a lower layer function that shouldn't normally
need to be used, preferring instead to use ``config()`` or ``enable()``.
transport: ``https``
Specifies the type of connection transport to use. Valid values for the
connection are ``socket``, ``http_local``, ``http``, and ``https``.
.. note::
This argument does not need to be specified when running in a
:mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.
host: ``localhost``
The IP address or DNS host name of the connection device.
.. note::
This argument does not need to be specified when running in a
:mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.
username: ``admin``
The username to pass to the device to authenticate the eAPI connection.
.. note::
This argument does not need to be specified when running in a
:mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.
password
The password to pass to the device to authenticate the eAPI connection.
.. note::
This argument does not need to be specified when running in a
:mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.
port
The TCP port of the endpoint for the eAPI connection. If this keyword is
not specified, the default value is automatically determined by the
transport type (``80`` for ``http``, or ``443`` for ``https``).
.. note::
This argument does not need to be specified when running in a
:mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.
enablepwd
The enable mode password if required by the destination node.
.. note::
This argument does not need to be specified when running in a
:mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.
CLI Example:
.. code-block:: bash
salt '*' pyeapi.run_commands 'show version'
salt '*' pyeapi.run_commands 'show version' encoding=text
salt '*' pyeapi.run_commands 'show version' encoding=text host=cr1.thn.lon username=example password=weak
Output example:
.. code-block:: text
veos1:
|_
----------
architecture:
i386
bootupTimestamp:
1527541728.53
hardwareRevision:
internalBuildId:
63d2e89a-220d-4b8a-a9b3-0524fa8f9c5f
internalVersion:
4.18.1F-4591672.4181F
isIntlVersion:
False
memFree:
501468
memTotal:
1893316
modelName:
vEOS
serialNumber:
systemMacAddress:
52:54:00:3f:e6:d0
version:
4.18.1F | Below is the the instruction that describes the task:
### Input:
Sends the commands over the transport to the device.
This function sends the commands to the device using the nodes
transport. This is a lower layer function that shouldn't normally
need to be used, preferring instead to use ``config()`` or ``enable()``.
transport: ``https``
Specifies the type of connection transport to use. Valid values for the
connection are ``socket``, ``http_local``, ``http``, and ``https``.
.. note::
This argument does not need to be specified when running in a
:mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.
host: ``localhost``
The IP address or DNS host name of the connection device.
.. note::
This argument does not need to be specified when running in a
:mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.
username: ``admin``
The username to pass to the device to authenticate the eAPI connection.
.. note::
This argument does not need to be specified when running in a
:mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.
password
The password to pass to the device to authenticate the eAPI connection.
.. note::
This argument does not need to be specified when running in a
:mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.
port
The TCP port of the endpoint for the eAPI connection. If this keyword is
not specified, the default value is automatically determined by the
transport type (``80`` for ``http``, or ``443`` for ``https``).
.. note::
This argument does not need to be specified when running in a
:mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.
enablepwd
The enable mode password if required by the destination node.
.. note::
This argument does not need to be specified when running in a
:mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.
CLI Example:
.. code-block:: bash
salt '*' pyeapi.run_commands 'show version'
salt '*' pyeapi.run_commands 'show version' encoding=text
salt '*' pyeapi.run_commands 'show version' encoding=text host=cr1.thn.lon username=example password=weak
Output example:
.. code-block:: text
veos1:
|_
----------
architecture:
i386
bootupTimestamp:
1527541728.53
hardwareRevision:
internalBuildId:
63d2e89a-220d-4b8a-a9b3-0524fa8f9c5f
internalVersion:
4.18.1F-4591672.4181F
isIntlVersion:
False
memFree:
501468
memTotal:
1893316
modelName:
vEOS
serialNumber:
systemMacAddress:
52:54:00:3f:e6:d0
version:
4.18.1F
### Response:
def run_commands(*commands, **kwargs):
'''
Sends the commands over the transport to the device.
This function sends the commands to the device using the nodes
transport. This is a lower layer function that shouldn't normally
need to be used, preferring instead to use ``config()`` or ``enable()``.
transport: ``https``
Specifies the type of connection transport to use. Valid values for the
connection are ``socket``, ``http_local``, ``http``, and ``https``.
.. note::
This argument does not need to be specified when running in a
:mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.
host: ``localhost``
The IP address or DNS host name of the connection device.
.. note::
This argument does not need to be specified when running in a
:mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.
username: ``admin``
The username to pass to the device to authenticate the eAPI connection.
.. note::
This argument does not need to be specified when running in a
:mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.
password
The password to pass to the device to authenticate the eAPI connection.
.. note::
This argument does not need to be specified when running in a
:mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.
port
The TCP port of the endpoint for the eAPI connection. If this keyword is
not specified, the default value is automatically determined by the
transport type (``80`` for ``http``, or ``443`` for ``https``).
.. note::
This argument does not need to be specified when running in a
:mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.
enablepwd
The enable mode password if required by the destination node.
.. note::
This argument does not need to be specified when running in a
:mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.
CLI Example:
.. code-block:: bash
salt '*' pyeapi.run_commands 'show version'
salt '*' pyeapi.run_commands 'show version' encoding=text
salt '*' pyeapi.run_commands 'show version' encoding=text host=cr1.thn.lon username=example password=weak
Output example:
.. code-block:: text
veos1:
|_
----------
architecture:
i386
bootupTimestamp:
1527541728.53
hardwareRevision:
internalBuildId:
63d2e89a-220d-4b8a-a9b3-0524fa8f9c5f
internalVersion:
4.18.1F-4591672.4181F
isIntlVersion:
False
memFree:
501468
memTotal:
1893316
modelName:
vEOS
serialNumber:
systemMacAddress:
52:54:00:3f:e6:d0
version:
4.18.1F
'''
encoding = kwargs.pop('encoding', 'json')
send_enable = kwargs.pop('send_enable', True)
output = call('run_commands',
commands,
encoding=encoding,
send_enable=send_enable,
**kwargs)
if encoding == 'text':
ret = []
for res in output:
ret.append(res['output'])
return ret
return output |
def get_activate_url(self, card_id, outer_str=None):
"""
获取开卡插件 Url, 内含调用开卡插件所需的参数
详情请参考
https://mp.weixin.qq.com/wiki?id=mp1499332673_Unm7V
:param card_id: 会员卡的card_id
:param outer_str: 渠道值,用于统计本次领取的渠道参数
:return: 内含调用开卡插件所需的参数的 Url
"""
return self._post(
'card/membercard/activate/geturl',
data={
'card_id': card_id,
'outer_str': outer_str,
},
result_processor=lambda x: x['url'],
) | 获取开卡插件 Url, 内含调用开卡插件所需的参数
详情请参考
https://mp.weixin.qq.com/wiki?id=mp1499332673_Unm7V
:param card_id: 会员卡的card_id
:param outer_str: 渠道值,用于统计本次领取的渠道参数
:return: 内含调用开卡插件所需的参数的 Url | Below is the the instruction that describes the task:
### Input:
获取开卡插件 Url, 内含调用开卡插件所需的参数
详情请参考
https://mp.weixin.qq.com/wiki?id=mp1499332673_Unm7V
:param card_id: 会员卡的card_id
:param outer_str: 渠道值,用于统计本次领取的渠道参数
:return: 内含调用开卡插件所需的参数的 Url
### Response:
def get_activate_url(self, card_id, outer_str=None):
"""
获取开卡插件 Url, 内含调用开卡插件所需的参数
详情请参考
https://mp.weixin.qq.com/wiki?id=mp1499332673_Unm7V
:param card_id: 会员卡的card_id
:param outer_str: 渠道值,用于统计本次领取的渠道参数
:return: 内含调用开卡插件所需的参数的 Url
"""
return self._post(
'card/membercard/activate/geturl',
data={
'card_id': card_id,
'outer_str': outer_str,
},
result_processor=lambda x: x['url'],
) |
def rm(self, container_alias):
'''
a method to remove an active container
:param container_alias: string with name or id of container
:return: string with container id
'''
title = '%s.rm' % self.__class__.__name__
# validate inputs
input_fields = {
'container_alias': container_alias
}
for key, value in input_fields.items():
object_title = '%s(%s=%s)' % (title, key, str(value))
self.fields.validate(value, '.%s' % key, object_title)
# run remove command
sys_cmd = 'docker rm -f %s' % container_alias
output_lines = self.command(sys_cmd).split('\n')
return output_lines[0] | a method to remove an active container
:param container_alias: string with name or id of container
:return: string with container id | Below is the the instruction that describes the task:
### Input:
a method to remove an active container
:param container_alias: string with name or id of container
:return: string with container id
### Response:
def rm(self, container_alias):
'''
a method to remove an active container
:param container_alias: string with name or id of container
:return: string with container id
'''
title = '%s.rm' % self.__class__.__name__
# validate inputs
input_fields = {
'container_alias': container_alias
}
for key, value in input_fields.items():
object_title = '%s(%s=%s)' % (title, key, str(value))
self.fields.validate(value, '.%s' % key, object_title)
# run remove command
sys_cmd = 'docker rm -f %s' % container_alias
output_lines = self.command(sys_cmd).split('\n')
return output_lines[0] |
def list_records(self, domain, limit=None, offset=None):
"""
Returns a list of all records configured for the specified domain.
"""
uri = "/domains/%s/records%s" % (utils.get_id(domain),
self._get_pagination_qs(limit, offset))
return self._list_records(uri) | Returns a list of all records configured for the specified domain. | Below is the the instruction that describes the task:
### Input:
Returns a list of all records configured for the specified domain.
### Response:
def list_records(self, domain, limit=None, offset=None):
"""
Returns a list of all records configured for the specified domain.
"""
uri = "/domains/%s/records%s" % (utils.get_id(domain),
self._get_pagination_qs(limit, offset))
return self._list_records(uri) |
def handle_typed_values(val, type_name, value_type):
"""Translate typed values into the appropriate python object.
Takes an element name, value, and type and returns a list
with the string value(s) properly converted to a python type.
TypedValues are handled in ucar.ma2.DataType in netcdfJava
in the DataType enum. Possibilities are:
"boolean"
"byte"
"char"
"short"
"int"
"long"
"float"
"double"
"Sequence"
"String"
"Structure"
"enum1"
"enum2"
"enum4"
"opaque"
"object"
All of these are values written as strings in the xml, so simply
applying int, float to the values will work in most cases (i.e.
the TDS encodes them as string values properly).
Examle XML element:
<attribute name="scale_factor" type="double" value="0.0010000000474974513"/>
Parameters
----------
val : string
The string representation of the value attribute of the xml element
type_name : string
The string representation of the name attribute of the xml element
value_type : string
The string representation of the type attribute of the xml element
Returns
-------
val : list
A list containing the properly typed python values.
"""
if value_type in ['byte', 'short', 'int', 'long']:
try:
val = [int(v) for v in re.split('[ ,]', val) if v]
except ValueError:
log.warning('Cannot convert "%s" to int. Keeping type as str.', val)
elif value_type in ['float', 'double']:
try:
val = [float(v) for v in re.split('[ ,]', val) if v]
except ValueError:
log.warning('Cannot convert "%s" to float. Keeping type as str.', val)
elif value_type == 'boolean':
try:
# special case for boolean type
val = val.split()
# values must be either true or false
for potential_bool in val:
if potential_bool not in ['true', 'false']:
raise ValueError
val = [True if item == 'true' else False for item in val]
except ValueError:
msg = 'Cannot convert values %s to boolean.'
msg += ' Keeping type as str.'
log.warning(msg, val)
elif value_type == 'String':
# nothing special for String type
pass
else:
# possibilities - Sequence, Structure, enum, opaque, object,
# and char.
# Not sure how to handle these as I do not have an example
# of how they would show up in dataset.xml
log.warning('%s type %s not understood. Keeping as String.',
type_name, value_type)
if not isinstance(val, list):
val = [val]
return val | Translate typed values into the appropriate python object.
Takes an element name, value, and type and returns a list
with the string value(s) properly converted to a python type.
TypedValues are handled in ucar.ma2.DataType in netcdfJava
in the DataType enum. Possibilities are:
"boolean"
"byte"
"char"
"short"
"int"
"long"
"float"
"double"
"Sequence"
"String"
"Structure"
"enum1"
"enum2"
"enum4"
"opaque"
"object"
All of these are values written as strings in the xml, so simply
applying int, float to the values will work in most cases (i.e.
the TDS encodes them as string values properly).
Examle XML element:
<attribute name="scale_factor" type="double" value="0.0010000000474974513"/>
Parameters
----------
val : string
The string representation of the value attribute of the xml element
type_name : string
The string representation of the name attribute of the xml element
value_type : string
The string representation of the type attribute of the xml element
Returns
-------
val : list
A list containing the properly typed python values. | Below is the the instruction that describes the task:
### Input:
Translate typed values into the appropriate python object.
Takes an element name, value, and type and returns a list
with the string value(s) properly converted to a python type.
TypedValues are handled in ucar.ma2.DataType in netcdfJava
in the DataType enum. Possibilities are:
"boolean"
"byte"
"char"
"short"
"int"
"long"
"float"
"double"
"Sequence"
"String"
"Structure"
"enum1"
"enum2"
"enum4"
"opaque"
"object"
All of these are values written as strings in the xml, so simply
applying int, float to the values will work in most cases (i.e.
the TDS encodes them as string values properly).
Examle XML element:
<attribute name="scale_factor" type="double" value="0.0010000000474974513"/>
Parameters
----------
val : string
The string representation of the value attribute of the xml element
type_name : string
The string representation of the name attribute of the xml element
value_type : string
The string representation of the type attribute of the xml element
Returns
-------
val : list
A list containing the properly typed python values.
### Response:
def handle_typed_values(val, type_name, value_type):
"""Translate typed values into the appropriate python object.
Takes an element name, value, and type and returns a list
with the string value(s) properly converted to a python type.
TypedValues are handled in ucar.ma2.DataType in netcdfJava
in the DataType enum. Possibilities are:
"boolean"
"byte"
"char"
"short"
"int"
"long"
"float"
"double"
"Sequence"
"String"
"Structure"
"enum1"
"enum2"
"enum4"
"opaque"
"object"
All of these are values written as strings in the xml, so simply
applying int, float to the values will work in most cases (i.e.
the TDS encodes them as string values properly).
Examle XML element:
<attribute name="scale_factor" type="double" value="0.0010000000474974513"/>
Parameters
----------
val : string
The string representation of the value attribute of the xml element
type_name : string
The string representation of the name attribute of the xml element
value_type : string
The string representation of the type attribute of the xml element
Returns
-------
val : list
A list containing the properly typed python values.
"""
if value_type in ['byte', 'short', 'int', 'long']:
try:
val = [int(v) for v in re.split('[ ,]', val) if v]
except ValueError:
log.warning('Cannot convert "%s" to int. Keeping type as str.', val)
elif value_type in ['float', 'double']:
try:
val = [float(v) for v in re.split('[ ,]', val) if v]
except ValueError:
log.warning('Cannot convert "%s" to float. Keeping type as str.', val)
elif value_type == 'boolean':
try:
# special case for boolean type
val = val.split()
# values must be either true or false
for potential_bool in val:
if potential_bool not in ['true', 'false']:
raise ValueError
val = [True if item == 'true' else False for item in val]
except ValueError:
msg = 'Cannot convert values %s to boolean.'
msg += ' Keeping type as str.'
log.warning(msg, val)
elif value_type == 'String':
# nothing special for String type
pass
else:
# possibilities - Sequence, Structure, enum, opaque, object,
# and char.
# Not sure how to handle these as I do not have an example
# of how they would show up in dataset.xml
log.warning('%s type %s not understood. Keeping as String.',
type_name, value_type)
if not isinstance(val, list):
val = [val]
return val |
def attributes_in_restriction(self):
"""
:return: list of attributes that are probably used in the restriction.
The function errs on the side of false positives.
For example, if the restriction is "val='id'", then the attribute 'id' would be flagged.
This is used internally for optimizing SQL statements.
"""
return set(name for name in self.heading.names
if re.search(r'\b' + name + r'\b', self.where_clause)) | :return: list of attributes that are probably used in the restriction.
The function errs on the side of false positives.
For example, if the restriction is "val='id'", then the attribute 'id' would be flagged.
This is used internally for optimizing SQL statements. | Below is the the instruction that describes the task:
### Input:
:return: list of attributes that are probably used in the restriction.
The function errs on the side of false positives.
For example, if the restriction is "val='id'", then the attribute 'id' would be flagged.
This is used internally for optimizing SQL statements.
### Response:
def attributes_in_restriction(self):
"""
:return: list of attributes that are probably used in the restriction.
The function errs on the side of false positives.
For example, if the restriction is "val='id'", then the attribute 'id' would be flagged.
This is used internally for optimizing SQL statements.
"""
return set(name for name in self.heading.names
if re.search(r'\b' + name + r'\b', self.where_clause)) |
def push_all(self, record_shard_pairs):
"""Push multiple (record, shard) pairs at once, with only one :meth:`heapq.heapify` call to maintain order.
:param record_shard_pairs: list of ``(record, shard)`` tuples
(see :func:`~bloop.stream.buffer.RecordBuffer.push`).
"""
# Faster than inserting one at a time; the heap is sorted once after all inserts.
for record, shard in record_shard_pairs:
item = heap_item(self.clock, record, shard)
self.heap.append(item)
heapq.heapify(self.heap) | Push multiple (record, shard) pairs at once, with only one :meth:`heapq.heapify` call to maintain order.
:param record_shard_pairs: list of ``(record, shard)`` tuples
(see :func:`~bloop.stream.buffer.RecordBuffer.push`). | Below is the the instruction that describes the task:
### Input:
Push multiple (record, shard) pairs at once, with only one :meth:`heapq.heapify` call to maintain order.
:param record_shard_pairs: list of ``(record, shard)`` tuples
(see :func:`~bloop.stream.buffer.RecordBuffer.push`).
### Response:
def push_all(self, record_shard_pairs):
"""Push multiple (record, shard) pairs at once, with only one :meth:`heapq.heapify` call to maintain order.
:param record_shard_pairs: list of ``(record, shard)`` tuples
(see :func:`~bloop.stream.buffer.RecordBuffer.push`).
"""
# Faster than inserting one at a time; the heap is sorted once after all inserts.
for record, shard in record_shard_pairs:
item = heap_item(self.clock, record, shard)
self.heap.append(item)
heapq.heapify(self.heap) |
def CompareTo(self, other_hash_value):
"""Compares the passed hash value with the hash value of this object
"""
if len(self.hash_value) != len(other_hash_value):
raise ValueError("Length of hashes doesn't match.")
# The hash byte array that is returned from ComputeHash method has the MSB at the end of the array
# so comparing the bytes from the end for compare operations.
for i in xrange(0, len(self.hash_value)):
if(self.hash_value[len(self.hash_value) - i - 1] < other_hash_value[len(self.hash_value) - i - 1]):
return -1
elif self.hash_value[len(self.hash_value) - i - 1] > other_hash_value[len(self.hash_value) - i - 1]:
return 1
return 0 | Compares the passed hash value with the hash value of this object | Below is the the instruction that describes the task:
### Input:
Compares the passed hash value with the hash value of this object
### Response:
def CompareTo(self, other_hash_value):
"""Compares the passed hash value with the hash value of this object
"""
if len(self.hash_value) != len(other_hash_value):
raise ValueError("Length of hashes doesn't match.")
# The hash byte array that is returned from ComputeHash method has the MSB at the end of the array
# so comparing the bytes from the end for compare operations.
for i in xrange(0, len(self.hash_value)):
if(self.hash_value[len(self.hash_value) - i - 1] < other_hash_value[len(self.hash_value) - i - 1]):
return -1
elif self.hash_value[len(self.hash_value) - i - 1] > other_hash_value[len(self.hash_value) - i - 1]:
return 1
return 0 |
def complete(self):
"""
Make the graph a complete graph.
@attention: This will modify the current graph.
"""
for each in self.nodes():
for other in self.nodes():
if (each != other and not self.has_edge((each, other))):
self.add_edge((each, other)) | Make the graph a complete graph.
@attention: This will modify the current graph. | Below is the the instruction that describes the task:
### Input:
Make the graph a complete graph.
@attention: This will modify the current graph.
### Response:
def complete(self):
"""
Make the graph a complete graph.
@attention: This will modify the current graph.
"""
for each in self.nodes():
for other in self.nodes():
if (each != other and not self.has_edge((each, other))):
self.add_edge((each, other)) |
def _do_include(self, rule, p_selectors, p_parents, p_children, scope, media, c_lineno, c_property, c_codestr, code, name):
"""
Implements @include, for @mixins
"""
funct, params, _ = name.partition('(')
funct = funct.strip()
funct = self.do_glob_math(
funct, rule[CONTEXT], rule[OPTIONS], rule, True)
params = split_params(depar(params + _))
new_params = {}
num_args = 0
for param in params:
varname, _, param = param.partition(':')
if param:
param = param.strip()
varname = varname.strip()
else:
param = varname.strip()
varname = num_args
if param:
num_args += 1
if param:
new_params[varname] = param
mixin = rule[OPTIONS].get('@mixin %s:%s' % (funct, num_args))
if not mixin:
# Fallback to single parmeter:
mixin = rule[OPTIONS].get('@mixin %s:1' % (funct,))
if mixin and all(map(lambda o: isinstance(o, int), new_params.keys())):
new_params = {0: ', '.join(new_params.values())}
if mixin:
m_params = mixin[0]
m_vars = mixin[1].copy()
m_codestr = mixin[2]
for varname, value in new_params.items():
try:
m_param = m_params[varname]
except:
m_param = varname
value = self.calculate(
value, rule[CONTEXT], rule[OPTIONS], rule)
m_vars[m_param] = value
for p in m_vars:
if p not in new_params:
if isinstance(m_vars[p], basestring):
value = self.calculate(
m_vars[p], m_vars, rule[OPTIONS], rule)
m_vars[p] = value
_context = rule[CONTEXT].copy()
_context.update(m_vars)
_rule = spawn_rule(
rule, codestr=m_codestr, context=_context, lineno=c_lineno)
self.manage_children(
_rule, p_selectors, p_parents, p_children, scope, media)
else:
log.error("Required mixin not found: %s:%d (%s)",
funct, num_args, rule[INDEX][rule[LINENO]]) | Implements @include, for @mixins | Below is the the instruction that describes the task:
### Input:
Implements @include, for @mixins
### Response:
def _do_include(self, rule, p_selectors, p_parents, p_children, scope, media, c_lineno, c_property, c_codestr, code, name):
"""
Implements @include, for @mixins
"""
funct, params, _ = name.partition('(')
funct = funct.strip()
funct = self.do_glob_math(
funct, rule[CONTEXT], rule[OPTIONS], rule, True)
params = split_params(depar(params + _))
new_params = {}
num_args = 0
for param in params:
varname, _, param = param.partition(':')
if param:
param = param.strip()
varname = varname.strip()
else:
param = varname.strip()
varname = num_args
if param:
num_args += 1
if param:
new_params[varname] = param
mixin = rule[OPTIONS].get('@mixin %s:%s' % (funct, num_args))
if not mixin:
# Fallback to single parmeter:
mixin = rule[OPTIONS].get('@mixin %s:1' % (funct,))
if mixin and all(map(lambda o: isinstance(o, int), new_params.keys())):
new_params = {0: ', '.join(new_params.values())}
if mixin:
m_params = mixin[0]
m_vars = mixin[1].copy()
m_codestr = mixin[2]
for varname, value in new_params.items():
try:
m_param = m_params[varname]
except:
m_param = varname
value = self.calculate(
value, rule[CONTEXT], rule[OPTIONS], rule)
m_vars[m_param] = value
for p in m_vars:
if p not in new_params:
if isinstance(m_vars[p], basestring):
value = self.calculate(
m_vars[p], m_vars, rule[OPTIONS], rule)
m_vars[p] = value
_context = rule[CONTEXT].copy()
_context.update(m_vars)
_rule = spawn_rule(
rule, codestr=m_codestr, context=_context, lineno=c_lineno)
self.manage_children(
_rule, p_selectors, p_parents, p_children, scope, media)
else:
log.error("Required mixin not found: %s:%d (%s)",
funct, num_args, rule[INDEX][rule[LINENO]]) |
def warn(msg, level=0, prefix=True):
"""Prints the specified message as a warning; prepends "WARNING" to
the message, so that can be left off.
"""
if will_print(level):
printer(("WARNING: " if prefix else "") + msg, "yellow") | Prints the specified message as a warning; prepends "WARNING" to
the message, so that can be left off. | Below is the the instruction that describes the task:
### Input:
Prints the specified message as a warning; prepends "WARNING" to
the message, so that can be left off.
### Response:
def warn(msg, level=0, prefix=True):
"""Prints the specified message as a warning; prepends "WARNING" to
the message, so that can be left off.
"""
if will_print(level):
printer(("WARNING: " if prefix else "") + msg, "yellow") |
def list_to_raw_list(poselist):
"""
Flatten a normal pose list into a raw list
:param poselist: a formatted list [[x,y,z], [x,y,z,w]]
:return: a raw list [x, y, z, x, y, z, w]
"""
if not (isinstance(poselist, list) or isinstance(poselist, tuple)):
raise TypeError(
"flatten_pose({}) does not accept this type of argument".format(
str(type(poselist))))
return [field for pose in poselist for field in pose] | Flatten a normal pose list into a raw list
:param poselist: a formatted list [[x,y,z], [x,y,z,w]]
:return: a raw list [x, y, z, x, y, z, w] | Below is the the instruction that describes the task:
### Input:
Flatten a normal pose list into a raw list
:param poselist: a formatted list [[x,y,z], [x,y,z,w]]
:return: a raw list [x, y, z, x, y, z, w]
### Response:
def list_to_raw_list(poselist):
"""
Flatten a normal pose list into a raw list
:param poselist: a formatted list [[x,y,z], [x,y,z,w]]
:return: a raw list [x, y, z, x, y, z, w]
"""
if not (isinstance(poselist, list) or isinstance(poselist, tuple)):
raise TypeError(
"flatten_pose({}) does not accept this type of argument".format(
str(type(poselist))))
return [field for pose in poselist for field in pose] |
def p_function_statement(self, p):
'function_statement : funcvardecls function_calc'
p[0] = p[1] + (p[2],)
p.set_lineno(0, p.lineno(1)) | function_statement : funcvardecls function_calc | Below is the the instruction that describes the task:
### Input:
function_statement : funcvardecls function_calc
### Response:
def p_function_statement(self, p):
'function_statement : funcvardecls function_calc'
p[0] = p[1] + (p[2],)
p.set_lineno(0, p.lineno(1)) |
def stage_platform_hpp(capnproot):
"""stage platform.hpp into libcapnp sources
Tries ./configure first (except on Windows),
then falls back on included platform.hpp previously generated.
"""
platform_hpp = pjoin(capnproot, 'src', 'platform.hpp')
if os.path.exists(platform_hpp):
info("already have platform.hpp")
return
if os.name == 'nt':
# stage msvc platform header
platform_dir = pjoin(capnproot, 'builds', 'msvc')
else:
info("attempting ./configure to generate platform.hpp")
p = Popen('./configure', cwd=capnproot, shell=True,
stdout=PIPE, stderr=PIPE,
)
o,e = p.communicate()
if p.returncode:
warn("failed to configure libcapnp:\n%s" % e)
if sys.platform == 'darwin':
platform_dir = pjoin(HERE, 'include_darwin')
elif sys.platform.startswith('freebsd'):
platform_dir = pjoin(HERE, 'include_freebsd')
elif sys.platform.startswith('linux-armv'):
platform_dir = pjoin(HERE, 'include_linux-armv')
else:
platform_dir = pjoin(HERE, 'include_linux')
else:
return
info("staging platform.hpp from: %s" % platform_dir)
shutil.copy(pjoin(platform_dir, 'platform.hpp'), platform_hpp) | stage platform.hpp into libcapnp sources
Tries ./configure first (except on Windows),
then falls back on included platform.hpp previously generated. | Below is the the instruction that describes the task:
### Input:
stage platform.hpp into libcapnp sources
Tries ./configure first (except on Windows),
then falls back on included platform.hpp previously generated.
### Response:
def stage_platform_hpp(capnproot):
"""stage platform.hpp into libcapnp sources
Tries ./configure first (except on Windows),
then falls back on included platform.hpp previously generated.
"""
platform_hpp = pjoin(capnproot, 'src', 'platform.hpp')
if os.path.exists(platform_hpp):
info("already have platform.hpp")
return
if os.name == 'nt':
# stage msvc platform header
platform_dir = pjoin(capnproot, 'builds', 'msvc')
else:
info("attempting ./configure to generate platform.hpp")
p = Popen('./configure', cwd=capnproot, shell=True,
stdout=PIPE, stderr=PIPE,
)
o,e = p.communicate()
if p.returncode:
warn("failed to configure libcapnp:\n%s" % e)
if sys.platform == 'darwin':
platform_dir = pjoin(HERE, 'include_darwin')
elif sys.platform.startswith('freebsd'):
platform_dir = pjoin(HERE, 'include_freebsd')
elif sys.platform.startswith('linux-armv'):
platform_dir = pjoin(HERE, 'include_linux-armv')
else:
platform_dir = pjoin(HERE, 'include_linux')
else:
return
info("staging platform.hpp from: %s" % platform_dir)
shutil.copy(pjoin(platform_dir, 'platform.hpp'), platform_hpp) |
def tables(self):
"""
A list containing the tables in this container, in document order.
Read-only.
"""
from .table import Table
return [Table(tbl, self) for tbl in self._element.tbl_lst] | A list containing the tables in this container, in document order.
Read-only. | Below is the instruction that describes the task:
### Input:
A list containing the tables in this container, in document order.
Read-only.
### Response:
def tables(self):
"""
A list containing the tables in this container, in document order.
Read-only.
"""
from .table import Table
return [Table(tbl, self) for tbl in self._element.tbl_lst] |
def play_tour(self, name=None, interval=0):
""" Plays a tour on the current website.
@Params
name - If creating multiple tours at the same time,
use this to select the tour you wish to add steps to.
interval - The delay time between autoplaying tour steps.
If set to 0 (default), the tour is fully manual control.
"""
if self.headless:
return # Tours should not run in headless mode.
if not name:
name = "default"
if name not in self._tour_steps:
raise Exception("Tour {%s} does not exist!" % name)
if "Bootstrap" in self._tour_steps[name][0]:
tour_helper.play_bootstrap_tour(
self.driver, self._tour_steps, self.browser,
self.message_duration, name=name, interval=interval)
elif "Hopscotch" in self._tour_steps[name][0]:
tour_helper.play_hopscotch_tour(
self.driver, self._tour_steps, self.browser,
self.message_duration, name=name, interval=interval)
elif "IntroJS" in self._tour_steps[name][0]:
tour_helper.play_introjs_tour(
self.driver, self._tour_steps, self.browser,
self.message_duration, name=name, interval=interval)
else:
# "Shepherd"
tour_helper.play_shepherd_tour(
self.driver, self._tour_steps,
self.message_duration, name=name, interval=interval) | Plays a tour on the current website.
@Params
name - If creating multiple tours at the same time,
use this to select the tour you wish to add steps to.
interval - The delay time between autoplaying tour steps.
If set to 0 (default), the tour is fully manual control. | Below is the instruction that describes the task:
### Input:
Plays a tour on the current website.
@Params
name - If creating multiple tours at the same time,
use this to select the tour you wish to add steps to.
interval - The delay time between autoplaying tour steps.
If set to 0 (default), the tour is fully manual control.
### Response:
def play_tour(self, name=None, interval=0):
""" Plays a tour on the current website.
@Params
name - If creating multiple tours at the same time,
use this to select the tour you wish to add steps to.
interval - The delay time between autoplaying tour steps.
If set to 0 (default), the tour is fully manual control.
"""
if self.headless:
return # Tours should not run in headless mode.
if not name:
name = "default"
if name not in self._tour_steps:
raise Exception("Tour {%s} does not exist!" % name)
if "Bootstrap" in self._tour_steps[name][0]:
tour_helper.play_bootstrap_tour(
self.driver, self._tour_steps, self.browser,
self.message_duration, name=name, interval=interval)
elif "Hopscotch" in self._tour_steps[name][0]:
tour_helper.play_hopscotch_tour(
self.driver, self._tour_steps, self.browser,
self.message_duration, name=name, interval=interval)
elif "IntroJS" in self._tour_steps[name][0]:
tour_helper.play_introjs_tour(
self.driver, self._tour_steps, self.browser,
self.message_duration, name=name, interval=interval)
else:
# "Shepherd"
tour_helper.play_shepherd_tour(
self.driver, self._tour_steps,
self.message_duration, name=name, interval=interval) |
def print_debug(self, text, indent=0):
"""Only prints debug info on screen when self.debug == True."""
if self.debug:
if indent > 0:
print(" "*self.debug, text)
self.debug += indent
if indent <= 0:
print(" "*self.debug, text) | Only prints debug info on screen when self.debug == True. | Below is the instruction that describes the task:
### Input:
Only prints debug info on screen when self.debug == True.
### Response:
def print_debug(self, text, indent=0):
"""Only prints debug info on screen when self.debug == True."""
if self.debug:
if indent > 0:
print(" "*self.debug, text)
self.debug += indent
if indent <= 0:
print(" "*self.debug, text) |
def arr_base10toN(anum10, aradix, *args):
"""
ARGS
anum10 in number in base 10
aradix in convert <anum10> to number in base
+ <aradix>
OPTIONAL
forcelength in if nonzero, indicates the length
+ of the return array. Useful if
+ array needs to be zero padded.
DESC
Converts a scalar from base 10 to base radix. Return
an array.
"""
new_num_arr = array(())
current = anum10
while current != 0:
remainder = current % aradix
new_num_arr = r_[remainder, new_num_arr]
current = current / aradix
forcelength = new_num_arr.size
# Optionally, allow user to specify word length
if len(args): forcelength = args[0]
while new_num_arr.size < forcelength:
new_num_arr = r_[0, new_num_arr]
return new_num_arr | ARGS
anum10 in number in base 10
aradix in convert <anum10> to number in base
+ <aradix>
OPTIONAL
forcelength in if nonzero, indicates the length
+ of the return array. Useful if
+ array needs to be zero padded.
DESC
Converts a scalar from base 10 to base radix. Return
an array. | Below is the instruction that describes the task:
### Input:
ARGS
anum10 in number in base 10
aradix in convert <anum10> to number in base
+ <aradix>
OPTIONAL
forcelength in if nonzero, indicates the length
+ of the return array. Useful if
+ array needs to be zero padded.
DESC
Converts a scalar from base 10 to base radix. Return
an array.
### Response:
def arr_base10toN(anum10, aradix, *args):
"""
ARGS
anum10 in number in base 10
aradix in convert <anum10> to number in base
+ <aradix>
OPTIONAL
forcelength in if nonzero, indicates the length
+ of the return array. Useful if
+ array needs to be zero padded.
DESC
Converts a scalar from base 10 to base radix. Return
an array.
"""
new_num_arr = array(())
current = anum10
while current != 0:
remainder = current % aradix
new_num_arr = r_[remainder, new_num_arr]
current = current / aradix
forcelength = new_num_arr.size
# Optionally, allow user to specify word length
if len(args): forcelength = args[0]
while new_num_arr.size < forcelength:
new_num_arr = r_[0, new_num_arr]
return new_num_arr |
def list_nodes_full(mask='mask[id]', call=None):
'''
Return a list of the VMs that are on the provider
'''
if call == 'action':
raise SaltCloudSystemExit(
'The list_nodes_full function must be called with -f or --function.'
)
ret = {}
conn = get_conn(service='SoftLayer_Account')
response = conn.getVirtualGuests()
for node_id in response:
hostname = node_id['hostname']
ret[hostname] = node_id
__utils__['cloud.cache_node_list'](ret, __active_provider_name__.split(':')[0], __opts__)
return ret | Return a list of the VMs that are on the provider | Below is the instruction that describes the task:
### Input:
Return a list of the VMs that are on the provider
### Response:
def list_nodes_full(mask='mask[id]', call=None):
'''
Return a list of the VMs that are on the provider
'''
if call == 'action':
raise SaltCloudSystemExit(
'The list_nodes_full function must be called with -f or --function.'
)
ret = {}
conn = get_conn(service='SoftLayer_Account')
response = conn.getVirtualGuests()
for node_id in response:
hostname = node_id['hostname']
ret[hostname] = node_id
__utils__['cloud.cache_node_list'](ret, __active_provider_name__.split(':')[0], __opts__)
return ret |
def read_csvlc(lcfile):
'''This reads a HAT data server or LCC-Server produced CSV light curve
into an lcdict.
This will automatically figure out the format of the file
provided. Currently, it can read:
- legacy HAT data server CSV LCs (e.g. from
https://hatsouth.org/planets/lightcurves.html) with an extension of the
form: `.hatlc.csv.gz`.
- all LCC-Server produced LCC-CSV-V1 LCs (e.g. from
https://data.hatsurveys.org) with an extension of the form: `-csvlc.gz`.
Parameters
----------
lcfile : str
The light curve file to read.
Returns
-------
dict
Returns an lcdict that can be read and used by many astrobase processing
functions.
'''
# read in the file and split by lines
if '.gz' in os.path.basename(lcfile):
LOGINFO('reading gzipped HATLC: %s' % lcfile)
infd = gzip.open(lcfile,'rb')
else:
LOGINFO('reading HATLC: %s' % lcfile)
infd = open(lcfile,'rb')
# this transparently reads LCC CSVLCs
lcformat_check = infd.read(12).decode()
if 'LCC-CSVLC' in lcformat_check:
infd.close()
return read_lcc_csvlc(lcfile)
else:
infd.seek(0)
# below is reading the HATLC v2 CSV LCs
lctext = infd.read().decode() # argh Python 3
infd.close()
# figure out the header and get the LC columns
lcstart = lctext.index('# LIGHTCURVE\n')
lcheader = lctext[:lcstart+12]
lccolumns = lctext[lcstart+13:].split('\n')
lccolumns = [x for x in lccolumns if len(x) > 0]
# initialize the lcdict and parse the CSV header
lcdict = _parse_csv_header(lcheader)
# tranpose the LC rows into columns
lccolumns = [x.split(',') for x in lccolumns]
lccolumns = list(zip(*lccolumns)) # argh more Python 3
# write the columns to the dict
for colind, col in enumerate(lcdict['columns']):
if (col.split('_')[0] in LC_MAG_COLUMNS or
col.split('_')[0] in LC_ERR_COLUMNS or
col.split('_')[0] in LC_FLAG_COLUMNS):
lcdict[col] = np.array([_smartcast(x,
COLUMNDEFS[col.split('_')[0]][2])
for x in lccolumns[colind]])
elif col in COLUMNDEFS:
lcdict[col] = np.array([_smartcast(x,COLUMNDEFS[col][2])
for x in lccolumns[colind]])
else:
LOGWARNING('lcdict col %s has no formatter available' % col)
continue
return lcdict | This reads a HAT data server or LCC-Server produced CSV light curve
into an lcdict.
This will automatically figure out the format of the file
provided. Currently, it can read:
- legacy HAT data server CSV LCs (e.g. from
https://hatsouth.org/planets/lightcurves.html) with an extension of the
form: `.hatlc.csv.gz`.
- all LCC-Server produced LCC-CSV-V1 LCs (e.g. from
https://data.hatsurveys.org) with an extension of the form: `-csvlc.gz`.
Parameters
----------
lcfile : str
The light curve file to read.
Returns
-------
dict
Returns an lcdict that can be read and used by many astrobase processing
functions. | Below is the instruction that describes the task:
### Input:
This reads a HAT data server or LCC-Server produced CSV light curve
into an lcdict.
This will automatically figure out the format of the file
provided. Currently, it can read:
- legacy HAT data server CSV LCs (e.g. from
https://hatsouth.org/planets/lightcurves.html) with an extension of the
form: `.hatlc.csv.gz`.
- all LCC-Server produced LCC-CSV-V1 LCs (e.g. from
https://data.hatsurveys.org) with an extension of the form: `-csvlc.gz`.
Parameters
----------
lcfile : str
The light curve file to read.
Returns
-------
dict
Returns an lcdict that can be read and used by many astrobase processing
functions.
### Response:
def read_csvlc(lcfile):
'''This reads a HAT data server or LCC-Server produced CSV light curve
into an lcdict.
This will automatically figure out the format of the file
provided. Currently, it can read:
- legacy HAT data server CSV LCs (e.g. from
https://hatsouth.org/planets/lightcurves.html) with an extension of the
form: `.hatlc.csv.gz`.
- all LCC-Server produced LCC-CSV-V1 LCs (e.g. from
https://data.hatsurveys.org) with an extension of the form: `-csvlc.gz`.
Parameters
----------
lcfile : str
The light curve file to read.
Returns
-------
dict
Returns an lcdict that can be read and used by many astrobase processing
functions.
'''
# read in the file and split by lines
if '.gz' in os.path.basename(lcfile):
LOGINFO('reading gzipped HATLC: %s' % lcfile)
infd = gzip.open(lcfile,'rb')
else:
LOGINFO('reading HATLC: %s' % lcfile)
infd = open(lcfile,'rb')
# this transparently reads LCC CSVLCs
lcformat_check = infd.read(12).decode()
if 'LCC-CSVLC' in lcformat_check:
infd.close()
return read_lcc_csvlc(lcfile)
else:
infd.seek(0)
# below is reading the HATLC v2 CSV LCs
lctext = infd.read().decode() # argh Python 3
infd.close()
# figure out the header and get the LC columns
lcstart = lctext.index('# LIGHTCURVE\n')
lcheader = lctext[:lcstart+12]
lccolumns = lctext[lcstart+13:].split('\n')
lccolumns = [x for x in lccolumns if len(x) > 0]
# initialize the lcdict and parse the CSV header
lcdict = _parse_csv_header(lcheader)
# tranpose the LC rows into columns
lccolumns = [x.split(',') for x in lccolumns]
lccolumns = list(zip(*lccolumns)) # argh more Python 3
# write the columns to the dict
for colind, col in enumerate(lcdict['columns']):
if (col.split('_')[0] in LC_MAG_COLUMNS or
col.split('_')[0] in LC_ERR_COLUMNS or
col.split('_')[0] in LC_FLAG_COLUMNS):
lcdict[col] = np.array([_smartcast(x,
COLUMNDEFS[col.split('_')[0]][2])
for x in lccolumns[colind]])
elif col in COLUMNDEFS:
lcdict[col] = np.array([_smartcast(x,COLUMNDEFS[col][2])
for x in lccolumns[colind]])
else:
LOGWARNING('lcdict col %s has no formatter available' % col)
continue
return lcdict |
def edit_protection(self, strict=github.GithubObject.NotSet, contexts=github.GithubObject.NotSet, enforce_admins=github.GithubObject.NotSet, dismissal_users=github.GithubObject.NotSet, dismissal_teams=github.GithubObject.NotSet, dismiss_stale_reviews=github.GithubObject.NotSet, require_code_owner_reviews=github.GithubObject.NotSet, required_approving_review_count=github.GithubObject.NotSet, user_push_restrictions=github.GithubObject.NotSet, team_push_restrictions=github.GithubObject.NotSet):
"""
:calls: `PUT /repos/:owner/:repo/branches/:branch/protection <https://developer.github.com/v3/repos/branches>`_
:strict: bool
:contexts: list of strings
:enforce_admins: bool
:dismissal_users: list of strings
:dismissal_teams: list of strings
:dismiss_stale_reviews: bool
:require_code_owner_reviews: bool
:required_approving_review_count: int
:user_push_restrictions: list of strings
:team_push_restrictions: list of strings
NOTE: The GitHub API groups strict and contexts together, both must
be submitted. Take care to pass both as arguments even if only one is
changing. Use edit_required_status_checks() to avoid this.
"""
assert strict is github.GithubObject.NotSet or isinstance(strict, bool), strict
assert contexts is github.GithubObject.NotSet or all(isinstance(element, (str, unicode)) or isinstance(element, (str, unicode)) for element in contexts), contexts
assert enforce_admins is github.GithubObject.NotSet or isinstance(enforce_admins, bool), enforce_admins
assert dismissal_users is github.GithubObject.NotSet or all(isinstance(element, (str, unicode)) or isinstance(element, (str, unicode)) for element in dismissal_users), dismissal_users
assert dismissal_teams is github.GithubObject.NotSet or all(isinstance(element, (str, unicode)) or isinstance(element, (str, unicode)) for element in dismissal_teams), dismissal_teams
assert dismiss_stale_reviews is github.GithubObject.NotSet or isinstance(dismiss_stale_reviews, bool), dismiss_stale_reviews
assert require_code_owner_reviews is github.GithubObject.NotSet or isinstance(require_code_owner_reviews, bool), require_code_owner_reviews
assert required_approving_review_count is github.GithubObject.NotSet or isinstance(required_approving_review_count, int), required_approving_review_count
post_parameters = {}
if strict is not github.GithubObject.NotSet or contexts is not github.GithubObject.NotSet:
if strict is github.GithubObject.NotSet:
strict = False
if contexts is github.GithubObject.NotSet:
contexts = []
post_parameters["required_status_checks"] = {"strict": strict, "contexts": contexts}
else:
post_parameters["required_status_checks"] = None
if enforce_admins is not github.GithubObject.NotSet:
post_parameters["enforce_admins"] = enforce_admins
else:
post_parameters["enforce_admins"] = None
if dismissal_users is not github.GithubObject.NotSet or dismissal_teams is not github.GithubObject.NotSet or dismiss_stale_reviews is not github.GithubObject.NotSet or require_code_owner_reviews is not github.GithubObject.NotSet or required_approving_review_count is not github.GithubObject.NotSet:
post_parameters["required_pull_request_reviews"] = {}
if dismiss_stale_reviews is not github.GithubObject.NotSet:
post_parameters["required_pull_request_reviews"]["dismiss_stale_reviews"] = dismiss_stale_reviews
if require_code_owner_reviews is not github.GithubObject.NotSet:
post_parameters["required_pull_request_reviews"]["require_code_owner_reviews"] = require_code_owner_reviews
if required_approving_review_count is not github.GithubObject.NotSet:
post_parameters["required_pull_request_reviews"]["required_approving_review_count"] = required_approving_review_count
if dismissal_users is not github.GithubObject.NotSet:
post_parameters["required_pull_request_reviews"]["dismissal_restrictions"] = {"users": dismissal_users}
if dismissal_teams is not github.GithubObject.NotSet:
if "dismissal_restrictions" not in post_parameters["required_pull_request_reviews"]:
post_parameters["required_pull_request_reviews"]["dismissal_restrictions"] = {}
post_parameters["required_pull_request_reviews"]["dismissal_restrictions"]["teams"] = dismissal_teams
else:
post_parameters["required_pull_request_reviews"] = None
if user_push_restrictions is not github.GithubObject.NotSet or team_push_restrictions is not github.GithubObject.NotSet:
if user_push_restrictions is github.GithubObject.NotSet:
user_push_restrictions = []
if team_push_restrictions is github.GithubObject.NotSet:
team_push_restrictions = []
post_parameters["restrictions"] = {"users": user_push_restrictions, "teams": team_push_restrictions}
else:
post_parameters["restrictions"] = None
headers, data = self._requester.requestJsonAndCheck(
"PUT",
self.protection_url,
headers={'Accept': Consts.mediaTypeRequireMultipleApprovingReviews},
input=post_parameters
) | :calls: `PUT /repos/:owner/:repo/branches/:branch/protection <https://developer.github.com/v3/repos/branches>`_
:strict: bool
:contexts: list of strings
:enforce_admins: bool
:dismissal_users: list of strings
:dismissal_teams: list of strings
:dismiss_stale_reviews: bool
:require_code_owner_reviews: bool
:required_approving_review_count: int
:user_push_restrictions: list of strings
:team_push_restrictions: list of strings
NOTE: The GitHub API groups strict and contexts together, both must
be submitted. Take care to pass both as arguments even if only one is
changing. Use edit_required_status_checks() to avoid this. | Below is the instruction that describes the task:
### Input:
:calls: `PUT /repos/:owner/:repo/branches/:branch/protection <https://developer.github.com/v3/repos/branches>`_
:strict: bool
:contexts: list of strings
:enforce_admins: bool
:dismissal_users: list of strings
:dismissal_teams: list of strings
:dismiss_stale_reviews: bool
:require_code_owner_reviews: bool
:required_approving_review_count: int
:user_push_restrictions: list of strings
:team_push_restrictions: list of strings
NOTE: The GitHub API groups strict and contexts together, both must
be submitted. Take care to pass both as arguments even if only one is
changing. Use edit_required_status_checks() to avoid this.
### Response:
def edit_protection(self, strict=github.GithubObject.NotSet, contexts=github.GithubObject.NotSet, enforce_admins=github.GithubObject.NotSet, dismissal_users=github.GithubObject.NotSet, dismissal_teams=github.GithubObject.NotSet, dismiss_stale_reviews=github.GithubObject.NotSet, require_code_owner_reviews=github.GithubObject.NotSet, required_approving_review_count=github.GithubObject.NotSet, user_push_restrictions=github.GithubObject.NotSet, team_push_restrictions=github.GithubObject.NotSet):
"""
:calls: `PUT /repos/:owner/:repo/branches/:branch/protection <https://developer.github.com/v3/repos/branches>`_
:strict: bool
:contexts: list of strings
:enforce_admins: bool
:dismissal_users: list of strings
:dismissal_teams: list of strings
:dismiss_stale_reviews: bool
:require_code_owner_reviews: bool
:required_approving_review_count: int
:user_push_restrictions: list of strings
:team_push_restrictions: list of strings
NOTE: The GitHub API groups strict and contexts together, both must
be submitted. Take care to pass both as arguments even if only one is
changing. Use edit_required_status_checks() to avoid this.
"""
assert strict is github.GithubObject.NotSet or isinstance(strict, bool), strict
assert contexts is github.GithubObject.NotSet or all(isinstance(element, (str, unicode)) or isinstance(element, (str, unicode)) for element in contexts), contexts
assert enforce_admins is github.GithubObject.NotSet or isinstance(enforce_admins, bool), enforce_admins
assert dismissal_users is github.GithubObject.NotSet or all(isinstance(element, (str, unicode)) or isinstance(element, (str, unicode)) for element in dismissal_users), dismissal_users
assert dismissal_teams is github.GithubObject.NotSet or all(isinstance(element, (str, unicode)) or isinstance(element, (str, unicode)) for element in dismissal_teams), dismissal_teams
assert dismiss_stale_reviews is github.GithubObject.NotSet or isinstance(dismiss_stale_reviews, bool), dismiss_stale_reviews
assert require_code_owner_reviews is github.GithubObject.NotSet or isinstance(require_code_owner_reviews, bool), require_code_owner_reviews
assert required_approving_review_count is github.GithubObject.NotSet or isinstance(required_approving_review_count, int), required_approving_review_count
post_parameters = {}
if strict is not github.GithubObject.NotSet or contexts is not github.GithubObject.NotSet:
if strict is github.GithubObject.NotSet:
strict = False
if contexts is github.GithubObject.NotSet:
contexts = []
post_parameters["required_status_checks"] = {"strict": strict, "contexts": contexts}
else:
post_parameters["required_status_checks"] = None
if enforce_admins is not github.GithubObject.NotSet:
post_parameters["enforce_admins"] = enforce_admins
else:
post_parameters["enforce_admins"] = None
if dismissal_users is not github.GithubObject.NotSet or dismissal_teams is not github.GithubObject.NotSet or dismiss_stale_reviews is not github.GithubObject.NotSet or require_code_owner_reviews is not github.GithubObject.NotSet or required_approving_review_count is not github.GithubObject.NotSet:
post_parameters["required_pull_request_reviews"] = {}
if dismiss_stale_reviews is not github.GithubObject.NotSet:
post_parameters["required_pull_request_reviews"]["dismiss_stale_reviews"] = dismiss_stale_reviews
if require_code_owner_reviews is not github.GithubObject.NotSet:
post_parameters["required_pull_request_reviews"]["require_code_owner_reviews"] = require_code_owner_reviews
if required_approving_review_count is not github.GithubObject.NotSet:
post_parameters["required_pull_request_reviews"]["required_approving_review_count"] = required_approving_review_count
if dismissal_users is not github.GithubObject.NotSet:
post_parameters["required_pull_request_reviews"]["dismissal_restrictions"] = {"users": dismissal_users}
if dismissal_teams is not github.GithubObject.NotSet:
if "dismissal_restrictions" not in post_parameters["required_pull_request_reviews"]:
post_parameters["required_pull_request_reviews"]["dismissal_restrictions"] = {}
post_parameters["required_pull_request_reviews"]["dismissal_restrictions"]["teams"] = dismissal_teams
else:
post_parameters["required_pull_request_reviews"] = None
if user_push_restrictions is not github.GithubObject.NotSet or team_push_restrictions is not github.GithubObject.NotSet:
if user_push_restrictions is github.GithubObject.NotSet:
user_push_restrictions = []
if team_push_restrictions is github.GithubObject.NotSet:
team_push_restrictions = []
post_parameters["restrictions"] = {"users": user_push_restrictions, "teams": team_push_restrictions}
else:
post_parameters["restrictions"] = None
headers, data = self._requester.requestJsonAndCheck(
"PUT",
self.protection_url,
headers={'Accept': Consts.mediaTypeRequireMultipleApprovingReviews},
input=post_parameters
) |
def append_tag(self, field_number, wire_type):
"""Appends a tag containing field number and wire type information."""
self._stream.append_var_uint32(wire_format.pack_tag(field_number, wire_type)) | Appends a tag containing field number and wire type information. | Below is the instruction that describes the task:
### Input:
Appends a tag containing field number and wire type information.
### Response:
def append_tag(self, field_number, wire_type):
"""Appends a tag containing field number and wire type information."""
self._stream.append_var_uint32(wire_format.pack_tag(field_number, wire_type)) |
def get_app_status(system_key):
"""
Get Undergraduate application status
@return ApplicationStatus object
@InvalidSystemKey if system_key is not valid
"""
if invalid_system_key(system_key):
raise InvalidSystemKey(
"Invalid system key in get_app_status({})".format(system_key))
url = get_appstatus_url(system_key)
response = DAO.getURL(url, {})
response_data = str(response.data)
if response.status != 200:
raise DataFailureException(url, response.status, response_data)
if len(response.data) == 0:
is_cached = (type(response) == restclients_core.models.MockHttp)
raise Exception(
"{} Unexpected Response Data: {}, from cache: {}".format(
url, response_data, str(is_cached)))
status = parse_statuses(response_data)
return status | Get Undergraduate application status
@return ApplicationStatus object
@InvalidSystemKey if system_key is not valid | Below is the instruction that describes the task:
### Input:
Get Undergraduate application status
@return ApplicationStatus object
@InvalidSystemKey if system_key is not valid
### Response:
def get_app_status(system_key):
"""
Get Undergraduate application status
@return ApplicationStatus object
@InvalidSystemKey if system_key is not valid
"""
if invalid_system_key(system_key):
raise InvalidSystemKey(
"Invalid system key in get_app_status({})".format(system_key))
url = get_appstatus_url(system_key)
response = DAO.getURL(url, {})
response_data = str(response.data)
if response.status != 200:
raise DataFailureException(url, response.status, response_data)
if len(response.data) == 0:
is_cached = (type(response) == restclients_core.models.MockHttp)
raise Exception(
"{} Unexpected Response Data: {}, from cache: {}".format(
url, response_data, str(is_cached)))
status = parse_statuses(response_data)
return status |
def start(self, auto_register=True):
"""
Tells the container to start this agent.
It returns a coroutine or a future depending on whether it is called from a coroutine or a synchronous method.
Args:
auto_register (bool): register the agent in the server (Default value = True)
"""
return self.container.start_agent(agent=self, auto_register=auto_register) | Tells the container to start this agent.
It returns a coroutine or a future depending on whether it is called from a coroutine or a synchronous method.
Args:
auto_register (bool): register the agent in the server (Default value = True) | Below is the instruction that describes the task:
### Input:
Tells the container to start this agent.
It returns a coroutine or a future depending on whether it is called from a coroutine or a synchronous method.
Args:
auto_register (bool): register the agent in the server (Default value = True)
### Response:
def start(self, auto_register=True):
"""
Tells the container to start this agent.
It returns a coroutine or a future depending on whether it is called from a coroutine or a synchronous method.
Args:
auto_register (bool): register the agent in the server (Default value = True)
"""
return self.container.start_agent(agent=self, auto_register=auto_register) |
def apply(modifications, state):
""" applies modifications to given state
Parameters
----------
modifications: list of tuples
created by this class.list method.
state: dict
state dictionary
"""
count = 0
for a in modifications:
if _debug:
assert a[0] in ('set', 'mv', 'map', 'rm')
logger.debug("processing rule: %s", str(a))
if len(a) == 3:
operation, name, value = a
if operation == 'set':
state[name] = value
count += 1
elif operation == 'mv':
try:
arg = state.pop(name)
state[value] = arg
count += 1
except KeyError:
raise ClassVersionException("the previous version didn't "
"store an attribute named '{}'".format(a[1]))
elif operation == 'map':
func = value
if hasattr(func, '__func__'):
func = func.__func__
assert callable(func)
state[name] = func(state[name])
count += 1
elif len(a) == 2:
operation, value = a
if operation == 'rm':
state.pop(value, None)
count += 1
elif operation == 'transform':
assert callable(value)
value(state)
count += 1
assert count == len(modifications), 'was not able to process all modifications on state' | applies modifications to given state
Parameters
----------
modifications: list of tuples
created by this class.list method.
state: dict
state dictionary | Below is the instruction that describes the task:
### Input:
applies modifications to given state
Parameters
----------
modifications: list of tuples
created by this class.list method.
state: dict
state dictionary
### Response:
def apply(modifications, state):
""" applies modifications to given state
Parameters
----------
modifications: list of tuples
created by this class.list method.
state: dict
state dictionary
"""
count = 0
for a in modifications:
if _debug:
assert a[0] in ('set', 'mv', 'map', 'rm')
logger.debug("processing rule: %s", str(a))
if len(a) == 3:
operation, name, value = a
if operation == 'set':
state[name] = value
count += 1
elif operation == 'mv':
try:
arg = state.pop(name)
state[value] = arg
count += 1
except KeyError:
raise ClassVersionException("the previous version didn't "
"store an attribute named '{}'".format(a[1]))
elif operation == 'map':
func = value
if hasattr(func, '__func__'):
func = func.__func__
assert callable(func)
state[name] = func(state[name])
count += 1
elif len(a) == 2:
operation, value = a
if operation == 'rm':
state.pop(value, None)
count += 1
elif operation == 'transform':
assert callable(value)
value(state)
count += 1
assert count == len(modifications), 'was not able to process all modifications on state' |
def min(self):
"""Return the minimum value in this histogram.
If there are no values in the histogram at all, return 10.
Returns:
int: The minimum value in the histogram.
"""
if len(self._data) == 0:
return 10
return next(iter(sorted(self._data.keys()))) | Return the minimum value in this histogram.
If there are no values in the histogram at all, return 10.
Returns:
int: The minimum value in the histogram. | Below is the instruction that describes the task:
### Input:
Return the minimum value in this histogram.
If there are no values in the histogram at all, return 10.
Returns:
int: The minimum value in the histogram.
### Response:
def min(self):
"""Return the minimum value in this histogram.
If there are no values in the histogram at all, return 10.
Returns:
int: The minimum value in the histogram.
"""
if len(self._data) == 0:
return 10
return next(iter(sorted(self._data.keys()))) |
def _sign_input(cls, input_, message, key_pairs):
    """Signs a single Input.

    Note:
        Only the following Cryptocondition types are currently
        supported:
            - Ed25519Fulfillment
            - ThresholdSha256.

    Args:
        input_ (:class:`~bigchaindb.common.transaction.
            Input`) The Input to be signed.
        message (str): The message to be signed
        key_pairs (dict): The keys to sign the Transaction with.
    """
    fulfillment = input_.fulfillment
    # Dispatch on the concrete fulfillment type.
    if isinstance(fulfillment, Ed25519Sha256):
        signer = cls._sign_simple_signature_fulfillment
    elif isinstance(fulfillment, ThresholdSha256):
        signer = cls._sign_threshold_signature_fulfillment
    else:
        raise ValueError("Fulfillment couldn't be matched to "
                         'Cryptocondition fulfillment type.')
    return signer(input_, message, key_pairs)
Note:
This method works only for the following Cryptoconditions
currently:
- Ed25519Fulfillment
- ThresholdSha256.
Args:
input_ (:class:`~bigchaindb.common.transaction.
Input`) The Input to be signed.
message (str): The message to be signed
key_pairs (dict): The keys to sign the Transaction with. | Below is the the instruction that describes the task:
### Input:
Signs a single Input.
Note:
This method works only for the following Cryptoconditions
currently:
- Ed25519Fulfillment
- ThresholdSha256.
Args:
input_ (:class:`~bigchaindb.common.transaction.
Input`) The Input to be signed.
message (str): The message to be signed
key_pairs (dict): The keys to sign the Transaction with.
### Response:
def _sign_input(cls, input_, message, key_pairs):
"""Signs a single Input.
Note:
This method works only for the following Cryptoconditions
currently:
- Ed25519Fulfillment
- ThresholdSha256.
Args:
input_ (:class:`~bigchaindb.common.transaction.
Input`) The Input to be signed.
message (str): The message to be signed
key_pairs (dict): The keys to sign the Transaction with.
"""
if isinstance(input_.fulfillment, Ed25519Sha256):
return cls._sign_simple_signature_fulfillment(input_, message,
key_pairs)
elif isinstance(input_.fulfillment, ThresholdSha256):
return cls._sign_threshold_signature_fulfillment(input_, message,
key_pairs)
else:
raise ValueError("Fulfillment couldn't be matched to "
'Cryptocondition fulfillment type.') |
def from_euler_angles(alpha_beta_gamma, beta=None, gamma=None):
    """Construct quaternions from Euler angles.

    Assumes the Euler angles correspond to the quaternion R via

        R = exp(alpha*z/2) * exp(beta*y/2) * exp(gamma*z/2)

    The angles must be given in radians.

    NOTE: Euler-angle conventions are notoriously ambiguous; read
    <https://github.com/moble/quaternion/wiki/Euler-angles-are-horrible>
    carefully before opening an issue about this function.

    Parameters
    ----------
    alpha_beta_gamma: float or array of floats
        Either an array whose last dimension has size 3 and holds the
        (alpha, beta, gamma) radian values for each rotation, or just the
        alpha values, in which case `beta` and `gamma` must also be given.
    beta: None, float, or array of floats
        When given, must broadcast against the first and third arguments.
    gamma: None, float, or array of floats
        When given, must broadcast against the first and second arguments.

    Returns
    -------
    R: quaternion array
        Same shape as the (broadcast) input with the last dimension removed.
    """
    # Accept either the packed (..., 3) form or three separate arrays.
    if gamma is None:
        angles = np.asarray(alpha_beta_gamma, dtype=np.double)
        alpha = angles[..., 0]
        beta = angles[..., 1]
        gamma = angles[..., 2]
    else:
        alpha = np.asarray(alpha_beta_gamma, dtype=np.double)
        beta = np.asarray(beta, dtype=np.double)
        gamma = np.asarray(gamma, dtype=np.double)

    # Hoist the half-angle sums/differences shared by all four components.
    half_beta = beta / 2
    half_sum = (alpha + gamma) / 2
    half_diff = (alpha - gamma) / 2

    R = np.empty(np.broadcast(alpha, beta, gamma).shape + (4,), dtype=np.double)
    R[..., 0] = np.cos(half_beta) * np.cos(half_sum)    # scalar component
    R[..., 1] = -np.sin(half_beta) * np.sin(half_diff)  # x component
    R[..., 2] = np.sin(half_beta) * np.cos(half_diff)   # y component
    R[..., 3] = np.cos(half_beta) * np.sin(half_sum)    # z component
    return as_quat_array(R)
Assumes the Euler angles correspond to the quaternion R via
R = exp(alpha*z/2) * exp(beta*y/2) * exp(gamma*z/2)
The angles naturally must be in radians for this to make any sense.
NOTE: Before opening an issue reporting something "wrong" with this
function, be sure to read all of the following page, *especially* the
very last section about opening issues or pull requests.
<https://github.com/moble/quaternion/wiki/Euler-angles-are-horrible>
Parameters
----------
alpha_beta_gamma: float or array of floats
This argument may either contain an array with last dimension of
size 3, where those three elements describe the (alpha, beta, gamma)
radian values for each rotation; or it may contain just the alpha
values, in which case the next two arguments must also be given.
beta: None, float, or array of floats
If this array is given, it must be able to broadcast against the
first and third arguments.
gamma: None, float, or array of floats
If this array is given, it must be able to broadcast against the
first and second arguments.
Returns
-------
R: quaternion array
The shape of this array will be the same as the input, except that
the last dimension will be removed. | Below is the the instruction that describes the task:
### Input:
Improve your life drastically
Assumes the Euler angles correspond to the quaternion R via
R = exp(alpha*z/2) * exp(beta*y/2) * exp(gamma*z/2)
The angles naturally must be in radians for this to make any sense.
NOTE: Before opening an issue reporting something "wrong" with this
function, be sure to read all of the following page, *especially* the
very last section about opening issues or pull requests.
<https://github.com/moble/quaternion/wiki/Euler-angles-are-horrible>
Parameters
----------
alpha_beta_gamma: float or array of floats
This argument may either contain an array with last dimension of
size 3, where those three elements describe the (alpha, beta, gamma)
radian values for each rotation; or it may contain just the alpha
values, in which case the next two arguments must also be given.
beta: None, float, or array of floats
If this array is given, it must be able to broadcast against the
first and third arguments.
gamma: None, float, or array of floats
If this array is given, it must be able to broadcast against the
first and second arguments.
Returns
-------
R: quaternion array
The shape of this array will be the same as the input, except that
the last dimension will be removed.
### Response:
def from_euler_angles(alpha_beta_gamma, beta=None, gamma=None):
"""Improve your life drastically
Assumes the Euler angles correspond to the quaternion R via
R = exp(alpha*z/2) * exp(beta*y/2) * exp(gamma*z/2)
The angles naturally must be in radians for this to make any sense.
NOTE: Before opening an issue reporting something "wrong" with this
function, be sure to read all of the following page, *especially* the
very last section about opening issues or pull requests.
<https://github.com/moble/quaternion/wiki/Euler-angles-are-horrible>
Parameters
----------
alpha_beta_gamma: float or array of floats
This argument may either contain an array with last dimension of
size 3, where those three elements describe the (alpha, beta, gamma)
radian values for each rotation; or it may contain just the alpha
values, in which case the next two arguments must also be given.
beta: None, float, or array of floats
If this array is given, it must be able to broadcast against the
first and third arguments.
gamma: None, float, or array of floats
If this array is given, it must be able to broadcast against the
first and second arguments.
Returns
-------
R: quaternion array
The shape of this array will be the same as the input, except that
the last dimension will be removed.
"""
# Figure out the input angles from either type of input
if gamma is None:
alpha_beta_gamma = np.asarray(alpha_beta_gamma, dtype=np.double)
alpha = alpha_beta_gamma[..., 0]
beta = alpha_beta_gamma[..., 1]
gamma = alpha_beta_gamma[..., 2]
else:
alpha = np.asarray(alpha_beta_gamma, dtype=np.double)
beta = np.asarray(beta, dtype=np.double)
gamma = np.asarray(gamma, dtype=np.double)
# Set up the output array
R = np.empty(np.broadcast(alpha, beta, gamma).shape + (4,), dtype=np.double)
# Compute the actual values of the quaternion components
R[..., 0] = np.cos(beta/2)*np.cos((alpha+gamma)/2) # scalar quaternion components
R[..., 1] = -np.sin(beta/2)*np.sin((alpha-gamma)/2) # x quaternion components
R[..., 2] = np.sin(beta/2)*np.cos((alpha-gamma)/2) # y quaternion components
R[..., 3] = np.cos(beta/2)*np.sin((alpha+gamma)/2) # z quaternion components
return as_quat_array(R) |
def check_pdb_status(pdbid):
    """Returns the status and up-to-date entry in the PDB for a given PDB ID"""
    xml_handle = urlopen('http://www.rcsb.org/pdb/rest/idStatus?structureId=%s' % pdbid)
    tree = et.parse(xml_handle)
    xml_handle.close()
    status = None
    current_id = pdbid
    for record in tree.xpath('//record'):
        # Status of an entry is 'UNKNOWN', 'OBSOLETE', or 'CURRENT'.
        status = record.attrib['status']
        if status == 'OBSOLETE':
            # Obsolete entries carry the ID of the superseding entry.
            current_id = record.attrib['replacedBy']
    return [status, current_id.lower()]
### Input:
Returns the status and up-to-date entry in the PDB for a given PDB ID
### Response:
def check_pdb_status(pdbid):
"""Returns the status and up-to-date entry in the PDB for a given PDB ID"""
url = 'http://www.rcsb.org/pdb/rest/idStatus?structureId=%s' % pdbid
xmlf = urlopen(url)
xml = et.parse(xmlf)
xmlf.close()
status = None
current_pdbid = pdbid
for df in xml.xpath('//record'):
status = df.attrib['status'] # Status of an entry can be either 'UNKWOWN', 'OBSOLETE', or 'CURRENT'
if status == 'OBSOLETE':
current_pdbid = df.attrib['replacedBy'] # Contains the up-to-date PDB ID for obsolete entries
return [status, current_pdbid.lower()] |
def search():
    """ Search a movie on TMDB.

    Reads the ``query`` request argument and returns a JSON response of
    the form ``{"movies": [...]}``; responses are cached in Redis for
    ``CACHE_TTL`` seconds, keyed on the lower-cased query.
    """
    # Cache key is case-insensitive on the query string.
    redis_key = 's_%s' % request.args['query'].lower()
    cached = redis_ro_conn.get(redis_key)
    if cached:
        # Serve the previously rendered JSON straight from cache.
        return Response(cached)
    else:
        try:
            found = get_on_tmdb(u'/search/movie', query=request.args['query'])
            movies = []
            for movie in found['results']:
                # One extra API call per result to fetch the crew list.
                cast = get_on_tmdb(u'/movie/%s/casts' % movie['id'])
                year = datetime.strptime(movie['release_date'], '%Y-%m-%d').year if movie['release_date'] else None
                movies.append({'title': movie['original_title'],
                               'directors': [x['name'] for x in cast['crew'] if x['department'] == 'Directing' and x['job'] == 'Director'],
                               'year': year,
                               '_tmdb_id': movie['id']})
        except requests.HTTPError as err:
            # Mirror TMDB's HTTP status code back to our caller.
            return Response('TMDB API error: %s' % str(err), status=err.response.status_code)
        json_response = json.dumps({'movies': movies})
        # Store the rendered payload so identical queries skip the API.
        redis_conn.setex(redis_key, app.config['CACHE_TTL'], json_response)
        return Response(json_response)
### Input:
Search a movie on TMDB.
### Response:
def search():
""" Search a movie on TMDB.
"""
redis_key = 's_%s' % request.args['query'].lower()
cached = redis_ro_conn.get(redis_key)
if cached:
return Response(cached)
else:
try:
found = get_on_tmdb(u'/search/movie', query=request.args['query'])
movies = []
for movie in found['results']:
cast = get_on_tmdb(u'/movie/%s/casts' % movie['id'])
year = datetime.strptime(movie['release_date'], '%Y-%m-%d').year if movie['release_date'] else None
movies.append({'title': movie['original_title'],
'directors': [x['name'] for x in cast['crew'] if x['department'] == 'Directing' and x['job'] == 'Director'],
'year': year,
'_tmdb_id': movie['id']})
except requests.HTTPError as err:
return Response('TMDB API error: %s' % str(err), status=err.response.status_code)
json_response = json.dumps({'movies': movies})
redis_conn.setex(redis_key, app.config['CACHE_TTL'], json_response)
return Response(json_response) |
def custom_template_name(self):
    """
    Returns the path for the custom special coverage template we want.
    """
    prefix = getattr(
        settings, "CUSTOM_SPECIAL_COVERAGE_PATH", "special_coverage/custom"
    )
    # An explicitly-None setting collapses to the empty prefix.
    if prefix is None:
        prefix = ""
    slug_part = self.slug.replace("-", "_")
    return "{0}/{1}_custom.html".format(prefix, slug_part).lstrip("/")
### Input:
Returns the path for the custom special coverage template we want.
### Response:
def custom_template_name(self):
"""
Returns the path for the custom special coverage template we want.
"""
base_path = getattr(settings, "CUSTOM_SPECIAL_COVERAGE_PATH", "special_coverage/custom")
if base_path is None:
base_path = ""
return "{0}/{1}_custom.html".format(
base_path, self.slug.replace("-", "_")
).lstrip("/") |
def evaluate(self, reference_scene_list, estimated_scene_list=None, estimated_scene_probabilities=None):
    """Evaluate file pair (reference and estimated)

    Accumulates per-scene and overall Ncorr/Nref/Nsys counts into
    ``self.scene_wise`` and ``self.overall``.

    Parameters
    ----------
    reference_scene_list : list of dict or dcase_util.containers.MetaDataContainer
        Reference scene list.
        Default value None

    estimated_scene_list : list of dict or dcase_util.containers.MetaDataContainer
        Estimated scene list.
        Default value None

    estimated_scene_probabilities : dcase_util.containers.ProbabilityContainer
        Estimated scene probabilities. Currently not used.
        Default value None

    Returns
    -------
    self

    Raises
    ------
    ValueError
        If no estimated data is given, or an estimated item has no
        matching reference item.
    """
    if estimated_scene_list is None and estimated_scene_probabilities is None:
        raise ValueError("Nothing to evaluate, give at least estimated_scene_list or estimated_scene_probabilities")

    # Make sure reference_scene_list is dcase_util.containers.MetaDataContainer
    # BUG FIX: this guard previously tested estimated_scene_list, so a
    # plain-list reference was (not) converted based on the wrong variable.
    if not isinstance(reference_scene_list, dcase_util.containers.MetaDataContainer):
        reference_scene_list = dcase_util.containers.MetaDataContainer(reference_scene_list)

    # Make sure estimated_scene_list is dcase_util.containers.MetaDataContainer
    if not isinstance(estimated_scene_list, dcase_util.containers.MetaDataContainer):
        estimated_scene_list = dcase_util.containers.MetaDataContainer(estimated_scene_list)

    # Make sure estimated_tag_probabilities is dcase_util.containers.ProbabilityContainer
    if estimated_scene_probabilities is not None:
        if not isinstance(estimated_scene_probabilities, dcase_util.containers.ProbabilityContainer):
            estimated_scene_probabilities = dcase_util.containers.ProbabilityContainer(estimated_scene_probabilities)

    # Translate "file" field to "filename"
    for item in reference_scene_list:
        if 'filename' not in item and 'file' in item:
            item['filename'] = item['file']

    for item in estimated_scene_list:
        if 'filename' not in item and 'file' in item:
            item['filename'] = item['file']

    # Pair each estimated item with its reference item by filename.
    y_true = []
    y_pred = []
    for estimated_item in estimated_scene_list:
        reference_item_matched = {}
        for reference_item in reference_scene_list:
            if estimated_item['filename'] == reference_item['filename']:
                reference_item_matched = reference_item
                break

        if not reference_item_matched:
            # BUG FIX: use 'filename' here -- 'file' is not guaranteed to
            # exist (only 'filename' is, after the translation step above),
            # so the old message could itself raise KeyError.
            raise ValueError(
                "Cannot find reference_item for estimated item [{item}]".format(item=estimated_item['filename'])
            )

        y_true.append(reference_item_matched['scene_label'])
        y_pred.append(estimated_item['scene_label'])

    y_true = numpy.array(y_true)
    y_pred = numpy.array(y_pred)

    # Accumulate correct/reference/system counts per scene label.
    Ncorr_overall = 0
    for scene_id, scene_label in enumerate(self.scene_label_list):
        true_id = numpy.where(y_true == scene_label)[0]
        pred_id = numpy.where(y_pred == scene_label)[0]

        Ncorr = 0
        for id in true_id:
            if id in pred_id:
                Ncorr += 1
        Ncorr_overall += Ncorr

        self.scene_wise[scene_label]['Ncorr'] += Ncorr
        self.scene_wise[scene_label]['Nref'] += true_id.shape[0]
        self.scene_wise[scene_label]['Nsys'] += pred_id.shape[0]

    self.overall['Ncorr'] += Ncorr_overall
    self.overall['Nref'] += y_true.shape[0]
    self.overall['Nsys'] += y_pred.shape[0]

    return self
Parameters
----------
reference_scene_list : list of dict or dcase_util.containers.MetaDataContainer
Reference scene list.
Default value None
estimated_scene_list : list of dict or dcase_util.containers.MetaDataContainer
Estimated scene list.
Default value None
estimated_scene_probabilities : dcase_util.containers.ProbabilityContainer
Estimated scene probabilities. Currently not used.
Default value None
Returns
-------
self | Below is the the instruction that describes the task:
### Input:
Evaluate file pair (reference and estimated)
Parameters
----------
reference_scene_list : list of dict or dcase_util.containers.MetaDataContainer
Reference scene list.
Default value None
estimated_scene_list : list of dict or dcase_util.containers.MetaDataContainer
Estimated scene list.
Default value None
estimated_scene_probabilities : dcase_util.containers.ProbabilityContainer
Estimated scene probabilities. Currently not used.
Default value None
Returns
-------
self
### Response:
def evaluate(self, reference_scene_list, estimated_scene_list=None, estimated_scene_probabilities=None):
"""Evaluate file pair (reference and estimated)
Parameters
----------
reference_scene_list : list of dict or dcase_util.containers.MetaDataContainer
Reference scene list.
Default value None
estimated_scene_list : list of dict or dcase_util.containers.MetaDataContainer
Estimated scene list.
Default value None
estimated_scene_probabilities : dcase_util.containers.ProbabilityContainer
Estimated scene probabilities. Currently not used.
Default value None
Returns
-------
self
"""
if estimated_scene_list is None and estimated_scene_probabilities is None:
raise ValueError("Nothing to evaluate, give at least estimated_scene_list or estimated_scene_probabilities")
# Make sure reference_scene_list is dcase_util.containers.MetaDataContainer
if not isinstance(estimated_scene_list, dcase_util.containers.MetaDataContainer):
reference_scene_list = dcase_util.containers.MetaDataContainer(reference_scene_list)
# Make sure estimated_scene_list is dcase_util.containers.MetaDataContainer
if not isinstance(estimated_scene_list, dcase_util.containers.MetaDataContainer):
estimated_scene_list = dcase_util.containers.MetaDataContainer(estimated_scene_list)
# Make sure estimated_tag_probabilities is dcase_util.containers.ProbabilityContainer
if estimated_scene_probabilities is not None:
if not isinstance(estimated_scene_probabilities, dcase_util.containers.ProbabilityContainer):
estimated_scene_probabilities = dcase_util.containers.ProbabilityContainer(estimated_scene_probabilities)
# Translate "file" field to "filename"
for item in reference_scene_list:
if 'filename' not in item and 'file' in item:
item['filename'] = item['file']
for item in estimated_scene_list:
if 'filename' not in item and 'file' in item:
item['filename'] = item['file']
y_true = []
y_pred = []
for estimated_item in estimated_scene_list:
reference_item_matched = {}
for reference_item in reference_scene_list:
if estimated_item['filename'] == reference_item['filename']:
reference_item_matched = reference_item
break
if not reference_item_matched:
raise ValueError(
"Cannot find reference_item for estimated item [{item}]".format(item=estimated_item['file'])
)
y_true.append(reference_item_matched['scene_label'])
y_pred.append(estimated_item['scene_label'])
y_true = numpy.array(y_true)
y_pred = numpy.array(y_pred)
Ncorr_overall = 0
for scene_id, scene_label in enumerate(self.scene_label_list):
true_id = numpy.where(y_true == scene_label)[0]
pred_id = numpy.where(y_pred == scene_label)[0]
Ncorr = 0
for id in true_id:
if id in pred_id:
Ncorr += 1
Ncorr_overall += Ncorr
self.scene_wise[scene_label]['Ncorr'] += Ncorr
self.scene_wise[scene_label]['Nref'] += true_id.shape[0]
self.scene_wise[scene_label]['Nsys'] += pred_id.shape[0]
self.overall['Ncorr'] += Ncorr_overall
self.overall['Nref'] += y_true.shape[0]
self.overall['Nsys'] += y_pred.shape[0]
return self |
def evaluate(self, verbose=True, passes=None):
    """Evaluate the lazy Weld expression and wrap the result as a DataFrameWeld.

    Args:
        verbose (bool): Forwarded to ``LazyOpResult.evaluate``.
        passes: Optimization passes, forwarded to ``LazyOpResult.evaluate``.
            Default None.

    Returns:
        DataFrameWeld: A pandas-backed frame built from the evaluated
        columns (pivoted or plain, depending on ``self.is_pivot``).
    """
    if self.is_pivot:
        # Pivot results come back as (index, per-column data, column names).
        index, pivot, columns = LazyOpResult(
            self.expr,
            self.weld_type,
            0
        ).evaluate(verbose=verbose, passes=passes)
        df_dict = {}
        for i, column_name in enumerate(columns):
            df_dict[column_name] = pivot[i]
        return DataFrameWeld(pd.DataFrame(df_dict, index=index))
    else:
        df = pd.DataFrame(columns=[])
        weldvec_type_list = []
        # NOTE(review): loop variable shadows the builtin `type`.
        for type in self.column_types:
            weldvec_type_list.append(WeldVec(type))
        # Evaluate all columns at once as a struct of vectors.
        columns = LazyOpResult(
            grizzly_impl.unzip_columns(
                self.expr,
                self.column_types
            ),
            WeldStruct(weldvec_type_list),
            0
        ).evaluate(verbose=verbose, passes=passes)
        for i, column_name in enumerate(self.column_names):
            df[column_name] = columns[i]
        return DataFrameWeld(df)
Returns:
TYPE: Description | Below is the the instruction that describes the task:
### Input:
Summary
Returns:
TYPE: Description
### Response:
def evaluate(self, verbose=True, passes=None):
"""Summary
Returns:
TYPE: Description
"""
if self.is_pivot:
index, pivot, columns = LazyOpResult(
self.expr,
self.weld_type,
0
).evaluate(verbose=verbose, passes=passes)
df_dict = {}
for i, column_name in enumerate(columns):
df_dict[column_name] = pivot[i]
return DataFrameWeld(pd.DataFrame(df_dict, index=index))
else:
df = pd.DataFrame(columns=[])
weldvec_type_list = []
for type in self.column_types:
weldvec_type_list.append(WeldVec(type))
columns = LazyOpResult(
grizzly_impl.unzip_columns(
self.expr,
self.column_types
),
WeldStruct(weldvec_type_list),
0
).evaluate(verbose=verbose, passes=passes)
for i, column_name in enumerate(self.column_names):
df[column_name] = columns[i]
return DataFrameWeld(df) |
def write(url, content, **args):
    """Put an object into a ftp URL.

    Extra keyword arguments are forwarded to the FTPResource constructor.
    """
    with FTPResource(url, **args) as ftp_res:
        ftp_res.write(content)
### Input:
Put an object into a ftp URL.
### Response:
def write(url, content, **args):
"""Put an object into a ftp URL."""
with FTPResource(url, **args) as resource:
resource.write(content) |
def emit_save_figure(self):
    """
    Emit a signal when the toolbutton to save the figure is clicked.
    """
    canvas = self.canvas
    self.sig_save_figure.emit(canvas.fig, canvas.fmt)
### Input:
Emit a signal when the toolbutton to save the figure is clicked.
### Response:
def emit_save_figure(self):
"""
Emit a signal when the toolbutton to save the figure is clicked.
"""
self.sig_save_figure.emit(self.canvas.fig, self.canvas.fmt) |
def use_comparative_log_entry_view(self):
    """Pass through to provider LogEntryLookupSession.use_comparative_log_entry_view"""
    self._object_views['log_entry'] = COMPARATIVE
    # Propagate the view setting to every provider session that supports it.
    for provider_session in self._get_provider_sessions():
        try:
            provider_session.use_comparative_log_entry_view()
        except AttributeError:
            # Session type does not implement this view; skip it.
            pass
### Input:
Pass through to provider LogEntryLookupSession.use_comparative_log_entry_view
### Response:
def use_comparative_log_entry_view(self):
"""Pass through to provider LogEntryLookupSession.use_comparative_log_entry_view"""
self._object_views['log_entry'] = COMPARATIVE
# self._get_provider_session('log_entry_lookup_session') # To make sure the session is tracked
for session in self._get_provider_sessions():
try:
session.use_comparative_log_entry_view()
except AttributeError:
pass |
def get_weights(self):
    """
    Returns:
        dict: Model's trained weights, or None when the model has not
        been fit yet (use object.fit() first).
    """
    if self.is_trained() is False:
        return None
    weights = self._var_res_to_weights(self._var_res)
    # Keep copies of the fitted values under dedicated *_fit keys.
    weights["final_bias_fit"] = weights["final_bias"]
    weights["feature_weights_fit"] = weights["feature_weights"]
    return weights
return weights | Returns:
dict: Model's trained weights. | Below is the the instruction that describes the task:
### Input:
Returns:
dict: Model's trained weights.
### Response:
def get_weights(self):
"""
Returns:
dict: Model's trained weights.
"""
if self.is_trained() is False:
# print("Model not fitted yet. Use object.fit() to fit the model.")
return None
var_res = self._var_res
weights = self._var_res_to_weights(var_res)
# save to the side
weights["final_bias_fit"] = weights["final_bias"]
weights["feature_weights_fit"] = weights["feature_weights"]
return weights |
def use_plenary_objective_bank_view(self):
    """Pass through to provider ObjectiveObjectiveBankSession.use_plenary_objective_bank_view"""
    self._objective_bank_view = PLENARY
    # Propagate the view setting to every provider session that supports it.
    for provider_session in self._get_provider_sessions():
        try:
            provider_session.use_plenary_objective_bank_view()
        except AttributeError:
            # Session type does not implement this view; skip it.
            pass
### Input:
Pass through to provider ObjectiveObjectiveBankSession.use_plenary_objective_bank_view
### Response:
def use_plenary_objective_bank_view(self):
"""Pass through to provider ObjectiveObjectiveBankSession.use_plenary_objective_bank_view"""
self._objective_bank_view = PLENARY
# self._get_provider_session('objective_objective_bank_session') # To make sure the session is tracked
for session in self._get_provider_sessions():
try:
session.use_plenary_objective_bank_view()
except AttributeError:
pass |
def update_params(self, param_d):
    """Update the attributes in self.obj that match the keys in
    `param_d`.

    Parameters whose names are absent from `param_d` are left untouched.
    """
    target = self.obj
    for param in self.paramlst:
        name = param.name
        if name in param_d:
            setattr(target, name, param_d[name])
`param_d`. | Below is the the instruction that describes the task:
### Input:
Update the attributes in self.obj that match the keys in
`param_d`.
### Response:
def update_params(self, param_d):
"""Update the attributes in self.obj that match the keys in
`param_d`.
"""
for param in self.paramlst:
if param.name in param_d:
value = param_d[param.name]
setattr(self.obj, param.name, value) |
def replace_pattern(pattern,
                    repl,
                    count=0,
                    flags=8,
                    bufsize=1,
                    append_if_not_found=False,
                    prepend_if_not_found=False,
                    not_found_content=None,
                    search_only=False,
                    show_changes=True,
                    backslash_literal=False,
                    source=None,
                    path=None,
                    test=False,
                    replace=True,
                    debug=False,
                    commit=True):
    '''
    .. versionadded:: 2019.2.0

    Replace occurrences of a pattern in the configuration source. If
    ``show_changes`` is ``True``, then a diff of what changed will be returned,
    otherwise a ``True`` will be returned when changes are made, and ``False``
    when no changes are made.
    This is a pure Python implementation that wraps Python's :py:func:`~re.sub`.

    pattern
        A regular expression, to be matched using Python's
        :py:func:`~re.search`.

    repl
        The replacement text.

    count: ``0``
        Maximum number of pattern occurrences to be replaced. If count is a
        positive integer ``n``, only ``n`` occurrences will be replaced,
        otherwise all occurrences will be replaced.

    flags (list or int): ``8``
        A list of flags defined in the ``re`` module documentation from the
        Python standard library. Each list item should be a string that will
        correlate to the human-friendly flag name. E.g., ``['IGNORECASE',
        'MULTILINE']``. Optionally, ``flags`` may be an int, with a value
        corresponding to the XOR (``|``) of all the desired flags. Defaults to
        8 (which supports 'MULTILINE').

    bufsize (int or str): ``1``
        How much of the configuration to buffer into memory at once. The
        default value ``1`` processes one line at a time. The special value
        ``file`` may be specified which will read the entire file into memory
        before processing.

    append_if_not_found: ``False``
        If set to ``True``, and pattern is not found, then the content will be
        appended to the file.

    prepend_if_not_found: ``False``
        If set to ``True`` and pattern is not found, then the content will be
        prepended to the file.

    not_found_content
        Content to use for append/prepend if not found. If None (default), uses
        ``repl``. Useful when ``repl`` uses references to group in pattern.

    search_only: ``False``
        If set to true, this no changes will be performed on the file, and this
        function will simply return ``True`` if the pattern was matched, and
        ``False`` if not.

    show_changes: ``True``
        If ``True``, return a diff of changes made. Otherwise, return ``True``
        if changes were made, and ``False`` if not.

    backslash_literal: ``False``
        Interpret backslashes as literal backslashes for the repl and not
        escape characters. This will help when using append/prepend so that
        the backslashes are not interpreted for the repl on the second run of
        the state.

    source: ``running``
        The configuration source. Choose from: ``running``, ``candidate``, or
        ``startup``. Default: ``running``.

    path
        Save the temporary configuration to a specific path, then read from
        there.

    test: ``False``
        Dry run? If set as ``True``, will apply the config, discard and return
        the changes. Default: ``False`` and will commit the changes on the
        device.

    commit: ``True``
        Commit the configuration changes? Default: ``True``.

    debug: ``False``
        Debug mode. Will insert a new key in the output dictionary, as
        ``loaded_config`` containing the raw configuration loaded on the device.

    replace: ``True``
        Load and replace the configuration. Default: ``True``.

    If an equal sign (``=``) appears in an argument to a Salt command it is
    interpreted as a keyword argument in the format ``key=val``. That
    processing can be bypassed in order to pass an equal sign through to the
    remote shell command by manually specifying the kwarg:

    .. code-block:: bash

        salt '*' net.replace_pattern "bind-address\\s*=" "bind-address:"

    CLI Example:

    .. code-block:: bash

        salt '*' net.replace_pattern PREFIX-LIST_NAME new-prefix-list-name
        salt '*' net.replace_pattern bgp-group-name new-bgp-group-name count=1
    '''
    # Dump the selected configuration source to a (temporary) file first.
    config_saved = save_config(source=source, path=path)
    if not config_saved or not config_saved['result']:
        # Bail out and surface the save failure to the caller as-is.
        return config_saved
    path = config_saved['out']
    # Perform the regex replacement on the dumped file via file.replace.
    # NOTE(review): the return value below is never used (and shadows this
    # function's name); the result is read back from the file instead.
    replace_pattern = __salt__['file.replace'](path,
                                               pattern,
                                               repl,
                                               count=count,
                                               flags=flags,
                                               bufsize=bufsize,
                                               append_if_not_found=append_if_not_found,
                                               prepend_if_not_found=prepend_if_not_found,
                                               not_found_content=not_found_content,
                                               search_only=search_only,
                                               show_changes=show_changes,
                                               backslash_literal=backslash_literal)
    with salt.utils.files.fopen(path, 'r') as fh_:
        updated_config = fh_.read()
    # Load the edited text back onto the device (optionally dry-run/commit).
    return __salt__['net.load_config'](text=updated_config,
                                       test=test,
                                       debug=debug,
                                       replace=replace,
                                       commit=commit)
Replace occurrences of a pattern in the configuration source. If
``show_changes`` is ``True``, then a diff of what changed will be returned,
otherwise a ``True`` will be returned when changes are made, and ``False``
when no changes are made.
This is a pure Python implementation that wraps Python's :py:func:`~re.sub`.
pattern
A regular expression, to be matched using Python's
:py:func:`~re.search`.
repl
The replacement text.
count: ``0``
Maximum number of pattern occurrences to be replaced. If count is a
positive integer ``n``, only ``n`` occurrences will be replaced,
otherwise all occurrences will be replaced.
flags (list or int): ``8``
A list of flags defined in the ``re`` module documentation from the
Python standard library. Each list item should be a string that will
correlate to the human-friendly flag name. E.g., ``['IGNORECASE',
'MULTILINE']``. Optionally, ``flags`` may be an int, with a value
corresponding to the XOR (``|``) of all the desired flags. Defaults to
8 (which supports 'MULTILINE').
bufsize (int or str): ``1``
How much of the configuration to buffer into memory at once. The
default value ``1`` processes one line at a time. The special value
``file`` may be specified which will read the entire file into memory
before processing.
append_if_not_found: ``False``
If set to ``True``, and pattern is not found, then the content will be
appended to the file.
prepend_if_not_found: ``False``
If set to ``True`` and pattern is not found, then the content will be
prepended to the file.
not_found_content
Content to use for append/prepend if not found. If None (default), uses
``repl``. Useful when ``repl`` uses references to group in pattern.
search_only: ``False``
If set to true, this no changes will be performed on the file, and this
function will simply return ``True`` if the pattern was matched, and
``False`` if not.
show_changes: ``True``
If ``True``, return a diff of changes made. Otherwise, return ``True``
if changes were made, and ``False`` if not.
backslash_literal: ``False``
Interpret backslashes as literal backslashes for the repl and not
escape characters. This will help when using append/prepend so that
the backslashes are not interpreted for the repl on the second run of
the state.
source: ``running``
The configuration source. Choose from: ``running``, ``candidate``, or
``startup``. Default: ``running``.
path
Save the temporary configuration to a specific path, then read from
there.
test: ``False``
Dry run? If set as ``True``, will apply the config, discard and return
the changes. Default: ``False`` and will commit the changes on the
device.
commit: ``True``
Commit the configuration changes? Default: ``True``.
debug: ``False``
Debug mode. Will insert a new key in the output dictionary, as
``loaded_config`` containing the raw configuration loaded on the device.
replace: ``True``
Load and replace the configuration. Default: ``True``.
If an equal sign (``=``) appears in an argument to a Salt command it is
interpreted as a keyword argument in the format ``key=val``. That
processing can be bypassed in order to pass an equal sign through to the
remote shell command by manually specifying the kwarg:
.. code-block:: bash
salt '*' net.replace_pattern "bind-address\\s*=" "bind-address:"
CLI Example:
.. code-block:: bash
salt '*' net.replace_pattern PREFIX-LIST_NAME new-prefix-list-name
salt '*' net.replace_pattern bgp-group-name new-bgp-group-name count=1 | Below is the instruction that describes the task:
### Input:
.. versionadded:: 2019.2.0
Replace occurrences of a pattern in the configuration source. If
``show_changes`` is ``True``, then a diff of what changed will be returned,
otherwise a ``True`` will be returned when changes are made, and ``False``
when no changes are made.
This is a pure Python implementation that wraps Python's :py:func:`~re.sub`.
pattern
A regular expression, to be matched using Python's
:py:func:`~re.search`.
repl
The replacement text.
count: ``0``
Maximum number of pattern occurrences to be replaced. If count is a
positive integer ``n``, only ``n`` occurrences will be replaced,
otherwise all occurrences will be replaced.
flags (list or int): ``8``
A list of flags defined in the ``re`` module documentation from the
Python standard library. Each list item should be a string that will
correlate to the human-friendly flag name. E.g., ``['IGNORECASE',
'MULTILINE']``. Optionally, ``flags`` may be an int, with a value
corresponding to the XOR (``|``) of all the desired flags. Defaults to
8 (which supports 'MULTILINE').
bufsize (int or str): ``1``
How much of the configuration to buffer into memory at once. The
default value ``1`` processes one line at a time. The special value
``file`` may be specified which will read the entire file into memory
before processing.
append_if_not_found: ``False``
If set to ``True``, and pattern is not found, then the content will be
appended to the file.
prepend_if_not_found: ``False``
If set to ``True`` and pattern is not found, then the content will be
prepended to the file.
not_found_content
Content to use for append/prepend if not found. If None (default), uses
``repl``. Useful when ``repl`` uses references to group in pattern.
search_only: ``False``
If set to true, no changes will be performed on the file, and this
function will simply return ``True`` if the pattern was matched, and
``False`` if not.
show_changes: ``True``
If ``True``, return a diff of changes made. Otherwise, return ``True``
if changes were made, and ``False`` if not.
backslash_literal: ``False``
Interpret backslashes as literal backslashes for the repl and not
escape characters. This will help when using append/prepend so that
the backslashes are not interpreted for the repl on the second run of
the state.
source: ``running``
The configuration source. Choose from: ``running``, ``candidate``, or
``startup``. Default: ``running``.
path
Save the temporary configuration to a specific path, then read from
there.
test: ``False``
Dry run? If set as ``True``, will apply the config, discard and return
the changes. Default: ``False`` and will commit the changes on the
device.
commit: ``True``
Commit the configuration changes? Default: ``True``.
debug: ``False``
Debug mode. Will insert a new key in the output dictionary, as
``loaded_config`` containing the raw configuration loaded on the device.
replace: ``True``
Load and replace the configuration. Default: ``True``.
If an equal sign (``=``) appears in an argument to a Salt command it is
interpreted as a keyword argument in the format ``key=val``. That
processing can be bypassed in order to pass an equal sign through to the
remote shell command by manually specifying the kwarg:
.. code-block:: bash
salt '*' net.replace_pattern "bind-address\\s*=" "bind-address:"
CLI Example:
.. code-block:: bash
salt '*' net.replace_pattern PREFIX-LIST_NAME new-prefix-list-name
salt '*' net.replace_pattern bgp-group-name new-bgp-group-name count=1
### Response:
def replace_pattern(pattern,
repl,
count=0,
flags=8,
bufsize=1,
append_if_not_found=False,
prepend_if_not_found=False,
not_found_content=None,
search_only=False,
show_changes=True,
backslash_literal=False,
source=None,
path=None,
test=False,
replace=True,
debug=False,
commit=True):
'''
.. versionadded:: 2019.2.0
Replace occurrences of a pattern in the configuration source. If
``show_changes`` is ``True``, then a diff of what changed will be returned,
otherwise a ``True`` will be returned when changes are made, and ``False``
when no changes are made.
This is a pure Python implementation that wraps Python's :py:func:`~re.sub`.
pattern
A regular expression, to be matched using Python's
:py:func:`~re.search`.
repl
The replacement text.
count: ``0``
Maximum number of pattern occurrences to be replaced. If count is a
positive integer ``n``, only ``n`` occurrences will be replaced,
otherwise all occurrences will be replaced.
flags (list or int): ``8``
A list of flags defined in the ``re`` module documentation from the
Python standard library. Each list item should be a string that will
correlate to the human-friendly flag name. E.g., ``['IGNORECASE',
'MULTILINE']``. Optionally, ``flags`` may be an int, with a value
corresponding to the XOR (``|``) of all the desired flags. Defaults to
8 (which supports 'MULTILINE').
bufsize (int or str): ``1``
How much of the configuration to buffer into memory at once. The
default value ``1`` processes one line at a time. The special value
``file`` may be specified which will read the entire file into memory
before processing.
append_if_not_found: ``False``
If set to ``True``, and pattern is not found, then the content will be
appended to the file.
prepend_if_not_found: ``False``
If set to ``True`` and pattern is not found, then the content will be
prepended to the file.
not_found_content
Content to use for append/prepend if not found. If None (default), uses
``repl``. Useful when ``repl`` uses references to group in pattern.
search_only: ``False``
If set to true, no changes will be performed on the file, and this
function will simply return ``True`` if the pattern was matched, and
``False`` if not.
show_changes: ``True``
If ``True``, return a diff of changes made. Otherwise, return ``True``
if changes were made, and ``False`` if not.
backslash_literal: ``False``
Interpret backslashes as literal backslashes for the repl and not
escape characters. This will help when using append/prepend so that
the backslashes are not interpreted for the repl on the second run of
the state.
source: ``running``
The configuration source. Choose from: ``running``, ``candidate``, or
``startup``. Default: ``running``.
path
Save the temporary configuration to a specific path, then read from
there.
test: ``False``
Dry run? If set as ``True``, will apply the config, discard and return
the changes. Default: ``False`` and will commit the changes on the
device.
commit: ``True``
Commit the configuration changes? Default: ``True``.
debug: ``False``
Debug mode. Will insert a new key in the output dictionary, as
``loaded_config`` containing the raw configuration loaded on the device.
replace: ``True``
Load and replace the configuration. Default: ``True``.
If an equal sign (``=``) appears in an argument to a Salt command it is
interpreted as a keyword argument in the format ``key=val``. That
processing can be bypassed in order to pass an equal sign through to the
remote shell command by manually specifying the kwarg:
.. code-block:: bash
salt '*' net.replace_pattern "bind-address\\s*=" "bind-address:"
CLI Example:
.. code-block:: bash
salt '*' net.replace_pattern PREFIX-LIST_NAME new-prefix-list-name
salt '*' net.replace_pattern bgp-group-name new-bgp-group-name count=1
'''
config_saved = save_config(source=source, path=path)
if not config_saved or not config_saved['result']:
return config_saved
path = config_saved['out']
replace_pattern = __salt__['file.replace'](path,
pattern,
repl,
count=count,
flags=flags,
bufsize=bufsize,
append_if_not_found=append_if_not_found,
prepend_if_not_found=prepend_if_not_found,
not_found_content=not_found_content,
search_only=search_only,
show_changes=show_changes,
backslash_literal=backslash_literal)
with salt.utils.files.fopen(path, 'r') as fh_:
updated_config = fh_.read()
return __salt__['net.load_config'](text=updated_config,
test=test,
debug=debug,
replace=replace,
commit=commit) |
def smooth(self):
"""
Read/write boolean specifying whether to use curve smoothing to
form the line connecting the data points in this series into
a continuous curve. If |False|, a series of straight line segments
are used to connect the points.
"""
smooth = self._element.smooth
if smooth is None:
return True
return smooth.val | Read/write boolean specifying whether to use curve smoothing to
form the line connecting the data points in this series into
a continuous curve. If |False|, a series of straight line segments
are used to connect the points. | Below is the instruction that describes the task:
### Input:
Read/write boolean specifying whether to use curve smoothing to
form the line connecting the data points in this series into
a continuous curve. If |False|, a series of straight line segments
are used to connect the points.
### Response:
def smooth(self):
"""
Read/write boolean specifying whether to use curve smoothing to
form the line connecting the data points in this series into
a continuous curve. If |False|, a series of straight line segments
are used to connect the points.
"""
smooth = self._element.smooth
if smooth is None:
return True
return smooth.val |
def findall(text):
"""Find all the timestrings within a block of text.
>>> timestring.findall("once upon a time, about 3 weeks ago, there was a boy whom was born on august 15th at 7:20 am. epic.")
[
('3 weeks ago,', <timestring.Date 2014-02-09 00:00:00 4483019280>),
('august 15th at 7:20 am', <timestring.Date 2014-08-15 07:20:00 4483019344>)
]
"""
results = TIMESTRING_RE.findall(text)
dates = []
for date in results:
if re.compile('((next|last)\s(\d+|couple(\sof))\s(weeks|months|quarters|years))|(between|from)', re.I).match(date[0]):
dates.append((date[0].strip(), Range(date[0])))
else:
dates.append((date[0].strip(), Date(date[0])))
return dates | Find all the timestrings within a block of text.
>>> timestring.findall("once upon a time, about 3 weeks ago, there was a boy whom was born on august 15th at 7:20 am. epic.")
[
('3 weeks ago,', <timestring.Date 2014-02-09 00:00:00 4483019280>),
('august 15th at 7:20 am', <timestring.Date 2014-08-15 07:20:00 4483019344>)
] | Below is the instruction that describes the task:
### Input:
Find all the timestrings within a block of text.
>>> timestring.findall("once upon a time, about 3 weeks ago, there was a boy whom was born on august 15th at 7:20 am. epic.")
[
('3 weeks ago,', <timestring.Date 2014-02-09 00:00:00 4483019280>),
('august 15th at 7:20 am', <timestring.Date 2014-08-15 07:20:00 4483019344>)
]
### Response:
def findall(text):
"""Find all the timestrings within a block of text.
>>> timestring.findall("once upon a time, about 3 weeks ago, there was a boy whom was born on august 15th at 7:20 am. epic.")
[
('3 weeks ago,', <timestring.Date 2014-02-09 00:00:00 4483019280>),
('august 15th at 7:20 am', <timestring.Date 2014-08-15 07:20:00 4483019344>)
]
"""
results = TIMESTRING_RE.findall(text)
dates = []
for date in results:
if re.compile('((next|last)\s(\d+|couple(\sof))\s(weeks|months|quarters|years))|(between|from)', re.I).match(date[0]):
dates.append((date[0].strip(), Range(date[0])))
else:
dates.append((date[0].strip(), Date(date[0])))
return dates |
def extrapolate_error(self):
"""Estimate the numerical error to be expected when applying all
methods available based on the results of the current and the
last method.
Note that this extrapolation strategy cannot be applied on the first
method. If the current method is the first one, `-999.9` is returned.
>>> from hydpy.models.test_v1 import *
>>> parameterstep()
>>> model.numvars.error = 1e-2
>>> model.numvars.last_error = 1e-1
>>> model.numvars.idx_method = 10
>>> model.extrapolate_error()
>>> from hydpy import round_
>>> round_(model.numvars.extrapolated_error)
0.01
>>> model.numvars.idx_method = 9
>>> model.extrapolate_error()
>>> round_(model.numvars.extrapolated_error)
0.001
"""
if self.numvars.idx_method > 2:
self.numvars.extrapolated_error = modelutils.exp(
modelutils.log(self.numvars.error) +
(modelutils.log(self.numvars.error) -
modelutils.log(self.numvars.last_error)) *
(self.numconsts.nmb_methods-self.numvars.idx_method))
else:
self.numvars.extrapolated_error = -999.9 | Estimate the numerical error to be expected when applying all
methods available based on the results of the current and the
last method.
Note that this extrapolation strategy cannot be applied on the first
method. If the current method is the first one, `-999.9` is returned.
>>> from hydpy.models.test_v1 import *
>>> parameterstep()
>>> model.numvars.error = 1e-2
>>> model.numvars.last_error = 1e-1
>>> model.numvars.idx_method = 10
>>> model.extrapolate_error()
>>> from hydpy import round_
>>> round_(model.numvars.extrapolated_error)
0.01
>>> model.numvars.idx_method = 9
>>> model.extrapolate_error()
>>> round_(model.numvars.extrapolated_error)
0.001 | Below is the instruction that describes the task:
### Input:
Estimate the numerical error to be expected when applying all
methods available based on the results of the current and the
last method.
Note that this extrapolation strategy cannot be applied on the first
method. If the current method is the first one, `-999.9` is returned.
>>> from hydpy.models.test_v1 import *
>>> parameterstep()
>>> model.numvars.error = 1e-2
>>> model.numvars.last_error = 1e-1
>>> model.numvars.idx_method = 10
>>> model.extrapolate_error()
>>> from hydpy import round_
>>> round_(model.numvars.extrapolated_error)
0.01
>>> model.numvars.idx_method = 9
>>> model.extrapolate_error()
>>> round_(model.numvars.extrapolated_error)
0.001
### Response:
def extrapolate_error(self):
"""Estimate the numerical error to be expected when applying all
methods available based on the results of the current and the
last method.
Note that this extrapolation strategy cannot be applied on the first
method. If the current method is the first one, `-999.9` is returned.
>>> from hydpy.models.test_v1 import *
>>> parameterstep()
>>> model.numvars.error = 1e-2
>>> model.numvars.last_error = 1e-1
>>> model.numvars.idx_method = 10
>>> model.extrapolate_error()
>>> from hydpy import round_
>>> round_(model.numvars.extrapolated_error)
0.01
>>> model.numvars.idx_method = 9
>>> model.extrapolate_error()
>>> round_(model.numvars.extrapolated_error)
0.001
"""
if self.numvars.idx_method > 2:
self.numvars.extrapolated_error = modelutils.exp(
modelutils.log(self.numvars.error) +
(modelutils.log(self.numvars.error) -
modelutils.log(self.numvars.last_error)) *
(self.numconsts.nmb_methods-self.numvars.idx_method))
else:
self.numvars.extrapolated_error = -999.9 |
def fill_series(x, year):
"""Returns the value of a timeseries (indexed over years) for a year
by linear interpolation.
Parameters
----------
x: pandas.Series
a timeseries to be interpolated
year: int
year of interpolation
"""
x = x.dropna()
if year in x.index and not np.isnan(x[year]):
return x[year]
else:
prev = [i for i in x.index if i < year]
nxt = [i for i in x.index if i > year]
if prev and nxt:
p = max(prev)
n = min(nxt)
return ((n - year) * x[p] + (year - p) * x[n]) / (n - p)
else:
return np.nan | Returns the value of a timeseries (indexed over years) for a year
by linear interpolation.
Parameters
----------
x: pandas.Series
a timeseries to be interpolated
year: int
year of interpolation | Below is the instruction that describes the task:
### Input:
Returns the value of a timeseries (indexed over years) for a year
by linear interpolation.
Parameters
----------
x: pandas.Series
a timeseries to be interpolated
year: int
year of interpolation
### Response:
def fill_series(x, year):
"""Returns the value of a timeseries (indexed over years) for a year
by linear interpolation.
Parameters
----------
x: pandas.Series
a timeseries to be interpolated
year: int
year of interpolation
"""
x = x.dropna()
if year in x.index and not np.isnan(x[year]):
return x[year]
else:
prev = [i for i in x.index if i < year]
nxt = [i for i in x.index if i > year]
if prev and nxt:
p = max(prev)
n = min(nxt)
return ((n - year) * x[p] + (year - p) * x[n]) / (n - p)
else:
return np.nan |
def read(
self,
validity_check=False,
indexes=None,
resampling=None,
dst_nodata=None,
gdal_opts=None,
**kwargs
):
"""
Read reprojected & resampled input data.
Parameters
----------
validity_check : bool
vector file: also run checks if reprojected geometry is valid,
otherwise throw RuntimeError (default: True)
indexes : list or int
raster file: a list of band numbers; None will read all.
dst_nodata : int or float, optional
raster file: if not set, the nodata value from the source dataset
will be used
gdal_opts : dict
raster file: GDAL options passed on to rasterio.Env()
Returns
-------
data : list for vector files or numpy array for raster files
"""
return self._read_as_tiledir(
data_type=self._file_type,
out_tile=self.tile,
td_crs=self._td_crs,
tiles_paths=self._tiles_paths,
profile=self._profile,
validity_check=validity_check,
indexes=indexes,
resampling=resampling if resampling else self._resampling,
dst_nodata=dst_nodata,
gdal_opts=gdal_opts,
**{k: v for k, v in kwargs.items() if k != "data_type"}
) | Read reprojected & resampled input data.
Parameters
----------
validity_check : bool
vector file: also run checks if reprojected geometry is valid,
otherwise throw RuntimeError (default: True)
indexes : list or int
raster file: a list of band numbers; None will read all.
dst_nodata : int or float, optional
raster file: if not set, the nodata value from the source dataset
will be used
gdal_opts : dict
raster file: GDAL options passed on to rasterio.Env()
Returns
-------
data : list for vector files or numpy array for raster files | Below is the instruction that describes the task:
### Input:
Read reprojected & resampled input data.
Parameters
----------
validity_check : bool
vector file: also run checks if reprojected geometry is valid,
otherwise throw RuntimeError (default: True)
indexes : list or int
raster file: a list of band numbers; None will read all.
dst_nodata : int or float, optional
raster file: if not set, the nodata value from the source dataset
will be used
gdal_opts : dict
raster file: GDAL options passed on to rasterio.Env()
Returns
-------
data : list for vector files or numpy array for raster files
### Response:
def read(
self,
validity_check=False,
indexes=None,
resampling=None,
dst_nodata=None,
gdal_opts=None,
**kwargs
):
"""
Read reprojected & resampled input data.
Parameters
----------
validity_check : bool
vector file: also run checks if reprojected geometry is valid,
otherwise throw RuntimeError (default: True)
indexes : list or int
raster file: a list of band numbers; None will read all.
dst_nodata : int or float, optional
raster file: if not set, the nodata value from the source dataset
will be used
gdal_opts : dict
raster file: GDAL options passed on to rasterio.Env()
Returns
-------
data : list for vector files or numpy array for raster files
"""
return self._read_as_tiledir(
data_type=self._file_type,
out_tile=self.tile,
td_crs=self._td_crs,
tiles_paths=self._tiles_paths,
profile=self._profile,
validity_check=validity_check,
indexes=indexes,
resampling=resampling if resampling else self._resampling,
dst_nodata=dst_nodata,
gdal_opts=gdal_opts,
**{k: v for k, v in kwargs.items() if k != "data_type"}
) |
def from_dict(data, ctx):
"""
Instantiate a new AccountSummary from a dict (generally from loading a
JSON response). The data used to instantiate the AccountSummary is a
shallow copy of the dict passed in, with any complex child types
instantiated appropriately.
"""
data = data.copy()
if data.get('balance') is not None:
data['balance'] = ctx.convert_decimal_number(
data.get('balance')
)
if data.get('pl') is not None:
data['pl'] = ctx.convert_decimal_number(
data.get('pl')
)
if data.get('resettablePL') is not None:
data['resettablePL'] = ctx.convert_decimal_number(
data.get('resettablePL')
)
if data.get('financing') is not None:
data['financing'] = ctx.convert_decimal_number(
data.get('financing')
)
if data.get('commission') is not None:
data['commission'] = ctx.convert_decimal_number(
data.get('commission')
)
if data.get('guaranteedExecutionFees') is not None:
data['guaranteedExecutionFees'] = ctx.convert_decimal_number(
data.get('guaranteedExecutionFees')
)
if data.get('marginRate') is not None:
data['marginRate'] = ctx.convert_decimal_number(
data.get('marginRate')
)
if data.get('unrealizedPL') is not None:
data['unrealizedPL'] = ctx.convert_decimal_number(
data.get('unrealizedPL')
)
if data.get('NAV') is not None:
data['NAV'] = ctx.convert_decimal_number(
data.get('NAV')
)
if data.get('marginUsed') is not None:
data['marginUsed'] = ctx.convert_decimal_number(
data.get('marginUsed')
)
if data.get('marginAvailable') is not None:
data['marginAvailable'] = ctx.convert_decimal_number(
data.get('marginAvailable')
)
if data.get('positionValue') is not None:
data['positionValue'] = ctx.convert_decimal_number(
data.get('positionValue')
)
if data.get('marginCloseoutUnrealizedPL') is not None:
data['marginCloseoutUnrealizedPL'] = ctx.convert_decimal_number(
data.get('marginCloseoutUnrealizedPL')
)
if data.get('marginCloseoutNAV') is not None:
data['marginCloseoutNAV'] = ctx.convert_decimal_number(
data.get('marginCloseoutNAV')
)
if data.get('marginCloseoutMarginUsed') is not None:
data['marginCloseoutMarginUsed'] = ctx.convert_decimal_number(
data.get('marginCloseoutMarginUsed')
)
if data.get('marginCloseoutPercent') is not None:
data['marginCloseoutPercent'] = ctx.convert_decimal_number(
data.get('marginCloseoutPercent')
)
if data.get('marginCloseoutPositionValue') is not None:
data['marginCloseoutPositionValue'] = ctx.convert_decimal_number(
data.get('marginCloseoutPositionValue')
)
if data.get('withdrawalLimit') is not None:
data['withdrawalLimit'] = ctx.convert_decimal_number(
data.get('withdrawalLimit')
)
if data.get('marginCallMarginUsed') is not None:
data['marginCallMarginUsed'] = ctx.convert_decimal_number(
data.get('marginCallMarginUsed')
)
if data.get('marginCallPercent') is not None:
data['marginCallPercent'] = ctx.convert_decimal_number(
data.get('marginCallPercent')
)
return AccountSummary(**data) | Instantiate a new AccountSummary from a dict (generally from loading a
JSON response). The data used to instantiate the AccountSummary is a
shallow copy of the dict passed in, with any complex child types
instantiated appropriately. | Below is the instruction that describes the task:
### Input:
Instantiate a new AccountSummary from a dict (generally from loading a
JSON response). The data used to instantiate the AccountSummary is a
shallow copy of the dict passed in, with any complex child types
instantiated appropriately.
### Response:
def from_dict(data, ctx):
"""
Instantiate a new AccountSummary from a dict (generally from loading a
JSON response). The data used to instantiate the AccountSummary is a
shallow copy of the dict passed in, with any complex child types
instantiated appropriately.
"""
data = data.copy()
if data.get('balance') is not None:
data['balance'] = ctx.convert_decimal_number(
data.get('balance')
)
if data.get('pl') is not None:
data['pl'] = ctx.convert_decimal_number(
data.get('pl')
)
if data.get('resettablePL') is not None:
data['resettablePL'] = ctx.convert_decimal_number(
data.get('resettablePL')
)
if data.get('financing') is not None:
data['financing'] = ctx.convert_decimal_number(
data.get('financing')
)
if data.get('commission') is not None:
data['commission'] = ctx.convert_decimal_number(
data.get('commission')
)
if data.get('guaranteedExecutionFees') is not None:
data['guaranteedExecutionFees'] = ctx.convert_decimal_number(
data.get('guaranteedExecutionFees')
)
if data.get('marginRate') is not None:
data['marginRate'] = ctx.convert_decimal_number(
data.get('marginRate')
)
if data.get('unrealizedPL') is not None:
data['unrealizedPL'] = ctx.convert_decimal_number(
data.get('unrealizedPL')
)
if data.get('NAV') is not None:
data['NAV'] = ctx.convert_decimal_number(
data.get('NAV')
)
if data.get('marginUsed') is not None:
data['marginUsed'] = ctx.convert_decimal_number(
data.get('marginUsed')
)
if data.get('marginAvailable') is not None:
data['marginAvailable'] = ctx.convert_decimal_number(
data.get('marginAvailable')
)
if data.get('positionValue') is not None:
data['positionValue'] = ctx.convert_decimal_number(
data.get('positionValue')
)
if data.get('marginCloseoutUnrealizedPL') is not None:
data['marginCloseoutUnrealizedPL'] = ctx.convert_decimal_number(
data.get('marginCloseoutUnrealizedPL')
)
if data.get('marginCloseoutNAV') is not None:
data['marginCloseoutNAV'] = ctx.convert_decimal_number(
data.get('marginCloseoutNAV')
)
if data.get('marginCloseoutMarginUsed') is not None:
data['marginCloseoutMarginUsed'] = ctx.convert_decimal_number(
data.get('marginCloseoutMarginUsed')
)
if data.get('marginCloseoutPercent') is not None:
data['marginCloseoutPercent'] = ctx.convert_decimal_number(
data.get('marginCloseoutPercent')
)
if data.get('marginCloseoutPositionValue') is not None:
data['marginCloseoutPositionValue'] = ctx.convert_decimal_number(
data.get('marginCloseoutPositionValue')
)
if data.get('withdrawalLimit') is not None:
data['withdrawalLimit'] = ctx.convert_decimal_number(
data.get('withdrawalLimit')
)
if data.get('marginCallMarginUsed') is not None:
data['marginCallMarginUsed'] = ctx.convert_decimal_number(
data.get('marginCallMarginUsed')
)
if data.get('marginCallPercent') is not None:
data['marginCallPercent'] = ctx.convert_decimal_number(
data.get('marginCallPercent')
)
return AccountSummary(**data) |
def register(self, plugin, columnType=None, columnName=None):
"""
Registers a plugin to handle particular column types and column names
based on user selection.
:param plugin | <XOrbQueryPlugin>
columnType | <orb.ColumnType> || None
columnName | <str> || None
"""
self._plugins[(columnType, columnName)] = plugin | Registers a plugin to handle particular column types and column names
based on user selection.
:param plugin | <XOrbQueryPlugin>
columnType | <orb.ColumnType> || None
columnName | <str> || None | Below is the instruction that describes the task:
### Input:
Registers a plugin to handle particular column types and column names
based on user selection.
:param plugin | <XOrbQueryPlugin>
columnType | <orb.ColumnType> || None
columnName | <str> || None
### Response:
def register(self, plugin, columnType=None, columnName=None):
"""
Registers a plugin to handle particular column types and column names
based on user selection.
:param plugin | <XOrbQueryPlugin>
columnType | <orb.ColumnType> || None
columnName | <str> || None
"""
self._plugins[(columnType, columnName)] = plugin |
def factorize(number):
"""
Get the prime factors of an integer except for 1.
Parameters
----------
number : int
Returns
-------
primes : iterable
Examples
--------
>>> factorize(-17)
[-1, 17]
>>> factorize(8)
[2, 2, 2]
>>> factorize(3**25)
[3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3]
>>> factorize(1)
[1]
"""
if not isinstance(number, int):
raise ValueError('integer expected, but type(number)={}'
.format(type(number)))
if number < 0:
return [-1] + factorize(number * (-1))
elif number == 0:
raise ValueError('All primes are prime factors of 0.')
else:
for i in range(2, int(math_stl.ceil(number**0.5)) + 1):
if number % i == 0:
if i == number:
return [i]
else:
return [i] + factorize(int(number / i))
return [number] | Get the prime factors of an integer except for 1.
Parameters
----------
number : int
Returns
-------
primes : iterable
Examples
--------
>>> factorize(-17)
[-1, 17]
>>> factorize(8)
[2, 2, 2]
>>> factorize(3**25)
[3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3]
>>> factorize(1)
[1] | Below is the instruction that describes the task:
### Input:
Get the prime factors of an integer except for 1.
Parameters
----------
number : int
Returns
-------
primes : iterable
Examples
--------
>>> factorize(-17)
[-1, 17]
>>> factorize(8)
[2, 2, 2]
>>> factorize(3**25)
[3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3]
>>> factorize(1)
[1]
### Response:
def factorize(number):
"""
Get the prime factors of an integer except for 1.
Parameters
----------
number : int
Returns
-------
primes : iterable
Examples
--------
>>> factorize(-17)
[-1, 17]
>>> factorize(8)
[2, 2, 2]
>>> factorize(3**25)
[3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3]
>>> factorize(1)
[1]
"""
if not isinstance(number, int):
raise ValueError('integer expected, but type(number)={}'
.format(type(number)))
if number < 0:
return [-1] + factorize(number * (-1))
elif number == 0:
raise ValueError('All primes are prime factors of 0.')
else:
for i in range(2, int(math_stl.ceil(number**0.5)) + 1):
if number % i == 0:
if i == number:
return [i]
else:
return [i] + factorize(int(number / i))
return [number] |
def from_meta(cls, meta, meta_all=None):
"""Copy DocstringMeta from another instance."""
if len(meta.args) == 2:
name = meta.args[1]
meta_type = None
for x in meta_all:
if x.args[1] == name and x.args[0] == 'type':
meta_type = x.description
break
return cls(args=meta.args, description=meta.description, type=meta_type)
else:
return cls(args=meta.args, description=meta.description) | Copy DocstringMeta from another instance. | Below is the instruction that describes the task:
### Input:
Copy DocstringMeta from another instance.
### Response:
def from_meta(cls, meta, meta_all=None):
"""Copy DocstringMeta from another instance."""
if len(meta.args) == 2:
name = meta.args[1]
meta_type = None
for x in meta_all:
if x.args[1] == name and x.args[0] == 'type':
meta_type = x.description
break
return cls(args=meta.args, description=meta.description, type=meta_type)
else:
return cls(args=meta.args, description=meta.description) |
def add_data(self, X, y, err_y=0, n=0, T=None):
"""Add data to the training data set of the GaussianProcess instance.
Parameters
----------
X : array, (`M`, `D`)
`M` input values of dimension `D`.
y : array, (`M`,)
`M` target values.
err_y : array, (`M`,) or scalar float, optional
Non-negative values only. Error given as standard deviation) in the
`M` target values. If `err_y` is a scalar, the data set is taken to
be homoscedastic (constant error). Otherwise, the length of `err_y`
must equal the length of `y`. Default value is 0 (noiseless
observations).
n : array, (`M`, `D`) or scalar float, optional
Non-negative integer values only. Degree of derivative for each
target. If `n` is a scalar it is taken to be the value for all
points in `y`. Otherwise, the length of n must equal the length of
`y`. Default value is 0 (observation of target value). If
non-integer values are passed, they will be silently rounded.
T : array, (`M`, `N`), optional
Linear transformation to get from latent variables to data in the
argument `y`. When `T` is passed the argument `y` holds the
transformed quantities `y=TY(X)` where `y` are the observed values
of the transformed quantities, `T` is the transformation matrix and
`Y(X)` is the underlying (untransformed) values of the function to
be fit that enter into the transformation. When `T` is `M`-by-`N`
and `y` has `M` elements, `X` and `n` will both be `N`-by-`D`.
Default is None (no transformation).
Raises
------
ValueError
Bad shapes for any of the inputs, negative values for `err_y` or `n`.
"""
# Verify y has only one non-trivial dimension:
y = scipy.atleast_1d(scipy.asarray(y, dtype=float))
if len(y.shape) != 1:
raise ValueError(
"Training targets y must have only one dimension with length "
"greater than one! Shape of y given is %s" % (y.shape,)
)
# Handle scalar error or verify shape of array error matches shape of y:
try:
iter(err_y)
except TypeError:
err_y = err_y * scipy.ones_like(y, dtype=float)
else:
err_y = scipy.asarray(err_y, dtype=float)
if err_y.shape != y.shape:
raise ValueError(
"When using array-like err_y, shape must match shape of y! "
"Shape of err_y given is %s, shape of y given is %s." % (err_y.shape, y.shape)
)
if (err_y < 0).any():
raise ValueError("All elements of err_y must be non-negative!")
# Handle scalar training input or convert array input into 2d.
X = scipy.atleast_2d(scipy.asarray(X, dtype=float))
# Correct single-dimension inputs:
if self.num_dim == 1 and X.shape[0] == 1:
X = X.T
if T is None and X.shape != (len(y), self.num_dim):
raise ValueError(
"Shape of training inputs must be (len(y), k.num_dim)! X given "
"has shape %s, shape of y is %s and num_dim=%d." % (X.shape, y.shape, self.num_dim)
)
# Handle scalar derivative orders or verify shape of array derivative
# orders matches shape of y:
try:
iter(n)
except TypeError:
n = n * scipy.ones_like(X, dtype=int)
else:
n = scipy.atleast_2d(scipy.asarray(n, dtype=int))
# Correct single-dimension inputs:
if self.num_dim == 1 and n.shape[1] != 1:
n = n.T
if n.shape != X.shape:
raise ValueError(
"When using array-like n, shape must be (len(y), k.num_dim)! "
"Shape of n given is %s, shape of y given is %s and num_dim=%d."
% (n.shape, y.shape, self.num_dim)
)
if (n < 0).any():
raise ValueError("All elements of n must be non-negative integers!")
# Handle transform:
if T is None and self.T is not None:
T = scipy.eye(len(y))
if T is not None:
T = scipy.atleast_2d(scipy.asarray(T, dtype=float))
if T.ndim != 2:
raise ValueError("T must have exactly 2 dimensions!")
if T.shape[0] != len(y):
raise ValueError(
"T must have as many rows are there are elements in y!"
)
if T.shape[1] != X.shape[0]:
raise ValueError(
"There must be as many columns in T as there are rows in X!"
)
if self.T is None and self.X is not None:
self.T = scipy.eye(len(self.y))
if self.T is None:
self.T = T
else:
self.T = scipy.linalg.block_diag(self.T, T)
if self.X is None:
self.X = X
else:
self.X = scipy.vstack((self.X, X))
self.y = scipy.append(self.y, y)
self.err_y = scipy.append(self.err_y, err_y)
if self.n is None:
self.n = n
else:
self.n = scipy.vstack((self.n, n))
self.K_up_to_date = False | Add data to the training data set of the GaussianProcess instance.
Parameters
----------
X : array, (`M`, `D`)
`M` input values of dimension `D`.
y : array, (`M`,)
`M` target values.
err_y : array, (`M`,) or scalar float, optional
Non-negative values only. Error given as standard deviation) in the
`M` target values. If `err_y` is a scalar, the data set is taken to
be homoscedastic (constant error). Otherwise, the length of `err_y`
must equal the length of `y`. Default value is 0 (noiseless
observations).
n : array, (`M`, `D`) or scalar float, optional
Non-negative integer values only. Degree of derivative for each
target. If `n` is a scalar it is taken to be the value for all
points in `y`. Otherwise, the length of n must equal the length of
`y`. Default value is 0 (observation of target value). If
non-integer values are passed, they will be silently rounded.
T : array, (`M`, `N`), optional
Linear transformation to get from latent variables to data in the
argument `y`. When `T` is passed the argument `y` holds the
transformed quantities `y=TY(X)` where `y` are the observed values
of the transformed quantities, `T` is the transformation matrix and
`Y(X)` is the underlying (untransformed) values of the function to
be fit that enter into the transformation. When `T` is `M`-by-`N`
and `y` has `M` elements, `X` and `n` will both be `N`-by-`D`.
Default is None (no transformation).
Raises
------
ValueError
Bad shapes for any of the inputs, negative values for `err_y` or `n`. | Below is the the instruction that describes the task:
### Input:
Add data to the training data set of the GaussianProcess instance.
Parameters
----------
X : array, (`M`, `D`)
`M` input values of dimension `D`.
y : array, (`M`,)
`M` target values.
err_y : array, (`M`,) or scalar float, optional
Non-negative values only. Error given as standard deviation) in the
`M` target values. If `err_y` is a scalar, the data set is taken to
be homoscedastic (constant error). Otherwise, the length of `err_y`
must equal the length of `y`. Default value is 0 (noiseless
observations).
n : array, (`M`, `D`) or scalar float, optional
Non-negative integer values only. Degree of derivative for each
target. If `n` is a scalar it is taken to be the value for all
points in `y`. Otherwise, the length of n must equal the length of
`y`. Default value is 0 (observation of target value). If
non-integer values are passed, they will be silently rounded.
T : array, (`M`, `N`), optional
Linear transformation to get from latent variables to data in the
argument `y`. When `T` is passed the argument `y` holds the
transformed quantities `y=TY(X)` where `y` are the observed values
of the transformed quantities, `T` is the transformation matrix and
`Y(X)` is the underlying (untransformed) values of the function to
be fit that enter into the transformation. When `T` is `M`-by-`N`
and `y` has `M` elements, `X` and `n` will both be `N`-by-`D`.
Default is None (no transformation).
Raises
------
ValueError
Bad shapes for any of the inputs, negative values for `err_y` or `n`.
### Response:
def add_data(self, X, y, err_y=0, n=0, T=None):
"""Add data to the training data set of the GaussianProcess instance.
Parameters
----------
X : array, (`M`, `D`)
`M` input values of dimension `D`.
y : array, (`M`,)
`M` target values.
err_y : array, (`M`,) or scalar float, optional
Non-negative values only. Error given as standard deviation) in the
`M` target values. If `err_y` is a scalar, the data set is taken to
be homoscedastic (constant error). Otherwise, the length of `err_y`
must equal the length of `y`. Default value is 0 (noiseless
observations).
n : array, (`M`, `D`) or scalar float, optional
Non-negative integer values only. Degree of derivative for each
target. If `n` is a scalar it is taken to be the value for all
points in `y`. Otherwise, the length of n must equal the length of
`y`. Default value is 0 (observation of target value). If
non-integer values are passed, they will be silently rounded.
T : array, (`M`, `N`), optional
Linear transformation to get from latent variables to data in the
argument `y`. When `T` is passed the argument `y` holds the
transformed quantities `y=TY(X)` where `y` are the observed values
of the transformed quantities, `T` is the transformation matrix and
`Y(X)` is the underlying (untransformed) values of the function to
be fit that enter into the transformation. When `T` is `M`-by-`N`
and `y` has `M` elements, `X` and `n` will both be `N`-by-`D`.
Default is None (no transformation).
Raises
------
ValueError
Bad shapes for any of the inputs, negative values for `err_y` or `n`.
"""
# Verify y has only one non-trivial dimension:
y = scipy.atleast_1d(scipy.asarray(y, dtype=float))
if len(y.shape) != 1:
raise ValueError(
"Training targets y must have only one dimension with length "
"greater than one! Shape of y given is %s" % (y.shape,)
)
# Handle scalar error or verify shape of array error matches shape of y:
try:
iter(err_y)
except TypeError:
err_y = err_y * scipy.ones_like(y, dtype=float)
else:
err_y = scipy.asarray(err_y, dtype=float)
if err_y.shape != y.shape:
raise ValueError(
"When using array-like err_y, shape must match shape of y! "
"Shape of err_y given is %s, shape of y given is %s." % (err_y.shape, y.shape)
)
if (err_y < 0).any():
raise ValueError("All elements of err_y must be non-negative!")
# Handle scalar training input or convert array input into 2d.
X = scipy.atleast_2d(scipy.asarray(X, dtype=float))
# Correct single-dimension inputs:
if self.num_dim == 1 and X.shape[0] == 1:
X = X.T
if T is None and X.shape != (len(y), self.num_dim):
raise ValueError(
"Shape of training inputs must be (len(y), k.num_dim)! X given "
"has shape %s, shape of y is %s and num_dim=%d." % (X.shape, y.shape, self.num_dim)
)
# Handle scalar derivative orders or verify shape of array derivative
# orders matches shape of y:
try:
iter(n)
except TypeError:
n = n * scipy.ones_like(X, dtype=int)
else:
n = scipy.atleast_2d(scipy.asarray(n, dtype=int))
# Correct single-dimension inputs:
if self.num_dim == 1 and n.shape[1] != 1:
n = n.T
if n.shape != X.shape:
raise ValueError(
"When using array-like n, shape must be (len(y), k.num_dim)! "
"Shape of n given is %s, shape of y given is %s and num_dim=%d."
% (n.shape, y.shape, self.num_dim)
)
if (n < 0).any():
raise ValueError("All elements of n must be non-negative integers!")
# Handle transform:
if T is None and self.T is not None:
T = scipy.eye(len(y))
if T is not None:
T = scipy.atleast_2d(scipy.asarray(T, dtype=float))
if T.ndim != 2:
raise ValueError("T must have exactly 2 dimensions!")
if T.shape[0] != len(y):
raise ValueError(
"T must have as many rows are there are elements in y!"
)
if T.shape[1] != X.shape[0]:
raise ValueError(
"There must be as many columns in T as there are rows in X!"
)
if self.T is None and self.X is not None:
self.T = scipy.eye(len(self.y))
if self.T is None:
self.T = T
else:
self.T = scipy.linalg.block_diag(self.T, T)
if self.X is None:
self.X = X
else:
self.X = scipy.vstack((self.X, X))
self.y = scipy.append(self.y, y)
self.err_y = scipy.append(self.err_y, err_y)
if self.n is None:
self.n = n
else:
self.n = scipy.vstack((self.n, n))
self.K_up_to_date = False |
def df(self, list_of_points, force_read=True):
"""
When connected, calling DF should force a reading on the network.
"""
his = []
for point in list_of_points:
try:
his.append(self._findPoint(point, force_read=force_read).history)
except ValueError as ve:
self._log.error("{}".format(ve))
continue
if not _PANDAS:
return dict(zip(list_of_points, his))
return pd.DataFrame(dict(zip(list_of_points, his))) | When connected, calling DF should force a reading on the network. | Below is the the instruction that describes the task:
### Input:
When connected, calling DF should force a reading on the network.
### Response:
def df(self, list_of_points, force_read=True):
"""
When connected, calling DF should force a reading on the network.
"""
his = []
for point in list_of_points:
try:
his.append(self._findPoint(point, force_read=force_read).history)
except ValueError as ve:
self._log.error("{}".format(ve))
continue
if not _PANDAS:
return dict(zip(list_of_points, his))
return pd.DataFrame(dict(zip(list_of_points, his))) |
def adjust_commission_cost_basis(self, asset, cost):
"""
A note about cost-basis in zipline: all positions are considered
to share a cost basis, even if they were executed in different
transactions with different commission costs, different prices, etc.
Due to limitations about how zipline handles positions, zipline will
currently spread an externally-delivered commission charge across
all shares in a position.
"""
if asset != self.asset:
raise Exception('Updating a commission for a different asset?')
if cost == 0.0:
return
# If we no longer hold this position, there is no cost basis to
# adjust.
if self.amount == 0:
return
# We treat cost basis as the share price where we have broken even.
# For longs, commissions cause a relatively straight forward increase
# in the cost basis.
#
# For shorts, you actually want to decrease the cost basis because you
# break even and earn a profit when the share price decreases.
#
# Shorts are represented as having a negative `amount`.
#
# The multiplication and division by `amount` cancel out leaving the
# cost_basis positive, while subtracting the commission.
prev_cost = self.cost_basis * self.amount
if isinstance(asset, Future):
cost_to_use = cost / asset.price_multiplier
else:
cost_to_use = cost
new_cost = prev_cost + cost_to_use
self.cost_basis = new_cost / self.amount | A note about cost-basis in zipline: all positions are considered
to share a cost basis, even if they were executed in different
transactions with different commission costs, different prices, etc.
Due to limitations about how zipline handles positions, zipline will
currently spread an externally-delivered commission charge across
all shares in a position. | Below is the the instruction that describes the task:
### Input:
A note about cost-basis in zipline: all positions are considered
to share a cost basis, even if they were executed in different
transactions with different commission costs, different prices, etc.
Due to limitations about how zipline handles positions, zipline will
currently spread an externally-delivered commission charge across
all shares in a position.
### Response:
def adjust_commission_cost_basis(self, asset, cost):
"""
A note about cost-basis in zipline: all positions are considered
to share a cost basis, even if they were executed in different
transactions with different commission costs, different prices, etc.
Due to limitations about how zipline handles positions, zipline will
currently spread an externally-delivered commission charge across
all shares in a position.
"""
if asset != self.asset:
raise Exception('Updating a commission for a different asset?')
if cost == 0.0:
return
# If we no longer hold this position, there is no cost basis to
# adjust.
if self.amount == 0:
return
# We treat cost basis as the share price where we have broken even.
# For longs, commissions cause a relatively straight forward increase
# in the cost basis.
#
# For shorts, you actually want to decrease the cost basis because you
# break even and earn a profit when the share price decreases.
#
# Shorts are represented as having a negative `amount`.
#
# The multiplication and division by `amount` cancel out leaving the
# cost_basis positive, while subtracting the commission.
prev_cost = self.cost_basis * self.amount
if isinstance(asset, Future):
cost_to_use = cost / asset.price_multiplier
else:
cost_to_use = cost
new_cost = prev_cost + cost_to_use
self.cost_basis = new_cost / self.amount |
def batch_write_vasp_input(transformed_structures, vasp_input_set=MPRelaxSet,
output_dir=".", create_directory=True,
subfolder=None,
include_cif=False, **kwargs):
"""
Batch write vasp input for a sequence of transformed structures to
output_dir, following the format output_dir/{group}/{formula}_{number}.
Args:
transformed_structures: Sequence of TransformedStructures.
vasp_input_set: pymatgen.io.vaspio_set.VaspInputSet to creates
vasp input files from structures.
output_dir: Directory to output files
create_directory (bool): Create the directory if not present.
Defaults to True.
subfolder: Function to create subdirectory name from
transformed_structure.
e.g., lambda x: x.other_parameters["tags"][0] to use the first
tag.
include_cif (bool): Boolean indication whether to output a CIF as
well. CIF files are generally better supported in visualization
programs.
"""
for i, s in enumerate(transformed_structures):
formula = re.sub(r"\s+", "", s.final_structure.formula)
if subfolder is not None:
subdir = subfolder(s)
dirname = os.path.join(output_dir, subdir,
"{}_{}".format(formula, i))
else:
dirname = os.path.join(output_dir, "{}_{}".format(formula, i))
s.write_vasp_input(vasp_input_set, dirname,
create_directory=create_directory, **kwargs)
if include_cif:
from pymatgen.io.cif import CifWriter
writer = CifWriter(s.final_structure)
writer.write_file(os.path.join(dirname, "{}.cif".format(formula))) | Batch write vasp input for a sequence of transformed structures to
output_dir, following the format output_dir/{group}/{formula}_{number}.
Args:
transformed_structures: Sequence of TransformedStructures.
vasp_input_set: pymatgen.io.vaspio_set.VaspInputSet to creates
vasp input files from structures.
output_dir: Directory to output files
create_directory (bool): Create the directory if not present.
Defaults to True.
subfolder: Function to create subdirectory name from
transformed_structure.
e.g., lambda x: x.other_parameters["tags"][0] to use the first
tag.
include_cif (bool): Boolean indication whether to output a CIF as
well. CIF files are generally better supported in visualization
programs. | Below is the the instruction that describes the task:
### Input:
Batch write vasp input for a sequence of transformed structures to
output_dir, following the format output_dir/{group}/{formula}_{number}.
Args:
transformed_structures: Sequence of TransformedStructures.
vasp_input_set: pymatgen.io.vaspio_set.VaspInputSet to creates
vasp input files from structures.
output_dir: Directory to output files
create_directory (bool): Create the directory if not present.
Defaults to True.
subfolder: Function to create subdirectory name from
transformed_structure.
e.g., lambda x: x.other_parameters["tags"][0] to use the first
tag.
include_cif (bool): Boolean indication whether to output a CIF as
well. CIF files are generally better supported in visualization
programs.
### Response:
def batch_write_vasp_input(transformed_structures, vasp_input_set=MPRelaxSet,
output_dir=".", create_directory=True,
subfolder=None,
include_cif=False, **kwargs):
"""
Batch write vasp input for a sequence of transformed structures to
output_dir, following the format output_dir/{group}/{formula}_{number}.
Args:
transformed_structures: Sequence of TransformedStructures.
vasp_input_set: pymatgen.io.vaspio_set.VaspInputSet to creates
vasp input files from structures.
output_dir: Directory to output files
create_directory (bool): Create the directory if not present.
Defaults to True.
subfolder: Function to create subdirectory name from
transformed_structure.
e.g., lambda x: x.other_parameters["tags"][0] to use the first
tag.
include_cif (bool): Boolean indication whether to output a CIF as
well. CIF files are generally better supported in visualization
programs.
"""
for i, s in enumerate(transformed_structures):
formula = re.sub(r"\s+", "", s.final_structure.formula)
if subfolder is not None:
subdir = subfolder(s)
dirname = os.path.join(output_dir, subdir,
"{}_{}".format(formula, i))
else:
dirname = os.path.join(output_dir, "{}_{}".format(formula, i))
s.write_vasp_input(vasp_input_set, dirname,
create_directory=create_directory, **kwargs)
if include_cif:
from pymatgen.io.cif import CifWriter
writer = CifWriter(s.final_structure)
writer.write_file(os.path.join(dirname, "{}.cif".format(formula))) |
def _get_binding_info(host_header='', ip_address='*', port=80):
'''
Combine the host header, IP address, and TCP port into bindingInformation
format. Binding Information specifies information to communicate with a
site. It includes the IP address, the port number, and an optional host
header (usually a host name) to communicate with the site.
Args:
host_header (str): Usually a hostname
ip_address (str): The IP address
port (int): The port
Returns:
str: A properly formatted bindingInformation string (IP:port:hostheader)
eg: 192.168.0.12:80:www.contoso.com
'''
return ':'.join([ip_address, six.text_type(port),
host_header.replace(' ', '')]) | Combine the host header, IP address, and TCP port into bindingInformation
format. Binding Information specifies information to communicate with a
site. It includes the IP address, the port number, and an optional host
header (usually a host name) to communicate with the site.
Args:
host_header (str): Usually a hostname
ip_address (str): The IP address
port (int): The port
Returns:
str: A properly formatted bindingInformation string (IP:port:hostheader)
eg: 192.168.0.12:80:www.contoso.com | Below is the the instruction that describes the task:
### Input:
Combine the host header, IP address, and TCP port into bindingInformation
format. Binding Information specifies information to communicate with a
site. It includes the IP address, the port number, and an optional host
header (usually a host name) to communicate with the site.
Args:
host_header (str): Usually a hostname
ip_address (str): The IP address
port (int): The port
Returns:
str: A properly formatted bindingInformation string (IP:port:hostheader)
eg: 192.168.0.12:80:www.contoso.com
### Response:
def _get_binding_info(host_header='', ip_address='*', port=80):
'''
Combine the host header, IP address, and TCP port into bindingInformation
format. Binding Information specifies information to communicate with a
site. It includes the IP address, the port number, and an optional host
header (usually a host name) to communicate with the site.
Args:
host_header (str): Usually a hostname
ip_address (str): The IP address
port (int): The port
Returns:
str: A properly formatted bindingInformation string (IP:port:hostheader)
eg: 192.168.0.12:80:www.contoso.com
'''
return ':'.join([ip_address, six.text_type(port),
host_header.replace(' ', '')]) |
def send_dynamic_message(sender, message):
"""Send a dynamic message to the listeners.
Dynamic messages represents a progress. Usually it will be appended to
the previous messages.
.. versionadded:: 3.3
:param sender: The sender.
:type sender: object
:param message: An instance of our rich message class.
:type message: safe.messaging.Message
"""
dispatcher.send(
signal=DYNAMIC_MESSAGE_SIGNAL,
sender=sender,
message=message) | Send a dynamic message to the listeners.
Dynamic messages represents a progress. Usually it will be appended to
the previous messages.
.. versionadded:: 3.3
:param sender: The sender.
:type sender: object
:param message: An instance of our rich message class.
:type message: safe.messaging.Message | Below is the the instruction that describes the task:
### Input:
Send a dynamic message to the listeners.
Dynamic messages represents a progress. Usually it will be appended to
the previous messages.
.. versionadded:: 3.3
:param sender: The sender.
:type sender: object
:param message: An instance of our rich message class.
:type message: safe.messaging.Message
### Response:
def send_dynamic_message(sender, message):
"""Send a dynamic message to the listeners.
Dynamic messages represents a progress. Usually it will be appended to
the previous messages.
.. versionadded:: 3.3
:param sender: The sender.
:type sender: object
:param message: An instance of our rich message class.
:type message: safe.messaging.Message
"""
dispatcher.send(
signal=DYNAMIC_MESSAGE_SIGNAL,
sender=sender,
message=message) |
def clean(self):
"""Global cleanup."""
super(LineFormSet, self).clean()
if any(self.errors):
# Already seen errors, let's skip.
return
self.clean_unique_fields() | Global cleanup. | Below is the the instruction that describes the task:
### Input:
Global cleanup.
### Response:
def clean(self):
"""Global cleanup."""
super(LineFormSet, self).clean()
if any(self.errors):
# Already seen errors, let's skip.
return
self.clean_unique_fields() |
def get_used_entities(self,use_specs):
"""
Returns the entities which are imported by a use statement. These
are contained in dicts.
"""
if len(use_specs.strip()) == 0:
return (self.pub_procs, self.pub_absints, self.pub_types, self.pub_vars)
only = bool(self.ONLY_RE.match(use_specs))
use_specs = self.ONLY_RE.sub('',use_specs)
ulist = self.SPLIT_RE.split(use_specs)
ulist[-1] = ulist[-1].strip()
uspecs = {}
for item in ulist:
match = self.RENAME_RE.search(item)
if match:
uspecs[match.group(1).lower()] = match.group(2)
else:
uspecs[item.lower()] = item
ret_procs = {}
ret_absints = {}
ret_types = {}
ret_vars = {}
for name, obj in self.pub_procs.items():
name = name.lower()
if only:
if name in uspecs:
ret_procs[name] = obj
else:
ret_procs[name] = obj
for name, obj in self.pub_absints.items():
name = name.lower()
if only:
if name in uspecs:
ret_absints[name] = obj
else:
ret_absints[name] = obj
for name, obj in self.pub_types.items():
name = name.lower()
if only:
if name in uspecs:
ret_types[name] = obj
else:
ret_types[name] = obj
for name, obj in self.pub_vars.items():
name = name.lower()
if only:
if name in uspecs:
ret_vars[name] = obj
else:
ret_vars[name] = obj
return (ret_procs,ret_absints,ret_types,ret_vars) | Returns the entities which are imported by a use statement. These
are contained in dicts. | Below is the the instruction that describes the task:
### Input:
Returns the entities which are imported by a use statement. These
are contained in dicts.
### Response:
def get_used_entities(self,use_specs):
"""
Returns the entities which are imported by a use statement. These
are contained in dicts.
"""
if len(use_specs.strip()) == 0:
return (self.pub_procs, self.pub_absints, self.pub_types, self.pub_vars)
only = bool(self.ONLY_RE.match(use_specs))
use_specs = self.ONLY_RE.sub('',use_specs)
ulist = self.SPLIT_RE.split(use_specs)
ulist[-1] = ulist[-1].strip()
uspecs = {}
for item in ulist:
match = self.RENAME_RE.search(item)
if match:
uspecs[match.group(1).lower()] = match.group(2)
else:
uspecs[item.lower()] = item
ret_procs = {}
ret_absints = {}
ret_types = {}
ret_vars = {}
for name, obj in self.pub_procs.items():
name = name.lower()
if only:
if name in uspecs:
ret_procs[name] = obj
else:
ret_procs[name] = obj
for name, obj in self.pub_absints.items():
name = name.lower()
if only:
if name in uspecs:
ret_absints[name] = obj
else:
ret_absints[name] = obj
for name, obj in self.pub_types.items():
name = name.lower()
if only:
if name in uspecs:
ret_types[name] = obj
else:
ret_types[name] = obj
for name, obj in self.pub_vars.items():
name = name.lower()
if only:
if name in uspecs:
ret_vars[name] = obj
else:
ret_vars[name] = obj
return (ret_procs,ret_absints,ret_types,ret_vars) |
def initialize(self, secret_shares=5, secret_threshold=3, pgp_keys=None, root_token_pgp_key=None,
stored_shares=None, recovery_shares=None, recovery_threshold=None, recovery_pgp_keys=None):
"""Initialize a new Vault.
The Vault must not have been previously initialized. The recovery options, as well as the stored shares option,
are only available when using Vault HSM.
Supported methods:
PUT: /sys/init. Produces: 200 application/json
:param secret_shares: The number of shares to split the master key into.
:type secret_shares: int
:param secret_threshold: Specifies the number of shares required to reconstruct the master key. This must be
less than or equal secret_shares. If using Vault HSM with auto-unsealing, this value must be the same as
secret_shares.
:type secret_threshold: int
:param pgp_keys: List of PGP public keys used to encrypt the output unseal keys.
Ordering is preserved. The keys must be base64-encoded from their original binary representation.
The size of this array must be the same as secret_shares.
:type pgp_keys: list
:param root_token_pgp_key: Specifies a PGP public key used to encrypt the initial root token. The
key must be base64-encoded from its original binary representation.
:type root_token_pgp_key: str | unicode
:param stored_shares: <enterprise only> Specifies the number of shares that should be encrypted by the HSM and
stored for auto-unsealing. Currently must be the same as secret_shares.
:type stored_shares: int
:param recovery_shares: <enterprise only> Specifies the number of shares to split the recovery key into.
:type recovery_shares: int
:param recovery_threshold: <enterprise only> Specifies the number of shares required to reconstruct the recovery
key. This must be less than or equal to recovery_shares.
:type recovery_threshold: int
:param recovery_pgp_keys: <enterprise only> Specifies an array of PGP public keys used to encrypt the output
recovery keys. Ordering is preserved. The keys must be base64-encoded from their original binary
representation. The size of this array must be the same as recovery_shares.
:type recovery_pgp_keys: list
:return: The JSON response of the request.
:rtype: dict
"""
params = {
'secret_shares': secret_shares,
'secret_threshold': secret_threshold,
'root_token_pgp_key': root_token_pgp_key,
}
if pgp_keys is not None:
if len(pgp_keys) != secret_shares:
raise ParamValidationError('length of pgp_keys list argument must equal secret_shares value')
params['pgp_keys'] = pgp_keys
if stored_shares is not None:
if stored_shares != secret_shares:
raise ParamValidationError('value for stored_shares argument must equal secret_shares argument')
params['stored_shares'] = stored_shares
if recovery_shares is not None:
params['recovery_shares'] = recovery_shares
if recovery_threshold is not None:
if recovery_threshold > recovery_shares:
error_msg = 'value for recovery_threshold argument be less than or equal to recovery_shares argument'
raise ParamValidationError(error_msg)
params['recovery_threshold'] = recovery_threshold
if recovery_pgp_keys is not None:
if len(recovery_pgp_keys) != recovery_shares:
raise ParamValidationError('length of recovery_pgp_keys list argument must equal recovery_shares value')
params['recovery_pgp_keys'] = recovery_pgp_keys
api_path = '/v1/sys/init'
response = self._adapter.put(
url=api_path,
json=params,
)
return response.json() | Initialize a new Vault.
The Vault must not have been previously initialized. The recovery options, as well as the stored shares option,
are only available when using Vault HSM.
Supported methods:
PUT: /sys/init. Produces: 200 application/json
:param secret_shares: The number of shares to split the master key into.
:type secret_shares: int
:param secret_threshold: Specifies the number of shares required to reconstruct the master key. This must be
less than or equal secret_shares. If using Vault HSM with auto-unsealing, this value must be the same as
secret_shares.
:type secret_threshold: int
:param pgp_keys: List of PGP public keys used to encrypt the output unseal keys.
Ordering is preserved. The keys must be base64-encoded from their original binary representation.
The size of this array must be the same as secret_shares.
:type pgp_keys: list
:param root_token_pgp_key: Specifies a PGP public key used to encrypt the initial root token. The
key must be base64-encoded from its original binary representation.
:type root_token_pgp_key: str | unicode
:param stored_shares: <enterprise only> Specifies the number of shares that should be encrypted by the HSM and
stored for auto-unsealing. Currently must be the same as secret_shares.
:type stored_shares: int
:param recovery_shares: <enterprise only> Specifies the number of shares to split the recovery key into.
:type recovery_shares: int
:param recovery_threshold: <enterprise only> Specifies the number of shares required to reconstruct the recovery
key. This must be less than or equal to recovery_shares.
:type recovery_threshold: int
:param recovery_pgp_keys: <enterprise only> Specifies an array of PGP public keys used to encrypt the output
recovery keys. Ordering is preserved. The keys must be base64-encoded from their original binary
representation. The size of this array must be the same as recovery_shares.
:type recovery_pgp_keys: list
:return: The JSON response of the request.
:rtype: dict | Below is the the instruction that describes the task:
### Input:
Initialize a new Vault.
The Vault must not have been previously initialized. The recovery options, as well as the stored shares option,
are only available when using Vault HSM.
Supported methods:
PUT: /sys/init. Produces: 200 application/json
:param secret_shares: The number of shares to split the master key into.
:type secret_shares: int
:param secret_threshold: Specifies the number of shares required to reconstruct the master key. This must be
less than or equal secret_shares. If using Vault HSM with auto-unsealing, this value must be the same as
secret_shares.
:type secret_threshold: int
:param pgp_keys: List of PGP public keys used to encrypt the output unseal keys.
Ordering is preserved. The keys must be base64-encoded from their original binary representation.
The size of this array must be the same as secret_shares.
:type pgp_keys: list
:param root_token_pgp_key: Specifies a PGP public key used to encrypt the initial root token. The
key must be base64-encoded from its original binary representation.
:type root_token_pgp_key: str | unicode
:param stored_shares: <enterprise only> Specifies the number of shares that should be encrypted by the HSM and
stored for auto-unsealing. Currently must be the same as secret_shares.
:type stored_shares: int
:param recovery_shares: <enterprise only> Specifies the number of shares to split the recovery key into.
:type recovery_shares: int
:param recovery_threshold: <enterprise only> Specifies the number of shares required to reconstruct the recovery
key. This must be less than or equal to recovery_shares.
:type recovery_threshold: int
:param recovery_pgp_keys: <enterprise only> Specifies an array of PGP public keys used to encrypt the output
recovery keys. Ordering is preserved. The keys must be base64-encoded from their original binary
representation. The size of this array must be the same as recovery_shares.
:type recovery_pgp_keys: list
:return: The JSON response of the request.
:rtype: dict
### Response:
def initialize(self, secret_shares=5, secret_threshold=3, pgp_keys=None, root_token_pgp_key=None,
               stored_shares=None, recovery_shares=None, recovery_threshold=None, recovery_pgp_keys=None):
    """Initialize a new Vault.

    The Vault must not have been previously initialized. The recovery options, as well as the stored shares option,
    are only available when using Vault HSM.

    Supported methods:
        PUT: /sys/init. Produces: 200 application/json

    :param secret_shares: The number of shares to split the master key into.
    :type secret_shares: int
    :param secret_threshold: Specifies the number of shares required to reconstruct the master key. This must be
        less than or equal to secret_shares. If using Vault HSM with auto-unsealing, this value must be the same as
        secret_shares.
    :type secret_threshold: int
    :param pgp_keys: List of PGP public keys used to encrypt the output unseal keys.
        Ordering is preserved. The keys must be base64-encoded from their original binary representation.
        The size of this array must be the same as secret_shares.
    :type pgp_keys: list
    :param root_token_pgp_key: Specifies a PGP public key used to encrypt the initial root token. The
        key must be base64-encoded from its original binary representation.
    :type root_token_pgp_key: str | unicode
    :param stored_shares: <enterprise only> Specifies the number of shares that should be encrypted by the HSM and
        stored for auto-unsealing. Currently must be the same as secret_shares.
    :type stored_shares: int
    :param recovery_shares: <enterprise only> Specifies the number of shares to split the recovery key into.
    :type recovery_shares: int
    :param recovery_threshold: <enterprise only> Specifies the number of shares required to reconstruct the recovery
        key. This must be less than or equal to recovery_shares.
    :type recovery_threshold: int
    :param recovery_pgp_keys: <enterprise only> Specifies an array of PGP public keys used to encrypt the output
        recovery keys. Ordering is preserved. The keys must be base64-encoded from their original binary
        representation. The size of this array must be the same as recovery_shares.
    :type recovery_pgp_keys: list
    :return: The JSON response of the request.
    :rtype: dict
    """
    # Always-sent parameters; optional ones are validated and added below only
    # when the caller actually supplied them.
    params = {
        'secret_shares': secret_shares,
        'secret_threshold': secret_threshold,
        'root_token_pgp_key': root_token_pgp_key,
    }
    if pgp_keys is not None:
        # Vault requires one PGP key per unseal-key share.
        if len(pgp_keys) != secret_shares:
            raise ParamValidationError('length of pgp_keys list argument must equal secret_shares value')
        params['pgp_keys'] = pgp_keys
    if stored_shares is not None:
        if stored_shares != secret_shares:
            raise ParamValidationError('value for stored_shares argument must equal secret_shares argument')
        params['stored_shares'] = stored_shares
    if recovery_shares is not None:
        params['recovery_shares'] = recovery_shares
    if recovery_threshold is not None:
        # Guard the comparison: recovery_shares may legitimately be None here,
        # and `int > None` raises TypeError on Python 3.
        if recovery_shares is not None and recovery_threshold > recovery_shares:
            error_msg = 'value for recovery_threshold argument must be less than or equal to recovery_shares argument'
            raise ParamValidationError(error_msg)
        params['recovery_threshold'] = recovery_threshold
    if recovery_pgp_keys is not None:
        # Vault requires one PGP key per recovery-key share.
        if len(recovery_pgp_keys) != recovery_shares:
            raise ParamValidationError('length of recovery_pgp_keys list argument must equal recovery_shares value')
        params['recovery_pgp_keys'] = recovery_pgp_keys

    api_path = '/v1/sys/init'
    response = self._adapter.put(
        url=api_path,
        json=params,
    )
    return response.json()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.