| code | docstring | text |
|---|---|---|
def parse_entry(self, row):
"""Parse an individual VCF entry and return a VCFEntry which contains information about
the call (such as alternative allele, zygosity, etc.)
"""
var_call = VCFEntry(self.individuals)
var_call.parse_entry(row)
return var_call | Parse an individual VCF entry and return a VCFEntry which contains information about
the call (such as alternative allele, zygosity, etc.) | Below is the instruction that describes the task:
### Input:
Parse an individual VCF entry and return a VCFEntry which contains information about
the call (such as alternative allele, zygosity, etc.)
### Response:
def parse_entry(self, row):
"""Parse an individual VCF entry and return a VCFEntry which contains information about
the call (such as alternative allele, zygosity, etc.)
"""
var_call = VCFEntry(self.individuals)
var_call.parse_entry(row)
return var_call |
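A hedged usage sketch for the entry above: `VCFParser`, its constructor, the file name, and the tab-split row format are illustrative assumptions, not details confirmed by the snippet itself.
```python
# Hypothetical driver loop; `VCFParser` and its construction are assumed.
parser = VCFParser(individuals=["sample1", "sample2"])
with open("variants.vcf") as fh:          # placeholder path
    for line in fh:
        if line.startswith("#"):          # skip VCF header/meta lines
            continue
        entry = parser.parse_entry(line.rstrip("\n").split("\t"))
        print(entry)
```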
def get_all_handleable_roots(self):
"""
Get list of all handleable devices, return only those that represent
root nodes within the filtered device tree.
"""
nodes = self.get_device_tree()
return [node.device
for node in sorted(nodes.values(), key=DevNode._sort_key)
if not node.ignored and node.device
and (node.root == '/' or nodes[node.root].ignored)] | Get list of all handleable devices, return only those that represent
root nodes within the filtered device tree. | Below is the instruction that describes the task:
### Input:
Get list of all handleable devices, return only those that represent
root nodes within the filtered device tree.
### Response:
def get_all_handleable_roots(self):
"""
Get list of all handleable devices, return only those that represent
root nodes within the filtered device tree.
"""
nodes = self.get_device_tree()
return [node.device
for node in sorted(nodes.values(), key=DevNode._sort_key)
if not node.ignored and node.device
and (node.root == '/' or nodes[node.root].ignored)] |
def Transactional(fn, self, *argv, **argd):
"""
Decorator that wraps DAO methods to handle transactions automatically.
It may only work with subclasses of L{BaseDAO}.
"""
return self._transactional(fn, *argv, **argd) | Decorator that wraps DAO methods to handle transactions automatically.
It may only work with subclasses of L{BaseDAO}. | Below is the instruction that describes the task:
### Input:
Decorator that wraps DAO methods to handle transactions automatically.
It may only work with subclasses of L{BaseDAO}.
### Response:
def Transactional(fn, self, *argv, **argd):
"""
Decorator that wraps DAO methods to handle transactions automatically.
It may only work with subclasses of L{BaseDAO}.
"""
return self._transactional(fn, *argv, **argd) |
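The three-argument `(fn, self, ...)` signature suggests this function is meant to be applied through a decorator helper rather than used directly. Below is a self-contained sketch of the same transaction-wrapping idea written with plain `functools.wraps`; the `BaseDAO._transactional` body is a stub, since its real implementation is not shown in the row.
```python
from functools import wraps

def transactional(fn):
    """Minimal stand-in showing the intended wrapping behaviour."""
    @wraps(fn)
    def wrapper(self, *args, **kwargs):
        # Delegate to the DAO's transaction machinery, as the row above does.
        return self._transactional(fn, *args, **kwargs)
    return wrapper

class BaseDAO:
    def _transactional(self, fn, *args, **kwargs):
        # Begin/commit/rollback would live here in a real BaseDAO.
        print("BEGIN")
        try:
            result = fn(self, *args, **kwargs)
            print("COMMIT")
            return result
        except Exception:
            print("ROLLBACK")
            raise

class UserDAO(BaseDAO):
    @transactional
    def add_user(self, name):
        return "added " + name

print(UserDAO().add_user("alice"))  # BEGIN / COMMIT / added alice
```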
def get_env_pass(self,user=None,msg=None,note=None):
"""Gets a password from the user if one is not already recorded for this environment.
@param user: username we are getting password for
@param msg: message to put out there
"""
shutit = self.shutit
shutit.handle_note(note)
user = user or self.whoami()
# cygwin does not have root
pw = ''
if self.current_environment.distro == 'cygwin':
return pw
if user not in self.current_environment.users.keys():
self.current_environment.users.update({user:None})
if not self.current_environment.users[user] and user != 'root':
msg = msg or 'Please input the sudo password for user: ' + user
pw = shutit_util.get_input(msg,ispass=True)
self.current_environment.users[user] = pw
shutit_global.shutit_global_object.secret_words_set.add(self.current_environment.users[user])
return pw | Gets a password from the user if one is not already recorded for this environment.
@param user: username we are getting password for
@param msg: message to put out there | Below is the instruction that describes the task:
### Input:
Gets a password from the user if one is not already recorded for this environment.
@param user: username we are getting password for
@param msg: message to put out there
### Response:
def get_env_pass(self,user=None,msg=None,note=None):
"""Gets a password from the user if one is not already recorded for this environment.
@param user: username we are getting password for
@param msg: message to put out there
"""
shutit = self.shutit
shutit.handle_note(note)
user = user or self.whoami()
# cygwin does not have root
pw = ''
if self.current_environment.distro == 'cygwin':
return pw
if user not in self.current_environment.users.keys():
self.current_environment.users.update({user:None})
if not self.current_environment.users[user] and user != 'root':
msg = msg or 'Please input the sudo password for user: ' + user
pw = shutit_util.get_input(msg,ispass=True)
self.current_environment.users[user] = pw
shutit_global.shutit_global_object.secret_words_set.add(self.current_environment.users[user])
return pw |
def from_uncharted_json_file(cls, file):
""" Construct an AnalysisGraph object from a file containing INDRA
statements serialized and exported by Uncharted's CauseMos webapp.
"""
with open(file, "r") as f:
_dict = json.load(f)
return cls.from_uncharted_json_serialized_dict(_dict) | Construct an AnalysisGraph object from a file containing INDRA
statements serialized and exported by Uncharted's CauseMos webapp. | Below is the instruction that describes the task:
### Input:
Construct an AnalysisGraph object from a file containing INDRA
statements serialized and exported by Uncharted's CauseMos webapp.
### Response:
def from_uncharted_json_file(cls, file):
""" Construct an AnalysisGraph object from a file containing INDRA
statements serialized and exported by Uncharted's CauseMos webapp.
"""
with open(file, "r") as f:
_dict = json.load(f)
return cls.from_uncharted_json_serialized_dict(_dict) |
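The classmethod above is a thin wrapper; a hedged equivalent using only the calls shown in the row (the path is a placeholder, and `AnalysisGraph` is assumed to be importable from the surrounding library):
```python
import json

with open("causemos_export.json", "r") as f:   # placeholder export file
    payload = json.load(f)
# G = AnalysisGraph.from_uncharted_json_serialized_dict(payload)  # assumed class
```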
def resize(self, sizes, interpolation="cubic"):
"""
Resize the segmentation map array to the provided size given the provided interpolation.
Parameters
----------
sizes : float or iterable of int or iterable of float
New size of the array in ``(height, width)``.
See :func:`imgaug.imgaug.imresize_single_image` for details.
interpolation : None or str or int, optional
The interpolation to use during resize.
See :func:`imgaug.imgaug.imresize_single_image` for details.
Note: The segmentation map is internally stored as multiple float-based heatmaps,
making smooth interpolations potentially more reasonable than nearest neighbour
interpolation.
Returns
-------
segmap : imgaug.SegmentationMapOnImage
Resized segmentation map object.
"""
arr_resized = ia.imresize_single_image(self.arr, sizes, interpolation=interpolation)
# cubic interpolation can lead to values outside of [0.0, 1.0],
# see https://github.com/opencv/opencv/issues/7195
# TODO area interpolation too?
arr_resized = np.clip(arr_resized, 0.0, 1.0)
segmap = SegmentationMapOnImage(arr_resized, shape=self.shape)
segmap.input_was = self.input_was
return segmap | Resize the segmentation map array to the provided size given the provided interpolation.
Parameters
----------
sizes : float or iterable of int or iterable of float
New size of the array in ``(height, width)``.
See :func:`imgaug.imgaug.imresize_single_image` for details.
interpolation : None or str or int, optional
The interpolation to use during resize.
See :func:`imgaug.imgaug.imresize_single_image` for details.
Note: The segmentation map is internally stored as multiple float-based heatmaps,
making smooth interpolations potentially more reasonable than nearest neighbour
interpolation.
Returns
-------
segmap : imgaug.SegmentationMapOnImage
Resized segmentation map object. | Below is the instruction that describes the task:
### Input:
Resize the segmentation map array to the provided size given the provided interpolation.
Parameters
----------
sizes : float or iterable of int or iterable of float
New size of the array in ``(height, width)``.
See :func:`imgaug.imgaug.imresize_single_image` for details.
interpolation : None or str or int, optional
The interpolation to use during resize.
See :func:`imgaug.imgaug.imresize_single_image` for details.
Note: The segmentation map is internally stored as multiple float-based heatmaps,
making smooth interpolations potentially more reasonable than nearest neighbour
interpolation.
Returns
-------
segmap : imgaug.SegmentationMapOnImage
Resized segmentation map object.
### Response:
def resize(self, sizes, interpolation="cubic"):
"""
Resize the segmentation map array to the provided size given the provided interpolation.
Parameters
----------
sizes : float or iterable of int or iterable of float
New size of the array in ``(height, width)``.
See :func:`imgaug.imgaug.imresize_single_image` for details.
interpolation : None or str or int, optional
The interpolation to use during resize.
See :func:`imgaug.imgaug.imresize_single_image` for details.
Note: The segmentation map is internally stored as multiple float-based heatmaps,
making smooth interpolations potentially more reasonable than nearest neighbour
interpolation.
Returns
-------
segmap : imgaug.SegmentationMapOnImage
Resized segmentation map object.
"""
arr_resized = ia.imresize_single_image(self.arr, sizes, interpolation=interpolation)
# cubic interpolation can lead to values outside of [0.0, 1.0],
# see https://github.com/opencv/opencv/issues/7195
# TODO area interpolation too?
arr_resized = np.clip(arr_resized, 0.0, 1.0)
segmap = SegmentationMapOnImage(arr_resized, shape=self.shape)
segmap.input_was = self.input_was
return segmap |
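A hedged usage sketch for the resize method, assuming an imgaug version that still ships `SegmentationMapOnImage` with an `nb_classes` constructor argument (newer releases renamed the class, so the exact signature may differ):
```python
import numpy as np
import imgaug as ia

# A 100x100 map with two classes; class 1 fills the central square.
arr = np.zeros((100, 100), dtype=np.int32)
arr[25:75, 25:75] = 1
segmap = ia.SegmentationMapOnImage(arr, shape=(100, 100), nb_classes=2)

small = segmap.resize((50, 50))           # default cubic interpolation
print(small.arr.shape)                    # (50, 50, 2): one float heatmap per class
```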
def dump_config(self, file_path):
"""
Dump system and routine configurations to an rc-formatted file.
Parameters
----------
file_path : str
path to the configuration file. The user will be prompted if the
file already exists.
Returns
-------
None
"""
if os.path.isfile(file_path):
logger.debug('File {} already exists. Overwrite? [y/N]'.format(file_path))
choice = input('File {} already exists. Overwrite? [y/N]'.format(file_path)).lower()
if len(choice) == 0 or choice[0] != 'y':
logger.info('File not overwritten.')
return
conf = self.config.dump_conf()
for r in routines.__all__:
conf = self.__dict__[r.lower()].config.dump_conf(conf)
with open(file_path, 'w') as f:
conf.write(f)
logger.info('Config written to {}'.format(file_path)) | Dump system and routine configurations to an rc-formatted file.
Parameters
----------
file_path : str
path to the configuration file. The user will be prompted if the
file already exists.
Returns
-------
None | Below is the instruction that describes the task:
### Input:
Dump system and routine configurations to an rc-formatted file.
Parameters
----------
file_path : str
path to the configuration file. The user will be prompted if the
file already exists.
Returns
-------
None
### Response:
def dump_config(self, file_path):
"""
Dump system and routine configurations to an rc-formatted file.
Parameters
----------
file_path : str
path to the configuration file. The user will be prompted if the
file already exists.
Returns
-------
None
"""
if os.path.isfile(file_path):
logger.debug('File {} already exists. Overwrite? [y/N]'.format(file_path))
choice = input('File {} already exists. Overwrite? [y/N]'.format(file_path)).lower()
if len(choice) == 0 or choice[0] != 'y':
logger.info('File not overwritten.')
return
conf = self.config.dump_conf()
for r in routines.__all__:
conf = self.__dict__[r.lower()].config.dump_conf(conf)
with open(file_path, 'w') as f:
conf.write(f)
logger.info('Config written to {}'.format(file_path)) |
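A hedged usage sketch; `system` stands in for whatever object carries this method (the `routines` reference hints at a simulator-style package, but that is an assumption), and the file name is a placeholder.
```python
system.dump_config("myproject.rc")   # writes the file, or prompts if it exists

import configparser
conf = configparser.ConfigParser()
conf.read("myproject.rc")
print(conf.sections())               # one section per system/routine config
```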
def get_relationship_form(self, *args, **kwargs):
"""Pass through to provider RelationshipAdminSession.get_relationship_form_for_update"""
# Implemented from kitosid template for -
# osid.resource.ResourceAdminSession.get_resource_form_for_update
# This method might be a bit sketchy. Time will tell.
if isinstance(args[-1], list) or 'relationship_record_types' in kwargs:
return self.get_relationship_form_for_create(*args, **kwargs)
else:
return self.get_relationship_form_for_update(*args, **kwargs) | Pass through to provider RelationshipAdminSession.get_relationship_form_for_update | Below is the instruction that describes the task:
### Input:
Pass through to provider RelationshipAdminSession.get_relationship_form_for_update
### Response:
def get_relationship_form(self, *args, **kwargs):
"""Pass through to provider RelationshipAdminSession.get_relationship_form_for_update"""
# Implemented from kitosid template for -
# osid.resource.ResourceAdminSession.get_resource_form_for_update
# This method might be a bit sketchy. Time will tell.
if isinstance(args[-1], list) or 'relationship_record_types' in kwargs:
return self.get_relationship_form_for_create(*args, **kwargs)
else:
return self.get_relationship_form_for_update(*args, **kwargs) |
def anonymous_required(view, redirect_to=None):
"""
Only allow if user is NOT authenticated.
"""
if redirect_to is None:
redirect_to = settings.LOGIN_REDIRECT_URL
@wraps(view)
def wrapper(request, *a, **k):
if request.user and request.user.is_authenticated():
return HttpResponseRedirect(redirect_to)
return view(request, *a, **k)
return wrapper | Only allow if user is NOT authenticated. | Below is the instruction that describes the task:
### Input:
Only allow if user is NOT authenticated.
### Response:
def anonymous_required(view, redirect_to=None):
"""
Only allow if user is NOT authenticated.
"""
if redirect_to is None:
redirect_to = settings.LOGIN_REDIRECT_URL
@wraps(view)
def wrapper(request, *a, **k):
if request.user and request.user.is_authenticated():
return HttpResponseRedirect(redirect_to)
return view(request, *a, **k)
return wrapper |
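A hedged Django usage sketch (it assumes configured Django settings, since the default redirect is read from `settings.LOGIN_REDIRECT_URL` at decoration time). Because the view is the first positional argument, the bare `@anonymous_required` form works when the default redirect is acceptable; otherwise apply the decorator manually:
```python
from django.http import HttpResponse

@anonymous_required
def login_page(request):
    return HttpResponse("please log in")

def signup_page(request):
    return HttpResponse("sign up")

# Explicit redirect target for already-authenticated users:
signup_page = anonymous_required(signup_page, redirect_to="/dashboard/")
```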
def dict_to_ddb(item):
# type: (Dict[str, Any]) -> Dict[str, Any]
# TODO: narrow these types down
"""Converts a native Python dictionary to a raw DynamoDB item.
:param dict item: Native item
:returns: DynamoDB item
:rtype: dict
"""
serializer = TypeSerializer()
return {key: serializer.serialize(value) for key, value in item.items()} | Converts a native Python dictionary to a raw DynamoDB item.
:param dict item: Native item
:returns: DynamoDB item
:rtype: dict | Below is the instruction that describes the task:
### Input:
Converts a native Python dictionary to a raw DynamoDB item.
:param dict item: Native item
:returns: DynamoDB item
:rtype: dict
### Response:
def dict_to_ddb(item):
# type: (Dict[str, Any]) -> Dict[str, Any]
# TODO: narrow these types down
"""Converts a native Python dictionary to a raw DynamoDB item.
:param dict item: Native item
:returns: DynamoDB item
:rtype: dict
"""
serializer = TypeSerializer()
return {key: serializer.serialize(value) for key, value in item.items()} |
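Assuming `TypeSerializer` here is the one from `boto3.dynamodb.types` (consistent with the raw-item output described above), the mapping works out as follows; the item contents are invented for illustration.
```python
from boto3.dynamodb.types import TypeSerializer

serializer = TypeSerializer()
item = {"pk": "user#1", "age": 30, "active": True}
ddb_item = {k: serializer.serialize(v) for k, v in item.items()}
print(ddb_item)
# {'pk': {'S': 'user#1'}, 'age': {'N': '30'}, 'active': {'BOOL': True}}
```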
def make_root(self, name): # noqa: D302
r"""
Make a sub-node the root node of the tree.
All nodes not belonging to the sub-tree are deleted
:param name: New root node name
:type name: :ref:`NodeName`
:raises:
* RuntimeError (Argument \`name\` is not valid)
* RuntimeError (Node *[name]* not in tree)
Using the same example tree created in
:py:meth:`ptrie.Trie.add_nodes`::
>>> from __future__ import print_function
>>> import docs.support.ptrie_example
>>> tobj = docs.support.ptrie_example.create_tree()
>>> print(tobj)
root
├branch1 (*)
│├leaf1
││└subleaf1 (*)
│└leaf2 (*)
│ └subleaf2
└branch2
>>> tobj.make_root('root.branch1')
>>> print(tobj)
root.branch1 (*)
├leaf1
│└subleaf1 (*)
└leaf2 (*)
└subleaf2
"""
if self._validate_node_name(name):
raise RuntimeError("Argument `name` is not valid")
if (name != self.root_name) and (self._node_in_tree(name)):
for key in [node for node in self.nodes if node.find(name) != 0]:
del self._db[key]
self._db[name]["parent"] = ""
self._root = name
self._root_hierarchy_length = len(
self.root_name.split(self._node_separator)
) | r"""
Make a sub-node the root node of the tree.
All nodes not belonging to the sub-tree are deleted
:param name: New root node name
:type name: :ref:`NodeName`
:raises:
* RuntimeError (Argument \`name\` is not valid)
* RuntimeError (Node *[name]* not in tree)
Using the same example tree created in
:py:meth:`ptrie.Trie.add_nodes`::
>>> from __future__ import print_function
>>> import docs.support.ptrie_example
>>> tobj = docs.support.ptrie_example.create_tree()
>>> print(tobj)
root
├branch1 (*)
│├leaf1
││└subleaf1 (*)
│└leaf2 (*)
│ └subleaf2
└branch2
>>> tobj.make_root('root.branch1')
>>> print(tobj)
root.branch1 (*)
├leaf1
│└subleaf1 (*)
└leaf2 (*)
└subleaf2 | Below is the instruction that describes the task:
### Input:
r"""
Make a sub-node the root node of the tree.
All nodes not belonging to the sub-tree are deleted
:param name: New root node name
:type name: :ref:`NodeName`
:raises:
* RuntimeError (Argument \`name\` is not valid)
* RuntimeError (Node *[name]* not in tree)
Using the same example tree created in
:py:meth:`ptrie.Trie.add_nodes`::
>>> from __future__ import print_function
>>> import docs.support.ptrie_example
>>> tobj = docs.support.ptrie_example.create_tree()
>>> print(tobj)
root
├branch1 (*)
│├leaf1
││└subleaf1 (*)
│└leaf2 (*)
│ └subleaf2
└branch2
>>> tobj.make_root('root.branch1')
>>> print(tobj)
root.branch1 (*)
├leaf1
│└subleaf1 (*)
└leaf2 (*)
└subleaf2
### Response:
def make_root(self, name): # noqa: D302
r"""
Make a sub-node the root node of the tree.
All nodes not belonging to the sub-tree are deleted
:param name: New root node name
:type name: :ref:`NodeName`
:raises:
* RuntimeError (Argument \`name\` is not valid)
* RuntimeError (Node *[name]* not in tree)
Using the same example tree created in
:py:meth:`ptrie.Trie.add_nodes`::
>>> from __future__ import print_function
>>> import docs.support.ptrie_example
>>> tobj = docs.support.ptrie_example.create_tree()
>>> print(tobj)
root
├branch1 (*)
│├leaf1
││└subleaf1 (*)
│└leaf2 (*)
│ └subleaf2
└branch2
>>> tobj.make_root('root.branch1')
>>> print(tobj)
root.branch1 (*)
├leaf1
│└subleaf1 (*)
└leaf2 (*)
└subleaf2
"""
if self._validate_node_name(name):
raise RuntimeError("Argument `name` is not valid")
if (name != self.root_name) and (self._node_in_tree(name)):
for key in [node for node in self.nodes if node.find(name) != 0]:
del self._db[key]
self._db[name]["parent"] = ""
self._root = name
self._root_hierarchy_length = len(
self.root_name.split(self._node_separator)
) |
def create_object(self, api, metadata=None):
"""
Create an object using the CDSTAR API, with the file content as bitstream.
:param api:
:return:
"""
metadata = {k: v for k, v in (metadata or {}).items()}
metadata.setdefault('creator', '{0.__name__} {0.__version__}'.format(pycdstar))
metadata.setdefault('path', '%s' % self.path)
metadata.update(self.add_metadata())
bitstream_specs = [self] + self.add_bitstreams()
obj = api.get_object()
res = {}
try:
obj.metadata = metadata
for file_ in bitstream_specs:
res[file_.bitstream_type] = file_.add_as_bitstream(obj)
except: # noqa: E722
obj.delete()
raise
return obj, metadata, res | Create an object using the CDSTAR API, with the file content as bitstream.
:param api:
:return: | Below is the instruction that describes the task:
### Input:
Create an object using the CDSTAR API, with the file content as bitstream.
:param api:
:return:
### Response:
def create_object(self, api, metadata=None):
"""
Create an object using the CDSTAR API, with the file content as bitstream.
:param api:
:return:
"""
metadata = {k: v for k, v in (metadata or {}).items()}
metadata.setdefault('creator', '{0.__name__} {0.__version__}'.format(pycdstar))
metadata.setdefault('path', '%s' % self.path)
metadata.update(self.add_metadata())
bitstream_specs = [self] + self.add_bitstreams()
obj = api.get_object()
res = {}
try:
obj.metadata = metadata
for file_ in bitstream_specs:
res[file_.bitstream_type] = file_.add_as_bitstream(obj)
except: # noqa: E722
obj.delete()
raise
return obj, metadata, res |
def text_to_bool(value: str) -> bool:
"""
Tries to convert a text value to a bool. If unsuccessful, returns whether the value is not None
:param value: Value to check
"""
try:
return bool(strtobool(value))
except (ValueError, AttributeError):
return value is not None | Tries to convert a text value to a bool. If unsuccessful, returns whether the value is not None
:param value: Value to check | Below is the instruction that describes the task:
### Input:
Tries to convert a text value to a bool. If unsuccessful, returns whether the value is not None
:param value: Value to check
### Response:
def text_to_bool(value: str) -> bool:
"""
Tries to convert a text value to a bool. If unsuccessful, returns whether the value is not None
:param value: Value to check
"""
try:
return bool(strtobool(value))
except (ValueError, AttributeError):
return value is not None |
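Assuming `strtobool` comes from `distutils.util` (deprecated since Python 3.12, so a vendored copy may be needed on newer interpreters), the fallback behaviour works out like this:
```python
from distutils.util import strtobool

def text_to_bool(value: str) -> bool:
    try:
        return bool(strtobool(value))
    except (ValueError, AttributeError):
        return value is not None

print(text_to_bool("yes"))    # True  -- recognised by strtobool
print(text_to_bool("0"))      # False
print(text_to_bool("maybe"))  # True  -- ValueError, but value is not None
print(text_to_bool(None))     # False -- AttributeError from None.lower()
```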
def S_isothermal_pipe_to_two_planes(D, Z, L=1.):
r'''Returns the Shape factor `S` of a pipe of constant outer temperature
and of outer diameter `D` which is `Z` distance from two infinite
isothermal planes of equal temperatures, parallel to each other and
enclosing the pipe. Length `L` must be provided, but can be set to
1 to obtain a dimensionless shape factor used in some sources.
.. math::
S = \frac{2\pi L}{\ln\frac{8z}{\pi D}}
Parameters
----------
D : float
Diameter of the pipe, [m]
Z : float
Distance from the middle of the pipe to either of the planes, [m]
L : float, optional
Length of the pipe, [m]
Returns
-------
S : float
Shape factor [m]
Examples
--------
>>> S_isothermal_pipe_to_two_planes(.1, 5, 1)
1.2963749299921428
Notes
-----
L should be much larger than both diameters. L should be larger than W.
.. math::
Q = Sk(T_1 - T_2) \\ R_{\text{shape}}=\frac{1}{Sk}
References
----------
.. [1] Shape Factors for Heat Conduction Through Bodies with Isothermal or
Convective Boundary Conditions, J. E. Sunderland, K. R. Johnson, ASHRAE
Transactions, Vol. 70, 1964.
.. [2] Bergman, Theodore L., Adrienne S. Lavine, Frank P. Incropera, and
David P. DeWitt. Introduction to Heat Transfer. 6E. Hoboken, NJ:
Wiley, 2011.
'''
return 2.*pi*L/log(8.*Z/(pi*D)) | Returns the Shape factor `S` of a pipe of constant outer temperature
and of outer diameter `D` which is `Z` distance from two infinite
isothermal planes of equal temperatures, parallel to each other and
enclosing the pipe. Length `L` must be provided, but can be set to
1 to obtain a dimensionless shape factor used in some sources.
.. math::
S = \frac{2\pi L}{\ln\frac{8z}{\pi D}}
Parameters
----------
D : float
Diameter of the pipe, [m]
Z : float
Distance from the middle of the pipe to either of the planes, [m]
L : float, optional
Length of the pipe, [m]
Returns
-------
S : float
Shape factor [m]
Examples
--------
>>> S_isothermal_pipe_to_two_planes(.1, 5, 1)
1.2963749299921428
Notes
-----
L should be much larger than both diameters. L should be larger than W.
.. math::
Q = Sk(T_1 - T_2) \\ R_{\text{shape}}=\frac{1}{Sk}
References
----------
.. [1] Shape Factors for Heat Conduction Through Bodies with Isothermal or
Convective Boundary Conditions, J. E. Sunderland, K. R. Johnson, ASHRAE
Transactions, Vol. 70, 1964.
.. [2] Bergman, Theodore L., Adrienne S. Lavine, Frank P. Incropera, and
David P. DeWitt. Introduction to Heat Transfer. 6E. Hoboken, NJ:
Wiley, 2011. | Below is the instruction that describes the task:
### Input:
Returns the Shape factor `S` of a pipe of constant outer temperature
and of outer diameter `D` which is `Z` distance from two infinite
isothermal planes of equal temperatures, parallel to each other and
enclosing the pipe. Length `L` must be provided, but can be set to
1 to obtain a dimensionless shape factor used in some sources.
.. math::
S = \frac{2\pi L}{\ln\frac{8z}{\pi D}}
Parameters
----------
D : float
Diameter of the pipe, [m]
Z : float
Distance from the middle of the pipe to either of the planes, [m]
L : float, optional
Length of the pipe, [m]
Returns
-------
S : float
Shape factor [m]
Examples
--------
>>> S_isothermal_pipe_to_two_planes(.1, 5, 1)
1.2963749299921428
Notes
-----
L should be much larger than both diameters. L should be larger than W.
.. math::
Q = Sk(T_1 - T_2) \\ R_{\text{shape}}=\frac{1}{Sk}
References
----------
.. [1] Shape Factors for Heat Conduction Through Bodies with Isothermal or
Convective Boundary Conditions, J. E. Sunderland, K. R. Johnson, ASHRAE
Transactions, Vol. 70, 1964.
.. [2] Bergman, Theodore L., Adrienne S. Lavine, Frank P. Incropera, and
David P. DeWitt. Introduction to Heat Transfer. 6E. Hoboken, NJ:
Wiley, 2011.
### Response:
def S_isothermal_pipe_to_two_planes(D, Z, L=1.):
r'''Returns the Shape factor `S` of a pipe of constant outer temperature
and of outer diameter `D` which is `Z` distance from two infinite
isothermal planes of equal temperatures, parallel to each other and
enclosing the pipe. Length `L` must be provided, but can be set to
1 to obtain a dimensionless shape factor used in some sources.
.. math::
S = \frac{2\pi L}{\ln\frac{8z}{\pi D}}
Parameters
----------
D : float
Diameter of the pipe, [m]
Z : float
Distance from the middle of the pipe to either of the planes, [m]
L : float, optional
Length of the pipe, [m]
Returns
-------
S : float
Shape factor [m]
Examples
--------
>>> S_isothermal_pipe_to_two_planes(.1, 5, 1)
1.2963749299921428
Notes
-----
L should be much larger than both diameters. L should be larger than W.
.. math::
Q = Sk(T_1 - T_2) \\ R_{\text{shape}}=\frac{1}{Sk}
References
----------
.. [1] Shape Factors for Heat Conduction Through Bodies with Isothermal or
Convective Boundary Conditions, J. E. Sunderland, K. R. Johnson, ASHRAE
Transactions, Vol. 70, 1964.
.. [2] Bergman, Theodore L., Adrienne S. Lavine, Frank P. Incropera, and
David P. DeWitt. Introduction to Heat Transfer. 6E. Hoboken, NJ:
Wiley, 2011.
'''
return 2.*pi*L/log(8.*Z/(pi*D)) |
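The Notes give Q = S k (T1 - T2); a quick check using the docstring's example geometry together with a hypothetical conductivity and temperature pair (the 0.7 W/m/K and the temperatures below are invented, not from the source):
```python
from math import pi, log

def S_isothermal_pipe_to_two_planes(D, Z, L=1.):
    return 2.*pi*L/log(8.*Z/(pi*D))

S = S_isothermal_pipe_to_two_planes(.1, 5, 1)   # 1.2963749299921428 m
k = 0.7                                          # hypothetical conductivity, W/m/K
T1, T2 = 70.0, 10.0                              # hypothetical temperatures, degC
Q = S*k*(T1 - T2)
print(round(Q, 1))                               # ~54.4 W conducted to the planes
```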
def get_window():
"""Get IDA's top level window."""
tform = idaapi.get_current_tform()
# Required sometimes when closing IDBs and not IDA.
if not tform:
tform = idaapi.find_tform("Output window")
widget = form_to_widget(tform)
window = widget.window()
return window | Get IDA's top level window. | Below is the instruction that describes the task:
### Input:
Get IDA's top level window.
### Response:
def get_window():
"""Get IDA's top level window."""
tform = idaapi.get_current_tform()
# Required sometimes when closing IDBs and not IDA.
if not tform:
tform = idaapi.find_tform("Output window")
widget = form_to_widget(tform)
window = widget.window()
return window |
def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'key') and self.key is not None:
_dict['key'] = self.key._to_dict()
if hasattr(self, 'value') and self.value is not None:
_dict['value'] = self.value._to_dict()
return _dict | Return a json dictionary representing this model. | Below is the instruction that describes the task:
### Input:
Return a json dictionary representing this model.
### Response:
def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'key') and self.key is not None:
_dict['key'] = self.key._to_dict()
if hasattr(self, 'value') and self.value is not None:
_dict['value'] = self.value._to_dict()
return _dict |
def union(self, *queries):
'''Return a new :class:`Query` obtained from the union of this
:class:`Query` with one or more *queries*.
For example, let's say we want to have the union
of two queries obtained from the :meth:`filter` method::
query = session.query(MyModel)
qs = query.filter(field1 = 'bla').union(query.filter(field2 = 'foo'))
'''
q = self._clone()
q.unions += queries
return q | Return a new :class:`Query` obtained from the union of this
:class:`Query` with one or more *queries*.
For example, let's say we want to have the union
of two queries obtained from the :meth:`filter` method::
query = session.query(MyModel)
qs = query.filter(field1 = 'bla').union(query.filter(field2 = 'foo')) | Below is the instruction that describes the task:
### Input:
Return a new :class:`Query` obtained from the union of this
:class:`Query` with one or more *queries*.
For example, let's say we want to have the union
of two queries obtained from the :meth:`filter` method::
query = session.query(MyModel)
qs = query.filter(field1 = 'bla').union(query.filter(field2 = 'foo'))
### Response:
def union(self, *queries):
'''Return a new :class:`Query` obtained from the union of this
:class:`Query` with one or more *queries*.
For example, let's say we want to have the union
of two queries obtained from the :meth:`filter` method::
query = session.query(MyModel)
qs = query.filter(field1 = 'bla').union(query.filter(field2 = 'foo'))
'''
q = self._clone()
q.unions += queries
return q |
def __get_chunk_dimensions(self):
""" Sets the chunking dimmentions depending on the file type.
"""
#Usually '.0000.' is in self.filename
if np.abs(self.header[b'foff']) < 1e-5:
logger.info('Detecting high frequency resolution data.')
chunk_dim = (1,1,1048576) #1048576 is the number of channels in a coarse channel.
return chunk_dim
#Usually '.0001.' is in self.filename
elif np.abs(self.header[b'tsamp']) < 1e-3:
logger.info('Detecting high time resolution data.')
chunk_dim = (2048,1,512) #512 is the total number of channels per single band (ie. blc00)
return chunk_dim
#Usually '.0002.' is in self.filename
elif np.abs(self.header[b'foff']) < 1e-2 and np.abs(self.header[b'foff']) >= 1e-5:
logger.info('Detecting intermediate frequency and time resolution data.')
chunk_dim = (10,1,65536) #65536 is the total number of channels per single band (ie. blc00)
# chunk_dim = (1,1,65536/4)
return chunk_dim
else:
logger.warning('File format not known. Will use minimum chunking. NOT OPTIMAL.')
chunk_dim = (1,1,512)
return chunk_dim | Sets the chunking dimensions depending on the file type. | Below is the instruction that describes the task:
### Input:
Sets the chunking dimensions depending on the file type.
### Response:
def __get_chunk_dimensions(self):
""" Sets the chunking dimmentions depending on the file type.
"""
#Usually '.0000.' is in self.filename
if np.abs(self.header[b'foff']) < 1e-5:
logger.info('Detecting high frequency resolution data.')
chunk_dim = (1,1,1048576) #1048576 is the number of channels in a coarse channel.
return chunk_dim
#Usually '.0001.' is in self.filename
elif np.abs(self.header[b'tsamp']) < 1e-3:
logger.info('Detecting high time resolution data.')
chunk_dim = (2048,1,512) #512 is the total number of channels per single band (ie. blc00)
return chunk_dim
#Usually '.0002.' is in self.filename
elif np.abs(self.header[b'foff']) < 1e-2 and np.abs(self.header[b'foff']) >= 1e-5:
logger.info('Detecting intermediate frequency and time resolution data.')
chunk_dim = (10,1,65536) #65536 is the total number of channels per single band (ie. blc00)
# chunk_dim = (1,1,65536/4)
return chunk_dim
else:
logger.warning('File format not known. Will use minimum chunking. NOT OPTIMAL.')
chunk_dim = (1,1,512)
return chunk_dim |
def sign(self, message):
"""
Generates a signature for the supplied message using NTLM2 Session Security
Note: [MS-NLMP] Section 3.4.4
The message signature for NTLM with extended session security is a 16-byte value that contains the following
components, as described by the NTLMSSP_MESSAGE_SIGNATURE structure:
- A 4-byte version-number value that is set to 1
- The first eight bytes of the message's HMAC_MD5
- The 4-byte sequence number (SeqNum)
:param message: The message to be signed
:return: The signature for supplied message
"""
hmac_context = hmac.new(self.outgoing_signing_key)
hmac_context.update(struct.pack('<i', self.outgoing_sequence) + message)
# If a key exchange key is negotiated the first 8 bytes of the HMAC MD5 are encrypted with RC4
if self.key_exchange:
checksum = self.outgoing_seal.update(hmac_context.digest()[:8])
else:
checksum = hmac_context.digest()[:8]
mac = _Ntlm2MessageSignature()
mac['checksum'] = struct.unpack('<q', checksum)[0]
mac['sequence'] = self.outgoing_sequence
#logger.debug("Signing Sequence Number: %s", str(self.outgoing_sequence))
# Increment the sequence number after signing each message
self.outgoing_sequence += 1
return str(mac) | Generates a signature for the supplied message using NTLM2 Session Security
Note: [MS-NLMP] Section 3.4.4
The message signature for NTLM with extended session security is a 16-byte value that contains the following
components, as described by the NTLMSSP_MESSAGE_SIGNATURE structure:
- A 4-byte version-number value that is set to 1
- The first eight bytes of the message's HMAC_MD5
- The 4-byte sequence number (SeqNum)
:param message: The message to be signed
:return: The signature for supplied message | Below is the instruction that describes the task:
### Input:
Generates a signature for the supplied message using NTLM2 Session Security
Note: [MS-NLMP] Section 3.4.4
The message signature for NTLM with extended session security is a 16-byte value that contains the following
components, as described by the NTLMSSP_MESSAGE_SIGNATURE structure:
- A 4-byte version-number value that is set to 1
- The first eight bytes of the message's HMAC_MD5
- The 4-byte sequence number (SeqNum)
:param message: The message to be signed
:return: The signature for supplied message
### Response:
def sign(self, message):
"""
Generates a signature for the supplied message using NTLM2 Session Security
Note: [MS-NLMP] Section 3.4.4
The message signature for NTLM with extended session security is a 16-byte value that contains the following
components, as described by the NTLMSSP_MESSAGE_SIGNATURE structure:
- A 4-byte version-number value that is set to 1
- The first eight bytes of the message's HMAC_MD5
- The 4-byte sequence number (SeqNum)
:param message: The message to be signed
:return: The signature for supplied message
"""
hmac_context = hmac.new(self.outgoing_signing_key)
hmac_context.update(struct.pack('<i', self.outgoing_sequence) + message)
# If a key exchange key is negotiated the first 8 bytes of the HMAC MD5 are encrypted with RC4
if self.key_exchange:
checksum = self.outgoing_seal.update(hmac_context.digest()[:8])
else:
checksum = hmac_context.digest()[:8]
mac = _Ntlm2MessageSignature()
mac['checksum'] = struct.unpack('<q', checksum)[0]
mac['sequence'] = self.outgoing_sequence
#logger.debug("Signing Sequence Number: %s", str(self.outgoing_sequence))
# Increment the sequence number after signing each message
self.outgoing_sequence += 1
return str(mac) |
def download(url, file_name):
r = requests.get(url, stream=True)
file_size = int(r.headers['Content-length'])
'''
if py3:
file_size = int(u.getheader("Content-Length")[0])
else:
file_size = int(u.info().getheaders("Content-Length")[0])
'''
file_exists = False
if os.path.isfile(file_name):
local_file_size = os.path.getsize(file_name)
if local_file_size == file_size:
sha1_file = file_name + '.sha1'
if os.path.isfile(sha1_file):
print('sha1 found')
with open(sha1_file) as f:
expected_sha1 = f.read()
BLOCKSIZE = 65536
sha1 = hashlib.sha1()
with open(file_name, 'rb') as f:
buff = f.read(BLOCKSIZE)
while len(buff) > 0:
sha1.update(buff)
buff = f.read(BLOCKSIZE)
if expected_sha1 == sha1.hexdigest():
file_exists = True
else:
print("File corrupt. Downloading again.")
os.remove(file_name)
else:
file_exists = True
else:
print("File corrupt. Downloading again.")
os.remove(file_name)
if not file_exists:
factor = int(math.floor(math.log(file_size) / math.log(1024)))
display_file_size = str(file_size / 1024 ** factor) + \
['B', 'KB', 'MB', 'GB', 'TB', 'PB'][factor]
print("Source: " + url)
print("Destination " + file_name)
print("Size: " + display_file_size)
file_size_dl = 0
block_sz = 8192
f = open(file_name, 'wb')
pbar = ProgressBar(file_size)
for chunk in r.iter_content(chunk_size=block_sz):
if not chunk:
continue
chunk_size = len(chunk)
file_size_dl += chunk_size
f.write(chunk)
pbar.update(chunk_size)
# status = r"%10d [%3.2f%%]" % (file_size_dl, file_size_dl * 100. / file_size)
# status = status + chr(8)*(len(status)+1)
# print(status)
f.close()
else:
print("File already exists - " + file_name)
return True | if py3:
file_size = int(u.getheader("Content-Length")[0])
else:
file_size = int(u.info().getheaders("Content-Length")[0]) | Below is the instruction that describes the task:
### Input:
if py3:
file_size = int(u.getheader("Content-Length")[0])
else:
file_size = int(u.info().getheaders("Content-Length")[0])
### Response:
def download(url, file_name):
r = requests.get(url, stream=True)
file_size = int(r.headers['Content-length'])
'''
if py3:
file_size = int(u.getheader("Content-Length")[0])
else:
file_size = int(u.info().getheaders("Content-Length")[0])
'''
file_exists = False
if os.path.isfile(file_name):
local_file_size = os.path.getsize(file_name)
if local_file_size == file_size:
sha1_file = file_name + '.sha1'
if os.path.isfile(sha1_file):
print('sha1 found')
with open(sha1_file) as f:
expected_sha1 = f.read()
BLOCKSIZE = 65536
sha1 = hashlib.sha1()
with open(file_name, 'rb') as f:
buff = f.read(BLOCKSIZE)
while len(buff) > 0:
sha1.update(buff)
buff = f.read(BLOCKSIZE)
if expected_sha1 == sha1.hexdigest():
file_exists = True
else:
print("File corrupt. Downloading again.")
os.remove(file_name)
else:
file_exists = True
else:
print("File corrupt. Downloading again.")
os.remove(file_name)
if not file_exists:
factor = int(math.floor(math.log(file_size) / math.log(1024)))
display_file_size = str(file_size / 1024 ** factor) + \
['B', 'KB', 'MB', 'GB', 'TB', 'PB'][factor]
print("Source: " + url)
print("Destination " + file_name)
print("Size: " + display_file_size)
file_size_dl = 0
block_sz = 8192
f = open(file_name, 'wb')
pbar = ProgressBar(file_size)
for chunk in r.iter_content(chunk_size=block_sz):
if not chunk:
continue
chunk_size = len(chunk)
file_size_dl += chunk_size
f.write(chunk)
pbar.update(chunk_size)
# status = r"%10d [%3.2f%%]" % (file_size_dl, file_size_dl * 100. / file_size)
# status = status + chr(8)*(len(status)+1)
# print(status)
f.close()
else:
print("File already exists - " + file_name)
return True |
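A hedged driver for the function above: the body additionally needs `requests`, `os`, `math`, and `hashlib` in scope plus a `ProgressBar` class, none of which appear in the snippet, so a minimal stand-in is sketched here and the URL is a placeholder.
```python
import requests, os, math, hashlib  # modules the function body relies on

class ProgressBar:
    """Minimal stand-in for the ProgressBar the snippet assumes."""
    def __init__(self, total):
        self.total, self.done = total, 0
    def update(self, n):
        self.done += n
        print("\rdownloaded %d/%d bytes" % (self.done, self.total), end="")

download("https://www.example.com/index.html", "index.html")  # placeholder URL
```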
def clear(self, color: Tuple[int, int, int]) -> None:
"""Fill this entire Image with color.
Args:
color (Union[Tuple[int, int, int], Sequence[int]]):
An (r, g, b) sequence or Color instance.
"""
lib.TCOD_image_clear(self.image_c, color) | Fill this entire Image with color.
Args:
color (Union[Tuple[int, int, int], Sequence[int]]):
An (r, g, b) sequence or Color instance. | Below is the instruction that describes the task:
### Input:
Fill this entire Image with color.
Args:
color (Union[Tuple[int, int, int], Sequence[int]]):
An (r, g, b) sequence or Color instance.
### Response:
def clear(self, color: Tuple[int, int, int]) -> None:
"""Fill this entire Image with color.
Args:
color (Union[Tuple[int, int, int], Sequence[int]]):
An (r, g, b) sequence or Color instance.
"""
lib.TCOD_image_clear(self.image_c, color) |
def add_segmented_colorbar(da, colors, direction):
"""
Add 'non-rastered' colorbar to DrawingArea
"""
nbreak = len(colors)
if direction == 'vertical':
linewidth = da.height/nbreak
verts = [None] * nbreak
x1, x2 = 0, da.width
for i, color in enumerate(colors):
y1 = i * linewidth
y2 = y1 + linewidth
verts[i] = ((x1, y1), (x1, y2), (x2, y2), (x2, y1))
else:
linewidth = da.width/nbreak
verts = [None] * nbreak
y1, y2 = 0, da.height
for i, color in enumerate(colors):
x1 = i * linewidth
x2 = x1 + linewidth
verts[i] = ((x1, y1), (x1, y2), (x2, y2), (x2, y1))
coll = mcoll.PolyCollection(verts,
facecolors=colors,
linewidth=0,
antialiased=False)
da.add_artist(coll) | Add 'non-rastered' colorbar to DrawingArea | Below is the instruction that describes the task:
### Input:
Add 'non-rastered' colorbar to DrawingArea
### Response:
def add_segmented_colorbar(da, colors, direction):
"""
Add 'non-rastered' colorbar to DrawingArea
"""
nbreak = len(colors)
if direction == 'vertical':
linewidth = da.height/nbreak
verts = [None] * nbreak
x1, x2 = 0, da.width
for i, color in enumerate(colors):
y1 = i * linewidth
y2 = y1 + linewidth
verts[i] = ((x1, y1), (x1, y2), (x2, y2), (x2, y1))
else:
linewidth = da.width/nbreak
verts = [None] * nbreak
y1, y2 = 0, da.height
for i, color in enumerate(colors):
x1 = i * linewidth
x2 = x1 + linewidth
verts[i] = ((x1, y1), (x1, y2), (x2, y2), (x2, y1))
coll = mcoll.PolyCollection(verts,
facecolors=colors,
linewidth=0,
antialiased=False)
da.add_artist(coll) |
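A hedged matplotlib sketch placing the segmented bar into a figure; the hex colors are arbitrary values chosen for illustration, and `mcoll` is imported here because the function body references it.
```python
import matplotlib.pyplot as plt
import matplotlib.collections as mcoll
from matplotlib.offsetbox import DrawingArea, AnnotationBbox

fig, ax = plt.subplots()
da = DrawingArea(20, 100)                      # width x height in points
colors = ['#440154', '#3b528b', '#21918c', '#5ec962', '#fde725']
add_segmented_colorbar(da, colors, 'vertical')
ax.add_artist(AnnotationBbox(da, (0.5, 0.5)))  # anchor the bar mid-axes
plt.show()
```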
def move_to_collection(self, source_collection, destination_collection):
"""Move entities from source to destination collection."""
for entity in self:
entity.move_to_collection(source_collection, destination_collection) | Move entities from source to destination collection. | Below is the instruction that describes the task:
### Input:
Move entities from source to destination collection.
### Response:
def move_to_collection(self, source_collection, destination_collection):
"""Move entities from source to destination collection."""
for entity in self:
entity.move_to_collection(source_collection, destination_collection) |
def subject_area(soup):
"""
Find the subject areas from article-categories subject tags
"""
subject_area = []
tags = raw_parser.subject_area(soup)
for tag in tags:
subject_area.append(node_text(tag))
return subject_area | Find the subject areas from article-categories subject tags | Below is the instruction that describes the task:
### Input:
Find the subject areas from article-categories subject tags
### Response:
def subject_area(soup):
"""
Find the subject areas from article-categories subject tags
"""
subject_area = []
tags = raw_parser.subject_area(soup)
for tag in tags:
subject_area.append(node_text(tag))
return subject_area |
def roster(opts, runner=None, utils=None, whitelist=None):
'''
Returns the roster modules
'''
return LazyLoader(
_module_dirs(opts, 'roster'),
opts,
tag='roster',
whitelist=whitelist,
pack={
'__runner__': runner,
'__utils__': utils,
},
) | Returns the roster modules | Below is the instruction that describes the task:
### Input:
Returns the roster modules
### Response:
def roster(opts, runner=None, utils=None, whitelist=None):
'''
Returns the roster modules
'''
return LazyLoader(
_module_dirs(opts, 'roster'),
opts,
tag='roster',
whitelist=whitelist,
pack={
'__runner__': runner,
'__utils__': utils,
},
) |
def from_json(data):
"""Decode event encoded as JSON by processor"""
parsed_data = json.loads(data)
trigger = TriggerInfo(
parsed_data['trigger']['class'],
parsed_data['trigger']['kind'],
)
# extract content type, needed to decode body
content_type = parsed_data['content_type']
return Event(body=Event.decode_body(parsed_data['body'], content_type),
content_type=content_type,
trigger=trigger,
fields=parsed_data.get('fields'),
headers=parsed_data.get('headers'),
_id=parsed_data['id'],
method=parsed_data['method'],
path=parsed_data['path'],
size=parsed_data['size'],
timestamp=datetime.datetime.utcfromtimestamp(parsed_data['timestamp']),
url=parsed_data['url'],
_type=parsed_data['type'],
type_version=parsed_data['type_version'],
version=parsed_data['version']) | Decode event encoded as JSON by processor | Below is the instruction that describes the task:
### Input:
Decode event encoded as JSON by processor
### Response:
def from_json(data):
"""Decode event encoded as JSON by processor"""
parsed_data = json.loads(data)
trigger = TriggerInfo(
parsed_data['trigger']['class'],
parsed_data['trigger']['kind'],
)
# extract content type, needed to decode body
content_type = parsed_data['content_type']
return Event(body=Event.decode_body(parsed_data['body'], content_type),
content_type=content_type,
trigger=trigger,
fields=parsed_data.get('fields'),
headers=parsed_data.get('headers'),
_id=parsed_data['id'],
method=parsed_data['method'],
path=parsed_data['path'],
size=parsed_data['size'],
timestamp=datetime.datetime.utcfromtimestamp(parsed_data['timestamp']),
url=parsed_data['url'],
_type=parsed_data['type'],
type_version=parsed_data['type_version'],
version=parsed_data['version']) |
def build_from_token_counts(self,
token_counts,
min_count,
num_iterations=4,
reserved_tokens=None,
max_subtoken_length=None):
"""Train a SubwordTextEncoder based on a dictionary of word counts.
Args:
token_counts: a dictionary of Unicode strings to int.
min_count: an integer - discard subtokens with lower counts.
num_iterations: an integer. how many iterations of refinement.
reserved_tokens: List of reserved tokens. The global variable
`RESERVED_TOKENS` must be a prefix of `reserved_tokens`. If this
argument is `None`, it will use `RESERVED_TOKENS`.
max_subtoken_length: Maximum length of a subtoken. If this is not set,
then the runtime and memory use of creating the vocab is quadratic in
the length of the longest token. If this is set, then it is instead
O(max_subtoken_length * length of longest token).
Raises:
ValueError: if reserved is not 0 or len(RESERVED_TOKENS). In this case, it
is not clear what the space is being reserved for, or when it will be
filled in.
"""
if reserved_tokens is None:
reserved_tokens = RESERVED_TOKENS
else:
# There is not complete freedom in replacing RESERVED_TOKENS.
for default, proposed in zip(RESERVED_TOKENS, reserved_tokens):
if default != proposed:
raise ValueError("RESERVED_TOKENS must be a prefix of "
"reserved_tokens.")
# Initialize the alphabet. Note, this must include reserved tokens or it can
# result in encoding failures.
alphabet_tokens = chain(six.iterkeys(token_counts),
[native_to_unicode(t) for t in reserved_tokens])
self._init_alphabet_from_tokens(alphabet_tokens)
# Bootstrap the initial list of subtokens with the characters from the
# alphabet plus the escaping characters.
self._init_subtokens_from_list(list(self._alphabet),
reserved_tokens=reserved_tokens)
# We build iteratively. On each iteration, we segment all the words,
# then count the resulting potential subtokens, keeping the ones
# with high enough counts for our new vocabulary.
if min_count < 1:
min_count = 1
for i in range(num_iterations):
tf.logging.info("Iteration {0}".format(i))
# Collect all substrings of the encoded token that break along current
# subtoken boundaries.
subtoken_counts = collections.defaultdict(int)
for token, count in six.iteritems(token_counts):
iter_start_time = time.time()
escaped_token = _escape_token(token, self._alphabet)
subtokens = self._escaped_token_to_subtoken_strings(escaped_token)
start = 0
for subtoken in subtokens:
last_position = len(escaped_token) + 1
if max_subtoken_length is not None:
last_position = min(last_position, start + max_subtoken_length)
for end in range(start + 1, last_position):
new_subtoken = escaped_token[start:end]
subtoken_counts[new_subtoken] += count
start += len(subtoken)
iter_time_secs = time.time() - iter_start_time
if iter_time_secs > 0.1:
tf.logging.info(u"Processing token [{0}] took {1} seconds, consider "
"setting Text2TextProblem.max_subtoken_length to a "
"smaller value.".format(token, iter_time_secs))
# Array of sets of candidate subtoken strings, by length.
len_to_subtoken_strings = []
for subtoken_string, count in six.iteritems(subtoken_counts):
lsub = len(subtoken_string)
if count >= min_count:
while len(len_to_subtoken_strings) <= lsub:
len_to_subtoken_strings.append(set())
len_to_subtoken_strings[lsub].add(subtoken_string)
# Consider the candidates longest to shortest, so that if we accept
# a longer subtoken string, we can decrement the counts of its prefixes.
new_subtoken_strings = []
for lsub in range(len(len_to_subtoken_strings) - 1, 0, -1):
subtoken_strings = len_to_subtoken_strings[lsub]
for subtoken_string in subtoken_strings:
count = subtoken_counts[subtoken_string]
if count >= min_count:
# Exclude alphabet tokens here, as they must be included later,
# explicitly, regardless of count.
if subtoken_string not in self._alphabet:
new_subtoken_strings.append((count, subtoken_string))
for l in range(1, lsub):
subtoken_counts[subtoken_string[:l]] -= count
# Include the alphabet explicitly to guarantee all strings are encodable.
new_subtoken_strings.extend((subtoken_counts.get(a, 0), a)
for a in self._alphabet)
new_subtoken_strings.sort(reverse=True)
# Reinitialize to the candidate vocabulary.
new_subtoken_strings = [subtoken for _, subtoken in new_subtoken_strings]
if reserved_tokens:
escaped_reserved_tokens = [
_escape_token(native_to_unicode(t), self._alphabet)
for t in reserved_tokens
]
new_subtoken_strings = escaped_reserved_tokens + new_subtoken_strings
self._init_subtokens_from_list(new_subtoken_strings)
tf.logging.info("vocab_size = %d" % self.vocab_size) | Train a SubwordTextEncoder based on a dictionary of word counts.
Args:
token_counts: a dictionary of Unicode strings to int.
min_count: an integer - discard subtokens with lower counts.
num_iterations: an integer. how many iterations of refinement.
reserved_tokens: List of reserved tokens. The global variable
`RESERVED_TOKENS` must be a prefix of `reserved_tokens`. If this
argument is `None`, it will use `RESERVED_TOKENS`.
max_subtoken_length: Maximum length of a subtoken. If this is not set,
then the runtime and memory use of creating the vocab is quadratic in
the length of the longest token. If this is set, then it is instead
O(max_subtoken_length * length of longest token).
Raises:
ValueError: if reserved is not 0 or len(RESERVED_TOKENS). In this case, it
is not clear what the space is being reserved for, or when it will be
filled in. | Below is the instruction that describes the task:
### Input:
Train a SubwordTextEncoder based on a dictionary of word counts.
Args:
token_counts: a dictionary of Unicode strings to int.
min_count: an integer - discard subtokens with lower counts.
num_iterations: an integer. how many iterations of refinement.
reserved_tokens: List of reserved tokens. The global variable
`RESERVED_TOKENS` must be a prefix of `reserved_tokens`. If this
argument is `None`, it will use `RESERVED_TOKENS`.
max_subtoken_length: Maximum length of a subtoken. If this is not set,
then the runtime and memory use of creating the vocab is quadratic in
the length of the longest token. If this is set, then it is instead
O(max_subtoken_length * length of longest token).
Raises:
ValueError: if reserved is not 0 or len(RESERVED_TOKENS). In this case, it
is not clear what the space is being reserved for, or when it will be
filled in.
### Response:
def build_from_token_counts(self,
token_counts,
min_count,
num_iterations=4,
reserved_tokens=None,
max_subtoken_length=None):
"""Train a SubwordTextEncoder based on a dictionary of word counts.
Args:
token_counts: a dictionary of Unicode strings to int.
min_count: an integer - discard subtokens with lower counts.
num_iterations: an integer. how many iterations of refinement.
reserved_tokens: List of reserved tokens. The global variable
`RESERVED_TOKENS` must be a prefix of `reserved_tokens`. If this
argument is `None`, it will use `RESERVED_TOKENS`.
max_subtoken_length: Maximum length of a subtoken. If this is not set,
then the runtime and memory use of creating the vocab is quadratic in
the length of the longest token. If this is set, then it is instead
O(max_subtoken_length * length of longest token).
Raises:
ValueError: if reserved is not 0 or len(RESERVED_TOKENS). In this case, it
is not clear what the space is being reserved for, or when it will be
filled in.
"""
if reserved_tokens is None:
reserved_tokens = RESERVED_TOKENS
else:
# There is not complete freedom in replacing RESERVED_TOKENS.
for default, proposed in zip(RESERVED_TOKENS, reserved_tokens):
if default != proposed:
raise ValueError("RESERVED_TOKENS must be a prefix of "
"reserved_tokens.")
# Initialize the alphabet. Note, this must include reserved tokens or it can
# result in encoding failures.
alphabet_tokens = chain(six.iterkeys(token_counts),
[native_to_unicode(t) for t in reserved_tokens])
self._init_alphabet_from_tokens(alphabet_tokens)
# Bootstrap the initial list of subtokens with the characters from the
# alphabet plus the escaping characters.
self._init_subtokens_from_list(list(self._alphabet),
reserved_tokens=reserved_tokens)
# We build iteratively. On each iteration, we segment all the words,
# then count the resulting potential subtokens, keeping the ones
# with high enough counts for our new vocabulary.
if min_count < 1:
min_count = 1
for i in range(num_iterations):
tf.logging.info("Iteration {0}".format(i))
# Collect all substrings of the encoded token that break along current
# subtoken boundaries.
subtoken_counts = collections.defaultdict(int)
for token, count in six.iteritems(token_counts):
iter_start_time = time.time()
escaped_token = _escape_token(token, self._alphabet)
subtokens = self._escaped_token_to_subtoken_strings(escaped_token)
start = 0
for subtoken in subtokens:
last_position = len(escaped_token) + 1
if max_subtoken_length is not None:
last_position = min(last_position, start + max_subtoken_length)
for end in range(start + 1, last_position):
new_subtoken = escaped_token[start:end]
subtoken_counts[new_subtoken] += count
start += len(subtoken)
iter_time_secs = time.time() - iter_start_time
if iter_time_secs > 0.1:
tf.logging.info(u"Processing token [{0}] took {1} seconds, consider "
"setting Text2TextProblem.max_subtoken_length to a "
"smaller value.".format(token, iter_time_secs))
# Array of sets of candidate subtoken strings, by length.
len_to_subtoken_strings = []
for subtoken_string, count in six.iteritems(subtoken_counts):
lsub = len(subtoken_string)
if count >= min_count:
while len(len_to_subtoken_strings) <= lsub:
len_to_subtoken_strings.append(set())
len_to_subtoken_strings[lsub].add(subtoken_string)
# Consider the candidates longest to shortest, so that if we accept
# a longer subtoken string, we can decrement the counts of its prefixes.
new_subtoken_strings = []
for lsub in range(len(len_to_subtoken_strings) - 1, 0, -1):
subtoken_strings = len_to_subtoken_strings[lsub]
for subtoken_string in subtoken_strings:
count = subtoken_counts[subtoken_string]
if count >= min_count:
# Exclude alphabet tokens here, as they must be included later,
# explicitly, regardless of count.
if subtoken_string not in self._alphabet:
new_subtoken_strings.append((count, subtoken_string))
for l in range(1, lsub):
subtoken_counts[subtoken_string[:l]] -= count
# Include the alphabet explicitly to guarantee all strings are encodable.
new_subtoken_strings.extend((subtoken_counts.get(a, 0), a)
for a in self._alphabet)
new_subtoken_strings.sort(reverse=True)
# Reinitialize to the candidate vocabulary.
new_subtoken_strings = [subtoken for _, subtoken in new_subtoken_strings]
if reserved_tokens:
escaped_reserved_tokens = [
_escape_token(native_to_unicode(t), self._alphabet)
for t in reserved_tokens
]
new_subtoken_strings = escaped_reserved_tokens + new_subtoken_strings
self._init_subtokens_from_list(new_subtoken_strings)
tf.logging.info("vocab_size = %d" % self.vocab_size) |
def add_svc_comment(self, service, author, comment):
"""Add a service comment
Format of the line that triggers function call::
ADD_SVC_COMMENT;<host_name>;<service_description>;<persistent:obsolete>;<author>;<comment>
:param service: service to add the comment
:type service: alignak.objects.service.Service
:param author: author name
:type author: str
:param comment: text comment
:type comment: str
:return: None
"""
data = {
'author': author, 'comment': comment, 'comment_type': 2, 'entry_type': 1, 'source': 1,
'expires': False, 'ref': service.uuid
}
comm = Comment(data)
service.add_comment(comm)
self.send_an_element(service.get_update_status_brok())
try:
brok = make_monitoring_log('info', "SERVICE COMMENT: %s;%s;%s;%s"
% (self.hosts[service.host].get_name(),
service.get_name(),
str(author, 'utf-8'), str(comment, 'utf-8')))
except TypeError:
brok = make_monitoring_log('info', "SERVICE COMMENT: %s;%s;%s;%s"
% (self.hosts[service.host].get_name(),
service.get_name(), author, comment))
self.send_an_element(brok)
self.send_an_element(comm.get_comment_brok(
self.hosts[service.host].get_name(), service.get_name())) | Add a service comment
Format of the line that triggers function call::
ADD_SVC_COMMENT;<host_name>;<service_description>;<persistent:obsolete>;<author>;<comment>
:param service: service to add the comment
:type service: alignak.objects.service.Service
:param author: author name
:type author: str
:param comment: text comment
:type comment: str
:return: None | Below is the instruction that describes the task:
### Input:
Add a service comment
Format of the line that triggers function call::
ADD_SVC_COMMENT;<host_name>;<service_description>;<persistent:obsolete>;<author>;<comment>
:param service: service to add the comment
:type service: alignak.objects.service.Service
:param author: author name
:type author: str
:param comment: text comment
:type comment: str
:return: None
### Response:
def add_svc_comment(self, service, author, comment):
"""Add a service comment
Format of the line that triggers function call::
ADD_SVC_COMMENT;<host_name>;<service_description>;<persistent:obsolete>;<author>;<comment>
:param service: service to add the comment
:type service: alignak.objects.service.Service
:param author: author name
:type author: str
:param comment: text comment
:type comment: str
:return: None
"""
data = {
'author': author, 'comment': comment, 'comment_type': 2, 'entry_type': 1, 'source': 1,
'expires': False, 'ref': service.uuid
}
comm = Comment(data)
service.add_comment(comm)
self.send_an_element(service.get_update_status_brok())
try:
brok = make_monitoring_log('info', "SERVICE COMMENT: %s;%s;%s;%s"
% (self.hosts[service.host].get_name(),
service.get_name(),
str(author, 'utf-8'), str(comment, 'utf-8')))
except TypeError:
brok = make_monitoring_log('info', "SERVICE COMMENT: %s;%s;%s;%s"
% (self.hosts[service.host].get_name(),
service.get_name(), author, comment))
self.send_an_element(brok)
self.send_an_element(comm.get_comment_brok(
self.hosts[service.host].get_name(), service.get_name())) |
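For orientation, here is a hedged sketch of how this handler is typically reached; the manager object, host/service names, and the ExternalCommand wiring are illustrative assumptions rather than code from the module above.

import time

# Raw external command in the documented format (the persistent flag is obsolete):
raw = "[%d] ADD_SVC_COMMENT;localhost;HTTP;1;admin;Investigating the outage" % int(time.time())
external_command_manager.resolve_command(ExternalCommand(raw))  # hypothetical manager instance

# Equivalent direct call once the Service object has been looked up:
commands_handler.add_svc_comment(service, author="admin", comment="Investigating the outage")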
def load_posts(self, post_type=None, max_pages=200, status=None):
"""
Load all WordPress posts of a given post_type from a site.
:param post_type: post, page, attachment, or any custom post type set up in the WP API
:param max_pages: kill counter to avoid infinite looping
:param status: load posts with the given status,
including any of: "publish", "private", "draft", "pending", "future", and "trash", or simply "any"
Note: non-public statuses require authentication
:return: None
"""
logger.info("loading posts with post_type=%s", post_type)
# clear them all out so we don't get dupes
if self.purge_first:
Post.objects.filter(site_id=self.site_id, post_type=post_type).delete()
path = "sites/{}/posts".format(self.site_id)
# type allows us to pull information about pages, attachments, guest-authors, etc.
# you know, posts that aren't posts... thank you WordPress!
if not post_type:
post_type = "post"
if not status:
status = "publish"
params = {"number": self.batch_size, "type": post_type, "status": status}
self.set_posts_param_modified_after(params, post_type, status)
# get first page
response = self.get(path, params)
if not response.ok:
logger.warning("Response NOT OK! status_code=%s\n%s", response.status_code, response.text)
# process all posts in the response
self.process_posts_response(response, path, params, max_pages) | Load all WordPress posts of a given post_type from a site.
:param post_type: post, page, attachment, or any custom post type set up in the WP API
:param max_pages: kill counter to avoid infinite looping
:param status: load posts with the given status,
including any of: "publish", "private", "draft", "pending", "future", and "trash", or simply "any"
Note: non-public statuses require authentication
:return: None | Below is the instruction that describes the task:
### Input:
Load all WordPress posts of a given post_type from a site.
:param post_type: post, page, attachment, or any custom post type set up in the WP API
:param max_pages: kill counter to avoid infinite looping
:param status: load posts with the given status,
including any of: "publish", "private", "draft", "pending", "future", and "trash", or simply "any"
Note: non-public statuses require authentication
:return: None
### Response:
def load_posts(self, post_type=None, max_pages=200, status=None):
"""
Load all WordPress posts of a given post_type from a site.
:param post_type: post, page, attachment, or any custom post type set up in the WP API
:param max_pages: kill counter to avoid infinite looping
:param status: load posts with the given status,
including any of: "publish", "private", "draft", "pending", "future", and "trash", or simply "any"
Note: non-public statuses require authentication
:return: None
"""
logger.info("loading posts with post_type=%s", post_type)
# clear them all out so we don't get dupes
if self.purge_first:
Post.objects.filter(site_id=self.site_id, post_type=post_type).delete()
path = "sites/{}/posts".format(self.site_id)
# type allows us to pull information about pages, attachments, guest-authors, etc.
# you know, posts that aren't posts... thank you WordPress!
if not post_type:
post_type = "post"
if not status:
status = "publish"
params = {"number": self.batch_size, "type": post_type, "status": status}
self.set_posts_param_modified_after(params, post_type, status)
# get first page
response = self.get(path, params)
if not response.ok:
logger.warning("Response NOT OK! status_code=%s\n%s", response.status_code, response.text)
# process all posts in the response
self.process_posts_response(response, path, params, max_pages) |
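A minimal driver sketch; the WordPressLoader class name and constructor arguments are assumptions, since only the method body is shown above.

loader = WordPressLoader(site_id="example.wordpress.com",  # hypothetical class/constructor
                         batch_size=100, purge_first=False)
loader.load_posts()                                  # defaults: post_type="post", status="publish"
loader.load_posts(post_type="page", status="any")    # non-public statuses need authentication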
def apply_transforms_to_points( dim, points, transformlist,
whichtoinvert=None, verbose=False ):
"""
Apply a transform list to map a pointset from one domain to
another. In registration, one computes mappings between pairs of
domains. These transforms are often a sequence of increasingly
complex maps, e.g. from translation, to rigid, to affine to
deformation. The list of such transforms is passed to this
function to interpolate one image domain into the next image
domain, as below. The order matters strongly and the user is
advised to familiarize with the standards established in examples.
Importantly, point mapping goes the opposite direction of image
mapping, for both reasons of convention and engineering.
ANTsR function: `antsApplyTransformsToPoints`
Arguments
---------
dim: integer
dimensionality of the transformation.
points: data frame
moving point set with n-points in rows of at least dim
columns - we maintain extra information in additional
columns. This should be a data frame with column names x, y, z, t.
transformlist : list of strings
list of transforms generated by ants.registration where each transform is a filename.
whichtoinvert : list of booleans (optional)
Must be same length as transformlist.
whichtoinvert[i] is True if transformlist[i] is a matrix,
and the matrix should be inverted. If transformlist[i] is a
warp field, whichtoinvert[i] must be False.
If the transform list is a matrix followed by a warp field,
whichtoinvert defaults to (True,False). Otherwise it defaults
to [False]*len(transformlist)).
verbose : boolean
Returns
-------
data frame of transformed points
Example
-------
>>> import ants
>>> fixed = ants.image_read( ants.get_ants_data('r16') )
>>> moving = ants.image_read( ants.get_ants_data('r27') )
>>> reg = ants.registration( fixed, moving, 'Affine' )
>>> d = {'x': [128, 127], 'y': [101, 111]}
>>> pts = pd.DataFrame(data=d)
>>> ptsw = ants.apply_transforms_to_points( 2, pts, reg['fwdtransforms'])
"""
if not isinstance(transformlist, (tuple, list)) and (transformlist is not None):
transformlist = [transformlist]
args = [dim, points, transformlist, whichtoinvert]
for tl_path in transformlist:
if not os.path.exists(tl_path):
raise Exception('Transform %s does not exist' % tl_path)
mytx = []
if whichtoinvert is None or (isinstance(whichtoinvert, (tuple,list)) and (sum([w is not None for w in whichtoinvert])==0)):
if (len(transformlist) == 2) and ('.mat' in transformlist[0]) and ('.mat' not in transformlist[1]):
whichtoinvert = (True, False)
else:
whichtoinvert = tuple([False]*len(transformlist))
if len(whichtoinvert) != len(transformlist):
raise ValueError('Transform list and inversion list must be the same length')
for i in range(len(transformlist)):
ismat = False
if '.mat' in transformlist[i]:
ismat = True
if whichtoinvert[i] and (not ismat):
raise ValueError('Cannot invert transform %i (%s) because it is not a matrix' % (i, transformlist[i]))
if whichtoinvert[i]:
mytx = mytx + ['-t', '[%s,1]' % (transformlist[i])]
else:
mytx = mytx + ['-t', transformlist[i]]
if dim == 2:
pointsSub = points[['x','y']]
if dim == 3:
pointsSub = points[['x','y','z']]
if dim == 4:
pointsSub = points[['x','y','z','t']]
pointImage = core.make_image( pointsSub.shape, pointsSub.values.flatten())
pointsOut = pointImage.clone()
args = ['-d', dim,
'-i', pointImage,
'-o', pointsOut ]
args = args + mytx
myargs = utils._int_antsProcessArguments(args)
myverb = int(verbose)
if verbose:
print(myargs)
processed_args = myargs + [ '-f', str(1), '--precision', str(0)]
libfn = utils.get_lib_fn('antsApplyTransformsToPoints')
libfn(processed_args)
mynp = pointsOut.numpy()
pointsOutDF = points.copy()
pointsOutDF['x'] = mynp[:,0]
if dim >= 2:
pointsOutDF['y'] = mynp[:,1]
if dim >= 3:
pointsOutDF['z'] = mynp[:,2]
if dim >= 4:
pointsOutDF['t'] = mynp[:,3]
return pointsOutDF | Apply a transform list to map a pointset from one domain to
another. In registration, one computes mappings between pairs of
domains. These transforms are often a sequence of increasingly
complex maps, e.g. from translation, to rigid, to affine to
deformation. The list of such transforms is passed to this
function to interpolate one image domain into the next image
domain, as below. The order matters strongly and the user is
advised to familiarize with the standards established in examples.
Importantly, point mapping goes the opposite direction of image
mapping, for both reasons of convention and engineering.
ANTsR function: `antsApplyTransformsToPoints`
Arguments
---------
dim: integer
dimensionality of the transformation.
points: data frame
moving point set with n-points in rows of at least dim
columns - we maintain extra information in additional
columns. This should be a data frame with column names x, y, z, t.
transformlist : list of strings
list of transforms generated by ants.registration where each transform is a filename.
whichtoinvert : list of booleans (optional)
Must be same length as transformlist.
whichtoinvert[i] is True if transformlist[i] is a matrix,
and the matrix should be inverted. If transformlist[i] is a
warp field, whichtoinvert[i] must be False.
If the transform list is a matrix followed by a warp field,
whichtoinvert defaults to (True,False). Otherwise it defaults
to [False]*len(transformlist)).
verbose : boolean
Returns
-------
data frame of transformed points
Example
-------
>>> import ants
>>> fixed = ants.image_read( ants.get_ants_data('r16') )
>>> moving = ants.image_read( ants.get_ants_data('r27') )
>>> reg = ants.registration( fixed, moving, 'Affine' )
>>> d = {'x': [128, 127], 'y': [101, 111]}
>>> pts = pd.DataFrame(data=d)
>>> ptsw = ants.apply_transforms_to_points( 2, pts, reg['fwdtransforms']) | Below is the instruction that describes the task:
### Input:
Apply a transform list to map a pointset from one domain to
another. In registration, one computes mappings between pairs of
domains. These transforms are often a sequence of increasingly
complex maps, e.g. from translation, to rigid, to affine to
deformation. The list of such transforms is passed to this
function to interpolate one image domain into the next image
domain, as below. The order matters strongly and the user is
advised to familiarize with the standards established in examples.
Importantly, point mapping goes the opposite direction of image
mapping, for both reasons of convention and engineering.
ANTsR function: `antsApplyTransformsToPoints`
Arguments
---------
dim: integer
dimensionality of the transformation.
points: data frame
moving point set with n-points in rows of at least dim
columns - we maintain extra information in additional
columns. This should be a data frame with column names x, y, z, t.
transformlist : list of strings
list of transforms generated by ants.registration where each transform is a filename.
whichtoinvert : list of booleans (optional)
Must be same length as transformlist.
whichtoinvert[i] is True if transformlist[i] is a matrix,
and the matrix should be inverted. If transformlist[i] is a
warp field, whichtoinvert[i] must be False.
If the transform list is a matrix followed by a warp field,
whichtoinvert defaults to (True,False). Otherwise it defaults
to [False]*len(transformlist)).
verbose : boolean
Returns
-------
data frame of transformed points
Example
-------
>>> import ants
>>> fixed = ants.image_read( ants.get_ants_data('r16') )
>>> moving = ants.image_read( ants.get_ants_data('r27') )
>>> reg = ants.registration( fixed, moving, 'Affine' )
>>> d = {'x': [128, 127], 'y': [101, 111]}
>>> pts = pd.DataFrame(data=d)
>>> ptsw = ants.apply_transforms_to_points( 2, pts, reg['fwdtransforms'])
### Response:
def apply_transforms_to_points( dim, points, transformlist,
whichtoinvert=None, verbose=False ):
"""
Apply a transform list to map a pointset from one domain to
another. In registration, one computes mappings between pairs of
domains. These transforms are often a sequence of increasingly
complex maps, e.g. from translation, to rigid, to affine to
deformation. The list of such transforms is passed to this
function to interpolate one image domain into the next image
domain, as below. The order matters strongly and the user is
advised to familiarize with the standards established in examples.
Importantly, point mapping goes the opposite direction of image
mapping, for both reasons of convention and engineering.
ANTsR function: `antsApplyTransformsToPoints`
Arguments
---------
dim: integer
dimensionality of the transformation.
points: data frame
moving point set with n-points in rows of at least dim
columns - we maintain extra information in additional
columns. This should be a data frame with column names x, y, z, t.
transformlist : list of strings
list of transforms generated by ants.registration where each transform is a filename.
whichtoinvert : list of booleans (optional)
Must be same length as transformlist.
whichtoinvert[i] is True if transformlist[i] is a matrix,
and the matrix should be inverted. If transformlist[i] is a
warp field, whichtoinvert[i] must be False.
If the transform list is a matrix followed by a warp field,
whichtoinvert defaults to (True,False). Otherwise it defaults
to [False]*len(transformlist)).
verbose : boolean
Returns
-------
data frame of transformed points
Example
-------
>>> import ants
>>> fixed = ants.image_read( ants.get_ants_data('r16') )
>>> moving = ants.image_read( ants.get_ants_data('r27') )
>>> reg = ants.registration( fixed, moving, 'Affine' )
>>> d = {'x': [128, 127], 'y': [101, 111]}
>>> pts = pd.DataFrame(data=d)
>>> ptsw = ants.apply_transforms_to_points( 2, pts, reg['fwdtransforms'])
"""
if not isinstance(transformlist, (tuple, list)) and (transformlist is not None):
transformlist = [transformlist]
args = [dim, points, transformlist, whichtoinvert]
for tl_path in transformlist:
if not os.path.exists(tl_path):
raise Exception('Transform %s does not exist' % tl_path)
mytx = []
if whichtoinvert is None or (isinstance(whichtoinvert, (tuple,list)) and (sum([w is not None for w in whichtoinvert])==0)):
if (len(transformlist) == 2) and ('.mat' in transformlist[0]) and ('.mat' not in transformlist[1]):
whichtoinvert = (True, False)
else:
whichtoinvert = tuple([False]*len(transformlist))
if len(whichtoinvert) != len(transformlist):
raise ValueError('Transform list and inversion list must be the same length')
for i in range(len(transformlist)):
ismat = False
if '.mat' in transformlist[i]:
ismat = True
if whichtoinvert[i] and (not ismat):
raise ValueError('Cannot invert transform %i (%s) because it is not a matrix' % (i, transformlist[i]))
if whichtoinvert[i]:
mytx = mytx + ['-t', '[%s,1]' % (transformlist[i])]
else:
mytx = mytx + ['-t', transformlist[i]]
if dim == 2:
pointsSub = points[['x','y']]
if dim == 3:
pointsSub = points[['x','y','z']]
if dim == 4:
pointsSub = points[['x','y','z','t']]
pointImage = core.make_image( pointsSub.shape, pointsSub.values.flatten())
pointsOut = pointImage.clone()
args = ['-d', dim,
'-i', pointImage,
'-o', pointsOut ]
args = args + mytx
myargs = utils._int_antsProcessArguments(args)
myverb = int(verbose)
if verbose:
print(myargs)
processed_args = myargs + [ '-f', str(1), '--precision', str(0)]
libfn = utils.get_lib_fn('antsApplyTransformsToPoints')
libfn(processed_args)
mynp = pointsOut.numpy()
pointsOutDF = points.copy()
pointsOutDF['x'] = mynp[:,0]
if dim >= 2:
pointsOutDF['y'] = mynp[:,1]
if dim >= 3:
pointsOutDF['z'] = mynp[:,2]
if dim >= 4:
pointsOutDF['t'] = mynp[:,3]
return pointsOutDF |
def replace_capacities(self, capacities, team_context, iteration_id):
"""ReplaceCapacities.
Replace a team's capacity
:param [TeamMemberCapacity] capacities: Team capacity to replace
:param :class:`<TeamContext> <azure.devops.v5_0.work.models.TeamContext>` team_context: The team context for the operation
:param str iteration_id: ID of the iteration
:rtype: [TeamMemberCapacity]
"""
project = None
team = None
if team_context is not None:
if team_context.project_id:
project = team_context.project_id
else:
project = team_context.project
if team_context.team_id:
team = team_context.team_id
else:
team = team_context.team
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'string')
if team is not None:
route_values['team'] = self._serialize.url('team', team, 'string')
if iteration_id is not None:
route_values['iterationId'] = self._serialize.url('iteration_id', iteration_id, 'str')
content = self._serialize.body(capacities, '[TeamMemberCapacity]')
response = self._send(http_method='PUT',
location_id='74412d15-8c1a-4352-a48d-ef1ed5587d57',
version='5.0',
route_values=route_values,
content=content)
return self._deserialize('[TeamMemberCapacity]', self._unwrap_collection(response)) | ReplaceCapacities.
Replace a team's capacity
:param [TeamMemberCapacity] capacities: Team capacity to replace
:param :class:`<TeamContext> <azure.devops.v5_0.work.models.TeamContext>` team_context: The team context for the operation
:param str iteration_id: ID of the iteration
:rtype: [TeamMemberCapacity] | Below is the instruction that describes the task:
### Input:
ReplaceCapacities.
Replace a team's capacity
:param [TeamMemberCapacity] capacities: Team capacity to replace
:param :class:`<TeamContext> <azure.devops.v5_0.work.models.TeamContext>` team_context: The team context for the operation
:param str iteration_id: ID of the iteration
:rtype: [TeamMemberCapacity]
### Response:
def replace_capacities(self, capacities, team_context, iteration_id):
"""ReplaceCapacities.
Replace a team's capacity
:param [TeamMemberCapacity] capacities: Team capacity to replace
:param :class:`<TeamContext> <azure.devops.v5_0.work.models.TeamContext>` team_context: The team context for the operation
:param str iteration_id: ID of the iteration
:rtype: [TeamMemberCapacity]
"""
project = None
team = None
if team_context is not None:
if team_context.project_id:
project = team_context.project_id
else:
project = team_context.project
if team_context.team_id:
team = team_context.team_id
else:
team = team_context.team
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'string')
if team is not None:
route_values['team'] = self._serialize.url('team', team, 'string')
if iteration_id is not None:
route_values['iterationId'] = self._serialize.url('iteration_id', iteration_id, 'str')
content = self._serialize.body(capacities, '[TeamMemberCapacity]')
response = self._send(http_method='PUT',
location_id='74412d15-8c1a-4352-a48d-ef1ed5587d57',
version='5.0',
route_values=route_values,
content=content)
return self._deserialize('[TeamMemberCapacity]', self._unwrap_collection(response)) |
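A call-site sketch, assuming an authenticated v5.0 WorkClient instance named work_client; the project, team, iteration ID, and capacity fields are placeholders.

from azure.devops.v5_0.work.models import TeamContext, TeamMemberCapacity

team_context = TeamContext(project="MyProject", team="MyTeam")
capacities = [TeamMemberCapacity(activities=[], days_off=[])]  # placeholder payload
updated = work_client.replace_capacities(
    capacities, team_context, iteration_id="a1b2c3d4-0000-0000-0000-000000000000")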
def remove(self, index=None, hash=None, keepSorted=True):
"""
Removes a particle from the simulation.
Parameters
----------
index : int, optional
Specify particle to remove by index.
hash : c_uint32 or string, optional
Specify particle to remove by hash (if a string is passed, the corresponding hash is calculated).
keepSorted : bool, optional
By default, remove preserves the order of particles in the particles array.
You might set it to False in cases with many particles and many removals to speed things up.
"""
if index is not None:
clibrebound.reb_remove(byref(self), index, keepSorted)
if hash is not None:
hash_types = c_uint32, c_uint, c_ulong
PY3 = sys.version_info[0] == 3
if PY3:
string_types = str,
int_types = int,
else:
string_types = basestring,
int_types = int, long
if isinstance(hash, string_types):
clibrebound.reb_remove_by_hash(byref(self), rebhash(hash), keepSorted)
elif isinstance(hash, int_types):
clibrebound.reb_remove_by_hash(byref(self), c_uint32(hash), keepSorted)
elif isinstance(hash, hash_types):
clibrebound.reb_remove_by_hash(byref(self), hash, keepSorted)
if hasattr(self, '_widgets'):
self._display_heartbeat(pointer(self))
self.process_messages() | Removes a particle from the simulation.
Parameters
----------
index : int, optional
Specify particle to remove by index.
hash : c_uint32 or string, optional
Specify particle to remove by hash (if a string is passed, the corresponding hash is calculated).
keepSorted : bool, optional
By default, remove preserves the order of particles in the particles array.
You might set it to False in cases with many particles and many removals to speed things up. | Below is the instruction that describes the task:
### Input:
Removes a particle from the simulation.
Parameters
----------
index : int, optional
Specify particle to remove by index.
hash : c_uint32 or string, optional
Specify particle to remove by hash (if a string is passed, the corresponding hash is calculated).
keepSorted : bool, optional
By default, remove preserves the order of particles in the particles array.
You might set it to False in cases with many particles and many removals to speed things up.
### Response:
def remove(self, index=None, hash=None, keepSorted=True):
"""
Removes a particle from the simulation.
Parameters
----------
index : int, optional
Specify particle to remove by index.
hash : c_uint32 or string, optional
Specify particle to remove by hash (if a string is passed, the corresponding hash is calculated).
keepSorted : bool, optional
By default, remove preserves the order of particles in the particles array.
You might set it to False in cases with many particles and many removals to speed things up.
"""
if index is not None:
clibrebound.reb_remove(byref(self), index, keepSorted)
if hash is not None:
hash_types = c_uint32, c_uint, c_ulong
PY3 = sys.version_info[0] == 3
if PY3:
string_types = str,
int_types = int,
else:
string_types = basestring,
int_types = int, long
if isinstance(hash, string_types):
clibrebound.reb_remove_by_hash(byref(self), rebhash(hash), keepSorted)
elif isinstance(hash, int_types):
clibrebound.reb_remove_by_hash(byref(self), c_uint32(hash), keepSorted)
elif isinstance(hash, hash_types):
clibrebound.reb_remove_by_hash(byref(self), hash, keepSorted)
if hasattr(self, '_widgets'):
self._display_heartbeat(pointer(self))
self.process_messages() |
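A usage sketch with the public REBOUND API; tagging particles with hashes lets you remove them without tracking how indices shift after earlier removals.

import rebound

sim = rebound.Simulation()
sim.add(m=1.0)                    # central star, index 0
sim.add(a=1.0, hash="planet_b")   # planet tagged with a hash
sim.add(a=2.0)                    # second planet, index 2
sim.remove(hash="planet_b")       # removal by hash, no index bookkeeping
sim.remove(index=1)               # remove the remaining planet by index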
def list_files(directory):
'''
Return a list of all files found under directory (and its subdirectories)
'''
ret = set()
ret.add(directory)
for root, dirs, files in safe_walk(directory):
for name in files:
ret.add(os.path.join(root, name))
for name in dirs:
ret.add(os.path.join(root, name))
return list(ret) | Return a list of all files found under directory (and its subdirectories) | Below is the instruction that describes the task:
### Input:
Return a list of all files found under directory (and its subdirectories)
### Response:
def list_files(directory):
'''
Return a list of all files found under directory (and its subdirectories)
'''
ret = set()
ret.add(directory)
for root, dirs, files in safe_walk(directory):
for name in files:
ret.add(os.path.join(root, name))
for name in dirs:
ret.add(os.path.join(root, name))
return list(ret) |
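A quick behavioural check; safe_walk is assumed to behave like os.walk, and note that the result contains the starting directory and subdirectories as well as files.

import os
import tempfile

root = tempfile.mkdtemp()
os.makedirs(os.path.join(root, "sub"))
open(os.path.join(root, "sub", "a.txt"), "w").close()
print(sorted(list_files(root)))   # [root, root/sub, root/sub/a.txt]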
def exception_handler(self, ex): # pylint: disable=no-self-use
""" The default exception handler """
if isinstance(ex, CLIError):
logger.error(ex)
else:
logger.exception(ex)
return 1 | The default exception handler | Below is the instruction that describes the task:
### Input:
The default exception handler
### Response:
def exception_handler(self, ex): # pylint: disable=no-self-use
""" The default exception handler """
if isinstance(ex, CLIError):
logger.error(ex)
else:
logger.exception(ex)
return 1 |
def loads(self, schema_txt: str) -> ShExJ.Schema:
""" Parse and return schema as a ShExJ Schema
:param schema_txt: ShExC or ShExJ representation of a ShEx Schema
:return: ShEx Schema representation of schema
"""
self.schema_text = schema_txt
if schema_txt.strip()[0] == '{':
# TODO: figure out how to propagate self.base_location into this parse
return cast(ShExJ.Schema, loads(schema_txt, ShExJ))
else:
return generate_shexj.parse(schema_txt, self.base_location) | Parse and return schema as a ShExJ Schema
:param schema_txt: ShExC or ShExJ representation of a ShEx Schema
:return: ShEx Schema representation of schema | Below is the instruction that describes the task:
### Input:
Parse and return schema as a ShExJ Schema
:param schema_txt: ShExC or ShExJ representation of a ShEx Schema
:return: ShEx Schema representation of schema
### Response:
def loads(self, schema_txt: str) -> ShExJ.Schema:
""" Parse and return schema as a ShExJ Schema
:param schema_txt: ShExC or ShExJ representation of a ShEx Schema
:return: ShEx Schema representation of schema
"""
self.schema_text = schema_txt
if schema_txt.strip()[0] == '{':
# TODO: figure out how to propagate self.base_location into this parse
return cast(ShExJ.Schema, loads(schema_txt, ShExJ))
else:
return generate_shexj.parse(schema_txt, self.base_location) |
def apply_custom_filter(self, filter_func, to_ngrams=False):
"""
Apply a custom filter function `filter_func` to all tokens or ngrams (if `to_ngrams` is True).
`filter_func` must accept a single parameter: a dictionary of structure `{<doc_label>: <tokens list>}`. It
must return a dictionary with the same structure.
This function can only be run on a single process, hence it could be slow for large corpora.
"""
# Because it is not possible to send a function to the workers, all tokens must be fetched from the workers
# first and then the custom function is called and run in a single process (the main process). After that, the
# filtered tokens are sent back to the worker processes.
if not callable(filter_func):
raise ValueError('`filter_func` must be callable')
self._require_tokens()
if to_ngrams:
self._require_ngrams()
get_task = 'get_ngrams_with_worker_id'
set_task = 'set_ngrams'
set_task_param = 'ngrams'
self._invalidate_workers_ngrams()
else:
get_task = 'get_tokens_with_worker_id'
set_task = 'set_tokens'
set_task_param = 'tokens'
self._invalidate_workers_tokens()
self._send_task_to_workers(get_task)
docs_of_workers = {}
for _ in range(self.n_workers):
pair = self.results_queue.get()
docs_of_workers[pair[0]] = pair[1]
assert len(docs_of_workers) == self.n_workers
tok = {}
for docs in docs_of_workers.values():
tok.update(docs)
logger.info('applying custom filter function to tokens')
new_tok = filter_func(tok)
require_dictlike(new_tok)
if set(new_tok.keys()) != set(tok.keys()):
raise ValueError('the document labels and number of documents must stay unchanged during custom filtering')
logger.debug('sending task `%s` to all workers' % set_task)
for w_id, docs in docs_of_workers.items():
new_w_docs = {dl: new_tok.pop(dl) for dl in docs}
self.tasks_queues[w_id].put((set_task, {set_task_param: new_w_docs}))
[q.join() for q in self.tasks_queues]
return self | Apply a custom filter function `filter_func` to all tokens or ngrams (if `to_ngrams` is True).
`filter_func` must accept a single parameter: a dictionary of structure `{<doc_label>: <tokens list>}`. It
must return a dictionary with the same structure.
This function can only be run on a single process, hence it could be slow for large corpora. | Below is the instruction that describes the task:
### Input:
Apply a custom filter function `filter_func` to all tokens or ngrams (if `to_ngrams` is True).
`filter_func` must accept a single parameter: a dictionary of structure `{<doc_label>: <tokens list>}`. It
must return a dictionary with the same structure.
This function can only be run on a single process, hence it could be slow for large corpora.
### Response:
def apply_custom_filter(self, filter_func, to_ngrams=False):
"""
Apply a custom filter function `filter_func` to all tokens or ngrams (if `to_ngrams` is True).
`filter_func` must accept a single parameter: a dictionary of structure `{<doc_label>: <tokens list>}`. It
must return a dictionary with the same structure.
This function can only be run on a single process, hence it could be slow for large corpora.
"""
# Because it is not possible to send a function to the workers, all tokens must be fetched from the workers
# first and then the custom function is called and run in a single process (the main process). After that, the
# filtered tokens are sent back to the worker processes.
if not callable(filter_func):
raise ValueError('`filter_func` must be callable')
self._require_tokens()
if to_ngrams:
self._require_ngrams()
get_task = 'get_ngrams_with_worker_id'
set_task = 'set_ngrams'
set_task_param = 'ngrams'
self._invalidate_workers_ngrams()
else:
get_task = 'get_tokens_with_worker_id'
set_task = 'set_tokens'
set_task_param = 'tokens'
self._invalidate_workers_tokens()
self._send_task_to_workers(get_task)
docs_of_workers = {}
for _ in range(self.n_workers):
pair = self.results_queue.get()
docs_of_workers[pair[0]] = pair[1]
assert len(docs_of_workers) == self.n_workers
tok = {}
for docs in docs_of_workers.values():
tok.update(docs)
logger.info('applying custom filter function to tokens')
new_tok = filter_func(tok)
require_dictlike(new_tok)
if set(new_tok.keys()) != set(tok.keys()):
raise ValueError('the document labels and number of documents must stay unchanged during custom filtering')
logger.debug('sending task `%s` to all workers' % set_task)
for w_id, docs in docs_of_workers.items():
new_w_docs = {dl: new_tok.pop(dl) for dl in docs}
self.tasks_queues[w_id].put((set_task, {set_task_param: new_w_docs}))
[q.join() for q in self.tasks_queues]
return self |
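A sketch of a conforming filter_func: it receives and must return a {doc_label: token list} dict with unchanged labels, exactly what the validation above enforces; preproc stands in for the owning preprocessing instance.

def drop_short_tokens(docs):
    """Remove tokens shorter than 3 characters while keeping every document label."""
    return {label: [t for t in tokens if len(t) >= 3]
            for label, tokens in docs.items()}

preproc.apply_custom_filter(drop_short_tokens)   # runs in the main process, then redistributes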
def get_property_example(cls, property_, nested=None, **kw):
""" Get example for property
:param dict property_:
:param set nested:
:return: example value
"""
paths = kw.get('paths', [])
name = kw.get('name', '')
result = None
if name and paths:
paths = list(map(lambda path: '.'.join((path, name)), paths))
result, path = cls._get_custom_example(paths)
if result is not None and property_['type'] in PRIMITIVE_TYPES:
cls._example_validate(
path, result, property_['type'], property_['type_format'])
return result
if SchemaObjects.contains(property_['type']):
schema = SchemaObjects.get(property_['type'])
if result is not None:
if schema.is_array:
if not isinstance(result, list):
result = [result] * cls.EXAMPLE_ARRAY_ITEMS_COUNT
else:
if isinstance(result, list):
cls.logger.warning(
'Example type mismatch in path {}'.format(schema.ref_path))
else:
result = cls.get_example_by_schema(schema, **kw)
if (not result) and schema.nested_schemas:
for _schema_id in schema.nested_schemas:
_schema = SchemaObjects.get(_schema_id)
if _schema:
if isinstance(_schema, SchemaMapWrapper):
result[_schema.name] = cls.get_example_by_schema(_schema, **kw)
elif _schema.nested_schemas:
for _schema__id in _schema.nested_schemas:
_schema_ = SchemaObjects.get(_schema__id)
if isinstance(_schema_, SchemaMapWrapper):
result[_schema.name] = cls.get_example_by_schema(_schema_, **kw)
else:
result = cls.get_example_value_for_primitive_type(
property_['type'],
property_['type_properties'],
property_['type_format'],
**kw
)
return result | Get example for property
:param dict property_:
:param set nested:
:return: example value | Below is the instruction that describes the task:
### Input:
Get example for property
:param dict property_:
:param set nested:
:return: example value
### Response:
def get_property_example(cls, property_, nested=None, **kw):
""" Get example for property
:param dict property_:
:param set nested:
:return: example value
"""
paths = kw.get('paths', [])
name = kw.get('name', '')
result = None
if name and paths:
paths = list(map(lambda path: '.'.join((path, name)), paths))
result, path = cls._get_custom_example(paths)
if result is not None and property_['type'] in PRIMITIVE_TYPES:
cls._example_validate(
path, result, property_['type'], property_['type_format'])
return result
if SchemaObjects.contains(property_['type']):
schema = SchemaObjects.get(property_['type'])
if result is not None:
if schema.is_array:
if not isinstance(result, list):
result = [result] * cls.EXAMPLE_ARRAY_ITEMS_COUNT
else:
if isinstance(result, list):
cls.logger.warning(
'Example type mismatch in path {}'.format(schema.ref_path))
else:
result = cls.get_example_by_schema(schema, **kw)
if (not result) and schema.nested_schemas:
for _schema_id in schema.nested_schemas:
_schema = SchemaObjects.get(_schema_id)
if _schema:
if isinstance(_schema, SchemaMapWrapper):
result[_schema.name] = cls.get_example_by_schema(_schema, **kw)
elif _schema.nested_schemas:
for _schema__id in _schema.nested_schemas:
_schema_ = SchemaObjects.get(_schema__id)
if isinstance(_schema_, SchemaMapWrapper):
result[_schema.name] = cls.get_example_by_schema(_schema_, **kw)
else:
result = cls.get_example_value_for_primitive_type(
property_['type'],
property_['type_properties'],
property_['type_format'],
**kw
)
return result |
def reverse_cipher(message):
"""
The reverse cipher
:param message: the string to be encrypted
:return: the encrypted string
"""
translated = ''
i = len(message) - 1
while i >= 0:
translated = translated + message[i]
i = i - 1
return translated | The reverse cipher
:param message: the string to be encrypted
:return: the encrypted string | Below is the instruction that describes the task:
### Input:
The reverse cipher
:param message: the string to be encrypted
:return: the encrypted string
### Response:
def reverse_cipher(message):
"""
The reverse cipher
:param message: the string to be encrypted
:return: the encrypted string
"""
translated = ''
i = len(message) - 1
while i >= 0:
translated = translated + message[i]
i = i - 1
return translated |
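A worked example: the loop is equivalent to Python's slice-based reversal, and applying the cipher twice recovers the plaintext.

assert reverse_cipher("Three can keep a secret") == "terces a peek nac eerhT"
assert reverse_cipher(reverse_cipher("attack at dawn")) == "attack at dawn"
# Idiomatic one-liner with identical behaviour:
assert reverse_cipher("attack at dawn") == "attack at dawn"[::-1]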
def __put_year_col_first(d):
"""
Always write year column first. Reorder dictionary so that year is first
:param dict d: data
:return dict: Reordered data
"""
if "year" in d:
D = OrderedDict()
# store the year column first
D["year"] = d["year"]
for k,v in d.items():
if k != "year":
# store the other columns
D[k] = v
return D
else:
# year is not found, return data as-is
return d | Always write year column first. Reorder dictionary so that year is first
:param dict d: data
:return dict: Reordered data | Below is the instruction that describes the task:
### Input:
Always write year column first. Reorder dictionary so that year is first
:param dict d: data
:return dict: Reordered data
### Response:
def __put_year_col_first(d):
"""
Always write year column first. Reorder dictionary so that year is first
:param dict d: data
:return dict: Reordered data
"""
if "year" in d:
D = OrderedDict()
# store the year column first
D["year"] = d["year"]
for k,v in d.items():
if k != "year":
# store the other columns
D[k] = v
return D
else:
# year is not found, return data as-is
return d |
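Behaviour sketch: "year" moves to the front while the other columns keep their relative order, and input without a "year" key passes through untouched (the direct call assumes module-internal access to the name-mangled function).

from collections import OrderedDict

d = OrderedDict([("depth", [0, 1]), ("year", [1950, 1951]), ("d18O", [3.1, 3.3])])
print(list(__put_year_col_first(d).keys()))     # ['year', 'depth', 'd18O']
print(__put_year_col_first({"depth": [0, 1]}))  # no "year" key: returned as-is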
def _name_messages_complete(self):
"""
Check if all name messages have been received
"""
for channel in range(1, self.number_of_channels() + 1):
try:
for name_index in range(1, 4):
if not isinstance(self._name_data[channel][name_index], str):
return False
except Exception:
return False
return True | Check if all name messages have been received | Below is the instruction that describes the task:
### Input:
Check if all name messages have been received
### Response:
def _name_messages_complete(self):
"""
Check if all name messages have been received
"""
for channel in range(1, self.number_of_channels() + 1):
try:
for name_index in range(1, 4):
if not isinstance(self._name_data[channel][name_index], str):
return False
except Exception:
return False
return True |
def set_note_footer(data, trigger):
"""
handle the footer of the note
"""
footer = ''
if data.get('link'):
provided_by = _('Provided by')
provided_from = _('from')
footer_from = "<br/><br/>{} <em>{}</em> {} <a href='{}'>{}</a>"
footer = footer_from.format(
provided_by, trigger.trigger.description, provided_from,
data.get('link'), data.get('link'))
return footer | handle the footer of the note | Below is the instruction that describes the task:
### Input:
handle the footer of the note
### Response:
def set_note_footer(data, trigger):
"""
handle the footer of the note
"""
footer = ''
if data.get('link'):
provided_by = _('Provided by')
provided_from = _('from')
footer_from = "<br/><br/>{} <em>{}</em> {} <a href='{}'>{}</a>"
footer = footer_from.format(
provided_by, trigger.trigger.description, provided_from,
data.get('link'), data.get('link'))
return footer |
def poll(self):
"""Start the poll process by invoking the get_stats method of the
consumers. If we hit this after another interval without fully
processing, note it with a warning.
"""
self.set_state(self.STATE_ACTIVE)
# If we don't have any active consumers, spawn new ones
if not self.total_process_count:
LOGGER.debug('Did not find any active consumers in poll')
return self.check_process_counts()
# Start our data collection dict
self.poll_data = {'timestamp': time.time(), 'processes': list()}
# Iterate through all of the consumers
for proc in list(self.active_processes()):
if proc == multiprocessing.current_process():
continue
# Send the profile signal
os.kill(int(proc.pid), signal.SIGPROF)
self.poll_data['processes'].append(proc.name)
# Check if we need to start more processes
self.check_process_counts() | Start the poll process by invoking the get_stats method of the
consumers. If we hit this after another interval without fully
processing, note it with a warning. | Below is the instruction that describes the task:
### Input:
Start the poll process by invoking the get_stats method of the
consumers. If we hit this after another interval without fully
processing, note it with a warning.
### Response:
def poll(self):
"""Start the poll process by invoking the get_stats method of the
consumers. If we hit this after another interval without fully
processing, note it with a warning.
"""
self.set_state(self.STATE_ACTIVE)
# If we don't have any active consumers, spawn new ones
if not self.total_process_count:
LOGGER.debug('Did not find any active consumers in poll')
return self.check_process_counts()
# Start our data collection dict
self.poll_data = {'timestamp': time.time(), 'processes': list()}
# Iterate through all of the consumers
for proc in list(self.active_processes()):
if proc == multiprocessing.current_process():
continue
# Send the profile signal
os.kill(int(proc.pid), signal.SIGPROF)
self.poll_data['processes'].append(proc.name)
# Check if we need to start more processes
self.check_process_counts() |
def permutations(iterable, r=None):
"""permutations(range(3), 2) --> (0,1) (0,2) (1,0) (1,2) (2,0) (2,1)"""
pool = tuple(iterable)
n = len(pool)
if r is None:
r = n
# Guard from the official itertools recipe: r larger than the pool yields nothing.
if r > n: return
indices = list(range(n))
cycles = list(range(n - r + 1, n + 1))[::-1]
yield tuple(pool[i] for i in indices[:r])
while n:
for i in reversed(range(r)):
cycles[i] -= 1
if cycles[i] == 0:
indices[i:] = indices[i + 1:] + indices[i:i + 1]
cycles[i] = n - i
else:
j = cycles[i]
indices[i], indices[-j] = indices[-j], indices[i]
yield tuple(pool[i] for i in indices[:r])
break
else:
return | permutations(range(3), 2) --> (0,1) (0,2) (1,0) (1,2) (2,0) (2,1) | Below is the instruction that describes the task:
### Input:
permutations(range(3), 2) --> (0,1) (0,2) (1,0) (1,2) (2,0) (2,1)
### Response:
def permutations(iterable, r=None):
"""permutations(range(3), 2) --> (0,1) (0,2) (1,0) (1,2) (2,0) (2,1)"""
pool = tuple(iterable)
n = len(pool)
if r is None:
r = n
# Guard from the official itertools recipe: r larger than the pool yields nothing.
if r > n: return
indices = list(range(n))
cycles = list(range(n - r + 1, n + 1))[::-1]
yield tuple(pool[i] for i in indices[:r])
while n:
for i in reversed(range(r)):
cycles[i] -= 1
if cycles[i] == 0:
indices[i:] = indices[i + 1:] + indices[i:i + 1]
cycles[i] = n - i
else:
j = cycles[i]
indices[i], indices[-j] = indices[-j], indices[i]
yield tuple(pool[i] for i in indices[:r])
break
else:
return |
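With the r > n guard added above, the recipe matches the standard library on every input, which makes it easy to sanity-check:

import itertools

assert list(permutations("ABC", 2)) == list(itertools.permutations("ABC", 2))
assert list(permutations(range(3))) == list(itertools.permutations(range(3)))
assert list(permutations("AB", 3)) == []   # r > n: nothing to yield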
def operator(self):
"""Get a ``LinearOperator`` corresponding to apply().
:return: a LinearOperator that calls apply().
"""
# is projection the zero operator?
if self.V.shape[1] == 0:
N = self.V.shape[0]
return ZeroLinearOperator((N, N))
return self._get_operator(self.apply, self.apply_adj) | Get a ``LinearOperator`` corresponding to apply().
:return: a LinearOperator that calls apply(). | Below is the instruction that describes the task:
### Input:
Get a ``LinearOperator`` corresponding to apply().
:return: a LinearOperator that calls apply().
### Response:
def operator(self):
"""Get a ``LinearOperator`` corresponding to apply().
:return: a LinearOperator that calls apply().
"""
# is projection the zero operator?
if self.V.shape[1] == 0:
N = self.V.shape[0]
return ZeroLinearOperator((N, N))
return self._get_operator(self.apply, self.apply_adj) |
def bmpm(
word,
language_arg=0,
name_mode='gen',
match_mode='approx',
concat=False,
filter_langs=False,
):
"""Return the Beider-Morse Phonetic Matching encoding(s) of a term.
This is a wrapper for :py:meth:`BeiderMorse.encode`.
Parameters
----------
word : str
The word to transform
language_arg : str
The language of the term; supported values include:
- ``any``
- ``arabic``
- ``cyrillic``
- ``czech``
- ``dutch``
- ``english``
- ``french``
- ``german``
- ``greek``
- ``greeklatin``
- ``hebrew``
- ``hungarian``
- ``italian``
- ``latvian``
- ``polish``
- ``portuguese``
- ``romanian``
- ``russian``
- ``spanish``
- ``turkish``
name_mode : str
The name mode of the algorithm:
- ``gen`` -- general (default)
- ``ash`` -- Ashkenazi
- ``sep`` -- Sephardic
match_mode : str
Matching mode: ``approx`` or ``exact``
concat : bool
Concatenation mode
filter_langs : bool
Filter out incompatible languages
Returns
-------
tuple
The Beider-Morse phonetic value(s)
Examples
--------
>>> bmpm('Christopher')
'xrQstopir xrQstYpir xristopir xristYpir xrQstofir xrQstYfir xristofir
xristYfir xristopi xritopir xritopi xristofi xritofir xritofi
tzristopir tzristofir zristopir zristopi zritopir zritopi zristofir
zristofi zritofir zritofi'
>>> bmpm('Niall')
'nial niol'
>>> bmpm('Smith')
'zmit'
>>> bmpm('Schmidt')
'zmit stzmit'
>>> bmpm('Christopher', language_arg='German')
'xrQstopir xrQstYpir xristopir xristYpir xrQstofir xrQstYfir xristofir
xristYfir'
>>> bmpm('Christopher', language_arg='English')
'tzristofir tzrQstofir tzristafir tzrQstafir xristofir xrQstofir
xristafir xrQstafir'
>>> bmpm('Christopher', language_arg='German', name_mode='ash')
'xrQstopir xrQstYpir xristopir xristYpir xrQstofir xrQstYfir xristofir
xristYfir'
>>> bmpm('Christopher', language_arg='German', match_mode='exact')
'xriStopher xriStofer xristopher xristofer'
"""
return BeiderMorse().encode(
word, language_arg, name_mode, match_mode, concat, filter_langs
) | Return the Beider-Morse Phonetic Matching encoding(s) of a term.
This is a wrapper for :py:meth:`BeiderMorse.encode`.
Parameters
----------
word : str
The word to transform
language_arg : str
The language of the term; supported values include:
- ``any``
- ``arabic``
- ``cyrillic``
- ``czech``
- ``dutch``
- ``english``
- ``french``
- ``german``
- ``greek``
- ``greeklatin``
- ``hebrew``
- ``hungarian``
- ``italian``
- ``latvian``
- ``polish``
- ``portuguese``
- ``romanian``
- ``russian``
- ``spanish``
- ``turkish``
name_mode : str
The name mode of the algorithm:
- ``gen`` -- general (default)
- ``ash`` -- Ashkenazi
- ``sep`` -- Sephardic
match_mode : str
Matching mode: ``approx`` or ``exact``
concat : bool
Concatenation mode
filter_langs : bool
Filter out incompatible languages
Returns
-------
tuple
The Beider-Morse phonetic value(s)
Examples
--------
>>> bmpm('Christopher')
'xrQstopir xrQstYpir xristopir xristYpir xrQstofir xrQstYfir xristofir
xristYfir xristopi xritopir xritopi xristofi xritofir xritofi
tzristopir tzristofir zristopir zristopi zritopir zritopi zristofir
zristofi zritofir zritofi'
>>> bmpm('Niall')
'nial niol'
>>> bmpm('Smith')
'zmit'
>>> bmpm('Schmidt')
'zmit stzmit'
>>> bmpm('Christopher', language_arg='German')
'xrQstopir xrQstYpir xristopir xristYpir xrQstofir xrQstYfir xristofir
xristYfir'
>>> bmpm('Christopher', language_arg='English')
'tzristofir tzrQstofir tzristafir tzrQstafir xristofir xrQstofir
xristafir xrQstafir'
>>> bmpm('Christopher', language_arg='German', name_mode='ash')
'xrQstopir xrQstYpir xristopir xristYpir xrQstofir xrQstYfir xristofir
xristYfir'
>>> bmpm('Christopher', language_arg='German', match_mode='exact')
'xriStopher xriStofer xristopher xristofer' | Below is the instruction that describes the task:
### Input:
Return the Beider-Morse Phonetic Matching encoding(s) of a term.
This is a wrapper for :py:meth:`BeiderMorse.encode`.
Parameters
----------
word : str
The word to transform
language_arg : str
The language of the term; supported values include:
- ``any``
- ``arabic``
- ``cyrillic``
- ``czech``
- ``dutch``
- ``english``
- ``french``
- ``german``
- ``greek``
- ``greeklatin``
- ``hebrew``
- ``hungarian``
- ``italian``
- ``latvian``
- ``polish``
- ``portuguese``
- ``romanian``
- ``russian``
- ``spanish``
- ``turkish``
name_mode : str
The name mode of the algorithm:
- ``gen`` -- general (default)
- ``ash`` -- Ashkenazi
- ``sep`` -- Sephardic
match_mode : str
Matching mode: ``approx`` or ``exact``
concat : bool
Concatenation mode
filter_langs : bool
Filter out incompatible languages
Returns
-------
tuple
The Beider-Morse phonetic value(s)
Examples
--------
>>> bmpm('Christopher')
'xrQstopir xrQstYpir xristopir xristYpir xrQstofir xrQstYfir xristofir
xristYfir xristopi xritopir xritopi xristofi xritofir xritofi
tzristopir tzristofir zristopir zristopi zritopir zritopi zristofir
zristofi zritofir zritofi'
>>> bmpm('Niall')
'nial niol'
>>> bmpm('Smith')
'zmit'
>>> bmpm('Schmidt')
'zmit stzmit'
>>> bmpm('Christopher', language_arg='German')
'xrQstopir xrQstYpir xristopir xristYpir xrQstofir xrQstYfir xristofir
xristYfir'
>>> bmpm('Christopher', language_arg='English')
'tzristofir tzrQstofir tzristafir tzrQstafir xristofir xrQstofir
xristafir xrQstafir'
>>> bmpm('Christopher', language_arg='German', name_mode='ash')
'xrQstopir xrQstYpir xristopir xristYpir xrQstofir xrQstYfir xristofir
xristYfir'
>>> bmpm('Christopher', language_arg='German', match_mode='exact')
'xriStopher xriStofer xristopher xristofer'
### Response:
def bmpm(
word,
language_arg=0,
name_mode='gen',
match_mode='approx',
concat=False,
filter_langs=False,
):
"""Return the Beider-Morse Phonetic Matching encoding(s) of a term.
This is a wrapper for :py:meth:`BeiderMorse.encode`.
Parameters
----------
word : str
The word to transform
language_arg : str
The language of the term; supported values include:
- ``any``
- ``arabic``
- ``cyrillic``
- ``czech``
- ``dutch``
- ``english``
- ``french``
- ``german``
- ``greek``
- ``greeklatin``
- ``hebrew``
- ``hungarian``
- ``italian``
- ``latvian``
- ``polish``
- ``portuguese``
- ``romanian``
- ``russian``
- ``spanish``
- ``turkish``
name_mode : str
The name mode of the algorithm:
- ``gen`` -- general (default)
- ``ash`` -- Ashkenazi
- ``sep`` -- Sephardic
match_mode : str
Matching mode: ``approx`` or ``exact``
concat : bool
Concatenation mode
filter_langs : bool
Filter out incompatible languages
Returns
-------
tuple
The Beider-Morse phonetic value(s)
Examples
--------
>>> bmpm('Christopher')
'xrQstopir xrQstYpir xristopir xristYpir xrQstofir xrQstYfir xristofir
xristYfir xristopi xritopir xritopi xristofi xritofir xritofi
tzristopir tzristofir zristopir zristopi zritopir zritopi zristofir
zristofi zritofir zritofi'
>>> bmpm('Niall')
'nial niol'
>>> bmpm('Smith')
'zmit'
>>> bmpm('Schmidt')
'zmit stzmit'
>>> bmpm('Christopher', language_arg='German')
'xrQstopir xrQstYpir xristopir xristYpir xrQstofir xrQstYfir xristofir
xristYfir'
>>> bmpm('Christopher', language_arg='English')
'tzristofir tzrQstofir tzristafir tzrQstafir xristofir xrQstofir
xristafir xrQstafir'
>>> bmpm('Christopher', language_arg='German', name_mode='ash')
'xrQstopir xrQstYpir xristopir xristYpir xrQstofir xrQstYfir xristofir
xristYfir'
>>> bmpm('Christopher', language_arg='German', match_mode='exact')
'xriStopher xriStofer xristopher xristofer'
"""
return BeiderMorse().encode(
word, language_arg, name_mode, match_mode, concat, filter_langs
) |
def receive_accepted(self, msg):
'''
Called when an Accepted message is received from an acceptor. Once the final value
is determined, the return value of this method will be a Resolution message containing
the consentual value. Subsequent calls after the resolution is chosen will continue to add
new Acceptors to the final_acceptors set and return Resolution messages.
'''
if self.final_value is not None:
if msg.proposal_id >= self.final_proposal_id and msg.proposal_value == self.final_value:
self.final_acceptors.add(msg.from_uid)
return Resolution(self.network_uid, self.final_value)
last_pn = self.acceptors.get(msg.from_uid)
if last_pn is not None and msg.proposal_id <= last_pn:
return # Old message
self.acceptors[msg.from_uid] = msg.proposal_id
if last_pn is not None:
# String proposal_key, need string keys for JSON.
proposal_key = str(last_pn)
ps = self.proposals[proposal_key]
ps.retain_count -= 1
ps.acceptors.remove(msg.from_uid)
if ps.retain_count == 0:
del self.proposals[proposal_key]
# String proposal_key, need string keys for JSON.
proposal_key = str(msg.proposal_id)
if not proposal_key in self.proposals:
self.proposals[proposal_key] = ProposalStatus(msg.proposal_value)
ps = self.proposals[proposal_key]
assert msg.proposal_value == ps.value, 'Value mismatch for single proposal!'
ps.accept_count += 1
ps.retain_count += 1
ps.acceptors.add(msg.from_uid)
if ps.accept_count == self.quorum_size:
self.final_proposal_id = msg.proposal_id
self.final_value = msg.proposal_value
self.final_acceptors = ps.acceptors
self.proposals = None
self.acceptors = None
return Resolution(self.network_uid, self.final_value) | Called when an Accepted message is received from an acceptor. Once the final value
is determined, the return value of this method will be a Resolution message containing
the consensus value. Subsequent calls after the resolution is chosen will continue to add
new Acceptors to the final_acceptors set and return Resolution messages. | Below is the instruction that describes the task:
### Input:
Called when an Accepted message is received from an acceptor. Once the final value
is determined, the return value of this method will be a Resolution message containing
the consensus value. Subsequent calls after the resolution is chosen will continue to add
new Acceptors to the final_acceptors set and return Resolution messages.
### Response:
def receive_accepted(self, msg):
'''
Called when an Accepted message is received from an acceptor. Once the final value
is determined, the return value of this method will be a Resolution message containing
the consentual value. Subsequent calls after the resolution is chosen will continue to add
new Acceptors to the final_acceptors set and return Resolution messages.
'''
if self.final_value is not None:
if msg.proposal_id >= self.final_proposal_id and msg.proposal_value == self.final_value:
self.final_acceptors.add(msg.from_uid)
return Resolution(self.network_uid, self.final_value)
last_pn = self.acceptors.get(msg.from_uid)
if last_pn is not None and msg.proposal_id <= last_pn:
return # Old message
self.acceptors[msg.from_uid] = msg.proposal_id
if last_pn is not None:
# String proposal_key, need string keys for JSON.
proposal_key = str(last_pn)
ps = self.proposals[proposal_key]
ps.retain_count -= 1
ps.acceptors.remove(msg.from_uid)
if ps.retain_count == 0:
del self.proposals[proposal_key]
# String proposal_key, need string keys for JSON.
proposal_key = str(msg.proposal_id)
if not proposal_key in self.proposals:
self.proposals[proposal_key] = ProposalStatus(msg.proposal_value)
ps = self.proposals[proposal_key]
assert msg.proposal_value == ps.value, 'Value mismatch for single proposal!'
ps.accept_count += 1
ps.retain_count += 1
ps.acceptors.add(msg.from_uid)
if ps.accept_count == self.quorum_size:
self.final_proposal_id = msg.proposal_id
self.final_value = msg.proposal_value
self.final_acceptors = ps.acceptors
self.proposals = None
self.acceptors = None
return Resolution(self.network_uid, self.final_value) |
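A toy walk-through of the quorum logic; the Learner constructor and the Accepted message shape are assumptions inferred from the attribute accesses above.

learner = Learner(network_uid="L1", quorum_size=2)          # hypothetical constructor
m1 = Accepted(from_uid="A", proposal_id=(1, "P1"), proposal_value="v")
m2 = Accepted(from_uid="B", proposal_id=(1, "P1"), proposal_value="v")

assert learner.receive_accepted(m1) is None                 # one acceptance: no quorum yet
resolution = learner.receive_accepted(m2)                   # quorum of 2 reached
assert resolution is not None                               # Resolution("L1", "v") comes back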
def _flush_notifications(self):
"""Flush notifications of engine registrations waiting
in ZMQ queue."""
idents,msg = self.session.recv(self._notification_socket, mode=zmq.NOBLOCK)
while msg is not None:
if self.debug:
pprint(msg)
msg_type = msg['header']['msg_type']
handler = self._notification_handlers.get(msg_type, None)
if handler is None:
raise Exception("Unhandled message type: %s" % msg_type)
else:
handler(msg)
idents,msg = self.session.recv(self._notification_socket, mode=zmq.NOBLOCK) | Flush notifications of engine registrations waiting
in ZMQ queue. | Below is the instruction that describes the task:
### Input:
Flush notifications of engine registrations waiting
in ZMQ queue.
### Response:
def _flush_notifications(self):
"""Flush notifications of engine registrations waiting
in ZMQ queue."""
idents,msg = self.session.recv(self._notification_socket, mode=zmq.NOBLOCK)
while msg is not None:
if self.debug:
pprint(msg)
msg_type = msg['header']['msg_type']
handler = self._notification_handlers.get(msg_type, None)
if handler is None:
raise Exception("Unhandled message type: %s" % msg_type)
else:
handler(msg)
idents,msg = self.session.recv(self._notification_socket, mode=zmq.NOBLOCK) |
def failed_hosts(self):
"""Hosts that failed during the execution of the task."""
return {h: r for h, r in self.items() if r.failed} | Hosts that failed during the execution of the task. | Below is the instruction that describes the task:
### Input:
Hosts that failed during the execution of the task.
### Response:
def failed_hosts(self):
"""Hosts that failed during the execution of the task."""
return {h: r for h, r in self.items() if r.failed} |
def _jdn(self):
"""Return the Julian date number for the given date."""
if self._last_updated == "gdate":
return conv.gdate_to_jdn(self.gdate)
return conv.hdate_to_jdn(self.hdate) | Return the Julian date number for the given date. | Below is the instruction that describes the task:
### Input:
Return the Julian date number for the given date.
### Response:
def _jdn(self):
"""Return the Julian date number for the given date."""
if self._last_updated == "gdate":
return conv.gdate_to_jdn(self.gdate)
return conv.hdate_to_jdn(self.hdate) |
def _check_environ(variable, value):
"""check if a variable is present in the environmental variables"""
if is_not_none(value):
return value
else:
value = os.environ.get(variable)
if is_none(value):
stop(''.join([variable,
""" not supplied and no entry in environmental
variables"""]))
else:
return value | check if a variable is present in the environmental variables | Below is the instruction that describes the task:
### Input:
check if a variable is present in the environmental variables
### Response:
def _check_environ(variable, value):
"""check if a variable is present in the environmental variables"""
if is_not_none(value):
return value
else:
value = os.environ.get(variable)
if is_none(value):
stop(''.join([variable,
""" not supplied and no entry in environmental
variables"""]))
else:
return value |
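Behaviour sketch: an explicitly supplied value wins; otherwise the named environment variable is consulted, and stop() fires only when both are missing. The variable name is a placeholder.

import os

os.environ["EXAMPLE_API_KEY"] = "abc123"
assert _check_environ("EXAMPLE_API_KEY", None) == "abc123"   # falls back to the environment
assert _check_environ("EXAMPLE_API_KEY", "xyz") == "xyz"     # explicit value takes precedence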
def _ensure_ifaces_tuple(ifaces):
"""Convert to a tuple of interfaces and raise if not interfaces."""
try:
ifaces = tuple(ifaces)
except TypeError:
ifaces = (ifaces,)
for iface in ifaces:
if not _issubclass(iface, ibc.Iface):
raise TypeError('Can only compare against interfaces.')
return ifaces | Convert to a tuple of interfaces and raise if not interfaces. | Below is the instruction that describes the task:
### Input:
Convert to a tuple of interfaces and raise if not interfaces.
### Response:
def _ensure_ifaces_tuple(ifaces):
"""Convert to a tuple of interfaces and raise if not interfaces."""
try:
ifaces = tuple(ifaces)
except TypeError:
ifaces = (ifaces,)
for iface in ifaces:
if not _issubclass(iface, ibc.Iface):
raise TypeError('Can only compare against interfaces.')
return ifaces |
def logout(self):
"""
Log currently authenticated user out, invalidating any existing tokens.
"""
# Remove token from local cache
# MAINT: need to expire token on server
data = self._read_uaa_cache()
if self.uri in data:
for client in data[self.uri]:
if client['id'] == self.client['id']:
data[self.uri].remove(client)
with open(self._cache_path, 'w') as output:
output.write(json.dumps(data, sort_keys=True, indent=4)) | Log currently authenticated user out, invalidating any existing tokens. | Below is the instruction that describes the task:
### Input:
Log currently authenticated user out, invalidating any existing tokens.
### Response:
def logout(self):
"""
Log currently authenticated user out, invalidating any existing tokens.
"""
# Remove token from local cache
# MAINT: need to expire token on server
data = self._read_uaa_cache()
if self.uri in data:
for client in data[self.uri]:
if client['id'] == self.client['id']:
data[self.uri].remove(client)
with open(self._cache_path, 'w') as output:
output.write(json.dumps(data, sort_keys=True, indent=4)) |
def is_bbox_not_intersecting(self, other):
"""Returns False iif bounding boxed of self and other intersect"""
self_x_min, self_x_max, self_y_min, self_y_max = self.get_bbox()
other_x_min, other_x_max, other_y_min, other_y_max = other.get_bbox()
return \
self_x_min > other_x_max or \
other_x_min > self_x_max or \
self_y_min > other_y_max or \
other_y_min > self_y_max | Returns False iff bounding boxes of self and other intersect | Below is the instruction that describes the task:
### Input:
Returns False iff bounding boxes of self and other intersect
### Response:
def is_bbox_not_intersecting(self, other):
"""Returns False iif bounding boxed of self and other intersect"""
self_x_min, self_x_max, self_y_min, self_y_max = self.get_bbox()
other_x_min, other_x_max, other_y_min, other_y_max = other.get_bbox()
return \
self_x_min > other_x_max or \
other_x_min > self_x_max or \
self_y_min > other_y_max or \
other_y_min > self_y_max |
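The test is the classic separating-axis check in 2-D: two axis-aligned boxes are disjoint exactly when one lies entirely beyond the other along the x-axis or the y-axis. A standalone sketch with bounding boxes as plain tuples:

```python
def bboxes_disjoint(a, b):
    # Each bbox is (x_min, x_max, y_min, y_max).
    ax_min, ax_max, ay_min, ay_max = a
    bx_min, bx_max, by_min, by_max = b
    return ax_min > bx_max or bx_min > ax_max \
        or ay_min > by_max or by_min > ay_max

print(bboxes_disjoint((0, 1, 0, 1), (2, 3, 0, 1)))  # True: separated along x
print(bboxes_disjoint((0, 2, 0, 2), (1, 3, 1, 3)))  # False: boxes overlap
```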
def parse_request(cls, request_string):
"""JSONRPC allows for **batch** requests to be communicated
as array of dicts. This method parses out each individual
element in the batch and returns a list of tuples, each
tuple a result of parsing of each item in the batch.
:Returns: | tuple of (results, is_batch_mode_flag)
| where:
| - results is a tuple describing the request
| - Is_batch_mode_flag is a Bool indicating if the
| request came in in batch mode (as array of requests) or not.
:Raises: RPCParseError, RPCInvalidRequest
"""
try:
batch = cls.json_loads(request_string)
except ValueError as err:
raise errors.RPCParseError("No valid JSON. (%s)" % str(err))
if isinstance(batch, (list, tuple)) and batch:
# batch is true batch.
# list of parsed request objects, is_batch_mode_flag
return [cls._parse_single_request_trap_errors(request) for request in batch], True
elif isinstance(batch, dict):
# `batch` is actually single request object
return [cls._parse_single_request_trap_errors(batch)], False
raise errors.RPCInvalidRequest("Neither a batch array nor a single request object found in the request.") | JSONRPC allows for **batch** requests to be communicated
as array of dicts. This method parses out each individual
element in the batch and returns a list of tuples, each
tuple a result of parsing of each item in the batch.
:Returns: | tuple of (results, is_batch_mode_flag)
| where:
| - results is a tuple describing the request
| - Is_batch_mode_flag is a Bool indicating if the
| request came in in batch mode (as array of requests) or not.
:Raises: RPCParseError, RPCInvalidRequest | Below is the instruction that describes the task:
### Input:
JSONRPC allows for **batch** requests to be communicated
as array of dicts. This method parses out each individual
element in the batch and returns a list of tuples, each
tuple a result of parsing of each item in the batch.
:Returns: | tuple of (results, is_batch_mode_flag)
| where:
| - results is a tuple describing the request
| - Is_batch_mode_flag is a Bool indicating if the
| request came in in batch mode (as array of requests) or not.
:Raises: RPCParseError, RPCInvalidRequest
### Response:
def parse_request(cls, request_string):
"""JSONRPC allows for **batch** requests to be communicated
as array of dicts. This method parses out each individual
element in the batch and returns a list of tuples, each
tuple a result of parsing of each item in the batch.
:Returns: | tuple of (results, is_batch_mode_flag)
| where:
| - results is a tuple describing the request
| - Is_batch_mode_flag is a Bool indicating if the
| request came in in batch mode (as array of requests) or not.
:Raises: RPCParseError, RPCInvalidRequest
"""
try:
batch = cls.json_loads(request_string)
except ValueError as err:
raise errors.RPCParseError("No valid JSON. (%s)" % str(err))
if isinstance(batch, (list, tuple)) and batch:
# batch is true batch.
# list of parsed request objects, is_batch_mode_flag
return [cls._parse_single_request_trap_errors(request) for request in batch], True
elif isinstance(batch, dict):
# `batch` is actually single request object
return [cls._parse_single_request_trap_errors(batch)], False
raise errors.RPCInvalidRequest("Neither a batch array nor a single request object found in the request.") |
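The batch/single dispatch boils down to one `json.loads` followed by a type check: a non-empty list means batch mode, a dict means a single request, anything else is invalid. A hedged sketch with the per-request parsing stubbed out (the real method delegates to `_parse_single_request_trap_errors`):

```python
import json

def parse_request(request_string):
    # Returns (parsed_items, is_batch_mode_flag).
    batch = json.loads(request_string)  # ValueError here -> parse error
    if isinstance(batch, (list, tuple)) and batch:
        return list(batch), True        # per-item parsing stubbed out
    if isinstance(batch, dict):
        return [batch], False
    raise ValueError("Neither a batch array nor a single request object found.")

single = '{"jsonrpc": "2.0", "method": "ping", "id": 1}'
batch = '[{"jsonrpc": "2.0", "method": "ping", "id": 1},' \
        ' {"jsonrpc": "2.0", "method": "pong", "id": 2}]'
print(parse_request(single)[1])  # False: one request object
print(parse_request(batch)[1])   # True: batch mode
```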
def make_simple():
"""
Create a L{SimpleAuthenticator} instance using values read from coilmq configuration.
@return: The configured L{SimpleAuthenticator}
@rtype: L{SimpleAuthenticator}
@raise ConfigError: If there is a configuration error.
"""
authfile = config.get('coilmq', 'auth.simple.file')
if not authfile:
raise ConfigError('Missing configuration parameter: auth.simple.file')
sa = SimpleAuthenticator()
sa.from_configfile(authfile)
return sa | Create a L{SimpleAuthenticator} instance using values read from coilmq configuration.
@return: The configured L{SimpleAuthenticator}
@rtype: L{SimpleAuthenticator}
@raise ConfigError: If there is a configuration error. | Below is the instruction that describes the task:
### Input:
Create a L{SimpleAuthenticator} instance using values read from coilmq configuration.
@return: The configured L{SimpleAuthenticator}
@rtype: L{SimpleAuthenticator}
@raise ConfigError: If there is a configuration error.
### Response:
def make_simple():
"""
Create a L{SimpleAuthenticator} instance using values read from coilmq configuration.
@return: The configured L{SimpleAuthenticator}
@rtype: L{SimpleAuthenticator}
@raise ConfigError: If there is a configuration error.
"""
authfile = config.get('coilmq', 'auth.simple.file')
if not authfile:
raise ConfigError('Missing configuration parameter: auth.simple.file')
sa = SimpleAuthenticator()
sa.from_configfile(authfile)
return sa |
def render(self, request, template, context):
"""
Returns a response. By default, this will contain the rendered PDF, but
if both ``allow_force_html`` is ``True`` and the querystring
``html=true`` was set it will return plain HTML.
"""
if self.allow_force_html and self.request.GET.get('html', False):
html = get_template(template).render(context)
return HttpResponse(html)
else:
response = HttpResponse(content_type='application/pdf')
if self.prompt_download:
response['Content-Disposition'] = 'attachment; filename="{}"' \
.format(self.get_download_name())
helpers.render_pdf(
template=template,
file_=response,
url_fetcher=self.url_fetcher,
context=context,
)
return response | Returns a response. By default, this will contain the rendered PDF, but
if both ``allow_force_html`` is ``True`` and the querystring
``html=true`` was set it will return plain HTML. | Below is the instruction that describes the task:
### Input:
Returns a response. By default, this will contain the rendered PDF, but
if both ``allow_force_html`` is ``True`` and the querystring
``html=true`` was set it will return plain HTML.
### Response:
def render(self, request, template, context):
"""
Returns a response. By default, this will contain the rendered PDF, but
if both ``allow_force_html`` is ``True`` and the querystring
``html=true`` was set it will return plain HTML.
"""
if self.allow_force_html and self.request.GET.get('html', False):
html = get_template(template).render(context)
return HttpResponse(html)
else:
response = HttpResponse(content_type='application/pdf')
if self.prompt_download:
response['Content-Disposition'] = 'attachment; filename="{}"' \
.format(self.get_download_name())
helpers.render_pdf(
template=template,
file_=response,
url_fetcher=self.url_fetcher,
context=context,
)
return response |
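The branching itself is framework-independent: serve debug HTML when the escape hatch is used, otherwise serve the PDF and optionally add a `Content-Disposition` header to prompt a download. A framework-free sketch of that decision (hypothetical render callables; the real view returns Django `HttpResponse` objects):

```python
def build_response(render_pdf, render_html, force_html, prompt_download, filename):
    # Returns (body, headers) instead of a Django HttpResponse.
    if force_html:
        return render_html(), {"Content-Type": "text/html"}
    headers = {"Content-Type": "application/pdf"}
    if prompt_download:
        headers["Content-Disposition"] = 'attachment; filename="{}"'.format(filename)
    return render_pdf(), headers

body, headers = build_response(lambda: b"%PDF-1.4 ...", lambda: "<h1>debug</h1>",
                               force_html=False, prompt_download=True,
                               filename="report.pdf")
print(headers["Content-Disposition"])  # attachment; filename="report.pdf"
```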
def open_file(self, fname, external=False):
"""
Open filename with the appropriate application
Redirect to the right widget (txt -> editor, spydata -> workspace, ...)
or open file outside Spyder (if extension is not supported)
"""
fname = to_text_string(fname)
ext = osp.splitext(fname)[1]
if encoding.is_text_file(fname):
self.editor.load(fname)
elif self.variableexplorer is not None and ext in IMPORT_EXT:
self.variableexplorer.import_data(fname)
elif not external:
fname = file_uri(fname)
programs.start_file(fname) | Open filename with the appropriate application
Redirect to the right widget (txt -> editor, spydata -> workspace, ...)
or open file outside Spyder (if extension is not supported) | Below is the instruction that describes the task:
### Input:
Open filename with the appropriate application
Redirect to the right widget (txt -> editor, spydata -> workspace, ...)
or open file outside Spyder (if extension is not supported)
### Response:
def open_file(self, fname, external=False):
"""
Open filename with the appropriate application
Redirect to the right widget (txt -> editor, spydata -> workspace, ...)
or open file outside Spyder (if extension is not supported)
"""
fname = to_text_string(fname)
ext = osp.splitext(fname)[1]
if encoding.is_text_file(fname):
self.editor.load(fname)
elif self.variableexplorer is not None and ext in IMPORT_EXT:
self.variableexplorer.import_data(fname)
elif not external:
fname = file_uri(fname)
programs.start_file(fname) |
def to_json(self):
"""Convert the Design Day to a dictionary."""
return {
'location': self.location.to_json(),
'design_days': [des_d.to_json() for des_d in self.design_days]
} | Convert the Design Day to a dictionary. | Below is the instruction that describes the task:
### Input:
Convert the Design Day to a dictionary.
### Response:
def to_json(self):
"""Convert the Design Day to a dictionary."""
return {
'location': self.location.to_json(),
'design_days': [des_d.to_json() for des_d in self.design_days]
} |
def board_links_to_ids(self):
""" Convert board links to ids """
resp = self.stats.session.open(
"{0}/members/{1}/boards?{2}".format(
self.stats.url, self.username, urllib.urlencode({
"key": self.key,
"token": self.token,
"fields": "shortLink"})))
boards = json.loads(resp.read())
return [board['id'] for board in boards if self.board_links == [""]
or board['shortLink'] in self.board_links] | Convert board links to ids | Below is the instruction that describes the task:
### Input:
Convert board links to ids
### Response:
def board_links_to_ids(self):
""" Convert board links to ids """
resp = self.stats.session.open(
"{0}/members/{1}/boards?{2}".format(
self.stats.url, self.username, urllib.urlencode({
"key": self.key,
"token": self.token,
"fields": "shortLink"})))
boards = json.loads(resp.read())
return [board['id'] for board in boards if self.board_links == [""]
or board['shortLink'] in self.board_links] |
def police_priority_map_exceed_map_pri1_exceed(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
police_priority_map = ET.SubElement(config, "police-priority-map", xmlns="urn:brocade.com:mgmt:brocade-policer")
name_key = ET.SubElement(police_priority_map, "name")
name_key.text = kwargs.pop('name')
exceed = ET.SubElement(police_priority_map, "exceed")
map_pri1_exceed = ET.SubElement(exceed, "map-pri1-exceed")
map_pri1_exceed.text = kwargs.pop('map_pri1_exceed')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code | Below is the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def police_priority_map_exceed_map_pri1_exceed(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
police_priority_map = ET.SubElement(config, "police-priority-map", xmlns="urn:brocade.com:mgmt:brocade-policer")
name_key = ET.SubElement(police_priority_map, "name")
name_key.text = kwargs.pop('name')
exceed = ET.SubElement(police_priority_map, "exceed")
map_pri1_exceed = ET.SubElement(exceed, "map-pri1-exceed")
map_pri1_exceed.text = kwargs.pop('map_pri1_exceed')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def add_mag_drift_unit_vectors(inst, max_steps=40000, step_size=10.):
"""Add unit vectors expressing the ion drift coordinate system
organized by the geomagnetic field. Unit vectors are expressed
in S/C coordinates.
Internally, routine calls add_mag_drift_unit_vectors_ecef.
See function for input parameter description.
Requires the orientation of the S/C basis vectors in ECEF using naming,
'sc_xhat_x' where *hat (*=x,y,z) is the S/C basis vector and _* (*=x,y,z)
is the ECEF direction.
Parameters
----------
inst : pysat.Instrument object
Instrument object to be modified
max_steps : int
Maximum number of steps taken for field line integration
step_size : float
Maximum step size (km) allowed for field line tracer
Returns
-------
None
Modifies instrument object in place. Adds 'unit_zon_*' where * = x,y,z
'unit_fa_*' and 'unit_mer_*' for zonal, field aligned, and meridional
directions. Note that vector components are expressed in the S/C basis.
"""
# vectors are returned in geo/ecef coordinate system
add_mag_drift_unit_vectors_ecef(inst, max_steps=max_steps, step_size=step_size)
# convert them to S/C using transformation supplied by OA
inst['unit_zon_x'], inst['unit_zon_y'], inst['unit_zon_z'] = project_ecef_vector_onto_basis(inst['unit_zon_ecef_x'], inst['unit_zon_ecef_y'], inst['unit_zon_ecef_z'],
inst['sc_xhat_x'], inst['sc_xhat_y'], inst['sc_xhat_z'],
inst['sc_yhat_x'], inst['sc_yhat_y'], inst['sc_yhat_z'],
inst['sc_zhat_x'], inst['sc_zhat_y'], inst['sc_zhat_z'])
inst['unit_fa_x'], inst['unit_fa_y'], inst['unit_fa_z'] = project_ecef_vector_onto_basis(inst['unit_fa_ecef_x'], inst['unit_fa_ecef_y'], inst['unit_fa_ecef_z'],
inst['sc_xhat_x'], inst['sc_xhat_y'], inst['sc_xhat_z'],
inst['sc_yhat_x'], inst['sc_yhat_y'], inst['sc_yhat_z'],
inst['sc_zhat_x'], inst['sc_zhat_y'], inst['sc_zhat_z'])
inst['unit_mer_x'], inst['unit_mer_y'], inst['unit_mer_z'] = project_ecef_vector_onto_basis(inst['unit_mer_ecef_x'], inst['unit_mer_ecef_y'], inst['unit_mer_ecef_z'],
inst['sc_xhat_x'], inst['sc_xhat_y'], inst['sc_xhat_z'],
inst['sc_yhat_x'], inst['sc_yhat_y'], inst['sc_yhat_z'],
inst['sc_zhat_x'], inst['sc_zhat_y'], inst['sc_zhat_z'])
inst.meta['unit_zon_x'] = { 'long_name':'Zonal direction along IVM-x',
'desc': 'Unit vector for the zonal geomagnetic direction.',
'label': 'Zonal Unit Vector: IVM-X component',
'axis': 'Zonal Unit Vector: IVM-X component',
'notes': ('Positive towards the east. Zonal vector is normal to magnetic meridian plane. '
'The unit vector is expressed in the IVM coordinate system, x - along RAM, '
'z - towards nadir, y - completes the system, generally southward. '
'Calculated using the corresponding unit vector in ECEF and the orientation '
'of the IVM also expressed in ECEF (sc_*hat_*).'),
'scale': 'linear',
'units': '',
'value_min':-1.,
'value_max':1}
inst.meta['unit_zon_y'] = {'long_name':'Zonal direction along IVM-y',
'desc': 'Unit vector for the zonal geomagnetic direction.',
'label': 'Zonal Unit Vector: IVM-Y component',
'axis': 'Zonal Unit Vector: IVM-Y component',
'notes': ('Positive towards the east. Zonal vector is normal to magnetic meridian plane. '
'The unit vector is expressed in the IVM coordinate system, x - along RAM, '
'z - towards nadir, y - completes the system, generally southward. '
'Calculated using the corresponding unit vector in ECEF and the orientation '
'of the IVM also expressed in ECEF (sc_*hat_*).'),
'scale': 'linear',
'units': '',
'value_min':-1.,
'value_max':1}
inst.meta['unit_zon_z'] = {'long_name':'Zonal direction along IVM-z',
'desc': 'Unit vector for the zonal geomagnetic direction.',
'label': 'Zonal Unit Vector: IVM-Z component',
'axis': 'Zonal Unit Vector: IVM-Z component',
'notes': ('Positive towards the east. Zonal vector is normal to magnetic meridian plane. '
'The unit vector is expressed in the IVM coordinate system, x - along RAM, '
'z - towards nadir, y - completes the system, generally southward. '
'Calculated using the corresponding unit vector in ECEF and the orientation '
'of the IVM also expressed in ECEF (sc_*hat_*).'),
'scale': 'linear',
'units': '',
'value_min':-1.,
'value_max':1}
inst.meta['unit_fa_x'] = {'long_name':'Field-aligned direction along IVM-x',
'desc': 'Unit vector for the geomagnetic field line direction.',
'label': 'Field Aligned Unit Vector: IVM-X component',
'axis': 'Field Aligned Unit Vector: IVM-X component',
'notes': ('Positive along the field, generally northward. Unit vector is along the geomagnetic field. '
'The unit vector is expressed in the IVM coordinate system, x - along RAM, '
'z - towards nadir, y - completes the system, generally southward. '
'Calculated using the corresponding unit vector in ECEF and the orientation '
'of the IVM also expressed in ECEF (sc_*hat_*).'),
'scale': 'linear',
'units': '',
'value_min':-1.,
'value_max':1}
inst.meta['unit_fa_y'] = {'long_name':'Field-aligned direction along IVM-y',
'desc': 'Unit vector for the geomagnetic field line direction.',
'label': 'Field Aligned Unit Vector: IVM-Y component',
'axis': 'Field Aligned Unit Vector: IVM-Y component',
'notes': ('Positive along the field, generally northward. Unit vector is along the geomagnetic field. '
'The unit vector is expressed in the IVM coordinate system, x - along RAM, '
'z - towards nadir, y - completes the system, generally southward. '
'Calculated using the corresponding unit vector in ECEF and the orientation '
'of the IVM also expressed in ECEF (sc_*hat_*).'),
'scale': 'linear',
'units': '',
'value_min':-1.,
'value_max':1}
inst.meta['unit_fa_z'] = {'long_name':'Field-aligned direction along IVM-z',
'desc': 'Unit vector for the geomagnetic field line direction.',
'label': 'Field Aligned Unit Vector: IVM-Z component',
'axis': 'Field Aligned Unit Vector: IVM-Z component',
'notes': ('Positive along the field, generally northward. Unit vector is along the geomagnetic field. '
'The unit vector is expressed in the IVM coordinate system, x - along RAM, '
'z - towards nadir, y - completes the system, generally southward. '
'Calculated using the corresponding unit vector in ECEF and the orientation '
'of the IVM also expressed in ECEF (sc_*hat_*).'),
'scale': 'linear',
'units': '',
'value_min':-1.,
'value_max':1}
inst.meta['unit_mer_x'] = {'long_name':'Meridional direction along IVM-x',
'desc': 'Unit vector for the geomagnetic meridional direction.',
'label': 'Meridional Unit Vector: IVM-X component',
'axis': 'Meridional Unit Vector: IVM-X component',
'notes': ('Positive is aligned with vertical at '
'geomagnetic equator. Unit vector is perpendicular to the geomagnetic field '
'and in the plane of the meridian.'
'The unit vector is expressed in the IVM coordinate system, x - along RAM, '
'z - towards nadir, y - completes the system, generally southward. '
'Calculated using the corresponding unit vector in ECEF and the orientation '
'of the IVM also expressed in ECEF (sc_*hat_*).'),
'scale': 'linear',
'units': '',
'value_min':-1.,
'value_max':1}
inst.meta['unit_mer_y'] = {'long_name':'Meridional direction along IVM-y',
'desc': 'Unit vector for the geomagnetic meridional direction.',
'label': 'Meridional Unit Vector: IVM-Y component',
'axis': 'Meridional Unit Vector: IVM-Y component',
'notes': ('Positive is aligned with vertical at '
'geomagnetic equator. Unit vector is perpendicular to the geomagnetic field '
'and in the plane of the meridian.'
'The unit vector is expressed in the IVM coordinate system, x - along RAM, '
'z - towards nadir, y - completes the system, generally southward. '
'Calculated using the corresponding unit vector in ECEF and the orientation '
'of the IVM also expressed in ECEF (sc_*hat_*).'),
'scale': 'linear',
'units': '',
'value_min':-1.,
'value_max':1}
inst.meta['unit_mer_z'] = {'long_name':'Meridional direction along IVM-z',
'desc': 'Unit vector for the geomagnetic meridional direction.',
'label': 'Meridional Unit Vector: IVM-Z component',
'axis': 'Meridional Unit Vector: IVM-Z component',
'notes': ('Positive is aligned with vertical at '
'geomagnetic equator. Unit vector is perpendicular to the geomagnetic field '
'and in the plane of the meridian.'
'The unit vector is expressed in the IVM coordinate system, x - along RAM, '
'z - towards nadir, y - completes the system, generally southward. '
'Calculated using the corresponding unit vector in ECEF and the orientation '
'of the IVM also expressed in ECEF (sc_*hat_*).'),
'scale': 'linear',
'units': '',
'value_min':-1.,
'value_max':1}
return | Add unit vectors expressing the ion drift coordinate system
organized by the geomagnetic field. Unit vectors are expressed
in S/C coordinates.
Internally, routine calls add_mag_drift_unit_vectors_ecef.
See function for input parameter description.
Requires the orientation of the S/C basis vectors in ECEF using naming,
'sc_xhat_x' where *hat (*=x,y,z) is the S/C basis vector and _* (*=x,y,z)
is the ECEF direction.
Parameters
----------
inst : pysat.Instrument object
Instrument object to be modified
max_steps : int
Maximum number of steps taken for field line integration
step_size : float
Maximum step size (km) allowed for field line tracer
Returns
-------
None
Modifies instrument object in place. Adds 'unit_zon_*' where * = x,y,z
'unit_fa_*' and 'unit_mer_*' for zonal, field aligned, and meridional
directions. Note that vector components are expressed in the S/C basis. | Below is the instruction that describes the task:
### Input:
Add unit vectors expressing the ion drift coordinate system
organized by the geomagnetic field. Unit vectors are expressed
in S/C coordinates.
Internally, routine calls add_mag_drift_unit_vectors_ecef.
See function for input parameter description.
Requires the orientation of the S/C basis vectors in ECEF using naming,
'sc_xhat_x' where *hat (*=x,y,z) is the S/C basis vector and _* (*=x,y,z)
is the ECEF direction.
Parameters
----------
inst : pysat.Instrument object
Instrument object to be modified
max_steps : int
Maximum number of steps taken for field line integration
step_size : float
Maximum step size (km) allowed for field line tracer
Returns
-------
None
Modifies instrument object in place. Adds 'unit_zon_*' where * = x,y,z
'unit_fa_*' and 'unit_mer_*' for zonal, field aligned, and meridional
directions. Note that vector components are expressed in the S/C basis.
### Response:
def add_mag_drift_unit_vectors(inst, max_steps=40000, step_size=10.):
"""Add unit vectors expressing the ion drift coordinate system
organized by the geomagnetic field. Unit vectors are expressed
in S/C coordinates.
Internally, routine calls add_mag_drift_unit_vectors_ecef.
See function for input parameter description.
Requires the orientation of the S/C basis vectors in ECEF using naming,
'sc_xhat_x' where *hat (*=x,y,z) is the S/C basis vector and _* (*=x,y,z)
is the ECEF direction.
Parameters
----------
inst : pysat.Instrument object
Instrument object to be modified
max_steps : int
Maximum number of steps taken for field line integration
step_size : float
Maximum step size (km) allowed for field line tracer
Returns
-------
None
Modifies instrument object in place. Adds 'unit_zon_*' where * = x,y,z
'unit_fa_*' and 'unit_mer_*' for zonal, field aligned, and meridional
directions. Note that vector components are expressed in the S/C basis.
"""
# vectors are returned in geo/ecef coordinate system
add_mag_drift_unit_vectors_ecef(inst, max_steps=max_steps, step_size=step_size)
# convert them to S/C using transformation supplied by OA
inst['unit_zon_x'], inst['unit_zon_y'], inst['unit_zon_z'] = project_ecef_vector_onto_basis(inst['unit_zon_ecef_x'], inst['unit_zon_ecef_y'], inst['unit_zon_ecef_z'],
inst['sc_xhat_x'], inst['sc_xhat_y'], inst['sc_xhat_z'],
inst['sc_yhat_x'], inst['sc_yhat_y'], inst['sc_yhat_z'],
inst['sc_zhat_x'], inst['sc_zhat_y'], inst['sc_zhat_z'])
inst['unit_fa_x'], inst['unit_fa_y'], inst['unit_fa_z'] = project_ecef_vector_onto_basis(inst['unit_fa_ecef_x'], inst['unit_fa_ecef_y'], inst['unit_fa_ecef_z'],
inst['sc_xhat_x'], inst['sc_xhat_y'], inst['sc_xhat_z'],
inst['sc_yhat_x'], inst['sc_yhat_y'], inst['sc_yhat_z'],
inst['sc_zhat_x'], inst['sc_zhat_y'], inst['sc_zhat_z'])
inst['unit_mer_x'], inst['unit_mer_y'], inst['unit_mer_z'] = project_ecef_vector_onto_basis(inst['unit_mer_ecef_x'], inst['unit_mer_ecef_y'], inst['unit_mer_ecef_z'],
inst['sc_xhat_x'], inst['sc_xhat_y'], inst['sc_xhat_z'],
inst['sc_yhat_x'], inst['sc_yhat_y'], inst['sc_yhat_z'],
inst['sc_zhat_x'], inst['sc_zhat_y'], inst['sc_zhat_z'])
inst.meta['unit_zon_x'] = { 'long_name':'Zonal direction along IVM-x',
'desc': 'Unit vector for the zonal geomagnetic direction.',
'label': 'Zonal Unit Vector: IVM-X component',
'axis': 'Zonal Unit Vector: IVM-X component',
'notes': ('Positive towards the east. Zonal vector is normal to magnetic meridian plane. '
'The unit vector is expressed in the IVM coordinate system, x - along RAM, '
'z - towards nadir, y - completes the system, generally southward. '
'Calculated using the corresponding unit vector in ECEF and the orientation '
'of the IVM also expressed in ECEF (sc_*hat_*).'),
'scale': 'linear',
'units': '',
'value_min':-1.,
'value_max':1}
inst.meta['unit_zon_y'] = {'long_name':'Zonal direction along IVM-y',
'desc': 'Unit vector for the zonal geomagnetic direction.',
'label': 'Zonal Unit Vector: IVM-Y component',
'axis': 'Zonal Unit Vector: IVM-Y component',
'notes': ('Positive towards the east. Zonal vector is normal to magnetic meridian plane. '
'The unit vector is expressed in the IVM coordinate system, x - along RAM, '
'z - towards nadir, y - completes the system, generally southward. '
'Calculated using the corresponding unit vector in ECEF and the orientation '
'of the IVM also expressed in ECEF (sc_*hat_*).'),
'scale': 'linear',
'units': '',
'value_min':-1.,
'value_max':1}
inst.meta['unit_zon_z'] = {'long_name':'Zonal direction along IVM-z',
'desc': 'Unit vector for the zonal geomagnetic direction.',
'label': 'Zonal Unit Vector: IVM-Z component',
'axis': 'Zonal Unit Vector: IVM-Z component',
'notes': ('Positive towards the east. Zonal vector is normal to magnetic meridian plane. '
'The unit vector is expressed in the IVM coordinate system, x - along RAM, '
'z - towards nadir, y - completes the system, generally southward. '
'Calculated using the corresponding unit vector in ECEF and the orientation '
'of the IVM also expressed in ECEF (sc_*hat_*).'),
'scale': 'linear',
'units': '',
'value_min':-1.,
'value_max':1}
inst.meta['unit_fa_x'] = {'long_name':'Field-aligned direction along IVM-x',
'desc': 'Unit vector for the geomagnetic field line direction.',
'label': 'Field Aligned Unit Vector: IVM-X component',
'axis': 'Field Aligned Unit Vector: IVM-X component',
'notes': ('Positive along the field, generally northward. Unit vector is along the geomagnetic field. '
'The unit vector is expressed in the IVM coordinate system, x - along RAM, '
'z - towards nadir, y - completes the system, generally southward. '
'Calculated using the corresponding unit vector in ECEF and the orientation '
'of the IVM also expressed in ECEF (sc_*hat_*).'),
'scale': 'linear',
'units': '',
'value_min':-1.,
'value_max':1}
inst.meta['unit_fa_y'] = {'long_name':'Field-aligned direction along IVM-y',
'desc': 'Unit vector for the geomagnetic field line direction.',
'label': 'Field Aligned Unit Vector: IVM-Y component',
'axis': 'Field Aligned Unit Vector: IVM-Y component',
'notes': ('Positive along the field, generally northward. Unit vector is along the geomagnetic field. '
'The unit vector is expressed in the IVM coordinate system, x - along RAM, '
'z - towards nadir, y - completes the system, generally southward. '
'Calculated using the corresponding unit vector in ECEF and the orientation '
'of the IVM also expressed in ECEF (sc_*hat_*).'),
'scale': 'linear',
'units': '',
'value_min':-1.,
'value_max':1}
inst.meta['unit_fa_z'] = {'long_name':'Field-aligned direction along IVM-z',
'desc': 'Unit vector for the geomagnetic field line direction.',
'label': 'Field Aligned Unit Vector: IVM-Z component',
'axis': 'Field Aligned Unit Vector: IVM-Z component',
'notes': ('Positive along the field, generally northward. Unit vector is along the geomagnetic field. '
'The unit vector is expressed in the IVM coordinate system, x - along RAM, '
'z - towards nadir, y - completes the system, generally southward. '
'Calculated using the corresponding unit vector in ECEF and the orientation '
'of the IVM also expressed in ECEF (sc_*hat_*).'),
'scale': 'linear',
'units': '',
'value_min':-1.,
'value_max':1}
inst.meta['unit_mer_x'] = {'long_name':'Meridional direction along IVM-x',
'desc': 'Unit vector for the geomagnetic meridional direction.',
'label': 'Meridional Unit Vector: IVM-X component',
'axis': 'Meridional Unit Vector: IVM-X component',
'notes': ('Positive is aligned with vertical at '
'geomagnetic equator. Unit vector is perpendicular to the geomagnetic field '
'and in the plane of the meridian.'
'The unit vector is expressed in the IVM coordinate system, x - along RAM, '
'z - towards nadir, y - completes the system, generally southward. '
'Calculated using the corresponding unit vector in ECEF and the orientation '
'of the IVM also expressed in ECEF (sc_*hat_*).'),
'scale': 'linear',
'units': '',
'value_min':-1.,
'value_max':1}
inst.meta['unit_mer_y'] = {'long_name':'Meridional direction along IVM-y',
'desc': 'Unit vector for the geomagnetic meridional direction.',
'label': 'Meridional Unit Vector: IVM-Y component',
'axis': 'Meridional Unit Vector: IVM-Y component',
'notes': ('Positive is aligned with vertical at '
'geomagnetic equator. Unit vector is perpendicular to the geomagnetic field '
'and in the plane of the meridian.'
'The unit vector is expressed in the IVM coordinate system, x - along RAM, '
'z - towards nadir, y - completes the system, generally southward. '
'Calculated using the corresponding unit vector in ECEF and the orientation '
'of the IVM also expressed in ECEF (sc_*hat_*).'),
'scale': 'linear',
'units': '',
'value_min':-1.,
'value_max':1}
inst.meta['unit_mer_z'] = {'long_name':'Meridional direction along IVM-z',
'desc': 'Unit vector for the geomagnetic meridional direction.',
'label': 'Meridional Unit Vector: IVM-Z component',
'axis': 'Meridional Unit Vector: IVM-Z component',
'notes': ('Positive is aligned with vertical at '
'geomagnetic equator. Unit vector is perpendicular to the geomagnetic field '
'and in the plane of the meridian.'
'The unit vector is expressed in the IVM coordinate system, x - along RAM, '
'z - towards nadir, y - completes the system, generally southward. '
'Calculated using the corresponding unit vector in ECEF and the orientation '
'of the IVM also expressed in ECEF (sc_*hat_*).'),
'scale': 'linear',
'units': '',
'value_min':-1.,
'value_max':1}
return |
def get_view(self, request, view_class, opts=None):
"""
Instantiates and returns the view class that will generate the
actual context for this plugin.
"""
kwargs = {}
if opts:
if not isinstance(opts, dict):
opts = opts.__dict__
else:
opts = {}
if not view_class in VALID_MIXIN_OPTIONS:
valid_options = view_class.__dict__.keys()
for cls in view_class.__bases__:
if cls != object:
valid_options += cls.__dict__.keys()
VALID_MIXIN_OPTIONS[view_class] = valid_options
for key in VALID_MIXIN_OPTIONS[view_class]:
if key in opts:
kwargs[key] = opts[key]
elif hasattr(self, key):
kwargs[key] = getattr(self, key)
view = view_class(**kwargs)
view.request = request
view.kwargs = {}
return view | Instantiates and returns the view class that will generate the
actual context for this plugin. | Below is the instruction that describes the task:
### Input:
Instantiates and returns the view class that will generate the
actual context for this plugin.
### Response:
def get_view(self, request, view_class, opts=None):
"""
Instantiates and returns the view class that will generate the
actual context for this plugin.
"""
kwargs = {}
if opts:
if not isinstance(opts, dict):
opts = opts.__dict__
else:
opts = {}
if not view_class in VALID_MIXIN_OPTIONS:
valid_options = view_class.__dict__.keys()
for cls in view_class.__bases__:
if cls != object:
valid_options += cls.__dict__.keys()
VALID_MIXIN_OPTIONS[view_class] = valid_options
for key in VALID_MIXIN_OPTIONS[view_class]:
if key in opts:
kwargs[key] = opts[key]
elif hasattr(self, key):
kwargs[key] = getattr(self, key)
view = view_class(**kwargs)
view.request = request
view.kwargs = {}
return view |
def setpassword(self, password):
"""Sets the password to use when extracting.
"""
self._password = password
if self._file_parser:
if self._file_parser.has_header_encryption():
self._file_parser = None
if not self._file_parser:
self._parse()
else:
self._file_parser.setpassword(self._password) | Sets the password to use when extracting. | Below is the instruction that describes the task:
### Input:
Sets the password to use when extracting.
### Response:
def setpassword(self, password):
"""Sets the password to use when extracting.
"""
self._password = password
if self._file_parser:
if self._file_parser.has_header_encryption():
self._file_parser = None
if not self._file_parser:
self._parse()
else:
self._file_parser.setpassword(self._password) |
def _set_get_nameserver_detail(self, v, load=False):
"""
Setter method for get_nameserver_detail, mapped from YANG variable /brocade_nameserver_rpc/get_nameserver_detail (rpc)
If this variable is read-only (config: false) in the
source YANG file, then _set_get_nameserver_detail is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_get_nameserver_detail() directly.
YANG Description: A function to display the detailed information of
the devices stored in the Name Server database.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=get_nameserver_detail.get_nameserver_detail, is_leaf=True, yang_name="get-nameserver-detail", rest_name="get-nameserver-detail", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'info': u'display detail device information.', u'hidden': u'rpccmd', u'actionpoint': u'show_ns_detail'}}, namespace='urn:brocade.com:mgmt:brocade-nameserver', defining_module='brocade-nameserver', yang_type='rpc', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """get_nameserver_detail must be of a type compatible with rpc""",
'defined-type': "rpc",
'generated-type': """YANGDynClass(base=get_nameserver_detail.get_nameserver_detail, is_leaf=True, yang_name="get-nameserver-detail", rest_name="get-nameserver-detail", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'info': u'display detail device information.', u'hidden': u'rpccmd', u'actionpoint': u'show_ns_detail'}}, namespace='urn:brocade.com:mgmt:brocade-nameserver', defining_module='brocade-nameserver', yang_type='rpc', is_config=True)""",
})
self.__get_nameserver_detail = t
if hasattr(self, '_set'):
self._set() | Setter method for get_nameserver_detail, mapped from YANG variable /brocade_nameserver_rpc/get_nameserver_detail (rpc)
If this variable is read-only (config: false) in the
source YANG file, then _set_get_nameserver_detail is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_get_nameserver_detail() directly.
YANG Description: A function to display the detailed information of
the devices stored in the Name Server database. | Below is the instruction that describes the task:
### Input:
Setter method for get_nameserver_detail, mapped from YANG variable /brocade_nameserver_rpc/get_nameserver_detail (rpc)
If this variable is read-only (config: false) in the
source YANG file, then _set_get_nameserver_detail is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_get_nameserver_detail() directly.
YANG Description: A function to display the detailed information of
the devices stored in the Name Server database.
### Response:
def _set_get_nameserver_detail(self, v, load=False):
"""
Setter method for get_nameserver_detail, mapped from YANG variable /brocade_nameserver_rpc/get_nameserver_detail (rpc)
If this variable is read-only (config: false) in the
source YANG file, then _set_get_nameserver_detail is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_get_nameserver_detail() directly.
YANG Description: A function to display the detailed information of
the devices stored in the Name Server database.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=get_nameserver_detail.get_nameserver_detail, is_leaf=True, yang_name="get-nameserver-detail", rest_name="get-nameserver-detail", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'info': u'display detail device information.', u'hidden': u'rpccmd', u'actionpoint': u'show_ns_detail'}}, namespace='urn:brocade.com:mgmt:brocade-nameserver', defining_module='brocade-nameserver', yang_type='rpc', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """get_nameserver_detail must be of a type compatible with rpc""",
'defined-type': "rpc",
'generated-type': """YANGDynClass(base=get_nameserver_detail.get_nameserver_detail, is_leaf=True, yang_name="get-nameserver-detail", rest_name="get-nameserver-detail", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'info': u'display detail device information.', u'hidden': u'rpccmd', u'actionpoint': u'show_ns_detail'}}, namespace='urn:brocade.com:mgmt:brocade-nameserver', defining_module='brocade-nameserver', yang_type='rpc', is_config=True)""",
})
self.__get_nameserver_detail = t
if hasattr(self, '_set'):
self._set() |
def integration(temporalcommunities, staticcommunities):
"""
Calculates the integration coefficient for each node. Measures the average probability
that a node is in the same community as nodes from other systems.
Parameters:
------------
temporalcommunities : array
temporal communities vector (node,time)
staticcommunities : array
Static communities vector for each node
Returns:
-------
Icoeff : array
integration coefficient for each node
References:
----------
Danielle S. Bassett, Muzhi Yang, Nicholas F. Wymbs, Scott T. Grafton.
Learning-Induced Autonomy of Sensorimotor Systems. Nat Neurosci. 2015 May;18(5):744-51.
Marcelo Mattar, Michael W. Cole, Sharon Thompson-Schill, Danielle S. Bassett.
A Functional Cartography of Cognitive Systems. PLoS Comput Biol. 2015 Dec
2;11(12):e1004533.
"""
# make sure the static and temporal communities have the same number of nodes
if staticcommunities.shape[0] != temporalcommunities.shape[0]:
raise ValueError(
'Temporal and static communities have different dimensions')
alleg = allegiance(temporalcommunities)
Icoeff = np.zeros(len(staticcommunities))
# calc integration for each node
for i, statcom in enumerate(staticcommunities):
Icoeff[i] = np.mean(alleg[i, staticcommunities != statcom])
return Icoeff | Calculates the integration coefficient for each node. Measures the average probability
that a node is in the same community as nodes from other systems.
Parameters:
------------
temporalcommunities : array
temporal communities vector (node,time)
staticcommunities : array
Static communities vector for each node
Returns:
-------
Icoeff : array
integration coefficient for each node
References:
----------
Danielle S. Bassett, Muzhi Yang, Nicholas F. Wymbs, Scott T. Grafton.
Learning-Induced Autonomy of Sensorimotor Systems. Nat Neurosci. 2015 May;18(5):744-51.
Marcelo Mattar, Michael W. Cole, Sharon Thompson-Schill, Danielle S. Bassett.
A Functional Cartography of Cognitive Systems. PLoS Comput Biol. 2015 Dec
2;11(12):e1004533. | Below is the instruction that describes the task:
### Input:
Calculates the integration coefficient for each node. Measures the average probability
that a node is in the same community as nodes from other systems.
Parameters:
------------
temporalcommunities : array
temporal communities vector (node,time)
staticcommunities : array
Static communities vector for each node
Returns:
-------
Icoeff : array
integration coefficient for each node
References:
----------
Danielle S. Bassett, Muzhi Yang, Nicholas F. Wymbs, Scott T. Grafton.
Learning-Induced Autonomy of Sensorimotor Systems. Nat Neurosci. 2015 May;18(5):744-51.
Marcelo Mattar, Michael W. Cole, Sharon Thompson-Schill, Danielle S. Bassett.
A Functional Cartography of Cognitive Systems. PLoS Comput Biol. 2015 Dec
2;11(12):e1004533.
### Response:
def integration(temporalcommunities, staticcommunities):
"""
Calculates the integration coefficient for each node. Measures the average probability
that a node is in the same community as nodes from other systems.
Parameters:
------------
temporalcommunities : array
temporal communities vector (node,time)
staticcommunities : array
Static communities vector for each node
Returns:
-------
Icoeff : array
integration coefficient for each node
References:
----------
Danielle S. Bassett, Muzhi Yang, Nicholas F. Wymbs, Scott T. Grafton.
Learning-Induced Autonomy of Sensorimotor Systems. Nat Neurosci. 2015 May;18(5):744-51.
Marcelo Mattar, Michael W. Cole, Sharon Thompson-Schill, Danielle S. Bassett.
A Functional Cartography of Cognitive Systems. PLoS Comput Biol. 2015 Dec
2;11(12):e1004533.
"""
# make sure the static and temporal communities have the same number of nodes
if staticcommunities.shape[0] != temporalcommunities.shape[0]:
raise ValueError(
'Temporal and static communities have different dimensions')
alleg = allegiance(temporalcommunities)
Icoeff = np.zeros(len(staticcommunities))
# calc integration for each node
for i, statcom in enumerate(staticcommunities):
Icoeff[i] = np.mean(alleg[i, staticcommunities != statcom])
return Icoeff |
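A tiny worked example makes the quantity concrete. `allegiance` (not shown here) is assumed to return the node-by-node matrix P[i, j] = fraction of time points at which nodes i and j share a community; the integration coefficient of node i is then the mean of P[i, j] over nodes j belonging to other static systems:

```python
import numpy as np

def allegiance_matrix(temporalcommunities):
    # P[i, j] = fraction of time points at which i and j are co-assigned.
    same = temporalcommunities[:, None, :] == temporalcommunities[None, :, :]
    return same.mean(axis=2)

tc = np.array([[0, 0, 1],     # community of node 0 at t = 0, 1, 2
               [0, 1, 1],     # node 1
               [1, 1, 0]])    # node 2
static = np.array([0, 0, 1])  # nodes 0-1 in system 0, node 2 in system 1
alleg = allegiance_matrix(tc)
icoeff = np.array([alleg[i, static != s].mean()
                   for i, s in enumerate(static)])
print(icoeff)  # [0.         0.33333333 0.16666667]
```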
def _set_wait_for_bgp(self, v, load=False):
"""
Setter method for wait_for_bgp, mapped from YANG variable /routing_system/router/isis/router_isis_cmds_holder/router_isis_attributes/set_overload_bit/on_startup/wait_for_bgp (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_wait_for_bgp is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_wait_for_bgp() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=wait_for_bgp.wait_for_bgp, is_container='container', presence=True, yang_name="wait-for-bgp", rest_name="wait-for-bgp", parent=self, choice=(u'ch-on-startup', u'ca-on-startup-wfbgp'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='urn:brocade.com:mgmt:brocade-isis', defining_module='brocade-isis', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """wait_for_bgp must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=wait_for_bgp.wait_for_bgp, is_container='container', presence=True, yang_name="wait-for-bgp", rest_name="wait-for-bgp", parent=self, choice=(u'ch-on-startup', u'ca-on-startup-wfbgp'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='urn:brocade.com:mgmt:brocade-isis', defining_module='brocade-isis', yang_type='container', is_config=True)""",
})
self.__wait_for_bgp = t
if hasattr(self, '_set'):
self._set() | Setter method for wait_for_bgp, mapped from YANG variable /routing_system/router/isis/router_isis_cmds_holder/router_isis_attributes/set_overload_bit/on_startup/wait_for_bgp (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_wait_for_bgp is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_wait_for_bgp() directly. | Below is the instruction that describes the task:
### Input:
Setter method for wait_for_bgp, mapped from YANG variable /routing_system/router/isis/router_isis_cmds_holder/router_isis_attributes/set_overload_bit/on_startup/wait_for_bgp (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_wait_for_bgp is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_wait_for_bgp() directly.
### Response:
def _set_wait_for_bgp(self, v, load=False):
"""
Setter method for wait_for_bgp, mapped from YANG variable /routing_system/router/isis/router_isis_cmds_holder/router_isis_attributes/set_overload_bit/on_startup/wait_for_bgp (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_wait_for_bgp is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_wait_for_bgp() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=wait_for_bgp.wait_for_bgp, is_container='container', presence=True, yang_name="wait-for-bgp", rest_name="wait-for-bgp", parent=self, choice=(u'ch-on-startup', u'ca-on-startup-wfbgp'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='urn:brocade.com:mgmt:brocade-isis', defining_module='brocade-isis', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """wait_for_bgp must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=wait_for_bgp.wait_for_bgp, is_container='container', presence=True, yang_name="wait-for-bgp", rest_name="wait-for-bgp", parent=self, choice=(u'ch-on-startup', u'ca-on-startup-wfbgp'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='urn:brocade.com:mgmt:brocade-isis', defining_module='brocade-isis', yang_type='container', is_config=True)""",
})
self.__wait_for_bgp = t
if hasattr(self, '_set'):
self._set() |
def verify_false(self, expr, msg=None):
"""
Soft assert for whether the condition is false
:params expr: the statement to evaluate
:params msg: (Optional) msg explaining the difference
"""
try:
self.assert_false(expr, msg)
except AssertionError, e:
if msg:
m = "%s:\n%s" % (msg, str(e))
else:
m = str(e)
self.verification_erorrs.append(m) | Soft assert for whether the condition is false
:params expr: the statement to evaluate
:params msg: (Optional) msg explaining the difference | Below is the instruction that describes the task:
### Input:
Soft assert for whether the condition is false
:params expr: the statement to evaluate
:params msg: (Optional) msg explaining the difference
### Response:
def verify_false(self, expr, msg=None):
"""
Soft assert for whether the condition is false
:params expr: the statement to evaluate
:params msg: (Optional) msg explaining the difference
"""
try:
self.assert_false(expr, msg)
except AssertionError, e:
if msg:
m = "%s:\n%s" % (msg, str(e))
else:
m = str(e)
self.verification_erorrs.append(m) |
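The soft-assert pattern above records failures instead of aborting the test, so a single run can report every violated expectation. The snippet is Python 2 (`except AssertionError, e`); an equivalent Python 3 sketch, with a hypothetical `verification_errors` list standing in for the class attribute:

```python
class SoftAsserts:
    def __init__(self):
        self.verification_errors = []   # failures accumulate here

    def verify_false(self, expr, msg=None):
        try:
            assert not expr, msg or "expected a false expression"
        except AssertionError as e:
            self.verification_errors.append(
                "%s:\n%s" % (msg, e) if msg else str(e))

checks = SoftAsserts()
checks.verify_false(1 + 1 == 2, "should not hold")  # recorded, not raised
checks.verify_false(1 + 1 == 3)                     # passes silently
print(checks.verification_errors)                   # one recorded failure
```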
def serialise(self):
"""Creates standard market book json response,
will error if EX_MARKET_DEF not incl.
"""
return {
'marketId': self.market_id,
'totalAvailable': None,
'isMarketDataDelayed': None,
'lastMatchTime': None,
'betDelay': self.market_definition.get('betDelay'),
'version': self.market_definition.get('version'),
'complete': self.market_definition.get('complete'),
'runnersVoidable': self.market_definition.get('runnersVoidable'),
'totalMatched': self.total_matched,
'status': self.market_definition.get('status'),
'bspReconciled': self.market_definition.get('bspReconciled'),
'crossMatching': self.market_definition.get('crossMatching'),
'inplay': self.market_definition.get('inPlay'),
'numberOfWinners': self.market_definition.get('numberOfWinners'),
'numberOfRunners': len(self.market_definition.get('runners')),
'numberOfActiveRunners': self.market_definition.get('numberOfActiveRunners'),
'runners': [
runner.serialise(
self.market_definition_runner_dict[(runner.selection_id, runner.handicap)]
) for runner in self.runners
],
'publishTime': self.publish_time,
'priceLadderDefinition': self.market_definition.get('priceLadderDefinition'),
'keyLineDescription': self.market_definition.get('keyLineDefinition'),
'marketDefinition': self.market_definition, # used in lightweight
} | Creates standard market book json response,
will error if EX_MARKET_DEF not incl. | Below is the instruction that describes the task:
### Input:
Creates standard market book json response,
will error if EX_MARKET_DEF not incl.
### Response:
def serialise(self):
"""Creates standard market book json response,
will error if EX_MARKET_DEF not incl.
"""
return {
'marketId': self.market_id,
'totalAvailable': None,
'isMarketDataDelayed': None,
'lastMatchTime': None,
'betDelay': self.market_definition.get('betDelay'),
'version': self.market_definition.get('version'),
'complete': self.market_definition.get('complete'),
'runnersVoidable': self.market_definition.get('runnersVoidable'),
'totalMatched': self.total_matched,
'status': self.market_definition.get('status'),
'bspReconciled': self.market_definition.get('bspReconciled'),
'crossMatching': self.market_definition.get('crossMatching'),
'inplay': self.market_definition.get('inPlay'),
'numberOfWinners': self.market_definition.get('numberOfWinners'),
'numberOfRunners': len(self.market_definition.get('runners')),
'numberOfActiveRunners': self.market_definition.get('numberOfActiveRunners'),
'runners': [
runner.serialise(
self.market_definition_runner_dict[(runner.selection_id, runner.handicap)]
) for runner in self.runners
],
'publishTime': self.publish_time,
'priceLadderDefinition': self.market_definition.get('priceLadderDefinition'),
'keyLineDescription': self.market_definition.get('keyLineDefinition'),
'marketDefinition': self.market_definition, # used in lightweight
} |
def k8s_ports_to_metadata_ports(k8s_ports):
"""
:param k8s_ports: list of V1ServicePort
:return: list of str, list of exposed ports, example:
- ['1234/tcp', '8080/udp']
"""
ports = []
for k8s_port in k8s_ports:
if k8s_port.protocol is not None:
ports.append("%s/%s" % (k8s_port.port, k8s_port.protocol.lower()))
else:
ports.append(str(k8s_port.port))
return ports | :param k8s_ports: list of V1ServicePort
:return: list of str, list of exposed ports, example:
- ['1234/tcp', '8080/udp'] | Below is the instruction that describes the task:
### Input:
:param k8s_ports: list of V1ServicePort
:return: list of str, list of exposed ports, example:
- ['1234/tcp', '8080/udp']
### Response:
def k8s_ports_to_metadata_ports(k8s_ports):
"""
:param k8s_ports: list of V1ServicePort
:return: list of str, list of exposed ports, example:
- ['1234/tcp', '8080/udp']
"""
ports = []
for k8s_port in k8s_ports:
if k8s_port.protocol is not None:
ports.append("%s/%s" % (k8s_port.port, k8s_port.protocol.lower()))
else:
ports.append(str(k8s_port.port))
return ports |
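A hedged usage sketch: the real input is a list of `kubernetes.client.V1ServicePort` objects, stubbed here with a namedtuple exposing just the two attributes the function reads:

```python
from collections import namedtuple

Port = namedtuple("Port", ["port", "protocol"])  # stand-in for V1ServicePort

def k8s_ports_to_metadata_ports(k8s_ports):
    ports = []
    for p in k8s_ports:
        if p.protocol is not None:
            ports.append("%s/%s" % (p.port, p.protocol.lower()))
        else:
            ports.append(str(p.port))
    return ports

print(k8s_ports_to_metadata_ports(
    [Port(1234, "TCP"), Port(8080, "UDP"), Port(9090, None)]))
# ['1234/tcp', '8080/udp', '9090']
```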
def compare(self,
query_classes: Set,
reference_classes: Set,
method: Optional) -> SimResult:
"""
Given two lists of entities (classes, individuals)
return their similarity
"""
raise NotImplementedError | Given two lists of entities (classes, individuals)
return their similarity | Below is the instruction that describes the task:
### Input:
Given two lists of entities (classes, individuals)
return their similarity
### Response:
def compare(self,
query_classes: Set,
reference_classes: Set,
method: Optional) -> SimResult:
"""
Given two lists of entities (classes, individuals)
return their similarity
"""
raise NotImplementedError |
def _nice_fieldnames(all_columns, index_columns):
"Indexes on the left, other fields in alphabetical order on the right."
non_index_columns = set(all_columns).difference(index_columns)
return index_columns + sorted(non_index_columns) | Indexes on the left, other fields in alphabetical order on the right. | Below is the instruction that describes the task:
### Input:
Indexes on the left, other fields in alphabetical order on the right.
### Response:
def _nice_fieldnames(all_columns, index_columns):
"Indexes on the left, other fields in alphabetical order on the right."
non_index_columns = set(all_columns).difference(index_columns)
return index_columns + sorted(non_index_columns) |
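The ordering rule is easiest to see on a concrete call (hypothetical column names):

```python
def nice_fieldnames(all_columns, index_columns):
    non_index_columns = set(all_columns).difference(index_columns)
    return index_columns + sorted(non_index_columns)

print(nice_fieldnames(["name", "id", "email", "age"], ["id"]))
# ['id', 'age', 'email', 'name'] - index first, the rest alphabetical
```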
def execstr_funckw(func):
"""
for doctests kwargs
SeeAlso:
ut.exec_func_src
ut.argparse_funckw
"""
import utool as ut
funckw = ut.get_func_kwargs(func)
return ut.execstr_dict(funckw, explicit=True) | for doctests kwargs
SeeAlso:
ut.exec_func_src
ut.argparse_funckw | Below is the instruction that describes the task:
### Input:
for doctests kwargs
SeeAlso:
ut.exec_func_src
ut.argparse_funckw
### Response:
def execstr_funckw(func):
"""
for doctests kwargs
SeeAlso:
ut.exec_func_src
ut.argparse_funckw
"""
import utool as ut
funckw = ut.get_func_kwargs(func)
return ut.execstr_dict(funckw, explicit=True) |
def parse_dbus_header(header):
"""Parse a D-BUS header. Return the message size."""
if six.indexbytes(header, 0) == ord('l'):
endian = '<'
elif six.indexbytes(header, 0) == ord('B'):
endian = '>'
else:
raise ValueError('illegal endianness')
if not 1 <= six.indexbytes(header, 1) <= 4:
raise ValueError('illegal message type')
if struct.unpack(endian + 'I', header[8:12])[0] == 0:
raise ValueError('illegal serial number')
harrlen = struct.unpack(endian + 'I', header[12:16])[0]
padlen = (8 - harrlen) % 8
bodylen = struct.unpack(endian + 'I', header[4:8])[0]
return 16 + harrlen + padlen + bodylen | Parse a D-BUS header. Return the message size. | Below is the instruction that describes the task:
### Input:
Parse a D-BUS header. Return the message size.
### Response:
def parse_dbus_header(header):
"""Parse a D-BUS header. Return the message size."""
if six.indexbytes(header, 0) == ord('l'):
endian = '<'
elif six.indexbytes(header, 0) == ord('B'):
endian = '>'
else:
raise ValueError('illegal endianness')
if not 1 <= six.indexbytes(header, 1) <= 4:
raise ValueError('illegal message type')
if struct.unpack(endian + 'I', header[8:12])[0] == 0:
raise ValueError('illegal serial number')
harrlen = struct.unpack(endian + 'I', header[12:16])[0]
padlen = (8 - harrlen) % 8
bodylen = struct.unpack(endian + 'I', header[4:8])[0]
return 16 + harrlen + padlen + bodylen |
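The size arithmetic is: 16 fixed header bytes, plus the header-field array, plus padding that rounds the array up to the next 8-byte boundary, plus the body. A worked example that packs a hypothetical little-endian header and redoes the computation inline:

```python
import struct

# Little-endian header: message type 1, body length 10, serial 7,
# header-field array length 5 (which pads up to 8).
header = (b"l" + bytes([1, 0, 1])
          + struct.pack("<I", 10)    # body length,  bytes 4:8
          + struct.pack("<I", 7)     # serial,       bytes 8:12
          + struct.pack("<I", 5))    # array length, bytes 12:16
harrlen = struct.unpack("<I", header[12:16])[0]  # 5
padlen = (8 - harrlen) % 8                       # 3
bodylen = struct.unpack("<I", header[4:8])[0]    # 10
print(16 + harrlen + padlen + bodylen)           # 34
```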
def from_raw(self, rule_ids, outputs, raw_rules):
"""
A helper function that converts the results returned from C function
:param rule_ids:
:param outputs:
:param raw_rules:
:return:
"""
self._rule_pool = [([], [])] + raw_rules
self._rule_list = []
for i, idx in enumerate(rule_ids):
rule = Rule([Clause(f, c) for f, c in zip(*self._rule_pool[idx])], outputs[i])
self._rule_list.append(rule)
# self._rule_list.append(rule_str2rule(_rule_name, outputs[i]))
self._rule_ids = rule_ids
self._rule_outputs = outputs | A helper function that converts the results returned from C function
:param rule_ids:
:param outputs:
:param raw_rules:
:return: | Below is the instruction that describes the task:
### Input:
A helper function that converts the results returned from C function
:param rule_ids:
:param outputs:
:param raw_rules:
:return:
### Response:
def from_raw(self, rule_ids, outputs, raw_rules):
"""
A helper function that converts the results returned from C function
:param rule_ids:
:param outputs:
:param raw_rules:
:return:
"""
self._rule_pool = [([], [])] + raw_rules
self._rule_list = []
for i, idx in enumerate(rule_ids):
rule = Rule([Clause(f, c) for f, c in zip(*self._rule_pool[idx])], outputs[i])
self._rule_list.append(rule)
# self._rule_list.append(rule_str2rule(_rule_name, outputs[i]))
self._rule_ids = rule_ids
self._rule_outputs = outputs |
def is_ioinfo(obj, keys=None):
"""
:return: True if given 'obj' is a 'IOInfo' namedtuple object.
>>> assert not is_ioinfo(1)
>>> assert not is_ioinfo("aaa")
>>> assert not is_ioinfo({})
>>> assert not is_ioinfo(('a', 1, {}))
>>> inp = anyconfig.globals.IOInfo("/etc/hosts", "path", "/etc/hosts",
... None, open)
>>> assert is_ioinfo(inp)
"""
if keys is None:
keys = anyconfig.globals.IOI_KEYS
if isinstance(obj, tuple) and getattr(obj, "_asdict", False):
return all(k in obj._asdict() for k in keys)
return False | :return: True if given 'obj' is a 'IOInfo' namedtuple object.
>>> assert not is_ioinfo(1)
>>> assert not is_ioinfo("aaa")
>>> assert not is_ioinfo({})
>>> assert not is_ioinfo(('a', 1, {}))
>>> inp = anyconfig.globals.IOInfo("/etc/hosts", "path", "/etc/hosts",
... None, open)
>>> assert is_ioinfo(inp) | Below is the instruction that describes the task:
### Input:
:return: True if given 'obj' is a 'IOInfo' namedtuple object.
>>> assert not is_ioinfo(1)
>>> assert not is_ioinfo("aaa")
>>> assert not is_ioinfo({})
>>> assert not is_ioinfo(('a', 1, {}))
>>> inp = anyconfig.globals.IOInfo("/etc/hosts", "path", "/etc/hosts",
... None, open)
>>> assert is_ioinfo(inp)
### Response:
def is_ioinfo(obj, keys=None):
"""
:return: True if given 'obj' is a 'IOInfo' namedtuple object.
>>> assert not is_ioinfo(1)
>>> assert not is_ioinfo("aaa")
>>> assert not is_ioinfo({})
>>> assert not is_ioinfo(('a', 1, {}))
>>> inp = anyconfig.globals.IOInfo("/etc/hosts", "path", "/etc/hosts",
... None, open)
>>> assert is_ioinfo(inp)
"""
if keys is None:
keys = anyconfig.globals.IOI_KEYS
if isinstance(obj, tuple) and getattr(obj, "_asdict", False):
return all(k in obj._asdict() for k in keys)
return False |
def download(user, dl_type, name):
"""
Download user items of dl_type (i.e. all, playlists, liked, commented, etc.)
"""
username = user['username']
user_id = user['id']
logger.info(
'Retrieving all {0} of user {1}...'.format(name, username)
)
dl_url = url[dl_type].format(user_id)
logger.debug(dl_url)
resources = client.get_collection(dl_url, token)
del resources[:offset - 1]
logger.debug(resources)
total = len(resources)
logger.info('Retrieved {0} {1}'.format(total, name))
for counter, item in enumerate(resources, offset):
try:
logger.debug(item)
logger.info('{0} n°{1} of {2}'.format(
name.capitalize(), counter, total)
)
if dl_type == 'all':
item_name = item['type'].split('-')[0] # remove the '-repost'
uri = item[item_name]['uri']
parse_url(uri)
elif dl_type == 'playlists':
download_playlist(item)
elif dl_type == 'playlists-liked':
parse_url(item['playlist']['uri'])
elif dl_type == 'commented':
item = get_track_info(item['track_id'])
download_track(item)
else:
download_track(item)
except Exception as e:
logger.exception(e)
logger.info('Downloaded all {0} {1} of user {2}!'.format(
total, name, username)
) | Download user items of dl_type (ie. all, playlists, liked, commented, etc.) | Below is the instruction that describes the task:
### Input:
Download user items of dl_type (ie. all, playlists, liked, commented, etc.)
### Response:
def download(user, dl_type, name):
"""
Download user items of dl_type (ie. all, playlists, liked, commented, etc.)
"""
username = user['username']
user_id = user['id']
logger.info(
'Retrieving all {0} of user {1}...'.format(name, username)
)
dl_url = url[dl_type].format(user_id)
logger.debug(dl_url)
resources = client.get_collection(dl_url, token)
del resources[:offset - 1]
logger.debug(resources)
total = len(resources)
logger.info('Retrieved {0} {1}'.format(total, name))
for counter, item in enumerate(resources, offset):
try:
logger.debug(item)
logger.info('{0} n°{1} of {2}'.format(
name.capitalize(), counter, total)
)
if dl_type == 'all':
item_name = item['type'].split('-')[0] # remove the '-repost'
uri = item[item_name]['uri']
parse_url(uri)
elif dl_type == 'playlists':
download_playlist(item)
elif dl_type == 'playlists-liked':
parse_url(item['playlist']['uri'])
elif dl_type == 'commented':
item = get_track_info(item['track_id'])
download_track(item)
else:
download_track(item)
except Exception as e:
logger.exception(e)
logger.info('Downloaded all {0} {1} of user {2}!'.format(
total, name, username)
) |
def epub_zip(outdirect):
"""
Zips up the input file directory into an EPUB file.
"""
def recursive_zip(zipf, directory, folder=None):
if folder is None:
folder = ''
for item in os.listdir(directory):
if os.path.isfile(os.path.join(directory, item)):
zipf.write(os.path.join(directory, item),
os.path.join(directory, item))
elif os.path.isdir(os.path.join(directory, item)):
recursive_zip(zipf, os.path.join(directory, item),
os.path.join(folder, item))
log.info('Zipping up the directory {0}'.format(outdirect))
epub_filename = outdirect + '.epub'
epub = zipfile.ZipFile(epub_filename, 'w')
current_dir = os.getcwd()
os.chdir(outdirect)
epub.write('mimetype')
log.info('Recursively zipping META-INF and EPUB')
for item in os.listdir('.'):
if item == 'mimetype':
continue
recursive_zip(epub, item)
os.chdir(current_dir)
epub.close() | Zips up the input file directory into an EPUB file. | Below is the instruction that describes the task:
### Input:
Zips up the input file directory into an EPUB file.
### Response:
def epub_zip(outdirect):
"""
Zips up the input file directory into an EPUB file.
"""
def recursive_zip(zipf, directory, folder=None):
if folder is None:
folder = ''
for item in os.listdir(directory):
if os.path.isfile(os.path.join(directory, item)):
zipf.write(os.path.join(directory, item),
os.path.join(directory, item))
elif os.path.isdir(os.path.join(directory, item)):
recursive_zip(zipf, os.path.join(directory, item),
os.path.join(folder, item))
log.info('Zipping up the directory {0}'.format(outdirect))
epub_filename = outdirect + '.epub'
epub = zipfile.ZipFile(epub_filename, 'w')
current_dir = os.getcwd()
os.chdir(outdirect)
epub.write('mimetype')
log.info('Recursively zipping META-INF and EPUB')
for item in os.listdir('.'):
if item == 'mimetype':
continue
recursive_zip(epub, item)
os.chdir(current_dir)
epub.close() |
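A short usage sketch, assuming ``./book`` is an unpacked EPUB directory containing ``mimetype``, ``META-INF/`` and ``EPUB/``. Note that ``zipfile.ZipFile(..., 'w')`` defaults to ``ZIP_STORED``, so the ``mimetype`` entry written first lands uncompressed at the start of the archive, which is what the EPUB container spec expects.

# 'book' is a placeholder directory name for this sketch.
epub_zip('book')  # produces book.epub alongside the 'book' directory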
async def start(self, *args, **kwargs):
"""|coro|
A shorthand coroutine for :meth:`login` + :meth:`connect`.
"""
bot = kwargs.pop('bot', True)
reconnect = kwargs.pop('reconnect', True)
await self.login(*args, bot=bot)
await self.connect(reconnect=reconnect) | |coro|
A shorthand coroutine for :meth:`login` + :meth:`connect`. | Below is the instruction that describes the task:
### Input:
|coro|
A shorthand coroutine for :meth:`login` + :meth:`connect`.
### Response:
async def start(self, *args, **kwargs):
"""|coro|
A shorthand coroutine for :meth:`login` + :meth:`connect`.
"""
bot = kwargs.pop('bot', True)
reconnect = kwargs.pop('reconnect', True)
await self.login(*args, bot=bot)
await self.connect(reconnect=reconnect) |
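A hedged usage sketch; this looks like a discord.py-style client, but that attribution, the ``Client`` class and the ``'TOKEN'`` placeholder are all assumptions.

import asyncio

client = Client()  # assumed: the class that defines start()
asyncio.run(client.start('TOKEN', reconnect=True))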
def _apply_Create(self, change):
'''A record from change must be created.
:param change: a change object
:type change: octodns.record.Change
:type return: void
'''
ar = _AzureRecord(self._resource_group, change.new)
create = self._dns_client.record_sets.create_or_update
create(resource_group_name=ar.resource_group,
zone_name=ar.zone_name,
relative_record_set_name=ar.relative_record_set_name,
record_type=ar.record_type,
parameters=ar.params)
self.log.debug('* Success Create/Update: {}'.format(ar)) | A record from change must be created.
:param change: a change object
:type change: octodns.record.Change
:type return: void | Below is the instruction that describes the task:
### Input:
A record from change must be created.
:param change: a change object
:type change: octodns.record.Change
:type return: void
### Response:
def _apply_Create(self, change):
'''A record from change must be created.
:param change: a change object
:type change: octodns.record.Change
:type return: void
'''
ar = _AzureRecord(self._resource_group, change.new)
create = self._dns_client.record_sets.create_or_update
create(resource_group_name=ar.resource_group,
zone_name=ar.zone_name,
relative_record_set_name=ar.relative_record_set_name,
record_type=ar.record_type,
parameters=ar.params)
self.log.debug('* Success Create/Update: {}'.format(ar)) |
def modifyBits(inputVal, maxChanges):
""" Modifies up to maxChanges number of bits in the inputVal
"""
changes = np.random.random_integers(0, maxChanges, 1)[0]
if changes == 0:
return inputVal
inputWidth = len(inputVal)
whatToChange = np.random.random_integers(0, 41, changes)
runningIndex = -1
numModsDone = 0
for i in xrange(inputWidth):
if numModsDone >= changes:
break
if inputVal[i] == 1:
runningIndex += 1
if runningIndex in whatToChange:
if i != 0 and inputVal[i-1] == 0:
inputVal[i-1] = 1
inputVal[i] = 0
return inputVal | Modifies up to maxChanges number of bits in the inputVal | Below is the instruction that describes the task:
### Input:
Modifies up to maxChanges number of bits in the inputVal
### Response:
def modifyBits(inputVal, maxChanges):
""" Modifies up to maxChanges number of bits in the inputVal
"""
changes = np.random.random_integers(0, maxChanges, 1)[0]
if changes == 0:
return inputVal
inputWidth = len(inputVal)
whatToChange = np.random.random_integers(0, 41, changes)
runningIndex = -1
numModsDone = 0
for i in xrange(inputWidth):
if numModsDone >= changes:
break
if inputVal[i] == 1:
runningIndex += 1
if runningIndex in whatToChange:
if i != 0 and inputVal[i-1] == 0:
inputVal[i-1] = 1
inputVal[i] = 0
return inputVal |
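Two things stand out in the loop above: ``numModsDone`` is checked but never incremented, so the early ``break`` can never fire, and ``np.random.random_integers``/``xrange`` tie the code to old NumPy and Python 2. A hedged rewrite of the same idea with those points addressed; this is a sketch, not the library's actual code.

import numpy as np

def modify_bits_fixed(input_val, max_changes):
    # randint's upper bound is exclusive, so +1 mirrors random_integers
    changes = np.random.randint(0, max_changes + 1)
    if changes == 0:
        return input_val
    what_to_change = np.random.randint(0, 42, changes)
    running_index = -1
    num_mods_done = 0
    for i in range(len(input_val)):
        if num_mods_done >= changes:
            break
        if input_val[i] == 1:
            running_index += 1
            if running_index in what_to_change:
                if i != 0 and input_val[i - 1] == 0:
                    input_val[i - 1] = 1
                    input_val[i] = 0
                    num_mods_done += 1  # missing in the original
    return input_val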
def include(d, e):
"""Generate a pair of (directory, file-list) for installation.
'd' -- A directory
'e' -- A glob pattern"""
return (d, [f for f in glob.glob('%s/%s' % (d, e)) if os.path.isfile(f)]) | Generate a pair of (directory, file-list) for installation.
'd' -- A directory
'e' -- A glob pattern | Below is the instruction that describes the task:
### Input:
Generate a pair of (directory, file-list) for installation.
'd' -- A directory
'e' -- A glob pattern
### Response:
def include(d, e):
"""Generate a pair of (directory, file-list) for installation.
'd' -- A directory
'e' -- A glob pattern"""
return (d, [f for f in glob.glob('%s/%s' % (d, e)) if os.path.isfile(f)]) |
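Typical use is building a ``data_files`` list in a ``setup.py``; the package name and paths here are illustrative.

from setuptools import setup

setup(
    name='example-pkg',  # illustrative
    version='0.1',
    data_files=[
        include('docs', '*.md'),
        include('examples', '*.py'),
    ],
)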
def mark_for_update(self):
'''
Note that a change has been made so all Statuses need update
'''
self.pub_statuses.exclude(status=UNPUBLISHED).update(status=NEEDS_UPDATE)
push_key.delay(self) | Note that a change has been made so all Statuses need update | Below is the instruction that describes the task:
### Input:
Note that a change has been made so all Statuses need update
### Response:
def mark_for_update(self):
'''
Note that a change has been made so all Statuses need update
'''
self.pub_statuses.exclude(status=UNPUBLISHED).update(status=NEEDS_UPDATE)
push_key.delay(self) |
def result(self) -> workflow.IntervalGeneratorType:
"""
Generate intervals indicating the valid sentences.
"""
config = cast(SentenceSegementationConfig, self.config)
index = -1
labels = None
while True:
# 1. Find the start of the sentence.
start = -1
while True:
# Check the ``labels`` generated from step (2).
if labels is None:
# https://www.python.org/dev/peps/pep-0479/
try:
index, labels = next(self.index_labels_generator)
except StopIteration:
return
# Check if we found a valid sentence char.
if labels[SentenceValidCharacterLabeler]:
start = index
break
# Trigger next(...) action.
labels = None
index = -1
# 2. Find the ending.
end = -1
try:
while True:
index, labels = next(self.index_labels_generator)
# Detected invalid char.
if config.enable_strict_sentence_charset and \
not labels[SentenceValidCharacterLabeler] and \
not labels[WhitespaceLabeler]:
end = index
break
# Detected sentence ending.
if self._labels_indicate_sentence_ending(labels):
# Consume the ending span.
while True:
index, labels = next(self.index_labels_generator)
is_ending = (self._labels_indicate_sentence_ending(labels) or
(config.extend_ending_with_delimiters and
labels[DelimitersLabeler]))
if not is_ending:
end = index
break
# yeah we found the ending.
break
except StopIteration:
end = len(self.input_sequence)
# Trigger next(...) action.
labels = None
index = -1
yield start, end | Generate intervals indicating the valid sentences. | Below is the instruction that describes the task:
### Input:
Generate intervals indicating the valid sentences.
### Response:
def result(self) -> workflow.IntervalGeneratorType:
"""
Generate intervals indicating the valid sentences.
"""
config = cast(SentenceSegementationConfig, self.config)
index = -1
labels = None
while True:
# 1. Find the start of the sentence.
start = -1
while True:
# Check the ``labels`` generated from step (2).
if labels is None:
# https://www.python.org/dev/peps/pep-0479/
try:
index, labels = next(self.index_labels_generator)
except StopIteration:
return
# Check if we found a valid sentence char.
if labels[SentenceValidCharacterLabeler]:
start = index
break
# Trigger next(...) action.
labels = None
index = -1
# 2. Find the ending.
end = -1
try:
while True:
index, labels = next(self.index_labels_generator)
# Detected invalid char.
if config.enable_strict_sentence_charset and \
not labels[SentenceValidCharacterLabeler] and \
not labels[WhitespaceLabeler]:
end = index
break
# Detected sentence ending.
if self._labels_indicate_sentence_ending(labels):
# Consume the ending span.
while True:
index, labels = next(self.index_labels_generator)
is_ending = (self._labels_indicate_sentence_ending(labels) or
(config.extend_ending_with_delimiters and
labels[DelimitersLabeler]))
if not is_ending:
end = index
break
# yeah we found the ending.
break
except StopIteration:
end = len(self.input_sequence)
# Trigger next(...) action.
labels = None
index = -1
yield start, end |
def find_models(self, constructor, constraints=None, *, columns=None, order_by=None,
limiting=None, table_name=None):
"""Specialization of DataAccess.find_all that returns models instead of cursor objects."""
return self._find_models(
constructor, table_name or constructor.table_name, constraints, columns=columns,
order_by=order_by, limiting=limiting) | Specialization of DataAccess.find_all that returns models instead of cursor objects. | Below is the instruction that describes the task:
### Input:
Specialization of DataAccess.find_all that returns models instead of cursor objects.
### Response:
def find_models(self, constructor, constraints=None, *, columns=None, order_by=None,
limiting=None, table_name=None):
"""Specialization of DataAccess.find_all that returns models instead of cursor objects."""
return self._find_models(
constructor, table_name or constructor.table_name, constraints, columns=columns,
order_by=order_by, limiting=limiting) |
def format_currency(value, decimals=2):
"""
Return a number suitably formatted for display as currency, with
thousands separated by commas and up to two decimal points.
>>> format_currency(1000)
'1,000'
>>> format_currency(100)
'100'
>>> format_currency(999.95)
'999.95'
>>> format_currency(99.95)
'99.95'
>>> format_currency(100000)
'100,000'
>>> format_currency(1000.00)
'1,000'
>>> format_currency(1000.41)
'1,000.41'
>>> format_currency(23.21, decimals=3)
'23.210'
>>> format_currency(1000, decimals=3)
'1,000'
>>> format_currency(123456789.123456789)
'123,456,789.12'
"""
number, decimal = ((u'%%.%df' % decimals) % value).split(u'.')
parts = []
while len(number) > 3:
part, number = number[-3:], number[:-3]
parts.append(part)
parts.append(number)
parts.reverse()
if int(decimal) == 0:
return u','.join(parts)
else:
return u','.join(parts) + u'.' + decimal | Return a number suitably formatted for display as currency, with
thousands separated by commas and up to two decimal points.
>>> format_currency(1000)
'1,000'
>>> format_currency(100)
'100'
>>> format_currency(999.95)
'999.95'
>>> format_currency(99.95)
'99.95'
>>> format_currency(100000)
'100,000'
>>> format_currency(1000.00)
'1,000'
>>> format_currency(1000.41)
'1,000.41'
>>> format_currency(23.21, decimals=3)
'23.210'
>>> format_currency(1000, decimals=3)
'1,000'
>>> format_currency(123456789.123456789)
'123,456,789.12' | Below is the instruction that describes the task:
### Input:
Return a number suitably formatted for display as currency, with
thousands separated by commas and up to two decimal points.
>>> format_currency(1000)
'1,000'
>>> format_currency(100)
'100'
>>> format_currency(999.95)
'999.95'
>>> format_currency(99.95)
'99.95'
>>> format_currency(100000)
'100,000'
>>> format_currency(1000.00)
'1,000'
>>> format_currency(1000.41)
'1,000.41'
>>> format_currency(23.21, decimals=3)
'23.210'
>>> format_currency(1000, decimals=3)
'1,000'
>>> format_currency(123456789.123456789)
'123,456,789.12'
### Response:
def format_currency(value, decimals=2):
"""
Return a number suitably formatted for display as currency, with
thousands separated by commas and up to two decimal points.
>>> format_currency(1000)
'1,000'
>>> format_currency(100)
'100'
>>> format_currency(999.95)
'999.95'
>>> format_currency(99.95)
'99.95'
>>> format_currency(100000)
'100,000'
>>> format_currency(1000.00)
'1,000'
>>> format_currency(1000.41)
'1,000.41'
>>> format_currency(23.21, decimals=3)
'23.210'
>>> format_currency(1000, decimals=3)
'1,000'
>>> format_currency(123456789.123456789)
'123,456,789.12'
"""
number, decimal = ((u'%%.%df' % decimals) % value).split(u'.')
parts = []
while len(number) > 3:
part, number = number[-3:], number[:-3]
parts.append(part)
parts.append(number)
parts.reverse()
if int(decimal) == 0:
return u','.join(parts)
else:
return u','.join(parts) + u'.' + decimal |
def get_distributions(catalog, filter_in=None, filter_out=None,
meta_field=None, exclude_meta_fields=None,
only_time_series=False):
"""Devuelve lista de distribuciones del catálogo o de uno de sus metadatos.
Args:
catalog (dict, str or DataJson): Representación externa/interna de un
catálogo. Una representación _externa_ es un path local o una
URL remota a un archivo con la metadata de un catálogo, en
formato JSON o XLSX. La representación _interna_ de un catálogo
es un diccionario. Ejemplos: http://datos.gob.ar/data.json,
http://www.ign.gob.ar/descargas/geodatos/catalog.xlsx,
"/energia/catalog.xlsx".
filter_in (dict): Devuelve los distribuciones cuyos atributos
coinciden con los pasados en este diccionario. Ejemplo::
{
"dataset": {
"publisher": {"name": "Ministerio de Ambiente"}
}
}
Sólo se devolverán los distribuciones que pertenezcan a un dataset
de ese publisher_name.
filter_out (dict): Devuelve los distribuciones cuyos atributos no
coinciden con los pasados en este diccionario. Ejemplo::
{
"dataset": {
"publisher": {"name": "Ministerio de Ambiente"}
}
}
Sólo se devolverán los distribuciones que no pertenezcan a un
dataset de ese publisher_name.
meta_field (str): Nombre de un metadato de Distribution. En lugar de
devolver los objetos completos Distribution, devuelve una lista de
valores para ese metadato presentes en el catálogo.
exclude_meta_fields (list): Metadatos de Distribution que se quieren
excluir de los objetos Distribution devueltos.
only_time_series (bool): Si es verdadero, sólo devuelve distribuciones
que sean distribuciones de series de tiempo.
"""
filter_in = filter_in or {}
filter_out = filter_out or {}
catalog = read_catalog_obj(catalog)
distributions = []
for dataset in get_datasets(catalog, filter_in, filter_out):
for distribution in dataset.get("distribution", []):
# attach the dataset id
distribution["dataset_identifier"] = dataset["identifier"]
distributions.append(distribution)
filtered_distributions = [
distribution for distribution in distributions if
_filter_dictionary(distribution, filter_in.get("distribution"),
filter_out.get("distribution"))
]
# apply the special filters
if only_time_series:
filtered_distributions = [distribution for distribution in
filtered_distributions if
distribution_has_time_index(distribution)]
if meta_field:
return [distribution[meta_field]
for distribution in filtered_distributions
if meta_field in distribution]
if exclude_meta_fields:
meta_filtered_distributions = []
for distribution in filtered_distributions:
distribution_meta_filtered = distribution.copy()
for excluded_meta_field in exclude_meta_fields:
distribution_meta_filtered.pop(excluded_meta_field, None)
meta_filtered_distributions.append(distribution_meta_filtered)
return meta_filtered_distributions
else:
return filtered_distributions | Returns the list of the catalog's distributions, or one of their metadata fields.
Args:
catalog (dict, str or DataJson): External/internal representation of a
catalog. An _external_ representation is a local path or a
remote URL to a file with a catalog's metadata, in JSON or
XLSX format. The _internal_ representation of a catalog
is a dictionary. Examples: http://datos.gob.ar/data.json,
http://www.ign.gob.ar/descargas/geodatos/catalog.xlsx,
"/energia/catalog.xlsx".
filter_in (dict): Returns the distributions whose attributes
match those passed in this dictionary. Example::
{
"dataset": {
"publisher": {"name": "Ministerio de Ambiente"}
}
}
Only distributions that belong to a dataset with that
publisher_name will be returned.
filter_out (dict): Returns the distributions whose attributes do
not match those passed in this dictionary. Example::
{
"dataset": {
"publisher": {"name": "Ministerio de Ambiente"}
}
}
Only distributions that do not belong to a dataset with that
publisher_name will be returned.
meta_field (str): Name of a Distribution metadata field. Instead of
returning full Distribution objects, returns a list of the
values of that field present in the catalog.
exclude_meta_fields (list): Distribution metadata fields to exclude
from the returned Distribution objects.
only_time_series (bool): If true, only returns distributions
that are time series distributions. | Below is the instruction that describes the task:
### Input:
Returns the list of the catalog's distributions, or one of their metadata fields.
Args:
catalog (dict, str or DataJson): External/internal representation of a
catalog. An _external_ representation is a local path or a
remote URL to a file with a catalog's metadata, in JSON or
XLSX format. The _internal_ representation of a catalog
is a dictionary. Examples: http://datos.gob.ar/data.json,
http://www.ign.gob.ar/descargas/geodatos/catalog.xlsx,
"/energia/catalog.xlsx".
filter_in (dict): Returns the distributions whose attributes
match those passed in this dictionary. Example::
{
"dataset": {
"publisher": {"name": "Ministerio de Ambiente"}
}
}
Only distributions that belong to a dataset with that
publisher_name will be returned.
filter_out (dict): Returns the distributions whose attributes do
not match those passed in this dictionary. Example::
{
"dataset": {
"publisher": {"name": "Ministerio de Ambiente"}
}
}
Only distributions that do not belong to a dataset with that
publisher_name will be returned.
meta_field (str): Name of a Distribution metadata field. Instead of
returning full Distribution objects, returns a list of the
values of that field present in the catalog.
exclude_meta_fields (list): Distribution metadata fields to exclude
from the returned Distribution objects.
only_time_series (bool): If true, only returns distributions
that are time series distributions.
### Response:
def get_distributions(catalog, filter_in=None, filter_out=None,
meta_field=None, exclude_meta_fields=None,
only_time_series=False):
"""Devuelve lista de distribuciones del catálogo o de uno de sus metadatos.
Args:
catalog (dict, str or DataJson): Representación externa/interna de un
catálogo. Una representación _externa_ es un path local o una
URL remota a un archivo con la metadata de un catálogo, en
formato JSON o XLSX. La representación _interna_ de un catálogo
es un diccionario. Ejemplos: http://datos.gob.ar/data.json,
http://www.ign.gob.ar/descargas/geodatos/catalog.xlsx,
"/energia/catalog.xlsx".
filter_in (dict): Devuelve los distribuciones cuyos atributos
coinciden con los pasados en este diccionario. Ejemplo::
{
"dataset": {
"publisher": {"name": "Ministerio de Ambiente"}
}
}
Sólo se devolverán los distribuciones que pertenezcan a un dataset
de ese publisher_name.
filter_out (dict): Devuelve los distribuciones cuyos atributos no
coinciden con los pasados en este diccionario. Ejemplo::
{
"dataset": {
"publisher": {"name": "Ministerio de Ambiente"}
}
}
Sólo se devolverán los distribuciones que no pertenezcan a un
dataset de ese publisher_name.
meta_field (str): Nombre de un metadato de Distribution. En lugar de
devolver los objetos completos Distribution, devuelve una lista de
valores para ese metadato presentes en el catálogo.
exclude_meta_fields (list): Metadatos de Distribution que se quieren
excluir de los objetos Distribution devueltos.
only_time_series (bool): Si es verdadero, sólo devuelve distribuciones
que sean distribuciones de series de tiempo.
"""
filter_in = filter_in or {}
filter_out = filter_out or {}
catalog = read_catalog_obj(catalog)
distributions = []
for dataset in get_datasets(catalog, filter_in, filter_out):
for distribution in dataset.get("distribution", []):
# attach the dataset id
distribution["dataset_identifier"] = dataset["identifier"]
distributions.append(distribution)
filtered_distributions = [
distribution for distribution in distributions if
_filter_dictionary(distribution, filter_in.get("distribution"),
filter_out.get("distribution"))
]
# apply the special filters
if only_time_series:
filtered_distributions = [distribution for distribution in
filtered_distributions if
distribution_has_time_index(distribution)]
if meta_field:
return [distribution[meta_field]
for distribution in filtered_distributions
if meta_field in distribution]
if exclude_meta_fields:
meta_filtered_distributions = []
for distribution in filtered_distributions:
distribution_meta_filtered = distribution.copy()
for excluded_meta_field in exclude_meta_fields:
distribution_meta_filtered.pop(excluded_meta_field, None)
meta_filtered_distributions.append(distribution_meta_filtered)
return meta_filtered_distributions
else:
return filtered_distributions |
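A hedged usage sketch; the catalog URL and publisher name come straight from the docstring's own examples.

catalog = "http://datos.gob.ar/data.json"
# Time-series distributions of one publisher:
dists = get_distributions(
    catalog,
    filter_in={"dataset": {"publisher": {"name": "Ministerio de Ambiente"}}},
    only_time_series=True,
)
# Just one metadata field instead of full objects:
ids = get_distributions(catalog, meta_field="identifier")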
def scaled_fft(fft, scale=1.0):
"""
Produces a nicer graph, I'm not sure if this is correct
"""
data = np.zeros(len(fft))
for i, v in enumerate(fft):
data[i] = scale * (i * v) / NUM_SAMPLES
return data | Produces a nicer graph, I'm not sure if this is correct | Below is the instruction that describes the task:
### Input:
Produces a nicer graph, I'm not sure if this is correct
### Response:
def scaled_fft(fft, scale=1.0):
"""
Produces a nicer graph, I'm not sure if this is correct
"""
data = np.zeros(len(fft))
for i, v in enumerate(fft):
data[i] = scale * (i * v) / NUM_SAMPLES
return data |
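``NUM_SAMPLES`` is a module-level constant not shown here; the sketch below assumes it is the FFT window length and defines it locally. The ``i * v`` weighting tilts the display toward higher-frequency bins.

import numpy as np

NUM_SAMPLES = 1024  # assumed window length
t = np.arange(NUM_SAMPLES) / NUM_SAMPLES
spectrum = np.abs(np.fft.rfft(np.sin(2 * np.pi * 50 * t)))
graph = scaled_fft(spectrum, scale=2.0)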
def CaffeLMDB(lmdb_path, shuffle=True, keys=None):
"""
Read a Caffe LMDB file where each value contains a ``caffe.Datum`` protobuf.
Produces datapoints of the format: [HWC image, label].
Note that Caffe LMDB format is not efficient: it stores serialized raw
arrays rather than JPEG images.
Args:
lmdb_path, shuffle, keys: same as :class:`LMDBData`.
Returns:
a :class:`LMDBDataDecoder` instance.
Example:
.. code-block:: python
ds = CaffeLMDB("/tmp/validation", keys='{:0>8d}')
"""
cpb = get_caffe_pb()
lmdb_data = LMDBData(lmdb_path, shuffle, keys)
def decoder(k, v):
try:
datum = cpb.Datum()
datum.ParseFromString(v)
img = np.fromstring(datum.data, dtype=np.uint8)
img = img.reshape(datum.channels, datum.height, datum.width)
except Exception:
log_once("Cannot read key {}".format(k), 'warn')
return None
return [img.transpose(1, 2, 0), datum.label]
logger.warn("Caffe LMDB format doesn't store jpeg-compressed images, \
it's not recommended due to its inferior performance.")
return LMDBDataDecoder(lmdb_data, decoder) | Read a Caffe LMDB file where each value contains a ``caffe.Datum`` protobuf.
Produces datapoints of the format: [HWC image, label].
Note that Caffe LMDB format is not efficient: it stores serialized raw
arrays rather than JPEG images.
Args:
lmdb_path, shuffle, keys: same as :class:`LMDBData`.
Returns:
a :class:`LMDBDataDecoder` instance.
Example:
.. code-block:: python
ds = CaffeLMDB("/tmp/validation", keys='{:0>8d}') | Below is the the instruction that describes the task:
### Input:
Read a Caffe LMDB file where each value contains a ``caffe.Datum`` protobuf.
Produces datapoints of the format: [HWC image, label].
Note that Caffe LMDB format is not efficient: it stores serialized raw
arrays rather than JPEG images.
Args:
lmdb_path, shuffle, keys: same as :class:`LMDBData`.
Returns:
a :class:`LMDBDataDecoder` instance.
Example:
.. code-block:: python
ds = CaffeLMDB("/tmp/validation", keys='{:0>8d}')
### Response:
def CaffeLMDB(lmdb_path, shuffle=True, keys=None):
"""
Read a Caffe LMDB file where each value contains a ``caffe.Datum`` protobuf.
Produces datapoints of the format: [HWC image, label].
Note that Caffe LMDB format is not efficient: it stores serialized raw
arrays rather than JPEG images.
Args:
lmdb_path, shuffle, keys: same as :class:`LMDBData`.
Returns:
a :class:`LMDBDataDecoder` instance.
Example:
.. code-block:: python
ds = CaffeLMDB("/tmp/validation", keys='{:0>8d}')
"""
cpb = get_caffe_pb()
lmdb_data = LMDBData(lmdb_path, shuffle, keys)
def decoder(k, v):
try:
datum = cpb.Datum()
datum.ParseFromString(v)
img = np.fromstring(datum.data, dtype=np.uint8)
img = img.reshape(datum.channels, datum.height, datum.width)
except Exception:
log_once("Cannot read key {}".format(k), 'warn')
return None
return [img.transpose(1, 2, 0), datum.label]
logger.warn("Caffe LMDB format doesn't store jpeg-compressed images, \
it's not recommended due to its inferior performance.")
return LMDBDataDecoder(lmdb_data, decoder) |
def write(self, s, size=None):
"""
Writes the content of the specified C{s} into this buffer.
@param s: Raw bytes
"""
self._buffer.write(s)
self._len_changed = True | Writes the content of the specified C{s} into this buffer.
@param s: Raw bytes | Below is the instruction that describes the task:
### Input:
Writes the content of the specified C{s} into this buffer.
@param s: Raw bytes
### Response:
def write(self, s, size=None):
"""
Writes the content of the specified C{s} into this buffer.
@param s: Raw bytes
"""
self._buffer.write(s)
self._len_changed = True |
def get_sha(path=None, log=None, short=False, timeout=None):
"""Use `git rev-parse HEAD <REPO>` to get current SHA.
"""
# git_command = "git rev-parse HEAD {}".format(repo_name).split()
# git_command = "git rev-parse HEAD".split()
git_command = ["git", "rev-parse"]
if short:
git_command.append("--short")
git_command.append("HEAD")
kwargs = {}
if path is not None:
kwargs['cwd'] = path
if timeout is not None:
kwargs['timeout'] = timeout
if log is not None:
log.debug("{} {}".format(git_command, str(kwargs)))
sha = subprocess.check_output(git_command, **kwargs)
try:
sha = sha.decode('ascii').strip()
except:
if log is not None:
log.debug("decode of '{}' failed".format(sha))
return sha | Use `git rev-parse HEAD <REPO>` to get current SHA. | Below is the instruction that describes the task:
### Input:
Use `git rev-parse HEAD <REPO>` to get current SHA.
### Response:
def get_sha(path=None, log=None, short=False, timeout=None):
"""Use `git rev-parse HEAD <REPO>` to get current SHA.
"""
# git_command = "git rev-parse HEAD {}".format(repo_name).split()
# git_command = "git rev-parse HEAD".split()
git_command = ["git", "rev-parse"]
if short:
git_command.append("--short")
git_command.append("HEAD")
kwargs = {}
if path is not None:
kwargs['cwd'] = path
if timeout is not None:
kwargs['timeout'] = timeout
if log is not None:
log.debug("{} {}".format(git_command, str(kwargs)))
sha = subprocess.check_output(git_command, **kwargs)
try:
sha = sha.decode('ascii').strip()
except:
if log is not None:
log.debug("decode of '{}' failed".format(sha))
return sha |
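A short usage sketch; the path and output are illustrative.

sha = get_sha(path='.', short=True, timeout=5)
print(sha)  # e.g. 'a1b2c3d' for the checkout in the current directory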
def from_config(cls, cp, section, outputs, skip_opts=None,
additional_opts=None):
"""Initializes a transform from the given section.
Parameters
----------
cp : pycbc.workflow.WorkflowConfigParser
A parsed configuration file that contains the transform options.
section : str
Name of the section in the configuration file.
outputs : str
The names of the parameters that are output by this transformation,
separated by `VARARGS_DELIM`. These must appear in the "tag" part
of the section header.
skip_opts : list, optional
Do not read options in the given list.
additional_opts : dict, optional
Any additional arguments to pass to the class. If an option is
provided that also exists in the config file, the value provided
will be used instead of being read from the file.
Returns
-------
cls
An instance of the class.
"""
tag = outputs
if skip_opts is None:
skip_opts = []
if additional_opts is None:
additional_opts = {}
else:
additional_opts = additional_opts.copy()
outputs = set(outputs.split(VARARGS_DELIM))
special_args = ['name'] + skip_opts + additional_opts.keys()
# get any extra arguments to pass to init
extra_args = {}
for opt in cp.options("-".join([section, tag])):
if opt in special_args:
continue
# check if option can be cast as a float
val = cp.get_opt_tag(section, opt, tag)
try:
val = float(val)
except ValueError:
pass
# add option
extra_args.update({opt:val})
extra_args.update(additional_opts)
out = cls(**extra_args)
# check that the outputs matches
if outputs-out.outputs != set() or out.outputs-outputs != set():
raise ValueError("outputs of class do not match outputs specified "
"in section")
return out | Initializes a transform from the given section.
Parameters
----------
cp : pycbc.workflow.WorkflowConfigParser
A parsed configuration file that contains the transform options.
section : str
Name of the section in the configuration file.
outputs : str
The names of the parameters that are output by this transformation,
separated by `VARARGS_DELIM`. These must appear in the "tag" part
of the section header.
skip_opts : list, optional
Do not read options in the given list.
additional_opts : dict, optional
Any additional arguments to pass to the class. If an option is
provided that also exists in the config file, the value provided
will be used instead of being read from the file.
Returns
-------
cls
An instance of the class. | Below is the instruction that describes the task:
### Input:
Initializes a transform from the given section.
Parameters
----------
cp : pycbc.workflow.WorkflowConfigParser
A parsed configuration file that contains the transform options.
section : str
Name of the section in the configuration file.
outputs : str
The names of the parameters that are output by this transformation,
separated by `VARARGS_DELIM`. These must appear in the "tag" part
of the section header.
skip_opts : list, optional
Do not read options in the given list.
additional_opts : dict, optional
Any additional arguments to pass to the class. If an option is
provided that also exists in the config file, the value provided
will be used instead of being read from the file.
Returns
-------
cls
An instance of the class.
### Response:
def from_config(cls, cp, section, outputs, skip_opts=None,
additional_opts=None):
"""Initializes a transform from the given section.
Parameters
----------
cp : pycbc.workflow.WorkflowConfigParser
A parsed configuration file that contains the transform options.
section : str
Name of the section in the configuration file.
outputs : str
The names of the parameters that are output by this transformation,
separated by `VARARGS_DELIM`. These must appear in the "tag" part
of the section header.
skip_opts : list, optional
Do not read options in the given list.
additional_opts : dict, optional
Any additional arguments to pass to the class. If an option is
provided that also exists in the config file, the value provided
will be used instead of being read from the file.
Returns
-------
cls
An instance of the class.
"""
tag = outputs
if skip_opts is None:
skip_opts = []
if additional_opts is None:
additional_opts = {}
else:
additional_opts = additional_opts.copy()
outputs = set(outputs.split(VARARGS_DELIM))
special_args = ['name'] + skip_opts + additional_opts.keys()
# get any extra arguments to pass to init
extra_args = {}
for opt in cp.options("-".join([section, tag])):
if opt in special_args:
continue
# check if option can be cast as a float
val = cp.get_opt_tag(section, opt, tag)
try:
val = float(val)
except ValueError:
pass
# add option
extra_args.update({opt:val})
extra_args.update(additional_opts)
out = cls(**extra_args)
# check that the outputs matches
if outputs-out.outputs != set() or out.outputs-outputs != set():
raise ValueError("outputs of class do not match outputs specified "
"in section")
return out |
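One portability note: on Python 3, ``['name'] + skip_opts + additional_opts.keys()`` raises ``TypeError``, because ``dict.keys()`` returns a view rather than the list it returned on Python 2. A hedged one-line repair for that statement:

special_args = ['name'] + skip_opts + list(additional_opts.keys())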
def visit_field(self, _, children):
"""A simple field.
Arguments
---------
_ (node) : parsimonious.nodes.Node.
children : list
- 0: for ``FILTERS``: list of instances of ``.resources.Field``.
Returns
-------
.resources.Field
An instance of ``.resources.Field`` with the correct name.
Example
-------
>>> DataQLParser(r'foo', default_rule='FIELD').data
<Field[foo] />
>>> DataQLParser(r'foo(1)', default_rule='FIELD').data
<Field[foo] .foo(1) />
>>> DataQLParser(r'foo.bar()', default_rule='FIELD').data
<Field[foo] .foo.bar() />
"""
filters = children[0]
return self.Field(getattr(filters[0], 'name', None), filters=filters) | A simple field.
Arguments
---------
_ (node) : parsimonious.nodes.Node.
children : list
- 0: for ``FILTERS``: list of instances of ``.resources.Field``.
Returns
-------
.resources.Field
An instance of ``.resources.Field`` with the correct name.
Example
-------
>>> DataQLParser(r'foo', default_rule='FIELD').data
<Field[foo] />
>>> DataQLParser(r'foo(1)', default_rule='FIELD').data
<Field[foo] .foo(1) />
>>> DataQLParser(r'foo.bar()', default_rule='FIELD').data
<Field[foo] .foo.bar() /> | Below is the instruction that describes the task:
### Input:
A simple field.
Arguments
---------
_ (node) : parsimonious.nodes.Node.
children : list
- 0: for ``FILTERS``: list of instances of ``.resources.Field``.
Returns
-------
.resources.Field
An instance of ``.resources.Field`` with the correct name.
Example
-------
>>> DataQLParser(r'foo', default_rule='FIELD').data
<Field[foo] />
>>> DataQLParser(r'foo(1)', default_rule='FIELD').data
<Field[foo] .foo(1) />
>>> DataQLParser(r'foo.bar()', default_rule='FIELD').data
<Field[foo] .foo.bar() />
### Response:
def visit_field(self, _, children):
"""A simple field.
Arguments
---------
_ (node) : parsimonious.nodes.Node.
children : list
- 0: for ``FILTERS``: list of instances of ``.resources.Field``.
Returns
-------
.resources.Field
An instance of ``.resources.Field`` with the correct name.
Example
-------
>>> DataQLParser(r'foo', default_rule='FIELD').data
<Field[foo] />
>>> DataQLParser(r'foo(1)', default_rule='FIELD').data
<Field[foo] .foo(1) />
>>> DataQLParser(r'foo.bar()', default_rule='FIELD').data
<Field[foo] .foo.bar() />
"""
filters = children[0]
return self.Field(getattr(filters[0], 'name', None), filters=filters) |
def average_over_area(q, x, y):
"""Averages a quantity `q` over a rectangular area given a 2D array and
the x and y vectors for sample locations, using the trapezoidal rule"""
area = (np.max(x) - np.min(x))*(np.max(y) - np.min(y))
integral = np.trapz(np.trapz(q, y, axis=0), x)
return integral/area | Averages a quantity `q` over a rectangular area given a 2D array and
the x and y vectors for sample locations, using the trapezoidal rule | Below is the instruction that describes the task:
### Input:
Averages a quantity `q` over a rectangular area given a 2D array and
the x and y vectors for sample locations, using the trapezoidal rule
### Response:
def average_over_area(q, x, y):
"""Averages a quantity `q` over a rectangular area given a 2D array and
the x and y vectors for sample locations, using the trapezoidal rule"""
area = (np.max(x) - np.min(x))*(np.max(y) - np.min(y))
integral = np.trapz(np.trapz(q, y, axis=0), x)
return integral/area |
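A quick worked check on an illustrative grid: a constant field must average to itself over any rectangle, since the integral is the constant times the area.

import numpy as np

x = np.linspace(0.0, 2.0, 21)
y = np.linspace(0.0, 1.0, 11)
q = np.full((y.size, x.size), 3.0)  # rows indexed by y, columns by x
print(average_over_area(q, x, y))   # -> 3.0 (up to floating-point error)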
def queryString_required(strList):
""" An decorator checking whether queryString key is valid or not
Args:
str: allowed queryString key
Returns:
if it contains an invalid queryString key, it will raise an exception.
"""
def _dec(function):
@wraps(function)
def _wrap(request, *args, **kwargs):
for i in strList:
if i not in request.GET:
raise Http404("api does not exist")
return function(request, *args, **kwargs)
return _wrap
return _dec | A decorator checking whether a queryString key is valid or not
Args:
str: allowed queryString key
Returns:
if it contains an invalid queryString key, it will raise an exception. | Below is the instruction that describes the task:
### Input:
A decorator checking whether a queryString key is valid or not
Args:
str: allowed queryString key
Returns:
if it contains an invalid queryString key, it will raise an exception.
### Response:
def queryString_required(strList):
""" An decorator checking whether queryString key is valid or not
Args:
str: allowed queryString key
Returns:
if it contains an invalid queryString key, it will raise an exception.
"""
def _dec(function):
@wraps(function)
def _wrap(request, *args, **kwargs):
for i in strList:
if i not in request.GET:
raise Http404("api does not exist")
return function(request, *args, **kwargs)
return _wrap
return _dec |
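A hedged Django usage sketch; the view name and required keys are illustrative.

@queryString_required(['user_id', 'page'])
def profile_view(request):
    # Reaching this point guarantees ?user_id=...&page=... are both present;
    # otherwise the decorator already raised Http404.
    ...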
def _normalize(obj):
"""
Normalize dicts and lists
:param obj:
:return: normalized object
"""
if isinstance(obj, list):
return [_normalize(item) for item in obj]
elif isinstance(obj, dict):
return {k: _normalize(v) for k, v in obj.items() if v is not None}
elif hasattr(obj, 'to_python'):
return obj.to_python()
return obj | Normalize dicts and lists
:param obj:
:return: normalized object | Below is the instruction that describes the task:
### Input:
Normalize dicts and lists
:param obj:
:return: normalized object
### Response:
def _normalize(obj):
"""
Normalize dicts and lists
:param obj:
:return: normalized object
"""
if isinstance(obj, list):
return [_normalize(item) for item in obj]
elif isinstance(obj, dict):
return {k: _normalize(v) for k, v in obj.items() if v is not None}
elif hasattr(obj, 'to_python'):
return obj.to_python()
return obj |
def generate(cls, partial_props=None):
"""
Generate new connection file props from
defaults
"""
partial_props = partial_props or {}
props = partial_props.copy()
props.update(cls.DEFAULT_PROPERTIES)
return cls(props) | Generate new connection file props from
defaults | Below is the instruction that describes the task:
### Input:
Generate new connection file props from
defaults
### Response:
def generate(cls, partial_props=None):
"""
Generate new connection file props from
defaults
"""
partial_props = partial_props or {}
props = partial_props.copy()
props.update(cls.DEFAULT_PROPERTIES)
return cls(props) |
def compute_key_composite(password=None, keyfile=None):
"""Compute composite key.
Used in header verification and payload decryption."""
# hash the password
if password:
password_composite = hashlib.sha256(password.encode('utf-8')).digest()
else:
password_composite = b''
# hash the keyfile
if keyfile:
# try to read XML keyfile
try:
with open(keyfile, 'r') as f:
tree = etree.parse(f).getroot()
keyfile_composite = base64.b64decode(tree.find('Key/Data').text)
# otherwise, try to read plain keyfile
except (etree.XMLSyntaxError, UnicodeDecodeError):
try:
with open(keyfile, 'rb') as f:
key = f.read()
try:
int(key, 16)
is_hex = True
except ValueError:
is_hex = False
# if the length is 32 bytes we assume it is the key
if len(key) == 32:
keyfile_composite = key
# if the length is 64 bytes we assume the key is hex encoded
elif len(key) == 64 and is_hex:
keyfile_composite = codecs.decode(key, 'hex')
# anything else may be a file to hash for the key
else:
keyfile_composite = hashlib.sha256(key).digest()
except:
raise IOError('Could not read keyfile')
else:
keyfile_composite = b''
# create composite key from password and keyfile composites
return hashlib.sha256(password_composite + keyfile_composite).digest() | Compute composite key.
Used in header verification and payload decryption. | Below is the instruction that describes the task:
### Input:
Compute composite key.
Used in header verification and payload decryption.
### Response:
def compute_key_composite(password=None, keyfile=None):
"""Compute composite key.
Used in header verification and payload decryption."""
# hash the password
if password:
password_composite = hashlib.sha256(password.encode('utf-8')).digest()
else:
password_composite = b''
# hash the keyfile
if keyfile:
# try to read XML keyfile
try:
with open(keyfile, 'r') as f:
tree = etree.parse(f).getroot()
keyfile_composite = base64.b64decode(tree.find('Key/Data').text)
# otherwise, try to read plain keyfile
except (etree.XMLSyntaxError, UnicodeDecodeError):
try:
with open(keyfile, 'rb') as f:
key = f.read()
try:
int(key, 16)
is_hex = True
except ValueError:
is_hex = False
# if the length is 32 bytes we assume it is the key
if len(key) == 32:
keyfile_composite = key
# if the length is 64 bytes we assume the key is hex encoded
elif len(key) == 64 and is_hex:
keyfile_composite = codecs.decode(key, 'hex')
# anything else may be a file to hash for the key
else:
keyfile_composite = hashlib.sha256(key).digest()
except:
raise IOError('Could not read keyfile')
else:
keyfile_composite = b''
# create composite key from password and keyfile composites
return hashlib.sha256(password_composite + keyfile_composite).digest() |
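A usage sketch for the password-only case (no keyfile), which reduces to ``sha256(sha256(password))``; the password string is a placeholder.

key = compute_key_composite(password='correct horse battery staple')
print(key.hex())  # 32-byte composite key used downstream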
def attach_attrs_table(key, value, fmt, meta):
"""Extracts attributes and attaches them to element."""
# We can't use attach_attrs_factory() because Table is a block-level element
if key in ['Table']:
assert len(value) == 5
caption = value[0] # caption, align, x, head, body
# Set n to the index where the attributes start
n = 0
while n < len(caption) and not \
(caption[n]['t'] == 'Str' and caption[n]['c'].startswith('{')):
n += 1
try:
attrs = extract_attrs(caption, n)
value.insert(0, attrs)
except (ValueError, IndexError):
pass | Extracts attributes and attaches them to element. | Below is the instruction that describes the task:
### Input:
Extracts attributes and attaches them to element.
### Response:
def attach_attrs_table(key, value, fmt, meta):
"""Extracts attributes and attaches them to element."""
# We can't use attach_attrs_factory() because Table is a block-level element
if key in ['Table']:
assert len(value) == 5
caption = value[0] # caption, align, x, head, body
# Set n to the index where the attributes start
n = 0
while n < len(caption) and not \
(caption[n]['t'] == 'Str' and caption[n]['c'].startswith('{')):
n += 1
try:
attrs = extract_attrs(caption, n)
value.insert(0, attrs)
except (ValueError, IndexError):
pass |
def fetch():
"""
Fetches the latest exchange rate info from the European Central Bank. These
rates need to be used for displaying invoices since some countries require
local currency be quoted. Also useful to store the GBP rate of the VAT
collected at time of purchase to prevent fluctuations in exchange rates from
significantly altering the amount of tax due the HMRC (if you are using them
for VAT MOSS).
:return:
A dict with string keys that are currency codes and values that are
Decimals of the exchange rate with the base (1.0000) being the Euro
(EUR). The following currencies are included, based on this library
being built for EU and Norway VAT, plus USD for the author:
- BGN
- CZK
- DKK
- EUR
- GBP
- HUF
- HRK
- NOK
- PLN
- RON
- SEK
- USD
"""
response = urlopen('https://www.ecb.europa.eu/stats/eurofxref/eurofxref-daily.xml')
_, params = cgi.parse_header(response.headers['Content-Type'])
if 'charset' in params:
encoding = params['charset']
else:
encoding = 'utf-8'
return_xml = response.read().decode(encoding)
# Example return data
#
# <gesmes:Envelope xmlns:gesmes="http://www.gesmes.org/xml/2002-08-01" xmlns="http://www.ecb.int/vocabulary/2002-08-01/eurofxref">
# <gesmes:subject>Reference rates</gesmes:subject>
# <gesmes:Sender>
# <gesmes:name>European Central Bank</gesmes:name>
# </gesmes:Sender>
# <Cube>
# <Cube time="2015-01-09">
# <Cube currency="USD" rate="1.1813"/>
# <Cube currency="JPY" rate="140.81"/>
# <Cube currency="BGN" rate="1.9558"/>
# <Cube currency="CZK" rate="28.062"/>
# <Cube currency="DKK" rate="7.4393"/>
# <Cube currency="GBP" rate="0.77990"/>
# <Cube currency="HUF" rate="317.39"/>
# <Cube currency="PLN" rate="4.2699"/>
# <Cube currency="RON" rate="4.4892"/>
# <Cube currency="SEK" rate="9.4883"/>
# <Cube currency="CHF" rate="1.2010"/>
# <Cube currency="NOK" rate="9.0605"/>
# <Cube currency="HRK" rate="7.6780"/>
# <Cube currency="RUB" rate="72.8910"/>
# <Cube currency="TRY" rate="2.7154"/>
# <Cube currency="AUD" rate="1.4506"/>
# <Cube currency="BRL" rate="3.1389"/>
# <Cube currency="CAD" rate="1.3963"/>
# <Cube currency="CNY" rate="7.3321"/>
# <Cube currency="HKD" rate="9.1593"/>
# <Cube currency="IDR" rate="14925.34"/>
# <Cube currency="ILS" rate="4.6614"/>
# <Cube currency="INR" rate="73.6233"/>
# <Cube currency="KRW" rate="1290.29"/>
# <Cube currency="MXN" rate="17.3190"/>
# <Cube currency="MYR" rate="4.2054"/>
# <Cube currency="NZD" rate="1.5115"/>
# <Cube currency="PHP" rate="53.090"/>
# <Cube currency="SGD" rate="1.5789"/>
# <Cube currency="THB" rate="38.846"/>
# <Cube currency="ZAR" rate="13.6655"/>
# </Cube>
# </Cube>
# </gesmes:Envelope>
# If we don't explicitly recode to UTF-8, ElementTree stupidly uses
# ascii on Python 2.7
envelope = ElementTree.fromstring(return_xml.encode('utf-8'))
namespaces = {
'gesmes': 'http://www.gesmes.org/xml/2002-08-01',
'eurofxref': 'http://www.ecb.int/vocabulary/2002-08-01/eurofxref'
}
date_elements = envelope.findall('./eurofxref:Cube/eurofxref:Cube[@time]', namespaces)
if not date_elements:
# Fail loudly if the XML seems to have changed
raise WebServiceError('Unable to find <Cube time=""> tag in ECB XML')
date = date_elements[0].get('time')
if not isinstance(date, str_cls):
date = date.decode('utf-8')
currency_elements = envelope.findall('./eurofxref:Cube/eurofxref:Cube/eurofxref:Cube[@currency][@rate]', namespaces)
if not currency_elements:
# Fail loudly if the XML seems to have changed
raise WebServiceError('Unable to find <Cube currency="" rate=""> tags in ECB XML')
rates = {
'EUR': Decimal('1.0000')
}
applicable_currenties = {
'BGN': True,
'CZK': True,
'DKK': True,
'EUR': True,
'GBP': True,
'HRK': True,
'HUF': True,
'NOK': True,
'PLN': True,
'RON': True,
'SEK': True,
'USD': True
}
for currency_element in currency_elements:
code = currency_element.attrib.get('currency')
if code not in applicable_currenties:
continue
rate = currency_element.attrib.get('rate')
rates[code] = Decimal(rate)
return (date, rates) | Fetches the latest exchange rate info from the European Central Bank. These
rates need to be used for displaying invoices since some countries require
local currency be quoted. Also useful to store the GBP rate of the VAT
collected at time of purchase to prevent fluctuations in exchange rates from
significantly altering the amount of tax due the HMRC (if you are using them
for VAT MOSS).
:return:
A dict with string keys that are currency codes and values that are
Decimals of the exchange rate with the base (1.0000) being the Euro
(EUR). The following currencies are included, based on this library
being built for EU and Norway VAT, plus USD for the author:
- BGN
- CZK
- DKK
- EUR
- GBP
- HUF
- HRK
- NOK
- PLN
- RON
- SEK
- USD | Below is the instruction that describes the task:
### Input:
Fetches the latest exchange rate info from the European Central Bank. These
rates need to be used for displaying invoices since some countries require
local currency be quoted. Also useful to store the GBP rate of the VAT
collected at time of purchase to prevent fluctuations in exchange rates from
significantly altering the amount of tax due the HMRC (if you are using them
for VAT MOSS).
:return:
A dict with string keys that are currency codes and values that are
Decimals of the exchange rate with the base (1.0000) being the Euro
(EUR). The following currencies are included, based on this library
being built for EU and Norway VAT, plus USD for the author:
- BGN
- CZK
- DKK
- EUR
- GBP
- HUF
- HRK
- NOK
- PLN
- RON
- SEK
- USD
### Response:
def fetch():
"""
Fetches the latest exchange rate info from the European Central Bank. These
rates need to be used for displaying invoices since some countries require
local currency be quoted. Also useful to store the GBP rate of the VAT
collected at time of purchase to prevent fluctuations in exchange rates from
significantly altering the amount of tax due the HMRC (if you are using them
for VAT MOSS).
:return:
A dict with string keys that are currency codes and values that are
Decimals of the exchange rate with the base (1.0000) being the Euro
(EUR). The following currencies are included, based on this library
being built for EU and Norway VAT, plus USD for the author:
- BGN
- CZK
- DKK
- EUR
- GBP
- HUF
- HRK
- NOK
- PLN
- RON
- SEK
- USD
"""
response = urlopen('https://www.ecb.europa.eu/stats/eurofxref/eurofxref-daily.xml')
_, params = cgi.parse_header(response.headers['Content-Type'])
if 'charset' in params:
encoding = params['charset']
else:
encoding = 'utf-8'
return_xml = response.read().decode(encoding)
# Example return data
#
# <gesmes:Envelope xmlns:gesmes="http://www.gesmes.org/xml/2002-08-01" xmlns="http://www.ecb.int/vocabulary/2002-08-01/eurofxref">
# <gesmes:subject>Reference rates</gesmes:subject>
# <gesmes:Sender>
# <gesmes:name>European Central Bank</gesmes:name>
# </gesmes:Sender>
# <Cube>
# <Cube time="2015-01-09">
# <Cube currency="USD" rate="1.1813"/>
# <Cube currency="JPY" rate="140.81"/>
# <Cube currency="BGN" rate="1.9558"/>
# <Cube currency="CZK" rate="28.062"/>
# <Cube currency="DKK" rate="7.4393"/>
# <Cube currency="GBP" rate="0.77990"/>
# <Cube currency="HUF" rate="317.39"/>
# <Cube currency="PLN" rate="4.2699"/>
# <Cube currency="RON" rate="4.4892"/>
# <Cube currency="SEK" rate="9.4883"/>
# <Cube currency="CHF" rate="1.2010"/>
# <Cube currency="NOK" rate="9.0605"/>
# <Cube currency="HRK" rate="7.6780"/>
# <Cube currency="RUB" rate="72.8910"/>
# <Cube currency="TRY" rate="2.7154"/>
# <Cube currency="AUD" rate="1.4506"/>
# <Cube currency="BRL" rate="3.1389"/>
# <Cube currency="CAD" rate="1.3963"/>
# <Cube currency="CNY" rate="7.3321"/>
# <Cube currency="HKD" rate="9.1593"/>
# <Cube currency="IDR" rate="14925.34"/>
# <Cube currency="ILS" rate="4.6614"/>
# <Cube currency="INR" rate="73.6233"/>
# <Cube currency="KRW" rate="1290.29"/>
# <Cube currency="MXN" rate="17.3190"/>
# <Cube currency="MYR" rate="4.2054"/>
# <Cube currency="NZD" rate="1.5115"/>
# <Cube currency="PHP" rate="53.090"/>
# <Cube currency="SGD" rate="1.5789"/>
# <Cube currency="THB" rate="38.846"/>
# <Cube currency="ZAR" rate="13.6655"/>
# </Cube>
# </Cube>
# </gesmes:Envelope>
# If we don't explicitly recode to UTF-8, ElementTree stupidly uses
# ascii on Python 2.7
envelope = ElementTree.fromstring(return_xml.encode('utf-8'))
namespaces = {
'gesmes': 'http://www.gesmes.org/xml/2002-08-01',
'eurofxref': 'http://www.ecb.int/vocabulary/2002-08-01/eurofxref'
}
date_elements = envelope.findall('./eurofxref:Cube/eurofxref:Cube[@time]', namespaces)
if not date_elements:
# Fail loudly if the XML seems to have changed
raise WebServiceError('Unable to find <Cube time=""> tag in ECB XML')
date = date_elements[0].get('time')
if not isinstance(date, str_cls):
date = date.decode('utf-8')
currency_elements = envelope.findall('./eurofxref:Cube/eurofxref:Cube/eurofxref:Cube[@currency][@rate]', namespaces)
if not currency_elements:
# Fail loudly if the XML seems to have changed
raise WebServiceError('Unable to find <Cube currency="" rate=""> tags in ECB XML')
rates = {
'EUR': Decimal('1.0000')
}
applicable_currenties = {
'BGN': True,
'CZK': True,
'DKK': True,
'EUR': True,
'GBP': True,
'HRK': True,
'HUF': True,
'NOK': True,
'PLN': True,
'RON': True,
'SEK': True,
'USD': True
}
for currency_element in currency_elements:
code = currency_element.attrib.get('currency')
if code not in applicable_currenties:
continue
rate = currency_element.attrib.get('rate')
rates[code] = Decimal(rate)
return (date, rates) |
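A hedged usage sketch (requires network access to the ECB endpoint), converting a euro amount with the returned table.

from decimal import Decimal

date, rates = fetch()
amount_gbp = (Decimal('100.00') * rates['GBP']).quantize(Decimal('0.01'))
print(date, amount_gbp)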
def delete_pb_devices():
"""Delete PBs devices from the Tango database."""
parser = argparse.ArgumentParser(description='Register PB devices.')
parser.add_argument('num_pb', type=int,
help='Number of PB devices to register.')
args = parser.parse_args()
log = logging.getLogger('sip.tango_control.subarray')
tango_db = Database()
log.info("Deleting PB devices:")
for index in range(args.num_pb):
name = 'sip_sdp/pb/{:05d}'.format(index)
log.info("\t%s", name)
tango_db.delete_device(name) | Delete PB devices from the Tango database. | Below is the instruction that describes the task:
### Input:
Delete PB devices from the Tango database.
### Response:
def delete_pb_devices():
"""Delete PBs devices from the Tango database."""
parser = argparse.ArgumentParser(description='Register PB devices.')
parser.add_argument('num_pb', type=int,
help='Number of PB devices to register.')
args = parser.parse_args()
log = logging.getLogger('sip.tango_control.subarray')
tango_db = Database()
log.info("Deleting PB devices:")
for index in range(args.num_pb):
name = 'sip_sdp/pb/{:05d}'.format(index)
log.info("\t%s", name)
tango_db.delete_device(name) |