code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def select_edges_by_attribute(docgraph, attribute=None, value=None, data=False):
"""
get all edges with the given edge type and layer.
Parameters
----------
docgraph : DiscourseDocumentGraph
document graph from which the nodes will be extracted
attribute : str or None
Name of the node attribute that all nodes must posess.
If None, returns all nodes.
value : str or collection of str or None
Value of the node attribute that all nodes must posess.
If None, returns all nodes with the given node attribute key .
data : bool
If True, results will include edge attributes.
Returns
-------
edges : generator of str
a container/list of edges (represented as (source node ID, target
node ID) tuples). If data is True, edges are represented as
(source node ID, target node ID, edge attribute dict) tuples.
"""
if attribute:
attrib_key_eval = "'{}' in edge_attribs".format(attribute)
if value is not None:
if isinstance(value, basestring):
attrib_val_eval = \
"edge_attribs['{0}'] == '{1}'".format(attribute, value)
return select_edges(
docgraph, data=data,
conditions=[attrib_key_eval, attrib_val_eval])
else: # ``value`` is a list/set/dict of values
attrib_val_evals = \
["edge_attribs['{0}'] == '{1}'".format(attribute, v)
for v in value]
results = \
[select_edges(docgraph, data=data,
conditions=[attrib_key_eval, val_eval])
for val_eval in attrib_val_evals]
# results is a list of generators
return itertools.chain(*results)
else: # yield all edges with the given attribute, regardless of value
return select_edges(docgraph, data=data, conditions=[attrib_key_eval])
else: # don't filter edges at all
return docgraph.edges_iter(data=data) | get all edges with the given edge type and layer.
Parameters
----------
docgraph : DiscourseDocumentGraph
document graph from which the nodes will be extracted
attribute : str or None
Name of the node attribute that all nodes must posess.
If None, returns all nodes.
value : str or collection of str or None
Value of the node attribute that all nodes must posess.
If None, returns all nodes with the given node attribute key .
data : bool
If True, results will include edge attributes.
Returns
-------
edges : generator of str
a container/list of edges (represented as (source node ID, target
node ID) tuples). If data is True, edges are represented as
(source node ID, target node ID, edge attribute dict) tuples. | Below is the the instruction that describes the task:
### Input:
get all edges with the given edge type and layer.
Parameters
----------
docgraph : DiscourseDocumentGraph
document graph from which the nodes will be extracted
attribute : str or None
Name of the node attribute that all nodes must posess.
If None, returns all nodes.
value : str or collection of str or None
Value of the node attribute that all nodes must posess.
If None, returns all nodes with the given node attribute key .
data : bool
If True, results will include edge attributes.
Returns
-------
edges : generator of str
a container/list of edges (represented as (source node ID, target
node ID) tuples). If data is True, edges are represented as
(source node ID, target node ID, edge attribute dict) tuples.
### Response:
def select_edges_by_attribute(docgraph, attribute=None, value=None, data=False):
"""
get all edges with the given edge type and layer.
Parameters
----------
docgraph : DiscourseDocumentGraph
document graph from which the nodes will be extracted
attribute : str or None
Name of the node attribute that all nodes must posess.
If None, returns all nodes.
value : str or collection of str or None
Value of the node attribute that all nodes must posess.
If None, returns all nodes with the given node attribute key .
data : bool
If True, results will include edge attributes.
Returns
-------
edges : generator of str
a container/list of edges (represented as (source node ID, target
node ID) tuples). If data is True, edges are represented as
(source node ID, target node ID, edge attribute dict) tuples.
"""
if attribute:
attrib_key_eval = "'{}' in edge_attribs".format(attribute)
if value is not None:
if isinstance(value, basestring):
attrib_val_eval = \
"edge_attribs['{0}'] == '{1}'".format(attribute, value)
return select_edges(
docgraph, data=data,
conditions=[attrib_key_eval, attrib_val_eval])
else: # ``value`` is a list/set/dict of values
attrib_val_evals = \
["edge_attribs['{0}'] == '{1}'".format(attribute, v)
for v in value]
results = \
[select_edges(docgraph, data=data,
conditions=[attrib_key_eval, val_eval])
for val_eval in attrib_val_evals]
# results is a list of generators
return itertools.chain(*results)
else: # yield all edges with the given attribute, regardless of value
return select_edges(docgraph, data=data, conditions=[attrib_key_eval])
else: # don't filter edges at all
return docgraph.edges_iter(data=data) |
def freeze(self, dest_dir):
"""Freezes every resource within a context"""
for resource in self.resources():
if resource.present:
resource.freeze(dest_dir) | Freezes every resource within a context | Below is the the instruction that describes the task:
### Input:
Freezes every resource within a context
### Response:
def freeze(self, dest_dir):
"""Freezes every resource within a context"""
for resource in self.resources():
if resource.present:
resource.freeze(dest_dir) |
def run(self, dag):
"""
If `dag` is mapped to `coupling_map`, the property
`is_swap_mapped` is set to True (or to False otherwise).
Args:
dag (DAGCircuit): DAG to map.
"""
if self.layout is None:
if self.property_set["layout"]:
self.layout = self.property_set["layout"]
else:
self.layout = Layout.generate_trivial_layout(*dag.qregs.values())
self.property_set['is_swap_mapped'] = True
for gate in dag.twoQ_gates():
physical_q0 = self.layout[gate.qargs[0]]
physical_q1 = self.layout[gate.qargs[1]]
if self.coupling_map.distance(physical_q0, physical_q1) != 1:
self.property_set['is_swap_mapped'] = False
return | If `dag` is mapped to `coupling_map`, the property
`is_swap_mapped` is set to True (or to False otherwise).
Args:
dag (DAGCircuit): DAG to map. | Below is the the instruction that describes the task:
### Input:
If `dag` is mapped to `coupling_map`, the property
`is_swap_mapped` is set to True (or to False otherwise).
Args:
dag (DAGCircuit): DAG to map.
### Response:
def run(self, dag):
"""
If `dag` is mapped to `coupling_map`, the property
`is_swap_mapped` is set to True (or to False otherwise).
Args:
dag (DAGCircuit): DAG to map.
"""
if self.layout is None:
if self.property_set["layout"]:
self.layout = self.property_set["layout"]
else:
self.layout = Layout.generate_trivial_layout(*dag.qregs.values())
self.property_set['is_swap_mapped'] = True
for gate in dag.twoQ_gates():
physical_q0 = self.layout[gate.qargs[0]]
physical_q1 = self.layout[gate.qargs[1]]
if self.coupling_map.distance(physical_q0, physical_q1) != 1:
self.property_set['is_swap_mapped'] = False
return |
def text(self, x, y, text, attr=None):
u'''Write text at the given position.'''
if attr is None:
attr = self.attr
pos = self.fixcoord(x, y)
n = DWORD(0)
self.WriteConsoleOutputCharacterW(self.hout, text,
len(text), pos, byref(n))
self.FillConsoleOutputAttribute(self.hout, attr, n, pos, byref(n)) | u'''Write text at the given position. | Below is the the instruction that describes the task:
### Input:
u'''Write text at the given position.
### Response:
def text(self, x, y, text, attr=None):
u'''Write text at the given position.'''
if attr is None:
attr = self.attr
pos = self.fixcoord(x, y)
n = DWORD(0)
self.WriteConsoleOutputCharacterW(self.hout, text,
len(text), pos, byref(n))
self.FillConsoleOutputAttribute(self.hout, attr, n, pos, byref(n)) |
def read(self, path, encoding=None):
"""
Read the template at the given path, and return it as a unicode string.
"""
b = common.read(path)
if encoding is None:
encoding = self.file_encoding
return self.unicode(b, encoding) | Read the template at the given path, and return it as a unicode string. | Below is the the instruction that describes the task:
### Input:
Read the template at the given path, and return it as a unicode string.
### Response:
def read(self, path, encoding=None):
"""
Read the template at the given path, and return it as a unicode string.
"""
b = common.read(path)
if encoding is None:
encoding = self.file_encoding
return self.unicode(b, encoding) |
def configure_modsecurity(self):
"""
Installs the mod-security Apache module.
https://www.modsecurity.org
"""
r = self.local_renderer
if r.env.modsecurity_enabled and not self.last_manifest.modsecurity_enabled:
self.install_packages()
# Write modsecurity.conf.
fn = self.render_to_file('apache/apache_modsecurity.template.conf')
r.put(local_path=fn, remote_path='/etc/modsecurity/modsecurity.conf', use_sudo=True)
# Write OWASP rules.
r.env.modsecurity_download_filename = '/tmp/owasp-modsecurity-crs.tar.gz'
r.sudo('cd /tmp; wget --output-document={apache_modsecurity_download_filename} {apache_modsecurity_download_url}')
r.env.modsecurity_download_top = r.sudo(
"cd /tmp; "
"tar tzf %(apache_modsecurity_download_filename)s | sed -e 's@/.*@@' | uniq" % self.genv)
r.sudo('cd /tmp; tar -zxvf %(apache_modsecurity_download_filename)s' % self.genv)
r.sudo('cd /tmp; cp -R %(apache_modsecurity_download_top)s/* /etc/modsecurity/' % self.genv)
r.sudo('mv /etc/modsecurity/modsecurity_crs_10_setup.conf.example /etc/modsecurity/modsecurity_crs_10_setup.conf')
r.sudo('rm -f /etc/modsecurity/activated_rules/*')
r.sudo('cd /etc/modsecurity/base_rules; '
'for f in * ; do ln -s /etc/modsecurity/base_rules/$f /etc/modsecurity/activated_rules/$f ; done')
r.sudo('cd /etc/modsecurity/optional_rules; '
'for f in * ; do ln -s /etc/modsecurity/optional_rules/$f /etc/modsecurity/activated_rules/$f ; done')
r.env.httpd_conf_append.append('Include "/etc/modsecurity/activated_rules/*.conf"')
self.enable_mod('evasive')
self.enable_mod('headers')
elif not self.env.modsecurity_enabled and self.last_manifest.modsecurity_enabled:
self.disable_mod('modsecurity') | Installs the mod-security Apache module.
https://www.modsecurity.org | Below is the the instruction that describes the task:
### Input:
Installs the mod-security Apache module.
https://www.modsecurity.org
### Response:
def configure_modsecurity(self):
"""
Installs the mod-security Apache module.
https://www.modsecurity.org
"""
r = self.local_renderer
if r.env.modsecurity_enabled and not self.last_manifest.modsecurity_enabled:
self.install_packages()
# Write modsecurity.conf.
fn = self.render_to_file('apache/apache_modsecurity.template.conf')
r.put(local_path=fn, remote_path='/etc/modsecurity/modsecurity.conf', use_sudo=True)
# Write OWASP rules.
r.env.modsecurity_download_filename = '/tmp/owasp-modsecurity-crs.tar.gz'
r.sudo('cd /tmp; wget --output-document={apache_modsecurity_download_filename} {apache_modsecurity_download_url}')
r.env.modsecurity_download_top = r.sudo(
"cd /tmp; "
"tar tzf %(apache_modsecurity_download_filename)s | sed -e 's@/.*@@' | uniq" % self.genv)
r.sudo('cd /tmp; tar -zxvf %(apache_modsecurity_download_filename)s' % self.genv)
r.sudo('cd /tmp; cp -R %(apache_modsecurity_download_top)s/* /etc/modsecurity/' % self.genv)
r.sudo('mv /etc/modsecurity/modsecurity_crs_10_setup.conf.example /etc/modsecurity/modsecurity_crs_10_setup.conf')
r.sudo('rm -f /etc/modsecurity/activated_rules/*')
r.sudo('cd /etc/modsecurity/base_rules; '
'for f in * ; do ln -s /etc/modsecurity/base_rules/$f /etc/modsecurity/activated_rules/$f ; done')
r.sudo('cd /etc/modsecurity/optional_rules; '
'for f in * ; do ln -s /etc/modsecurity/optional_rules/$f /etc/modsecurity/activated_rules/$f ; done')
r.env.httpd_conf_append.append('Include "/etc/modsecurity/activated_rules/*.conf"')
self.enable_mod('evasive')
self.enable_mod('headers')
elif not self.env.modsecurity_enabled and self.last_manifest.modsecurity_enabled:
self.disable_mod('modsecurity') |
def render(self, text, auth=None):
"""
Renders the specified markdown content and embedded styles.
"""
if markdown is None:
import markdown
if UrlizeExtension is None:
from .mdx_urlize import UrlizeExtension
return markdown.markdown(text, extensions=[
'fenced_code',
'codehilite(css_class=highlight)',
'toc',
'tables',
'sane_lists',
UrlizeExtension(),
]) | Renders the specified markdown content and embedded styles. | Below is the the instruction that describes the task:
### Input:
Renders the specified markdown content and embedded styles.
### Response:
def render(self, text, auth=None):
"""
Renders the specified markdown content and embedded styles.
"""
if markdown is None:
import markdown
if UrlizeExtension is None:
from .mdx_urlize import UrlizeExtension
return markdown.markdown(text, extensions=[
'fenced_code',
'codehilite(css_class=highlight)',
'toc',
'tables',
'sane_lists',
UrlizeExtension(),
]) |
def OnMouseWheel(self, event):
"""Event handler for mouse wheel actions
Invokes zoom when mouse when Ctrl is also pressed
"""
if event.ControlDown():
if event.WheelRotation > 0:
post_command_event(self.grid, self.grid.ZoomInMsg)
else:
post_command_event(self.grid, self.grid.ZoomOutMsg)
elif self.main_window.IsFullScreen():
if event.WheelRotation > 0:
newtable = self.grid.current_table - 1
else:
newtable = self.grid.current_table + 1
post_command_event(self.grid, self.GridActionTableSwitchMsg,
newtable=newtable)
return
else:
wheel_speed = config["mouse_wheel_speed_factor"]
x, y = self.grid.GetViewStart()
direction = wheel_speed if event.GetWheelRotation() < 0 \
else -wheel_speed
if event.ShiftDown():
# Scroll sideways if shift is pressed.
self.grid.Scroll(x + direction, y)
else:
self.grid.Scroll(x, y + direction) | Event handler for mouse wheel actions
Invokes zoom when mouse when Ctrl is also pressed | Below is the the instruction that describes the task:
### Input:
Event handler for mouse wheel actions
Invokes zoom when mouse when Ctrl is also pressed
### Response:
def OnMouseWheel(self, event):
"""Event handler for mouse wheel actions
Invokes zoom when mouse when Ctrl is also pressed
"""
if event.ControlDown():
if event.WheelRotation > 0:
post_command_event(self.grid, self.grid.ZoomInMsg)
else:
post_command_event(self.grid, self.grid.ZoomOutMsg)
elif self.main_window.IsFullScreen():
if event.WheelRotation > 0:
newtable = self.grid.current_table - 1
else:
newtable = self.grid.current_table + 1
post_command_event(self.grid, self.GridActionTableSwitchMsg,
newtable=newtable)
return
else:
wheel_speed = config["mouse_wheel_speed_factor"]
x, y = self.grid.GetViewStart()
direction = wheel_speed if event.GetWheelRotation() < 0 \
else -wheel_speed
if event.ShiftDown():
# Scroll sideways if shift is pressed.
self.grid.Scroll(x + direction, y)
else:
self.grid.Scroll(x, y + direction) |
def pool_memcached_connections(func):
"""Function decorator to pool memcached connections.
Use this to wrap functions that might make multiple calls to memcached. This
will cause a single memcached client to be shared for all connections.
"""
if isgeneratorfunction(func):
def wrapper(*nargs, **kwargs):
with memcached_client():
for result in func(*nargs, **kwargs):
yield result
else:
def wrapper(*nargs, **kwargs):
with memcached_client():
return func(*nargs, **kwargs)
return update_wrapper(wrapper, func) | Function decorator to pool memcached connections.
Use this to wrap functions that might make multiple calls to memcached. This
will cause a single memcached client to be shared for all connections. | Below is the the instruction that describes the task:
### Input:
Function decorator to pool memcached connections.
Use this to wrap functions that might make multiple calls to memcached. This
will cause a single memcached client to be shared for all connections.
### Response:
def pool_memcached_connections(func):
"""Function decorator to pool memcached connections.
Use this to wrap functions that might make multiple calls to memcached. This
will cause a single memcached client to be shared for all connections.
"""
if isgeneratorfunction(func):
def wrapper(*nargs, **kwargs):
with memcached_client():
for result in func(*nargs, **kwargs):
yield result
else:
def wrapper(*nargs, **kwargs):
with memcached_client():
return func(*nargs, **kwargs)
return update_wrapper(wrapper, func) |
def lime_tabular_regression_1000(model, data):
""" LIME Tabular 1000
"""
return lambda X: other.LimeTabularExplainer(model.predict, data, mode="regression").attributions(X, nsamples=1000) | LIME Tabular 1000 | Below is the the instruction that describes the task:
### Input:
LIME Tabular 1000
### Response:
def lime_tabular_regression_1000(model, data):
""" LIME Tabular 1000
"""
return lambda X: other.LimeTabularExplainer(model.predict, data, mode="regression").attributions(X, nsamples=1000) |
def contiguous(self, other: "Interval") -> bool:
"""
Does this interval overlap or touch the other?
"""
return not(self.end < other.start or self.start > other.end) | Does this interval overlap or touch the other? | Below is the the instruction that describes the task:
### Input:
Does this interval overlap or touch the other?
### Response:
def contiguous(self, other: "Interval") -> bool:
"""
Does this interval overlap or touch the other?
"""
return not(self.end < other.start or self.start > other.end) |
def f_load_items(self, iterator, *args, **kwargs):
"""Loads parameters and results specified in `iterator`.
You can directly list the Parameter objects or just their names.
If names are given the `~pypet.naturalnaming.NNGroupNode.f_get` method is applied to find the
parameters or results in the trajectory. Accordingly, the parameters and results
you want to load must already exist in your trajectory (in RAM), probably they
are just empty skeletons waiting desperately to handle data.
If they do not exist in RAM yet, but have been stored to disk before,
you can call :func:`~pypet.trajectory.Trajectory.f_load_skeleton` in order to
bring your trajectory tree skeleton up to date. In case of a single run you can
use the :func:`~pypet.naturalnaming.NNGroupNode.f_load_child` method to recursively
load a subtree without any data.
Then you can load the data of individual results or parameters one by one.
If want to load the whole trajectory at once or ALL results and parameters that are
still empty take a look at :func:`~pypet.trajectory.Trajectory.f_load`.
As mentioned before, to load subtrees of your trajectory you might want to check out
:func:`~pypet.naturalnaming.NNGroupNode.f_load_child`.
To load a list of parameters or results with `f_load_items` you can pass
the following arguments:
:param iterator: A list with parameters or results to be loaded.
:param only_empties:
Optional keyword argument (boolean),
if `True` only empty parameters or results are passed
to the storage service to get loaded. Non-empty parameters or results found in
`iterator` are simply ignored.
:param args: Additional arguments directly passed to the storage service
:param kwargs:
Additional keyword arguments directly passed to the storage service
(except the kwarg `only_empties`)
If you use the standard hdf5 storage service, you can pass the following additional
keyword arguments:
:param load_only:
If you load a result, you can partially load it and ignore the rest of data items.
Just specify the name of the data you want to load. You can also provide a list,
for example `load_only='spikes'`, `load_only=['spikes','membrane_potential']`.
Be aware that you need to specify the names of parts as they were stored
to HDF5. Depending on how your leaf construction works, this may differ
from the names the data might have in your leaf in the trajectory container.
A warning is issued if data specified in `load_only` cannot be found in the
instances specified in `iterator`.
:param load_except:
Analogous to the above, but everything is loaded except names or parts
specified in `load_except`.
You cannot use `load_only` and `load_except` at the same time. If you do
a ValueError is thrown.
A warning is issued if names listed in `load_except` are not part of the
items to load.
"""
if not self._stored:
raise TypeError(
'Cannot load stuff from disk for a trajectory that has never been stored.')
fetched_items = self._nn_interface._fetch_items(LOAD, iterator, args, kwargs)
if fetched_items:
self._storage_service.load(pypetconstants.LIST, fetched_items,
trajectory_name=self.v_name)
else:
self._logger.warning('Your loading was not successful, could not find a single item '
'to load.') | Loads parameters and results specified in `iterator`.
You can directly list the Parameter objects or just their names.
If names are given the `~pypet.naturalnaming.NNGroupNode.f_get` method is applied to find the
parameters or results in the trajectory. Accordingly, the parameters and results
you want to load must already exist in your trajectory (in RAM), probably they
are just empty skeletons waiting desperately to handle data.
If they do not exist in RAM yet, but have been stored to disk before,
you can call :func:`~pypet.trajectory.Trajectory.f_load_skeleton` in order to
bring your trajectory tree skeleton up to date. In case of a single run you can
use the :func:`~pypet.naturalnaming.NNGroupNode.f_load_child` method to recursively
load a subtree without any data.
Then you can load the data of individual results or parameters one by one.
If want to load the whole trajectory at once or ALL results and parameters that are
still empty take a look at :func:`~pypet.trajectory.Trajectory.f_load`.
As mentioned before, to load subtrees of your trajectory you might want to check out
:func:`~pypet.naturalnaming.NNGroupNode.f_load_child`.
To load a list of parameters or results with `f_load_items` you can pass
the following arguments:
:param iterator: A list with parameters or results to be loaded.
:param only_empties:
Optional keyword argument (boolean),
if `True` only empty parameters or results are passed
to the storage service to get loaded. Non-empty parameters or results found in
`iterator` are simply ignored.
:param args: Additional arguments directly passed to the storage service
:param kwargs:
Additional keyword arguments directly passed to the storage service
(except the kwarg `only_empties`)
If you use the standard hdf5 storage service, you can pass the following additional
keyword arguments:
:param load_only:
If you load a result, you can partially load it and ignore the rest of data items.
Just specify the name of the data you want to load. You can also provide a list,
for example `load_only='spikes'`, `load_only=['spikes','membrane_potential']`.
Be aware that you need to specify the names of parts as they were stored
to HDF5. Depending on how your leaf construction works, this may differ
from the names the data might have in your leaf in the trajectory container.
A warning is issued if data specified in `load_only` cannot be found in the
instances specified in `iterator`.
:param load_except:
Analogous to the above, but everything is loaded except names or parts
specified in `load_except`.
You cannot use `load_only` and `load_except` at the same time. If you do
a ValueError is thrown.
A warning is issued if names listed in `load_except` are not part of the
items to load. | Below is the the instruction that describes the task:
### Input:
Loads parameters and results specified in `iterator`.
You can directly list the Parameter objects or just their names.
If names are given the `~pypet.naturalnaming.NNGroupNode.f_get` method is applied to find the
parameters or results in the trajectory. Accordingly, the parameters and results
you want to load must already exist in your trajectory (in RAM), probably they
are just empty skeletons waiting desperately to handle data.
If they do not exist in RAM yet, but have been stored to disk before,
you can call :func:`~pypet.trajectory.Trajectory.f_load_skeleton` in order to
bring your trajectory tree skeleton up to date. In case of a single run you can
use the :func:`~pypet.naturalnaming.NNGroupNode.f_load_child` method to recursively
load a subtree without any data.
Then you can load the data of individual results or parameters one by one.
If want to load the whole trajectory at once or ALL results and parameters that are
still empty take a look at :func:`~pypet.trajectory.Trajectory.f_load`.
As mentioned before, to load subtrees of your trajectory you might want to check out
:func:`~pypet.naturalnaming.NNGroupNode.f_load_child`.
To load a list of parameters or results with `f_load_items` you can pass
the following arguments:
:param iterator: A list with parameters or results to be loaded.
:param only_empties:
Optional keyword argument (boolean),
if `True` only empty parameters or results are passed
to the storage service to get loaded. Non-empty parameters or results found in
`iterator` are simply ignored.
:param args: Additional arguments directly passed to the storage service
:param kwargs:
Additional keyword arguments directly passed to the storage service
(except the kwarg `only_empties`)
If you use the standard hdf5 storage service, you can pass the following additional
keyword arguments:
:param load_only:
If you load a result, you can partially load it and ignore the rest of data items.
Just specify the name of the data you want to load. You can also provide a list,
for example `load_only='spikes'`, `load_only=['spikes','membrane_potential']`.
Be aware that you need to specify the names of parts as they were stored
to HDF5. Depending on how your leaf construction works, this may differ
from the names the data might have in your leaf in the trajectory container.
A warning is issued if data specified in `load_only` cannot be found in the
instances specified in `iterator`.
:param load_except:
Analogous to the above, but everything is loaded except names or parts
specified in `load_except`.
You cannot use `load_only` and `load_except` at the same time. If you do
a ValueError is thrown.
A warning is issued if names listed in `load_except` are not part of the
items to load.
### Response:
def f_load_items(self, iterator, *args, **kwargs):
"""Loads parameters and results specified in `iterator`.
You can directly list the Parameter objects or just their names.
If names are given the `~pypet.naturalnaming.NNGroupNode.f_get` method is applied to find the
parameters or results in the trajectory. Accordingly, the parameters and results
you want to load must already exist in your trajectory (in RAM), probably they
are just empty skeletons waiting desperately to handle data.
If they do not exist in RAM yet, but have been stored to disk before,
you can call :func:`~pypet.trajectory.Trajectory.f_load_skeleton` in order to
bring your trajectory tree skeleton up to date. In case of a single run you can
use the :func:`~pypet.naturalnaming.NNGroupNode.f_load_child` method to recursively
load a subtree without any data.
Then you can load the data of individual results or parameters one by one.
If want to load the whole trajectory at once or ALL results and parameters that are
still empty take a look at :func:`~pypet.trajectory.Trajectory.f_load`.
As mentioned before, to load subtrees of your trajectory you might want to check out
:func:`~pypet.naturalnaming.NNGroupNode.f_load_child`.
To load a list of parameters or results with `f_load_items` you can pass
the following arguments:
:param iterator: A list with parameters or results to be loaded.
:param only_empties:
Optional keyword argument (boolean),
if `True` only empty parameters or results are passed
to the storage service to get loaded. Non-empty parameters or results found in
`iterator` are simply ignored.
:param args: Additional arguments directly passed to the storage service
:param kwargs:
Additional keyword arguments directly passed to the storage service
(except the kwarg `only_empties`)
If you use the standard hdf5 storage service, you can pass the following additional
keyword arguments:
:param load_only:
If you load a result, you can partially load it and ignore the rest of data items.
Just specify the name of the data you want to load. You can also provide a list,
for example `load_only='spikes'`, `load_only=['spikes','membrane_potential']`.
Be aware that you need to specify the names of parts as they were stored
to HDF5. Depending on how your leaf construction works, this may differ
from the names the data might have in your leaf in the trajectory container.
A warning is issued if data specified in `load_only` cannot be found in the
instances specified in `iterator`.
:param load_except:
Analogous to the above, but everything is loaded except names or parts
specified in `load_except`.
You cannot use `load_only` and `load_except` at the same time. If you do
a ValueError is thrown.
A warning is issued if names listed in `load_except` are not part of the
items to load.
"""
if not self._stored:
raise TypeError(
'Cannot load stuff from disk for a trajectory that has never been stored.')
fetched_items = self._nn_interface._fetch_items(LOAD, iterator, args, kwargs)
if fetched_items:
self._storage_service.load(pypetconstants.LIST, fetched_items,
trajectory_name=self.v_name)
else:
self._logger.warning('Your loading was not successful, could not find a single item '
'to load.') |
def structure_attrs_fromdict(self, obj, cl):
# type: (Mapping[str, Any], Type[T]) -> T
"""Instantiate an attrs class from a mapping (dict)."""
# For public use.
conv_obj = {} # Start with a fresh dict, to ignore extra keys.
dispatch = self._structure_func.dispatch
for a in cl.__attrs_attrs__: # type: ignore
# We detect the type by metadata.
type_ = a.type
name = a.name
try:
val = obj[name]
except KeyError:
continue
if name[0] == "_":
name = name[1:]
conv_obj[name] = (
dispatch(type_)(val, type_) if type_ is not None else val
)
return cl(**conv_obj) | Instantiate an attrs class from a mapping (dict). | Below is the the instruction that describes the task:
### Input:
Instantiate an attrs class from a mapping (dict).
### Response:
def structure_attrs_fromdict(self, obj, cl):
# type: (Mapping[str, Any], Type[T]) -> T
"""Instantiate an attrs class from a mapping (dict)."""
# For public use.
conv_obj = {} # Start with a fresh dict, to ignore extra keys.
dispatch = self._structure_func.dispatch
for a in cl.__attrs_attrs__: # type: ignore
# We detect the type by metadata.
type_ = a.type
name = a.name
try:
val = obj[name]
except KeyError:
continue
if name[0] == "_":
name = name[1:]
conv_obj[name] = (
dispatch(type_)(val, type_) if type_ is not None else val
)
return cl(**conv_obj) |
def client(addr):
"""Return a SocketTalk client."""
success = False
while not success:
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.connect(addr)
success = True
except socket.error as err:
sock.close()
talk = SocketTalk(sock)
return talk | Return a SocketTalk client. | Below is the the instruction that describes the task:
### Input:
Return a SocketTalk client.
### Response:
def client(addr):
"""Return a SocketTalk client."""
success = False
while not success:
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.connect(addr)
success = True
except socket.error as err:
sock.close()
talk = SocketTalk(sock)
return talk |
def build_genome_alignment_from_directory(d_name, ref_spec, extensions=None,
index_exts=None,
fail_no_index=False):
"""
build a genome aligment by loading all files in a directory.
Fiel without indexes are loaded immediately; those with indexes are
loaded on-demand. Not recursive (i.e. subdirectories are not parsed).
:param d_name: directory to load from.
:param ref_spec: which species in the alignemnt files is the reference?
:param extensions: list or set of acceptable extensions; treat any files
with these extensions as part of the alignment. If
None, treat any file which has an extension that is
NOT in index_extensions as part of the alignment.
:param index_exts: treat any files with these extensions as index files.
:param fail_no_index: fail if index extensions are provided and an alignment
file has no index file.
"""
if index_exts is None and fail_no_index:
raise ValueError("Failure on no index specified for loading genome " +
"alignment, but no index extensions specified")
blocks = []
for fn in os.listdir(d_name):
pth = os.path.join(d_name, fn)
if os.path.isfile(pth):
_, ext = os.path.splitext(pth)
if extensions is None or ext in extensions:
idx_path = __find_index(pth, index_exts)
if idx_path is None and fail_no_index:
raise PyokitIOError("No index file for " + fn)
for b in genome_alignment_iterator(pth, ref_spec, idx_path):
blocks.append(b)
return GenomeAlignment(blocks) | build a genome aligment by loading all files in a directory.
Fiel without indexes are loaded immediately; those with indexes are
loaded on-demand. Not recursive (i.e. subdirectories are not parsed).
:param d_name: directory to load from.
:param ref_spec: which species in the alignemnt files is the reference?
:param extensions: list or set of acceptable extensions; treat any files
with these extensions as part of the alignment. If
None, treat any file which has an extension that is
NOT in index_extensions as part of the alignment.
:param index_exts: treat any files with these extensions as index files.
:param fail_no_index: fail if index extensions are provided and an alignment
file has no index file. | Below is the the instruction that describes the task:
### Input:
build a genome aligment by loading all files in a directory.
Fiel without indexes are loaded immediately; those with indexes are
loaded on-demand. Not recursive (i.e. subdirectories are not parsed).
:param d_name: directory to load from.
:param ref_spec: which species in the alignemnt files is the reference?
:param extensions: list or set of acceptable extensions; treat any files
with these extensions as part of the alignment. If
None, treat any file which has an extension that is
NOT in index_extensions as part of the alignment.
:param index_exts: treat any files with these extensions as index files.
:param fail_no_index: fail if index extensions are provided and an alignment
file has no index file.
### Response:
def build_genome_alignment_from_directory(d_name, ref_spec, extensions=None,
index_exts=None,
fail_no_index=False):
"""
build a genome aligment by loading all files in a directory.
Fiel without indexes are loaded immediately; those with indexes are
loaded on-demand. Not recursive (i.e. subdirectories are not parsed).
:param d_name: directory to load from.
:param ref_spec: which species in the alignemnt files is the reference?
:param extensions: list or set of acceptable extensions; treat any files
with these extensions as part of the alignment. If
None, treat any file which has an extension that is
NOT in index_extensions as part of the alignment.
:param index_exts: treat any files with these extensions as index files.
:param fail_no_index: fail if index extensions are provided and an alignment
file has no index file.
"""
if index_exts is None and fail_no_index:
raise ValueError("Failure on no index specified for loading genome " +
"alignment, but no index extensions specified")
blocks = []
for fn in os.listdir(d_name):
pth = os.path.join(d_name, fn)
if os.path.isfile(pth):
_, ext = os.path.splitext(pth)
if extensions is None or ext in extensions:
idx_path = __find_index(pth, index_exts)
if idx_path is None and fail_no_index:
raise PyokitIOError("No index file for " + fn)
for b in genome_alignment_iterator(pth, ref_spec, idx_path):
blocks.append(b)
return GenomeAlignment(blocks) |
def session(self, bundle_id=None, arguments=None, environment=None):
"""
Args:
- bundle_id (str): the app bundle id
- arguments (list): ['-u', 'https://www.google.com/ncr']
- enviroment (dict): {"KEY": "VAL"}
WDA Return json like
{
"value": {
"sessionId": "69E6FDBA-8D59-4349-B7DE-A9CA41A97814",
"capabilities": {
"device": "iphone",
"browserName": "部落冲突",
"sdkVersion": "9.3.2",
"CFBundleIdentifier": "com.supercell.magic"
}
},
"sessionId": "69E6FDBA-8D59-4349-B7DE-A9CA41A97814",
"status": 0
}
To create a new session, send json data like
{
"desiredCapabilities": {
"bundleId": "your-bundle-id",
"app": "your-app-path"
"shouldUseCompactResponses": (bool),
"shouldUseTestManagerForVisibilityDetection": (bool),
"maxTypingFrequency": (integer),
"arguments": (list(str)),
"environment": (dict: str->str)
},
}
"""
if bundle_id is None:
sid = self.status()['sessionId']
if not sid:
raise RuntimeError("no session created ever")
http = self.http.new_client('session/'+sid)
return Session(http, sid)
if arguments and type(arguments) is not list:
raise TypeError('arguments must be a list')
if environment and type(environment) is not dict:
raise TypeError('environment must be a dict')
capabilities = {
'bundleId': bundle_id,
'arguments': arguments,
'environment': environment,
'shouldWaitForQuiescence': True,
}
# Remove empty value to prevent WDAError
for k in list(capabilities.keys()):
if capabilities[k] is None:
capabilities.pop(k)
data = json.dumps({
'desiredCapabilities': capabilities
})
res = self.http.post('session', data)
httpclient = self.http.new_client('session/'+res.sessionId)
return Session(httpclient, res.sessionId) | Args:
- bundle_id (str): the app bundle id
- arguments (list): ['-u', 'https://www.google.com/ncr']
- enviroment (dict): {"KEY": "VAL"}
WDA Return json like
{
"value": {
"sessionId": "69E6FDBA-8D59-4349-B7DE-A9CA41A97814",
"capabilities": {
"device": "iphone",
"browserName": "部落冲突",
"sdkVersion": "9.3.2",
"CFBundleIdentifier": "com.supercell.magic"
}
},
"sessionId": "69E6FDBA-8D59-4349-B7DE-A9CA41A97814",
"status": 0
}
To create a new session, send json data like
{
"desiredCapabilities": {
"bundleId": "your-bundle-id",
"app": "your-app-path"
"shouldUseCompactResponses": (bool),
"shouldUseTestManagerForVisibilityDetection": (bool),
"maxTypingFrequency": (integer),
"arguments": (list(str)),
"environment": (dict: str->str)
},
} | Below is the the instruction that describes the task:
### Input:
Args:
- bundle_id (str): the app bundle id
- arguments (list): ['-u', 'https://www.google.com/ncr']
- enviroment (dict): {"KEY": "VAL"}
WDA Return json like
{
"value": {
"sessionId": "69E6FDBA-8D59-4349-B7DE-A9CA41A97814",
"capabilities": {
"device": "iphone",
"browserName": "部落冲突",
"sdkVersion": "9.3.2",
"CFBundleIdentifier": "com.supercell.magic"
}
},
"sessionId": "69E6FDBA-8D59-4349-B7DE-A9CA41A97814",
"status": 0
}
To create a new session, send json data like
{
"desiredCapabilities": {
"bundleId": "your-bundle-id",
"app": "your-app-path"
"shouldUseCompactResponses": (bool),
"shouldUseTestManagerForVisibilityDetection": (bool),
"maxTypingFrequency": (integer),
"arguments": (list(str)),
"environment": (dict: str->str)
},
}
### Response:
def session(self, bundle_id=None, arguments=None, environment=None):
"""
Args:
- bundle_id (str): the app bundle id
- arguments (list): ['-u', 'https://www.google.com/ncr']
- enviroment (dict): {"KEY": "VAL"}
WDA Return json like
{
"value": {
"sessionId": "69E6FDBA-8D59-4349-B7DE-A9CA41A97814",
"capabilities": {
"device": "iphone",
"browserName": "部落冲突",
"sdkVersion": "9.3.2",
"CFBundleIdentifier": "com.supercell.magic"
}
},
"sessionId": "69E6FDBA-8D59-4349-B7DE-A9CA41A97814",
"status": 0
}
To create a new session, send json data like
{
"desiredCapabilities": {
"bundleId": "your-bundle-id",
"app": "your-app-path"
"shouldUseCompactResponses": (bool),
"shouldUseTestManagerForVisibilityDetection": (bool),
"maxTypingFrequency": (integer),
"arguments": (list(str)),
"environment": (dict: str->str)
},
}
"""
if bundle_id is None:
sid = self.status()['sessionId']
if not sid:
raise RuntimeError("no session created ever")
http = self.http.new_client('session/'+sid)
return Session(http, sid)
if arguments and type(arguments) is not list:
raise TypeError('arguments must be a list')
if environment and type(environment) is not dict:
raise TypeError('environment must be a dict')
capabilities = {
'bundleId': bundle_id,
'arguments': arguments,
'environment': environment,
'shouldWaitForQuiescence': True,
}
# Remove empty value to prevent WDAError
for k in list(capabilities.keys()):
if capabilities[k] is None:
capabilities.pop(k)
data = json.dumps({
'desiredCapabilities': capabilities
})
res = self.http.post('session', data)
httpclient = self.http.new_client('session/'+res.sessionId)
return Session(httpclient, res.sessionId) |
def is_simplified(s):
"""Check if a string's Chinese characters are Simplified.
This is equivalent to:
>>> identify('foo') in (SIMPLIFIED, BOTH)
"""
chinese = _get_hanzi(s)
if not chinese:
return False
elif chinese.issubset(_SHARED_CHARACTERS):
return True
elif chinese.issubset(_SIMPLIFIED_CHARACTERS):
return True
return False | Check if a string's Chinese characters are Simplified.
This is equivalent to:
>>> identify('foo') in (SIMPLIFIED, BOTH) | Below is the the instruction that describes the task:
### Input:
Check if a string's Chinese characters are Simplified.
This is equivalent to:
>>> identify('foo') in (SIMPLIFIED, BOTH)
### Response:
def is_simplified(s):
"""Check if a string's Chinese characters are Simplified.
This is equivalent to:
>>> identify('foo') in (SIMPLIFIED, BOTH)
"""
chinese = _get_hanzi(s)
if not chinese:
return False
elif chinese.issubset(_SHARED_CHARACTERS):
return True
elif chinese.issubset(_SIMPLIFIED_CHARACTERS):
return True
return False |
def create_connection_context(self):
'''Creates and seriazlies a IpcConnectionContextProto (not delimited)'''
context = IpcConnectionContextProto()
context.userInfo.effectiveUser = self.effective_user
context.protocol = "org.apache.hadoop.hdfs.protocol.ClientProtocol"
s_context = context.SerializeToString()
log_protobuf_message("RequestContext (len: %d)" % len(s_context), context)
return s_context | Creates and seriazlies a IpcConnectionContextProto (not delimited) | Below is the the instruction that describes the task:
### Input:
Creates and seriazlies a IpcConnectionContextProto (not delimited)
### Response:
def create_connection_context(self):
'''Creates and seriazlies a IpcConnectionContextProto (not delimited)'''
context = IpcConnectionContextProto()
context.userInfo.effectiveUser = self.effective_user
context.protocol = "org.apache.hadoop.hdfs.protocol.ClientProtocol"
s_context = context.SerializeToString()
log_protobuf_message("RequestContext (len: %d)" % len(s_context), context)
return s_context |
def _api_query(self, protection=None, path_dict=None, options=None):
"""
Queries Bittrex
:param request_url: fully-formed URL to request
:type options: dict
:return: JSON response from Bittrex
:rtype : dict
"""
if not options:
options = {}
if self.api_version not in path_dict:
raise Exception('method call not available under API version {}'.format(self.api_version))
request_url = BASE_URL_V2_0 if self.api_version == API_V2_0 else BASE_URL_V1_1
request_url = request_url.format(path=path_dict[self.api_version])
nonce = str(int(time.time() * 1000))
if protection != PROTECTION_PUB:
request_url = "{0}apikey={1}&nonce={2}&".format(request_url, self.api_key, nonce)
request_url += urlencode(options)
try:
if sys.version_info >= (3, 0) and protection != PROTECTION_PUB:
apisign = hmac.new(bytearray(self.api_secret, 'ascii'),
bytearray(request_url, 'ascii'),
hashlib.sha512).hexdigest()
else:
apisign = hmac.new(self.api_secret.encode(),
request_url.encode(),
hashlib.sha512).hexdigest()
self.wait()
return self.dispatch(request_url, apisign)
except Exception:
return {
'success': False,
'message': 'NO_API_RESPONSE',
'result': None
} | Queries Bittrex
:param request_url: fully-formed URL to request
:type options: dict
:return: JSON response from Bittrex
:rtype : dict | Below is the the instruction that describes the task:
### Input:
Queries Bittrex
:param request_url: fully-formed URL to request
:type options: dict
:return: JSON response from Bittrex
:rtype : dict
### Response:
def _api_query(self, protection=None, path_dict=None, options=None):
"""
Queries Bittrex
:param request_url: fully-formed URL to request
:type options: dict
:return: JSON response from Bittrex
:rtype : dict
"""
if not options:
options = {}
if self.api_version not in path_dict:
raise Exception('method call not available under API version {}'.format(self.api_version))
request_url = BASE_URL_V2_0 if self.api_version == API_V2_0 else BASE_URL_V1_1
request_url = request_url.format(path=path_dict[self.api_version])
nonce = str(int(time.time() * 1000))
if protection != PROTECTION_PUB:
request_url = "{0}apikey={1}&nonce={2}&".format(request_url, self.api_key, nonce)
request_url += urlencode(options)
try:
if sys.version_info >= (3, 0) and protection != PROTECTION_PUB:
apisign = hmac.new(bytearray(self.api_secret, 'ascii'),
bytearray(request_url, 'ascii'),
hashlib.sha512).hexdigest()
else:
apisign = hmac.new(self.api_secret.encode(),
request_url.encode(),
hashlib.sha512).hexdigest()
self.wait()
return self.dispatch(request_url, apisign)
except Exception:
return {
'success': False,
'message': 'NO_API_RESPONSE',
'result': None
} |
def _get_penalty_function(nmr_parameters, constraints_func=None):
"""Get a function to compute the penalty term for the boundary conditions.
This is meant to be used in the evaluation function of the optimization routines.
Args:
nmr_parameters (int): the number of parameters in the model
constraints_func (mot.optimize.base.ConstraintFunction): function to compute (inequality) constraints.
Should hold a CL function with the signature:
.. code-block:: c
void <func_name>(local const mot_float_type* const x,
void* data,
local mot_float_type* constraint_values);
Where ``constraints_values`` is filled as:
.. code-block:: c
constraint_values[i] = g_i(x)
That is, for each constraint function :math:`g_i`, formulated as :math:`g_i(x) <= 0`, we should return
the function value of :math:`g_i`.
Returns:
tuple: Struct and SimpleCLFunction, the required data for the penalty function and the penalty function itself.
"""
dependencies = []
data_requirements = {'scratch': LocalMemory('double', 1)}
constraints_code = ''
if constraints_func and constraints_func.get_nmr_constraints() > 0:
nmr_constraints = constraints_func.get_nmr_constraints()
dependencies.append(constraints_func)
data_requirements['constraints'] = LocalMemory('mot_float_type', nmr_constraints)
constraints_code = '''
local mot_float_type* constraints = ((_mle_penalty_data*)scratch_data)->constraints;
''' + constraints_func.get_cl_function_name() + '''(x, data, constraints);
for(int i = 0; i < ''' + str(nmr_constraints) + '''; i++){
*penalty_sum += pown(max((mot_float_type)0, constraints[i]), 2);
}
'''
data = Struct(data_requirements, '_mle_penalty_data')
func = SimpleCLFunction.from_string('''
double _mle_penalty(
local mot_float_type* x,
void* data,
local mot_float_type* lower_bounds,
local mot_float_type* upper_bounds,
float penalty_weight,
void* scratch_data){
local double* penalty_sum = ((_mle_penalty_data*)scratch_data)->scratch;
if(get_local_id(0) == 0){
*penalty_sum = 0;
// boundary conditions
for(int i = 0; i < ''' + str(nmr_parameters) + '''; i++){
if(isfinite(upper_bounds[i])){
*penalty_sum += pown(max((mot_float_type)0, x[i] - upper_bounds[i]), 2);
}
if(isfinite(lower_bounds[i])){
*penalty_sum += pown(max((mot_float_type)0, lower_bounds[i] - x[i]), 2);
}
}
}
barrier(CLK_LOCAL_MEM_FENCE);
// constraints
''' + constraints_code + '''
return penalty_weight * *penalty_sum;
}
''', dependencies=dependencies)
return data, func | Get a function to compute the penalty term for the boundary conditions.
This is meant to be used in the evaluation function of the optimization routines.
Args:
nmr_parameters (int): the number of parameters in the model
constraints_func (mot.optimize.base.ConstraintFunction): function to compute (inequality) constraints.
Should hold a CL function with the signature:
.. code-block:: c
void <func_name>(local const mot_float_type* const x,
void* data,
local mot_float_type* constraint_values);
Where ``constraints_values`` is filled as:
.. code-block:: c
constraint_values[i] = g_i(x)
That is, for each constraint function :math:`g_i`, formulated as :math:`g_i(x) <= 0`, we should return
the function value of :math:`g_i`.
Returns:
tuple: Struct and SimpleCLFunction, the required data for the penalty function and the penalty function itself. | Below is the the instruction that describes the task:
### Input:
Get a function to compute the penalty term for the boundary conditions.
This is meant to be used in the evaluation function of the optimization routines.
Args:
nmr_parameters (int): the number of parameters in the model
constraints_func (mot.optimize.base.ConstraintFunction): function to compute (inequality) constraints.
Should hold a CL function with the signature:
.. code-block:: c
void <func_name>(local const mot_float_type* const x,
void* data,
local mot_float_type* constraint_values);
Where ``constraints_values`` is filled as:
.. code-block:: c
constraint_values[i] = g_i(x)
That is, for each constraint function :math:`g_i`, formulated as :math:`g_i(x) <= 0`, we should return
the function value of :math:`g_i`.
Returns:
tuple: Struct and SimpleCLFunction, the required data for the penalty function and the penalty function itself.
### Response:
def _get_penalty_function(nmr_parameters, constraints_func=None):
"""Get a function to compute the penalty term for the boundary conditions.
This is meant to be used in the evaluation function of the optimization routines.
Args:
nmr_parameters (int): the number of parameters in the model
constraints_func (mot.optimize.base.ConstraintFunction): function to compute (inequality) constraints.
Should hold a CL function with the signature:
.. code-block:: c
void <func_name>(local const mot_float_type* const x,
void* data,
local mot_float_type* constraint_values);
Where ``constraints_values`` is filled as:
.. code-block:: c
constraint_values[i] = g_i(x)
That is, for each constraint function :math:`g_i`, formulated as :math:`g_i(x) <= 0`, we should return
the function value of :math:`g_i`.
Returns:
tuple: Struct and SimpleCLFunction, the required data for the penalty function and the penalty function itself.
"""
dependencies = []
data_requirements = {'scratch': LocalMemory('double', 1)}
constraints_code = ''
if constraints_func and constraints_func.get_nmr_constraints() > 0:
nmr_constraints = constraints_func.get_nmr_constraints()
dependencies.append(constraints_func)
data_requirements['constraints'] = LocalMemory('mot_float_type', nmr_constraints)
constraints_code = '''
local mot_float_type* constraints = ((_mle_penalty_data*)scratch_data)->constraints;
''' + constraints_func.get_cl_function_name() + '''(x, data, constraints);
for(int i = 0; i < ''' + str(nmr_constraints) + '''; i++){
*penalty_sum += pown(max((mot_float_type)0, constraints[i]), 2);
}
'''
data = Struct(data_requirements, '_mle_penalty_data')
func = SimpleCLFunction.from_string('''
double _mle_penalty(
local mot_float_type* x,
void* data,
local mot_float_type* lower_bounds,
local mot_float_type* upper_bounds,
float penalty_weight,
void* scratch_data){
local double* penalty_sum = ((_mle_penalty_data*)scratch_data)->scratch;
if(get_local_id(0) == 0){
*penalty_sum = 0;
// boundary conditions
for(int i = 0; i < ''' + str(nmr_parameters) + '''; i++){
if(isfinite(upper_bounds[i])){
*penalty_sum += pown(max((mot_float_type)0, x[i] - upper_bounds[i]), 2);
}
if(isfinite(lower_bounds[i])){
*penalty_sum += pown(max((mot_float_type)0, lower_bounds[i] - x[i]), 2);
}
}
}
barrier(CLK_LOCAL_MEM_FENCE);
// constraints
''' + constraints_code + '''
return penalty_weight * *penalty_sum;
}
''', dependencies=dependencies)
return data, func |
def manifestation_model_factory(*, validator=validators.is_manifestation_model,
ld_type='CreativeWork', **kwargs):
"""Generate a Manifestation model.
Expects ``data``, ``validator``, ``model_cls``, ``ld_type``, and
``ld_context`` as keyword arguments.
"""
return _model_factory(validator=validator, ld_type=ld_type, **kwargs) | Generate a Manifestation model.
Expects ``data``, ``validator``, ``model_cls``, ``ld_type``, and
``ld_context`` as keyword arguments. | Below is the the instruction that describes the task:
### Input:
Generate a Manifestation model.
Expects ``data``, ``validator``, ``model_cls``, ``ld_type``, and
``ld_context`` as keyword arguments.
### Response:
def manifestation_model_factory(*, validator=validators.is_manifestation_model,
ld_type='CreativeWork', **kwargs):
"""Generate a Manifestation model.
Expects ``data``, ``validator``, ``model_cls``, ``ld_type``, and
``ld_context`` as keyword arguments.
"""
return _model_factory(validator=validator, ld_type=ld_type, **kwargs) |
def _get_sql_server_ver(self):
"""
Returns the version of the SQL Server in use:
"""
if self._ss_ver is not None:
return self._ss_ver
cur = self.connection.cursor()
ver_code = None
if not self.is_db2 and not self.is_openedge:
cur.execute("SELECT CAST(SERVERPROPERTY('ProductVersion') as varchar)")
ver_code = cur.fetchone()[0]
ver_code = int(ver_code.split('.')[0])
else:
ver_code = 0
if ver_code >= 11:
self._ss_ver = 2012
elif ver_code == 10:
self._ss_ver = 2008
elif ver_code == 9:
self._ss_ver = 2005
else:
self._ss_ver = 2000
return self._ss_ver | Returns the version of the SQL Server in use: | Below is the the instruction that describes the task:
### Input:
Returns the version of the SQL Server in use:
### Response:
def _get_sql_server_ver(self):
"""
Returns the version of the SQL Server in use:
"""
if self._ss_ver is not None:
return self._ss_ver
cur = self.connection.cursor()
ver_code = None
if not self.is_db2 and not self.is_openedge:
cur.execute("SELECT CAST(SERVERPROPERTY('ProductVersion') as varchar)")
ver_code = cur.fetchone()[0]
ver_code = int(ver_code.split('.')[0])
else:
ver_code = 0
if ver_code >= 11:
self._ss_ver = 2012
elif ver_code == 10:
self._ss_ver = 2008
elif ver_code == 9:
self._ss_ver = 2005
else:
self._ss_ver = 2000
return self._ss_ver |
def noninteractive_changeset_update(self, fqn, template, old_parameters,
parameters, stack_policy, tags,
**kwargs):
"""Update a Cloudformation stack using a change set.
This is required for stacks with a defined Transform (i.e. SAM), as the
default update_stack API cannot be used with them.
Args:
fqn (str): The fully qualified name of the Cloudformation stack.
template (:class:`stacker.providers.base.Template`): A Template
object to use when updating the stack.
old_parameters (list): A list of dictionaries that defines the
parameter list on the existing Cloudformation stack.
parameters (list): A list of dictionaries that defines the
parameter list to be applied to the Cloudformation stack.
stack_policy (:class:`stacker.providers.base.Template`): A template
object representing a stack policy.
tags (list): A list of dictionaries that defines the tags
that should be applied to the Cloudformation stack.
"""
logger.debug("Using noninterative changeset provider mode "
"for %s.", fqn)
_changes, change_set_id = create_change_set(
self.cloudformation, fqn, template, parameters, tags,
'UPDATE', service_role=self.service_role, **kwargs
)
self.deal_with_changeset_stack_policy(fqn, stack_policy)
self.cloudformation.execute_change_set(
ChangeSetName=change_set_id,
) | Update a Cloudformation stack using a change set.
This is required for stacks with a defined Transform (i.e. SAM), as the
default update_stack API cannot be used with them.
Args:
fqn (str): The fully qualified name of the Cloudformation stack.
template (:class:`stacker.providers.base.Template`): A Template
object to use when updating the stack.
old_parameters (list): A list of dictionaries that defines the
parameter list on the existing Cloudformation stack.
parameters (list): A list of dictionaries that defines the
parameter list to be applied to the Cloudformation stack.
stack_policy (:class:`stacker.providers.base.Template`): A template
object representing a stack policy.
tags (list): A list of dictionaries that defines the tags
that should be applied to the Cloudformation stack. | Below is the the instruction that describes the task:
### Input:
Update a Cloudformation stack using a change set.
This is required for stacks with a defined Transform (i.e. SAM), as the
default update_stack API cannot be used with them.
Args:
fqn (str): The fully qualified name of the Cloudformation stack.
template (:class:`stacker.providers.base.Template`): A Template
object to use when updating the stack.
old_parameters (list): A list of dictionaries that defines the
parameter list on the existing Cloudformation stack.
parameters (list): A list of dictionaries that defines the
parameter list to be applied to the Cloudformation stack.
stack_policy (:class:`stacker.providers.base.Template`): A template
object representing a stack policy.
tags (list): A list of dictionaries that defines the tags
that should be applied to the Cloudformation stack.
### Response:
def noninteractive_changeset_update(self, fqn, template, old_parameters,
parameters, stack_policy, tags,
**kwargs):
"""Update a Cloudformation stack using a change set.
This is required for stacks with a defined Transform (i.e. SAM), as the
default update_stack API cannot be used with them.
Args:
fqn (str): The fully qualified name of the Cloudformation stack.
template (:class:`stacker.providers.base.Template`): A Template
object to use when updating the stack.
old_parameters (list): A list of dictionaries that defines the
parameter list on the existing Cloudformation stack.
parameters (list): A list of dictionaries that defines the
parameter list to be applied to the Cloudformation stack.
stack_policy (:class:`stacker.providers.base.Template`): A template
object representing a stack policy.
tags (list): A list of dictionaries that defines the tags
that should be applied to the Cloudformation stack.
"""
logger.debug("Using noninterative changeset provider mode "
"for %s.", fqn)
_changes, change_set_id = create_change_set(
self.cloudformation, fqn, template, parameters, tags,
'UPDATE', service_role=self.service_role, **kwargs
)
self.deal_with_changeset_stack_policy(fqn, stack_policy)
self.cloudformation.execute_change_set(
ChangeSetName=change_set_id,
) |
def parse_image_json(text):
"""
parses response output of AWS describe commands and returns the first (and only) item in array
:param text: describe output
:return: image json
"""
image_details = json.loads(text)
if image_details.get('Images') is not None:
try:
image_details = image_details.get('Images')[0]
except IndexError:
image_details = None
return image_details | parses response output of AWS describe commands and returns the first (and only) item in array
:param text: describe output
:return: image json | Below is the the instruction that describes the task:
### Input:
parses response output of AWS describe commands and returns the first (and only) item in array
:param text: describe output
:return: image json
### Response:
def parse_image_json(text):
"""
parses response output of AWS describe commands and returns the first (and only) item in array
:param text: describe output
:return: image json
"""
image_details = json.loads(text)
if image_details.get('Images') is not None:
try:
image_details = image_details.get('Images')[0]
except IndexError:
image_details = None
return image_details |
def orientation(p1, p2, p3):
"""
Finds orientation of three points p1, p2, p3.
The function returns following values
0 --> p1, p2 and p3 are collinear
1 --> Clockwise
2 --> Counterclockwise
:param p1: tuple representing two dimensional point
:param p2: tuple representing two dimensional point
:param p3: tuple representing two dimensional point
"""
val = (p2[consts.Consts.y] - p1[consts.Consts.y]) * (p3[consts.Consts.x] - p2[consts.Consts.x]) \
- (p2[consts.Consts.x] - p1[consts.Consts.x]) * (p3[consts.Consts.y] - p2[consts.Consts.y])
if val == 0:
return 0 # collinear
elif val > 0:
return 1 # clockwise
else:
return 2 | Finds orientation of three points p1, p2, p3.
The function returns following values
0 --> p1, p2 and p3 are collinear
1 --> Clockwise
2 --> Counterclockwise
:param p1: tuple representing two dimensional point
:param p2: tuple representing two dimensional point
:param p3: tuple representing two dimensional point | Below is the the instruction that describes the task:
### Input:
Finds orientation of three points p1, p2, p3.
The function returns following values
0 --> p1, p2 and p3 are collinear
1 --> Clockwise
2 --> Counterclockwise
:param p1: tuple representing two dimensional point
:param p2: tuple representing two dimensional point
:param p3: tuple representing two dimensional point
### Response:
def orientation(p1, p2, p3):
"""
Finds orientation of three points p1, p2, p3.
The function returns following values
0 --> p1, p2 and p3 are collinear
1 --> Clockwise
2 --> Counterclockwise
:param p1: tuple representing two dimensional point
:param p2: tuple representing two dimensional point
:param p3: tuple representing two dimensional point
"""
val = (p2[consts.Consts.y] - p1[consts.Consts.y]) * (p3[consts.Consts.x] - p2[consts.Consts.x]) \
- (p2[consts.Consts.x] - p1[consts.Consts.x]) * (p3[consts.Consts.y] - p2[consts.Consts.y])
if val == 0:
return 0 # collinear
elif val > 0:
return 1 # clockwise
else:
return 2 |
def assign_shifts_view(request, semester):
"""
View all members' preferences. This view also includes forms to create an
entire semester's worth of weekly workshifts.
"""
page_name = "Assign Shifts"
auto_assign_shifts_form = None
random_assign_instances_form = None
clear_assign_form = None
if WorkshiftPool.objects.filter(semester=semester).count():
auto_assign_shifts_form = AutoAssignShiftForm(
data=request.POST if AutoAssignShiftForm.name in request.POST else None,
semester=semester,
)
random_assign_instances_form = RandomAssignInstancesForm(
data=request.POST if RandomAssignInstancesForm.name in request.POST else None,
semester=semester,
)
clear_assign_form = ClearAssignmentsForm(
data=request.POST if ClearAssignmentsForm.name in request.POST else None,
semester=semester,
)
forms = [auto_assign_shifts_form, random_assign_instances_form,
clear_assign_form]
if auto_assign_shifts_form and auto_assign_shifts_form.is_valid():
unassigned_profiles = auto_assign_shifts_form.save()
message = "Assigned workshifters to regular workshifts."
if unassigned_profiles:
message += " The following workshifters were not given " \
"complete assignments: "
message += ", ".join(i.user.get_full_name() for i in unassigned_profiles)
messages.add_message(request, messages.INFO, message)
return HttpResponseRedirect(wurl(
"workshift:assign_shifts",
sem_url=semester.sem_url,
))
if random_assign_instances_form and random_assign_instances_form.is_valid():
unassigned_profiles, unassigned_shifts = \
random_assign_instances_form.save()
message = "Assigned workshifters randomly to instances within {}." \
.format(random_assign_instances_form.cleaned_data["pool"])
if unassigned_profiles:
message += "The following workshifers were not given " \
"complete assignments: "
message += ", ".join(i.user.get_full_name() for i in unassigned_profiles)
messages.add_message(request, messages.INFO, message)
return HttpResponseRedirect(wurl(
"workshift:assign_shifts",
sem_url=semester.sem_url,
))
if clear_assign_form and clear_assign_form.is_valid():
clear_assign_form.save()
messages.add_message(
request,
messages.INFO,
"Cleared all workshifters from their regular workshift "
"assignments",
)
return HttpResponseRedirect(wurl(
"workshift:assign_shifts",
sem_url=semester.sem_url,
))
shifts = RegularWorkshift.objects.filter(
pool__semester=semester,
active=True,
).exclude(
workshift_type__assignment=WorkshiftType.NO_ASSIGN,
)
assign_forms = []
for shift in shifts:
form = AssignShiftForm(
data=request.POST if "individual_assign" in request.POST else None,
prefix="shift-{}".format(shift.pk),
instance=shift,
semester=semester,
)
assign_forms.append(form)
if assign_forms and all(i.is_valid() for i in assign_forms):
for form in assign_forms:
form.save()
messages.add_message(
request,
messages.INFO,
"Workshift assignments saved.",
)
return HttpResponseRedirect(wurl(
"workshift:assign_shifts",
sem_url=semester.sem_url,
))
workshifters = WorkshiftProfile.objects.filter(semester=semester)
pools = WorkshiftPool.objects.filter(semester=semester).order_by(
"-is_primary", "title",
)
pool_hours = []
for workshifter in workshifters:
hours_owed = []
for pool in pools:
hours = workshifter.pool_hours.get(pool=pool)
hours_owed.append(hours.hours - hours.assigned_hours)
if any(i > 0 for i in hours_owed):
pool_hours.append(hours_owed)
total_pool_hours = [
sum(hours[i] for hours in pool_hours)
for i in range(len(pool_hours[0]) if len(pool_hours) > 0 else 0)
]
return render_to_response("assign_shifts.html", {
"page_name": page_name,
"forms": forms,
"assign_forms": assign_forms,
"unassigned_profiles": zip(workshifters, pool_hours),
"pools": pools,
"total_pool_hours": total_pool_hours,
}, context_instance=RequestContext(request)) | View all members' preferences. This view also includes forms to create an
entire semester's worth of weekly workshifts. | Below is the the instruction that describes the task:
### Input:
View all members' preferences. This view also includes forms to create an
entire semester's worth of weekly workshifts.
### Response:
def assign_shifts_view(request, semester):
"""
View all members' preferences. This view also includes forms to create an
entire semester's worth of weekly workshifts.
"""
page_name = "Assign Shifts"
auto_assign_shifts_form = None
random_assign_instances_form = None
clear_assign_form = None
if WorkshiftPool.objects.filter(semester=semester).count():
auto_assign_shifts_form = AutoAssignShiftForm(
data=request.POST if AutoAssignShiftForm.name in request.POST else None,
semester=semester,
)
random_assign_instances_form = RandomAssignInstancesForm(
data=request.POST if RandomAssignInstancesForm.name in request.POST else None,
semester=semester,
)
clear_assign_form = ClearAssignmentsForm(
data=request.POST if ClearAssignmentsForm.name in request.POST else None,
semester=semester,
)
forms = [auto_assign_shifts_form, random_assign_instances_form,
clear_assign_form]
if auto_assign_shifts_form and auto_assign_shifts_form.is_valid():
unassigned_profiles = auto_assign_shifts_form.save()
message = "Assigned workshifters to regular workshifts."
if unassigned_profiles:
message += " The following workshifters were not given " \
"complete assignments: "
message += ", ".join(i.user.get_full_name() for i in unassigned_profiles)
messages.add_message(request, messages.INFO, message)
return HttpResponseRedirect(wurl(
"workshift:assign_shifts",
sem_url=semester.sem_url,
))
if random_assign_instances_form and random_assign_instances_form.is_valid():
unassigned_profiles, unassigned_shifts = \
random_assign_instances_form.save()
message = "Assigned workshifters randomly to instances within {}." \
.format(random_assign_instances_form.cleaned_data["pool"])
if unassigned_profiles:
message += "The following workshifers were not given " \
"complete assignments: "
message += ", ".join(i.user.get_full_name() for i in unassigned_profiles)
messages.add_message(request, messages.INFO, message)
return HttpResponseRedirect(wurl(
"workshift:assign_shifts",
sem_url=semester.sem_url,
))
if clear_assign_form and clear_assign_form.is_valid():
clear_assign_form.save()
messages.add_message(
request,
messages.INFO,
"Cleared all workshifters from their regular workshift "
"assignments",
)
return HttpResponseRedirect(wurl(
"workshift:assign_shifts",
sem_url=semester.sem_url,
))
shifts = RegularWorkshift.objects.filter(
pool__semester=semester,
active=True,
).exclude(
workshift_type__assignment=WorkshiftType.NO_ASSIGN,
)
assign_forms = []
for shift in shifts:
form = AssignShiftForm(
data=request.POST if "individual_assign" in request.POST else None,
prefix="shift-{}".format(shift.pk),
instance=shift,
semester=semester,
)
assign_forms.append(form)
if assign_forms and all(i.is_valid() for i in assign_forms):
for form in assign_forms:
form.save()
messages.add_message(
request,
messages.INFO,
"Workshift assignments saved.",
)
return HttpResponseRedirect(wurl(
"workshift:assign_shifts",
sem_url=semester.sem_url,
))
workshifters = WorkshiftProfile.objects.filter(semester=semester)
pools = WorkshiftPool.objects.filter(semester=semester).order_by(
"-is_primary", "title",
)
pool_hours = []
for workshifter in workshifters:
hours_owed = []
for pool in pools:
hours = workshifter.pool_hours.get(pool=pool)
hours_owed.append(hours.hours - hours.assigned_hours)
if any(i > 0 for i in hours_owed):
pool_hours.append(hours_owed)
total_pool_hours = [
sum(hours[i] for hours in pool_hours)
for i in range(len(pool_hours[0]) if len(pool_hours) > 0 else 0)
]
return render_to_response("assign_shifts.html", {
"page_name": page_name,
"forms": forms,
"assign_forms": assign_forms,
"unassigned_profiles": zip(workshifters, pool_hours),
"pools": pools,
"total_pool_hours": total_pool_hours,
}, context_instance=RequestContext(request)) |
def from_veto_definer_file(cls, fp, start=None, end=None, ifo=None,
format='ligolw'):
"""Read a `DataQualityDict` from a LIGO_LW XML VetoDefinerTable.
Parameters
----------
fp : `str`
path of veto definer file to read
start : `~gwpy.time.LIGOTimeGPS`, `int`, optional
GPS start time at which to restrict returned flags
end : `~gwpy.time.LIGOTimeGPS`, `int`, optional
GPS end time at which to restrict returned flags
ifo : `str`, optional
interferometer prefix whose flags you want to read
format : `str`, optional
format of file to read, currently only 'ligolw' is supported
Returns
-------
flags : `DataQualityDict`
a `DataQualityDict` of flags parsed from the `veto_def_table`
of the input file.
Notes
-----
This method does not automatically `~DataQualityDict.populate`
the `active` segment list of any flags, a separate call should
be made for that as follows
>>> flags = DataQualityDict.from_veto_definer_file('/path/to/file.xml')
>>> flags.populate()
"""
if format != 'ligolw':
raise NotImplementedError("Reading veto definer from non-ligolw "
"format file is not currently "
"supported")
# read veto definer file
with get_readable_fileobj(fp, show_progress=False) as fobj:
from ..io.ligolw import read_table as read_ligolw_table
veto_def_table = read_ligolw_table(fobj, 'veto_definer')
if start is not None:
start = to_gps(start)
if end is not None:
end = to_gps(end)
# parse flag definitions
out = cls()
for row in veto_def_table:
if ifo and row.ifo != ifo:
continue
if start and 0 < row.end_time <= start:
continue
elif start:
row.start_time = max(row.start_time, start)
if end and row.start_time >= end:
continue
elif end and not row.end_time:
row.end_time = end
elif end:
row.end_time = min(row.end_time, end)
flag = DataQualityFlag.from_veto_def(row)
if flag.name in out:
out[flag.name].known.extend(flag.known)
out[flag.name].known.coalesce()
else:
out[flag.name] = flag
return out | Read a `DataQualityDict` from a LIGO_LW XML VetoDefinerTable.
Parameters
----------
fp : `str`
path of veto definer file to read
start : `~gwpy.time.LIGOTimeGPS`, `int`, optional
GPS start time at which to restrict returned flags
end : `~gwpy.time.LIGOTimeGPS`, `int`, optional
GPS end time at which to restrict returned flags
ifo : `str`, optional
interferometer prefix whose flags you want to read
format : `str`, optional
format of file to read, currently only 'ligolw' is supported
Returns
-------
flags : `DataQualityDict`
a `DataQualityDict` of flags parsed from the `veto_def_table`
of the input file.
Notes
-----
This method does not automatically `~DataQualityDict.populate`
the `active` segment list of any flags, a separate call should
be made for that as follows
>>> flags = DataQualityDict.from_veto_definer_file('/path/to/file.xml')
>>> flags.populate() | Below is the the instruction that describes the task:
### Input:
Read a `DataQualityDict` from a LIGO_LW XML VetoDefinerTable.
Parameters
----------
fp : `str`
path of veto definer file to read
start : `~gwpy.time.LIGOTimeGPS`, `int`, optional
GPS start time at which to restrict returned flags
end : `~gwpy.time.LIGOTimeGPS`, `int`, optional
GPS end time at which to restrict returned flags
ifo : `str`, optional
interferometer prefix whose flags you want to read
format : `str`, optional
format of file to read, currently only 'ligolw' is supported
Returns
-------
flags : `DataQualityDict`
a `DataQualityDict` of flags parsed from the `veto_def_table`
of the input file.
Notes
-----
This method does not automatically `~DataQualityDict.populate`
the `active` segment list of any flags, a separate call should
be made for that as follows
>>> flags = DataQualityDict.from_veto_definer_file('/path/to/file.xml')
>>> flags.populate()
### Response:
def from_veto_definer_file(cls, fp, start=None, end=None, ifo=None,
format='ligolw'):
"""Read a `DataQualityDict` from a LIGO_LW XML VetoDefinerTable.
Parameters
----------
fp : `str`
path of veto definer file to read
start : `~gwpy.time.LIGOTimeGPS`, `int`, optional
GPS start time at which to restrict returned flags
end : `~gwpy.time.LIGOTimeGPS`, `int`, optional
GPS end time at which to restrict returned flags
ifo : `str`, optional
interferometer prefix whose flags you want to read
format : `str`, optional
format of file to read, currently only 'ligolw' is supported
Returns
-------
flags : `DataQualityDict`
a `DataQualityDict` of flags parsed from the `veto_def_table`
of the input file.
Notes
-----
This method does not automatically `~DataQualityDict.populate`
the `active` segment list of any flags, a separate call should
be made for that as follows
>>> flags = DataQualityDict.from_veto_definer_file('/path/to/file.xml')
>>> flags.populate()
"""
if format != 'ligolw':
raise NotImplementedError("Reading veto definer from non-ligolw "
"format file is not currently "
"supported")
# read veto definer file
with get_readable_fileobj(fp, show_progress=False) as fobj:
from ..io.ligolw import read_table as read_ligolw_table
veto_def_table = read_ligolw_table(fobj, 'veto_definer')
if start is not None:
start = to_gps(start)
if end is not None:
end = to_gps(end)
# parse flag definitions
out = cls()
for row in veto_def_table:
if ifo and row.ifo != ifo:
continue
if start and 0 < row.end_time <= start:
continue
elif start:
row.start_time = max(row.start_time, start)
if end and row.start_time >= end:
continue
elif end and not row.end_time:
row.end_time = end
elif end:
row.end_time = min(row.end_time, end)
flag = DataQualityFlag.from_veto_def(row)
if flag.name in out:
out[flag.name].known.extend(flag.known)
out[flag.name].known.coalesce()
else:
out[flag.name] = flag
return out |
def handle(self):
"The actual service to which the user has connected."
if not self.authentication_ok():
return
if self.DOECHO:
self.writeline(self.WELCOME)
self.session_start()
while self.RUNSHELL:
read_line = self.readline(prompt=self.PROMPT).strip('\r\n')
if read_line:
self.session.transcript_incoming(read_line)
self.input = self.input_reader(self, read_line)
self.raw_input = self.input.raw
if self.input.cmd:
# TODO: Command should not be converted to upper
# looks funny in error messages.
cmd = self.input.cmd.upper()
params = self.input.params
if cmd in self.COMMANDS:
try:
self.COMMANDS[cmd](params)
except:
logger.exception('Error calling {0}.'.format(cmd))
(t, p, tb) = sys.exc_info()
if self.handleException(t, p, tb):
break
else:
self.writeline('-bash: {0}: command not found'.format(cmd))
logger.error("Unknown command '{0}'".format(cmd))
logger.debug("Exiting handler") | The actual service to which the user has connected. | Below is the the instruction that describes the task:
### Input:
The actual service to which the user has connected.
### Response:
def handle(self):
"The actual service to which the user has connected."
if not self.authentication_ok():
return
if self.DOECHO:
self.writeline(self.WELCOME)
self.session_start()
while self.RUNSHELL:
read_line = self.readline(prompt=self.PROMPT).strip('\r\n')
if read_line:
self.session.transcript_incoming(read_line)
self.input = self.input_reader(self, read_line)
self.raw_input = self.input.raw
if self.input.cmd:
# TODO: Command should not be converted to upper
# looks funny in error messages.
cmd = self.input.cmd.upper()
params = self.input.params
if cmd in self.COMMANDS:
try:
self.COMMANDS[cmd](params)
except:
logger.exception('Error calling {0}.'.format(cmd))
(t, p, tb) = sys.exc_info()
if self.handleException(t, p, tb):
break
else:
self.writeline('-bash: {0}: command not found'.format(cmd))
logger.error("Unknown command '{0}'".format(cmd))
logger.debug("Exiting handler") |
def solveConsRepAgentMarkov(solution_next,MrkvArray,DiscFac,CRRA,IncomeDstn,CapShare,DeprFac,PermGroFac,aXtraGrid):
'''
Solve one period of the simple representative agent consumption-saving model.
This version supports a discrete Markov process.
Parameters
----------
solution_next : ConsumerSolution
Solution to the next period's problem (i.e. previous iteration).
MrkvArray : np.array
Markov transition array between this period and next period.
DiscFac : float
Intertemporal discount factor for future utility.
CRRA : float
Coefficient of relative risk aversion.
IncomeDstn : [[np.array]]
A list of lists containing three arrays of floats, representing a discrete
approximation to the income process between the period being solved
and the one immediately following (in solution_next). Order: event
probabilities, permanent shocks, transitory shocks.
CapShare : float
Capital's share of income in Cobb-Douglas production function.
DeprFac : float
Depreciation rate of capital.
PermGroFac : [float]
Expected permanent income growth factor for each state we could be in
next period.
aXtraGrid : np.array
Array of "extra" end-of-period asset values-- assets above the
absolute minimum acceptable level. In this model, the minimum acceptable
level is always zero.
Returns
-------
solution_now : ConsumerSolution
Solution to this period's problem (new iteration).
'''
# Define basic objects
StateCount = MrkvArray.shape[0]
aNrmNow = aXtraGrid
aNrmCount = aNrmNow.size
EndOfPrdvP_cond = np.zeros((StateCount,aNrmCount)) + np.nan
# Loop over *next period* states, calculating conditional EndOfPrdvP
for j in range(StateCount):
# Define next-period-state conditional objects
vPfuncNext = solution_next.vPfunc[j]
ShkPrbsNext = IncomeDstn[j][0]
PermShkValsNext = IncomeDstn[j][1]
TranShkValsNext = IncomeDstn[j][2]
# Make tiled versions of end-of-period assets, shocks, and probabilities
ShkCount = ShkPrbsNext.size
aNrm_tiled = np.tile(np.reshape(aNrmNow,(aNrmCount,1)),(1,ShkCount))
# Tile arrays of the income shocks and put them into useful shapes
PermShkVals_tiled = np.tile(np.reshape(PermShkValsNext,(1,ShkCount)),(aNrmCount,1))
TranShkVals_tiled = np.tile(np.reshape(TranShkValsNext,(1,ShkCount)),(aNrmCount,1))
ShkPrbs_tiled = np.tile(np.reshape(ShkPrbsNext,(1,ShkCount)),(aNrmCount,1))
# Calculate next period's capital-to-permanent-labor ratio under each combination
# of end-of-period assets and shock realization
kNrmNext = aNrm_tiled/(PermGroFac[j]*PermShkVals_tiled)
# Calculate next period's market resources
KtoLnext = kNrmNext/TranShkVals_tiled
RfreeNext = 1. - DeprFac + CapShare*KtoLnext**(CapShare-1.)
wRteNext = (1.-CapShare)*KtoLnext**CapShare
mNrmNext = RfreeNext*kNrmNext + wRteNext*TranShkVals_tiled
# Calculate end-of-period marginal value of assets for the RA
vPnext = vPfuncNext(mNrmNext)
EndOfPrdvP_cond[j,:] = DiscFac*np.sum(RfreeNext*(PermGroFac[j]*PermShkVals_tiled)**(-CRRA)*vPnext*ShkPrbs_tiled,axis=1)
# Apply the Markov transition matrix to get unconditional end-of-period marginal value
EndOfPrdvP = np.dot(MrkvArray,EndOfPrdvP_cond)
# Construct the consumption function and marginal value function for each discrete state
cFuncNow_list = []
vPfuncNow_list = []
for i in range(StateCount):
# Invert the first order condition to get consumption, then find endogenous gridpoints
cNrmNow = EndOfPrdvP[i,:]**(-1./CRRA)
mNrmNow = aNrmNow + cNrmNow
# Construct the consumption function and the marginal value function
cFuncNow_list.append(LinearInterp(np.insert(mNrmNow,0,0.0),np.insert(cNrmNow,0,0.0)))
vPfuncNow_list.append(MargValueFunc(cFuncNow_list[-1],CRRA))
# Construct and return the solution for this period
solution_now = ConsumerSolution(cFunc=cFuncNow_list,vPfunc=vPfuncNow_list)
return solution_now | Solve one period of the simple representative agent consumption-saving model.
This version supports a discrete Markov process.
Parameters
----------
solution_next : ConsumerSolution
Solution to the next period's problem (i.e. previous iteration).
MrkvArray : np.array
Markov transition array between this period and next period.
DiscFac : float
Intertemporal discount factor for future utility.
CRRA : float
Coefficient of relative risk aversion.
IncomeDstn : [[np.array]]
A list of lists containing three arrays of floats, representing a discrete
approximation to the income process between the period being solved
and the one immediately following (in solution_next). Order: event
probabilities, permanent shocks, transitory shocks.
CapShare : float
Capital's share of income in Cobb-Douglas production function.
DeprFac : float
Depreciation rate of capital.
PermGroFac : [float]
Expected permanent income growth factor for each state we could be in
next period.
aXtraGrid : np.array
Array of "extra" end-of-period asset values-- assets above the
absolute minimum acceptable level. In this model, the minimum acceptable
level is always zero.
Returns
-------
solution_now : ConsumerSolution
Solution to this period's problem (new iteration). | Below is the the instruction that describes the task:
### Input:
Solve one period of the simple representative agent consumption-saving model.
This version supports a discrete Markov process.
Parameters
----------
solution_next : ConsumerSolution
Solution to the next period's problem (i.e. previous iteration).
MrkvArray : np.array
Markov transition array between this period and next period.
DiscFac : float
Intertemporal discount factor for future utility.
CRRA : float
Coefficient of relative risk aversion.
IncomeDstn : [[np.array]]
A list of lists containing three arrays of floats, representing a discrete
approximation to the income process between the period being solved
and the one immediately following (in solution_next). Order: event
probabilities, permanent shocks, transitory shocks.
CapShare : float
Capital's share of income in Cobb-Douglas production function.
DeprFac : float
Depreciation rate of capital.
PermGroFac : [float]
Expected permanent income growth factor for each state we could be in
next period.
aXtraGrid : np.array
Array of "extra" end-of-period asset values-- assets above the
absolute minimum acceptable level. In this model, the minimum acceptable
level is always zero.
Returns
-------
solution_now : ConsumerSolution
Solution to this period's problem (new iteration).
### Response:
def solveConsRepAgentMarkov(solution_next,MrkvArray,DiscFac,CRRA,IncomeDstn,CapShare,DeprFac,PermGroFac,aXtraGrid):
'''
Solve one period of the simple representative agent consumption-saving model.
This version supports a discrete Markov process.
Parameters
----------
solution_next : ConsumerSolution
Solution to the next period's problem (i.e. previous iteration).
MrkvArray : np.array
Markov transition array between this period and next period.
DiscFac : float
Intertemporal discount factor for future utility.
CRRA : float
Coefficient of relative risk aversion.
IncomeDstn : [[np.array]]
A list of lists containing three arrays of floats, representing a discrete
approximation to the income process between the period being solved
and the one immediately following (in solution_next). Order: event
probabilities, permanent shocks, transitory shocks.
CapShare : float
Capital's share of income in Cobb-Douglas production function.
DeprFac : float
Depreciation rate of capital.
PermGroFac : [float]
Expected permanent income growth factor for each state we could be in
next period.
aXtraGrid : np.array
Array of "extra" end-of-period asset values-- assets above the
absolute minimum acceptable level. In this model, the minimum acceptable
level is always zero.
Returns
-------
solution_now : ConsumerSolution
Solution to this period's problem (new iteration).
'''
# Define basic objects
StateCount = MrkvArray.shape[0]
aNrmNow = aXtraGrid
aNrmCount = aNrmNow.size
EndOfPrdvP_cond = np.zeros((StateCount,aNrmCount)) + np.nan
# Loop over *next period* states, calculating conditional EndOfPrdvP
for j in range(StateCount):
# Define next-period-state conditional objects
vPfuncNext = solution_next.vPfunc[j]
ShkPrbsNext = IncomeDstn[j][0]
PermShkValsNext = IncomeDstn[j][1]
TranShkValsNext = IncomeDstn[j][2]
# Make tiled versions of end-of-period assets, shocks, and probabilities
ShkCount = ShkPrbsNext.size
aNrm_tiled = np.tile(np.reshape(aNrmNow,(aNrmCount,1)),(1,ShkCount))
# Tile arrays of the income shocks and put them into useful shapes
PermShkVals_tiled = np.tile(np.reshape(PermShkValsNext,(1,ShkCount)),(aNrmCount,1))
TranShkVals_tiled = np.tile(np.reshape(TranShkValsNext,(1,ShkCount)),(aNrmCount,1))
ShkPrbs_tiled = np.tile(np.reshape(ShkPrbsNext,(1,ShkCount)),(aNrmCount,1))
# Calculate next period's capital-to-permanent-labor ratio under each combination
# of end-of-period assets and shock realization
kNrmNext = aNrm_tiled/(PermGroFac[j]*PermShkVals_tiled)
# Calculate next period's market resources
KtoLnext = kNrmNext/TranShkVals_tiled
RfreeNext = 1. - DeprFac + CapShare*KtoLnext**(CapShare-1.)
wRteNext = (1.-CapShare)*KtoLnext**CapShare
mNrmNext = RfreeNext*kNrmNext + wRteNext*TranShkVals_tiled
# Calculate end-of-period marginal value of assets for the RA
vPnext = vPfuncNext(mNrmNext)
EndOfPrdvP_cond[j,:] = DiscFac*np.sum(RfreeNext*(PermGroFac[j]*PermShkVals_tiled)**(-CRRA)*vPnext*ShkPrbs_tiled,axis=1)
# Apply the Markov transition matrix to get unconditional end-of-period marginal value
EndOfPrdvP = np.dot(MrkvArray,EndOfPrdvP_cond)
# Construct the consumption function and marginal value function for each discrete state
cFuncNow_list = []
vPfuncNow_list = []
for i in range(StateCount):
# Invert the first order condition to get consumption, then find endogenous gridpoints
cNrmNow = EndOfPrdvP[i,:]**(-1./CRRA)
mNrmNow = aNrmNow + cNrmNow
# Construct the consumption function and the marginal value function
cFuncNow_list.append(LinearInterp(np.insert(mNrmNow,0,0.0),np.insert(cNrmNow,0,0.0)))
vPfuncNow_list.append(MargValueFunc(cFuncNow_list[-1],CRRA))
# Construct and return the solution for this period
solution_now = ConsumerSolution(cFunc=cFuncNow_list,vPfunc=vPfuncNow_list)
return solution_now |
def insert_ngram(self, ngram, count):
"""
Inserts a given n-gram with count into the database.
Parameters
----------
ngram : iterable of str
A list, set or tuple of strings.
count : int
The count for the given n-gram.
"""
query = "INSERT INTO _{0}_gram {1};".format(len(ngram),
self._build_values_clause(ngram, count))
self.execute_sql(query) | Inserts a given n-gram with count into the database.
Parameters
----------
ngram : iterable of str
A list, set or tuple of strings.
count : int
The count for the given n-gram. | Below is the the instruction that describes the task:
### Input:
Inserts a given n-gram with count into the database.
Parameters
----------
ngram : iterable of str
A list, set or tuple of strings.
count : int
The count for the given n-gram.
### Response:
def insert_ngram(self, ngram, count):
"""
Inserts a given n-gram with count into the database.
Parameters
----------
ngram : iterable of str
A list, set or tuple of strings.
count : int
The count for the given n-gram.
"""
query = "INSERT INTO _{0}_gram {1};".format(len(ngram),
self._build_values_clause(ngram, count))
self.execute_sql(query) |
def _parse_names_set(feature_names):
"""Helping function of `_parse_feature_names` that parses a set of feature names."""
feature_collection = OrderedDict()
for feature_name in feature_names:
if isinstance(feature_name, str):
feature_collection[feature_name] = ...
else:
raise ValueError('Failed to parse {}, expected string'.format(feature_name))
return feature_collection | Helping function of `_parse_feature_names` that parses a set of feature names. | Below is the the instruction that describes the task:
### Input:
Helping function of `_parse_feature_names` that parses a set of feature names.
### Response:
def _parse_names_set(feature_names):
"""Helping function of `_parse_feature_names` that parses a set of feature names."""
feature_collection = OrderedDict()
for feature_name in feature_names:
if isinstance(feature_name, str):
feature_collection[feature_name] = ...
else:
raise ValueError('Failed to parse {}, expected string'.format(feature_name))
return feature_collection |
def load_from_dict(self, data: dict, overwrite: bool=True):
"""
Loads key/values from dicts or list into the ConfigKey.
:param data: The data object to load.
This can be a dict, or a list of key/value tuples.
:param overwrite: Should the ConfigKey overwrite data already in it?
"""
if data is None or data == {}:
return False
# Loop over items
if isinstance(data, list) or isinstance(data, tuple):
# Pick a random item from the tuple.
if len(data[0]) != 2:
raise exc.LoaderException("Cannot load data with length {}".format(len(data[0])))
items = data
elif isinstance(data, dict) or isinstance(data, self.__class__):
items = data.items()
else:
raise exc.LoaderException("Cannot load data of type {}".format(type(data)))
for key, item in items:
assert isinstance(key, str)
if hasattr(self, key) and not overwrite:
# Refuse to overwrite existing data.
continue
# Check name to verify it's safe.
if self.safe_load:
if key.startswith("__") or key in ['dump', 'items', 'keys', 'values',
'iter_list', 'load_from_dict', 'iter_list_dump',
'parsed', 'safe_load']:
# It's evil!
key = "unsafe_" + key
if '.' in key:
# Doubly evil!
key = key.replace('.', '_')
if isinstance(item, dict):
# Create a new ConfigKey object with the dict.
ncfg = ConfigKey()
# Parse the data.
ncfg.load_from_dict(item)
# Set our new ConfigKey as an attribute of ourselves.
setattr(self, key, ncfg)
elif isinstance(item, list):
# Iterate over the list, creating ConfigKey items as appropriate.
nlst = self.iter_list(item)
# Set our new list as an attribute of ourselves.
setattr(self, key, nlst)
else:
# Set the item as an attribute of ourselves.
setattr(self, key, item)
# Flip the parsed flag,
self.parsed = True | Loads key/values from dicts or list into the ConfigKey.
:param data: The data object to load.
This can be a dict, or a list of key/value tuples.
:param overwrite: Should the ConfigKey overwrite data already in it? | Below is the the instruction that describes the task:
### Input:
Loads key/values from dicts or list into the ConfigKey.
:param data: The data object to load.
This can be a dict, or a list of key/value tuples.
:param overwrite: Should the ConfigKey overwrite data already in it?
### Response:
def load_from_dict(self, data: dict, overwrite: bool=True):
"""
Loads key/values from dicts or list into the ConfigKey.
:param data: The data object to load.
This can be a dict, or a list of key/value tuples.
:param overwrite: Should the ConfigKey overwrite data already in it?
"""
if data is None or data == {}:
return False
# Loop over items
if isinstance(data, list) or isinstance(data, tuple):
# Pick a random item from the tuple.
if len(data[0]) != 2:
raise exc.LoaderException("Cannot load data with length {}".format(len(data[0])))
items = data
elif isinstance(data, dict) or isinstance(data, self.__class__):
items = data.items()
else:
raise exc.LoaderException("Cannot load data of type {}".format(type(data)))
for key, item in items:
assert isinstance(key, str)
if hasattr(self, key) and not overwrite:
# Refuse to overwrite existing data.
continue
# Check name to verify it's safe.
if self.safe_load:
if key.startswith("__") or key in ['dump', 'items', 'keys', 'values',
'iter_list', 'load_from_dict', 'iter_list_dump',
'parsed', 'safe_load']:
# It's evil!
key = "unsafe_" + key
if '.' in key:
# Doubly evil!
key = key.replace('.', '_')
if isinstance(item, dict):
# Create a new ConfigKey object with the dict.
ncfg = ConfigKey()
# Parse the data.
ncfg.load_from_dict(item)
# Set our new ConfigKey as an attribute of ourselves.
setattr(self, key, ncfg)
elif isinstance(item, list):
# Iterate over the list, creating ConfigKey items as appropriate.
nlst = self.iter_list(item)
# Set our new list as an attribute of ourselves.
setattr(self, key, nlst)
else:
# Set the item as an attribute of ourselves.
setattr(self, key, item)
# Flip the parsed flag,
self.parsed = True |
def split_in_tiles(self, hint):
"""
Split a SiteCollection into a set of tiles (SiteCollection instances).
:param hint: hint for how many tiles to generate
"""
tiles = []
for seq in split_in_blocks(range(len(self)), hint or 1):
sc = SiteCollection.__new__(SiteCollection)
sc.array = self.array[numpy.array(seq, int)]
tiles.append(sc)
return tiles | Split a SiteCollection into a set of tiles (SiteCollection instances).
:param hint: hint for how many tiles to generate | Below is the the instruction that describes the task:
### Input:
Split a SiteCollection into a set of tiles (SiteCollection instances).
:param hint: hint for how many tiles to generate
### Response:
def split_in_tiles(self, hint):
"""
Split a SiteCollection into a set of tiles (SiteCollection instances).
:param hint: hint for how many tiles to generate
"""
tiles = []
for seq in split_in_blocks(range(len(self)), hint or 1):
sc = SiteCollection.__new__(SiteCollection)
sc.array = self.array[numpy.array(seq, int)]
tiles.append(sc)
return tiles |
def ban_create(self, data, **kwargs):
"https://developer.zendesk.com/rest_api/docs/chat/bans#create-ban"
api_path = "/api/v2/bans"
return self.call(api_path, method="POST", data=data, **kwargs) | https://developer.zendesk.com/rest_api/docs/chat/bans#create-ban | Below is the the instruction that describes the task:
### Input:
https://developer.zendesk.com/rest_api/docs/chat/bans#create-ban
### Response:
def ban_create(self, data, **kwargs):
"https://developer.zendesk.com/rest_api/docs/chat/bans#create-ban"
api_path = "/api/v2/bans"
return self.call(api_path, method="POST", data=data, **kwargs) |
def find_non_contiguous(all_items):
"""Find any items that have slots that aren't contiguous"""
non_contiguous = []
for item in all_items:
if item.slots.count() < 2:
# No point in checking
continue
last_slot = None
for slot in item.slots.all().order_by('end_time'):
if last_slot:
if last_slot.end_time != slot.get_start_time():
non_contiguous.append(item)
break
last_slot = slot
return non_contiguous | Find any items that have slots that aren't contiguous | Below is the the instruction that describes the task:
### Input:
Find any items that have slots that aren't contiguous
### Response:
def find_non_contiguous(all_items):
"""Find any items that have slots that aren't contiguous"""
non_contiguous = []
for item in all_items:
if item.slots.count() < 2:
# No point in checking
continue
last_slot = None
for slot in item.slots.all().order_by('end_time'):
if last_slot:
if last_slot.end_time != slot.get_start_time():
non_contiguous.append(item)
break
last_slot = slot
return non_contiguous |
def send(self, sender: PytgbotApiBot):
"""
Send the message via pytgbot.
:param sender: The bot instance to send with.
:type sender: pytgbot.bot.Bot
:rtype: PytgbotApiMessage
"""
return sender.send_location(
# receiver, self.media, disable_notification=self.disable_notification, reply_to_message_id=reply_id
latitude=self.latitude, longitude=self.longitude, chat_id=self.receiver, reply_to_message_id=self.reply_id, live_period=self.live_period, disable_notification=self.disable_notification, reply_markup=self.reply_markup
) | Send the message via pytgbot.
:param sender: The bot instance to send with.
:type sender: pytgbot.bot.Bot
:rtype: PytgbotApiMessage | Below is the the instruction that describes the task:
### Input:
Send the message via pytgbot.
:param sender: The bot instance to send with.
:type sender: pytgbot.bot.Bot
:rtype: PytgbotApiMessage
### Response:
def send(self, sender: PytgbotApiBot):
"""
Send the message via pytgbot.
:param sender: The bot instance to send with.
:type sender: pytgbot.bot.Bot
:rtype: PytgbotApiMessage
"""
return sender.send_location(
# receiver, self.media, disable_notification=self.disable_notification, reply_to_message_id=reply_id
latitude=self.latitude, longitude=self.longitude, chat_id=self.receiver, reply_to_message_id=self.reply_id, live_period=self.live_period, disable_notification=self.disable_notification, reply_markup=self.reply_markup
) |
def create_rules(aes, value):
"""Create a Rules instance for a single aesthetic value.
Parameter
---------
aes: str
The name of the aesthetic
value: str or list
The value associated with any aesthetic
"""
if isinstance(value, six.string_types):
return Rules(aes)
else:
rules = Rules()
for idx, (pattern, css_value) in enumerate(value):
rules.add_rule(pattern, '{0}_{1}'.format(aes, idx))
return rules | Create a Rules instance for a single aesthetic value.
Parameter
---------
aes: str
The name of the aesthetic
value: str or list
The value associated with any aesthetic | Below is the the instruction that describes the task:
### Input:
Create a Rules instance for a single aesthetic value.
Parameter
---------
aes: str
The name of the aesthetic
value: str or list
The value associated with any aesthetic
### Response:
def create_rules(aes, value):
"""Create a Rules instance for a single aesthetic value.
Parameter
---------
aes: str
The name of the aesthetic
value: str or list
The value associated with any aesthetic
"""
if isinstance(value, six.string_types):
return Rules(aes)
else:
rules = Rules()
for idx, (pattern, css_value) in enumerate(value):
rules.add_rule(pattern, '{0}_{1}'.format(aes, idx))
return rules |
def angsep(lon1, lat1, lon2, lat2):
"""
Angular separation (deg) between two sky coordinates.
Borrowed from astropy (www.astropy.org)
Notes
-----
The angular separation is calculated using the Vincenty formula [1],
which is slighly more complex and computationally expensive than
some alternatives, but is stable at at all distances, including the
poles and antipodes.
[1] http://en.wikipedia.org/wiki/Great-circle_distance
"""
lon1,lat1 = np.radians([lon1,lat1])
lon2,lat2 = np.radians([lon2,lat2])
sdlon = np.sin(lon2 - lon1)
cdlon = np.cos(lon2 - lon1)
slat1 = np.sin(lat1)
slat2 = np.sin(lat2)
clat1 = np.cos(lat1)
clat2 = np.cos(lat2)
num1 = clat2 * sdlon
num2 = clat1 * slat2 - slat1 * clat2 * cdlon
denominator = slat1 * slat2 + clat1 * clat2 * cdlon
return np.degrees(np.arctan2(np.hypot(num1,num2), denominator)) | Angular separation (deg) between two sky coordinates.
Borrowed from astropy (www.astropy.org)
Notes
-----
The angular separation is calculated using the Vincenty formula [1],
which is slighly more complex and computationally expensive than
some alternatives, but is stable at at all distances, including the
poles and antipodes.
[1] http://en.wikipedia.org/wiki/Great-circle_distance | Below is the the instruction that describes the task:
### Input:
Angular separation (deg) between two sky coordinates.
Borrowed from astropy (www.astropy.org)
Notes
-----
The angular separation is calculated using the Vincenty formula [1],
which is slighly more complex and computationally expensive than
some alternatives, but is stable at at all distances, including the
poles and antipodes.
[1] http://en.wikipedia.org/wiki/Great-circle_distance
### Response:
def angsep(lon1, lat1, lon2, lat2):
"""
Angular separation (deg) between two sky coordinates.
Borrowed from astropy (www.astropy.org)
Notes
-----
The angular separation is calculated using the Vincenty formula [1],
which is slighly more complex and computationally expensive than
some alternatives, but is stable at at all distances, including the
poles and antipodes.
[1] http://en.wikipedia.org/wiki/Great-circle_distance
"""
lon1,lat1 = np.radians([lon1,lat1])
lon2,lat2 = np.radians([lon2,lat2])
sdlon = np.sin(lon2 - lon1)
cdlon = np.cos(lon2 - lon1)
slat1 = np.sin(lat1)
slat2 = np.sin(lat2)
clat1 = np.cos(lat1)
clat2 = np.cos(lat2)
num1 = clat2 * sdlon
num2 = clat1 * slat2 - slat1 * clat2 * cdlon
denominator = slat1 * slat2 + clat1 * clat2 * cdlon
return np.degrees(np.arctan2(np.hypot(num1,num2), denominator)) |
def remove(self, oid):
"""
Remove a faked Virtual Function resource.
This method also updates the 'virtual-function-uris' property in the
parent Partition resource, by removing the URI for the faked Virtual
Function resource.
Parameters:
oid (string):
The object ID of the faked Virtual Function resource.
"""
virtual_function = self.lookup_by_oid(oid)
partition = self.parent
devno = virtual_function.properties.get('device-number', None)
if devno:
partition.devno_free_if_allocated(devno)
assert 'virtual-function-uris' in partition.properties
vf_uris = partition.properties['virtual-function-uris']
vf_uris.remove(virtual_function.uri)
super(FakedVirtualFunctionManager, self).remove(oid) | Remove a faked Virtual Function resource.
This method also updates the 'virtual-function-uris' property in the
parent Partition resource, by removing the URI for the faked Virtual
Function resource.
Parameters:
oid (string):
The object ID of the faked Virtual Function resource. | Below is the the instruction that describes the task:
### Input:
Remove a faked Virtual Function resource.
This method also updates the 'virtual-function-uris' property in the
parent Partition resource, by removing the URI for the faked Virtual
Function resource.
Parameters:
oid (string):
The object ID of the faked Virtual Function resource.
### Response:
def remove(self, oid):
"""
Remove a faked Virtual Function resource.
This method also updates the 'virtual-function-uris' property in the
parent Partition resource, by removing the URI for the faked Virtual
Function resource.
Parameters:
oid (string):
The object ID of the faked Virtual Function resource.
"""
virtual_function = self.lookup_by_oid(oid)
partition = self.parent
devno = virtual_function.properties.get('device-number', None)
if devno:
partition.devno_free_if_allocated(devno)
assert 'virtual-function-uris' in partition.properties
vf_uris = partition.properties['virtual-function-uris']
vf_uris.remove(virtual_function.uri)
super(FakedVirtualFunctionManager, self).remove(oid) |
def draw(self, img, pixmapper, bounds):
'''draw a polygon on the image'''
if self.hidden:
return
self._pix_points = []
for i in range(len(self.points)-1):
if len(self.points[i]) > 2:
colour = self.points[i][2]
else:
colour = self.colour
self.draw_line(img, pixmapper, self.points[i], self.points[i+1],
colour, self.linewidth) | draw a polygon on the image | Below is the the instruction that describes the task:
### Input:
draw a polygon on the image
### Response:
def draw(self, img, pixmapper, bounds):
'''draw a polygon on the image'''
if self.hidden:
return
self._pix_points = []
for i in range(len(self.points)-1):
if len(self.points[i]) > 2:
colour = self.points[i][2]
else:
colour = self.colour
self.draw_line(img, pixmapper, self.points[i], self.points[i+1],
colour, self.linewidth) |
def from_string(cls, string):
"""
Create a public blob from a ``-cert.pub``-style string.
"""
fields = string.split(None, 2)
if len(fields) < 2:
msg = "Not enough fields for public blob: {}"
raise ValueError(msg.format(fields))
key_type = fields[0]
key_blob = decodebytes(b(fields[1]))
try:
comment = fields[2].strip()
except IndexError:
comment = None
# Verify that the blob message first (string) field matches the
# key_type
m = Message(key_blob)
blob_type = m.get_text()
if blob_type != key_type:
deets = "key type={!r}, but blob type={!r}".format(
key_type, blob_type
)
raise ValueError("Invalid PublicBlob contents: {}".format(deets))
# All good? All good.
return cls(type_=key_type, blob=key_blob, comment=comment) | Create a public blob from a ``-cert.pub``-style string. | Below is the the instruction that describes the task:
### Input:
Create a public blob from a ``-cert.pub``-style string.
### Response:
def from_string(cls, string):
"""
Create a public blob from a ``-cert.pub``-style string.
"""
fields = string.split(None, 2)
if len(fields) < 2:
msg = "Not enough fields for public blob: {}"
raise ValueError(msg.format(fields))
key_type = fields[0]
key_blob = decodebytes(b(fields[1]))
try:
comment = fields[2].strip()
except IndexError:
comment = None
# Verify that the blob message first (string) field matches the
# key_type
m = Message(key_blob)
blob_type = m.get_text()
if blob_type != key_type:
deets = "key type={!r}, but blob type={!r}".format(
key_type, blob_type
)
raise ValueError("Invalid PublicBlob contents: {}".format(deets))
# All good? All good.
return cls(type_=key_type, blob=key_blob, comment=comment) |
def close_active_window(self):
"""
Close active window.
"""
active_split = self._get_active_split()
# First remove the active window from its split.
index = active_split.index(self.active_window)
del active_split[index]
# Move focus.
if len(active_split):
new_active_window = active_split[max(0, index - 1)]
while isinstance(new_active_window, (HSplit, VSplit)):
new_active_window = new_active_window[0]
self.active_window = new_active_window
else:
self.active_window = None # No windows left.
# When there is exactly on item left, move this back into the parent
# split. (We don't want to keep a split with one item around -- exept
# for the root.)
if len(active_split) == 1 and active_split != self.root:
parent = self._get_split_parent(active_split)
index = parent.index(active_split)
parent[index] = active_split[0] | Close active window. | Below is the the instruction that describes the task:
### Input:
Close active window.
### Response:
def close_active_window(self):
"""
Close active window.
"""
active_split = self._get_active_split()
# First remove the active window from its split.
index = active_split.index(self.active_window)
del active_split[index]
# Move focus.
if len(active_split):
new_active_window = active_split[max(0, index - 1)]
while isinstance(new_active_window, (HSplit, VSplit)):
new_active_window = new_active_window[0]
self.active_window = new_active_window
else:
self.active_window = None # No windows left.
# When there is exactly on item left, move this back into the parent
# split. (We don't want to keep a split with one item around -- exept
# for the root.)
if len(active_split) == 1 and active_split != self.root:
parent = self._get_split_parent(active_split)
index = parent.index(active_split)
parent[index] = active_split[0] |
def engine_list(self):
"""
:returns: Return list of engines supported by GNS3 for the GNS3VM
"""
download_url = "https://github.com/GNS3/gns3-gui/releases/download/v{version}/GNS3.VM.VMware.Workstation.{version}.zip".format(version=__version__)
vmware_informations = {
"engine_id": "vmware",
"description": 'VMware is the recommended choice for best performances.<br>The GNS3 VM can be <a href="{}">downloaded here</a>.'.format(download_url),
"support_when_exit": True,
"support_headless": True,
"support_ram": True
}
if sys.platform.startswith("darwin"):
vmware_informations["name"] = "VMware Fusion"
else:
vmware_informations["name"] = "VMware Workstation / Player"
download_url = "https://github.com/GNS3/gns3-gui/releases/download/v{version}/GNS3.VM.VirtualBox.{version}.zip".format(version=__version__)
virtualbox_informations = {
"engine_id": "virtualbox",
"name": "VirtualBox",
"description": 'VirtualBox doesn\'t support nested virtualization, this means running Qemu based VM could be very slow.<br>The GNS3 VM can be <a href="{}">downloaded here</a>'.format(download_url),
"support_when_exit": True,
"support_headless": True,
"support_ram": True
}
remote_informations = {
"engine_id": "remote",
"name": "Remote",
"description": "Use a remote GNS3 server as the GNS3 VM.",
"support_when_exit": False,
"support_headless": False,
"support_ram": False
}
return [
vmware_informations,
virtualbox_informations,
remote_informations
] | :returns: Return list of engines supported by GNS3 for the GNS3VM | Below is the the instruction that describes the task:
### Input:
:returns: Return list of engines supported by GNS3 for the GNS3VM
### Response:
def engine_list(self):
"""
:returns: Return list of engines supported by GNS3 for the GNS3VM
"""
download_url = "https://github.com/GNS3/gns3-gui/releases/download/v{version}/GNS3.VM.VMware.Workstation.{version}.zip".format(version=__version__)
vmware_informations = {
"engine_id": "vmware",
"description": 'VMware is the recommended choice for best performances.<br>The GNS3 VM can be <a href="{}">downloaded here</a>.'.format(download_url),
"support_when_exit": True,
"support_headless": True,
"support_ram": True
}
if sys.platform.startswith("darwin"):
vmware_informations["name"] = "VMware Fusion"
else:
vmware_informations["name"] = "VMware Workstation / Player"
download_url = "https://github.com/GNS3/gns3-gui/releases/download/v{version}/GNS3.VM.VirtualBox.{version}.zip".format(version=__version__)
virtualbox_informations = {
"engine_id": "virtualbox",
"name": "VirtualBox",
"description": 'VirtualBox doesn\'t support nested virtualization, this means running Qemu based VM could be very slow.<br>The GNS3 VM can be <a href="{}">downloaded here</a>'.format(download_url),
"support_when_exit": True,
"support_headless": True,
"support_ram": True
}
remote_informations = {
"engine_id": "remote",
"name": "Remote",
"description": "Use a remote GNS3 server as the GNS3 VM.",
"support_when_exit": False,
"support_headless": False,
"support_ram": False
}
return [
vmware_informations,
virtualbox_informations,
remote_informations
] |
def iterencode(self, o):
"""
Encode the given object and yield each string
representation as available.
For example::
for chunk in JSONEncoder().iterencode(bigobject):
mysocket.write(chunk)
"""
if self.check_circular:
markers = {}
else:
markers = None
return self._iterencode(o, markers) | Encode the given object and yield each string
representation as available.
For example::
for chunk in JSONEncoder().iterencode(bigobject):
mysocket.write(chunk) | Below is the the instruction that describes the task:
### Input:
Encode the given object and yield each string
representation as available.
For example::
for chunk in JSONEncoder().iterencode(bigobject):
mysocket.write(chunk)
### Response:
def iterencode(self, o):
"""
Encode the given object and yield each string
representation as available.
For example::
for chunk in JSONEncoder().iterencode(bigobject):
mysocket.write(chunk)
"""
if self.check_circular:
markers = {}
else:
markers = None
return self._iterencode(o, markers) |
def add_entry(self, path_object):
"""Adds a child FakeFile to this directory.
Args:
path_object: FakeFile instance to add as a child of this directory.
Raises:
OSError: if the directory has no write permission (Posix only)
OSError: if the file or directory to be added already exists
"""
if (not is_root() and not self.st_mode & PERM_WRITE and
not self.filesystem.is_windows_fs):
exception = IOError if IS_PY2 else OSError
raise exception(errno.EACCES, 'Permission Denied', self.path)
if path_object.name in self.contents:
self.filesystem.raise_os_error(errno.EEXIST, self.path)
self.contents[path_object.name] = path_object
path_object.parent_dir = self
self.st_nlink += 1
path_object.st_nlink += 1
path_object.st_dev = self.st_dev
if path_object.st_nlink == 1:
self.filesystem.change_disk_usage(
path_object.size, path_object.name, self.st_dev) | Adds a child FakeFile to this directory.
Args:
path_object: FakeFile instance to add as a child of this directory.
Raises:
OSError: if the directory has no write permission (Posix only)
OSError: if the file or directory to be added already exists | Below is the the instruction that describes the task:
### Input:
Adds a child FakeFile to this directory.
Args:
path_object: FakeFile instance to add as a child of this directory.
Raises:
OSError: if the directory has no write permission (Posix only)
OSError: if the file or directory to be added already exists
### Response:
def add_entry(self, path_object):
"""Adds a child FakeFile to this directory.
Args:
path_object: FakeFile instance to add as a child of this directory.
Raises:
OSError: if the directory has no write permission (Posix only)
OSError: if the file or directory to be added already exists
"""
if (not is_root() and not self.st_mode & PERM_WRITE and
not self.filesystem.is_windows_fs):
exception = IOError if IS_PY2 else OSError
raise exception(errno.EACCES, 'Permission Denied', self.path)
if path_object.name in self.contents:
self.filesystem.raise_os_error(errno.EEXIST, self.path)
self.contents[path_object.name] = path_object
path_object.parent_dir = self
self.st_nlink += 1
path_object.st_nlink += 1
path_object.st_dev = self.st_dev
if path_object.st_nlink == 1:
self.filesystem.change_disk_usage(
path_object.size, path_object.name, self.st_dev) |
def by_youtube_id(cls, youtube_id):
"""
Look up video by youtube id
"""
qset = cls.objects.filter(
encoded_videos__profile__profile_name='youtube',
encoded_videos__url=youtube_id
).prefetch_related('encoded_videos', 'courses')
return qset | Look up video by youtube id | Below is the the instruction that describes the task:
### Input:
Look up video by youtube id
### Response:
def by_youtube_id(cls, youtube_id):
"""
Look up video by youtube id
"""
qset = cls.objects.filter(
encoded_videos__profile__profile_name='youtube',
encoded_videos__url=youtube_id
).prefetch_related('encoded_videos', 'courses')
return qset |
def get_rotation_program(pauli_term: PauliTerm) -> Program:
"""
Generate a rotation program so that the pauli term is diagonal.
:param pauli_term: The Pauli term used to generate diagonalizing one-qubit rotations.
:return: The rotation program.
"""
meas_basis_change = Program()
for index, gate in pauli_term:
if gate == 'X':
meas_basis_change.inst(RY(-np.pi / 2, index))
elif gate == 'Y':
meas_basis_change.inst(RX(np.pi / 2, index))
elif gate == 'Z':
pass
else:
raise ValueError()
return meas_basis_change | Generate a rotation program so that the pauli term is diagonal.
:param pauli_term: The Pauli term used to generate diagonalizing one-qubit rotations.
:return: The rotation program. | Below is the the instruction that describes the task:
### Input:
Generate a rotation program so that the pauli term is diagonal.
:param pauli_term: The Pauli term used to generate diagonalizing one-qubit rotations.
:return: The rotation program.
### Response:
def get_rotation_program(pauli_term: PauliTerm) -> Program:
"""
Generate a rotation program so that the pauli term is diagonal.
:param pauli_term: The Pauli term used to generate diagonalizing one-qubit rotations.
:return: The rotation program.
"""
meas_basis_change = Program()
for index, gate in pauli_term:
if gate == 'X':
meas_basis_change.inst(RY(-np.pi / 2, index))
elif gate == 'Y':
meas_basis_change.inst(RX(np.pi / 2, index))
elif gate == 'Z':
pass
else:
raise ValueError()
return meas_basis_change |
def dual_csiszar_function(logu, csiszar_function, name=None):
"""Calculates the dual Csiszar-function in log-space.
A Csiszar-function is a member of,
```none
F = { f:R_+ to R : f convex }.
```
The Csiszar-dual is defined as:
```none
f^*(u) = u f(1 / u)
```
where `f` is some other Csiszar-function.
For example, the dual of `kl_reverse` is `kl_forward`, i.e.,
```none
f(u) = -log(u)
f^*(u) = u f(1 / u) = -u log(1 / u) = u log(u)
```
The dual of the dual is the original function:
```none
f^**(u) = {u f(1/u)}^*(u) = u (1/u) f(1/(1/u)) = f(u)
```
Warning: this function makes non-log-space calculations and may therefore be
numerically unstable for `|logu| >> 0`.
Args:
logu: `float`-like `Tensor` representing `log(u)` from above.
csiszar_function: Python `callable` representing a Csiszar-function over
log-domain.
name: Python `str` name prefixed to Ops created by this function.
Returns:
dual_f_of_u: `float`-like `Tensor` of the result of calculating the dual of
`f` at `u = exp(logu)`.
"""
with tf.compat.v1.name_scope(name, "dual_csiszar_function", [logu]):
return tf.exp(logu) * csiszar_function(-logu) | Calculates the dual Csiszar-function in log-space.
A Csiszar-function is a member of,
```none
F = { f:R_+ to R : f convex }.
```
The Csiszar-dual is defined as:
```none
f^*(u) = u f(1 / u)
```
where `f` is some other Csiszar-function.
For example, the dual of `kl_reverse` is `kl_forward`, i.e.,
```none
f(u) = -log(u)
f^*(u) = u f(1 / u) = -u log(1 / u) = u log(u)
```
The dual of the dual is the original function:
```none
f^**(u) = {u f(1/u)}^*(u) = u (1/u) f(1/(1/u)) = f(u)
```
Warning: this function makes non-log-space calculations and may therefore be
numerically unstable for `|logu| >> 0`.
Args:
logu: `float`-like `Tensor` representing `log(u)` from above.
csiszar_function: Python `callable` representing a Csiszar-function over
log-domain.
name: Python `str` name prefixed to Ops created by this function.
Returns:
dual_f_of_u: `float`-like `Tensor` of the result of calculating the dual of
`f` at `u = exp(logu)`. | Below is the the instruction that describes the task:
### Input:
Calculates the dual Csiszar-function in log-space.
A Csiszar-function is a member of,
```none
F = { f:R_+ to R : f convex }.
```
The Csiszar-dual is defined as:
```none
f^*(u) = u f(1 / u)
```
where `f` is some other Csiszar-function.
For example, the dual of `kl_reverse` is `kl_forward`, i.e.,
```none
f(u) = -log(u)
f^*(u) = u f(1 / u) = -u log(1 / u) = u log(u)
```
The dual of the dual is the original function:
```none
f^**(u) = {u f(1/u)}^*(u) = u (1/u) f(1/(1/u)) = f(u)
```
Warning: this function makes non-log-space calculations and may therefore be
numerically unstable for `|logu| >> 0`.
Args:
logu: `float`-like `Tensor` representing `log(u)` from above.
csiszar_function: Python `callable` representing a Csiszar-function over
log-domain.
name: Python `str` name prefixed to Ops created by this function.
Returns:
dual_f_of_u: `float`-like `Tensor` of the result of calculating the dual of
`f` at `u = exp(logu)`.
### Response:
def dual_csiszar_function(logu, csiszar_function, name=None):
"""Calculates the dual Csiszar-function in log-space.
A Csiszar-function is a member of,
```none
F = { f:R_+ to R : f convex }.
```
The Csiszar-dual is defined as:
```none
f^*(u) = u f(1 / u)
```
where `f` is some other Csiszar-function.
For example, the dual of `kl_reverse` is `kl_forward`, i.e.,
```none
f(u) = -log(u)
f^*(u) = u f(1 / u) = -u log(1 / u) = u log(u)
```
The dual of the dual is the original function:
```none
f^**(u) = {u f(1/u)}^*(u) = u (1/u) f(1/(1/u)) = f(u)
```
Warning: this function makes non-log-space calculations and may therefore be
numerically unstable for `|logu| >> 0`.
Args:
logu: `float`-like `Tensor` representing `log(u)` from above.
csiszar_function: Python `callable` representing a Csiszar-function over
log-domain.
name: Python `str` name prefixed to Ops created by this function.
Returns:
dual_f_of_u: `float`-like `Tensor` of the result of calculating the dual of
`f` at `u = exp(logu)`.
"""
with tf.compat.v1.name_scope(name, "dual_csiszar_function", [logu]):
return tf.exp(logu) * csiszar_function(-logu) |
def safe_concurrent_creation(target_path):
"""A contextmanager that yields a temporary path and renames it to a final target path when the
contextmanager exits.
Useful when concurrent processes may attempt to create a file, and it doesn't matter who wins.
:param target_path: The final target path to rename the temporary path to.
:yields: A temporary path containing the original path with a unique (uuid4) suffix.
"""
safe_mkdir_for(target_path)
tmp_path = '{}.tmp.{}'.format(target_path, uuid.uuid4().hex)
try:
yield tmp_path
except Exception:
rm_rf(tmp_path)
raise
else:
if os.path.exists(tmp_path):
safe_concurrent_rename(tmp_path, target_path) | A contextmanager that yields a temporary path and renames it to a final target path when the
contextmanager exits.
Useful when concurrent processes may attempt to create a file, and it doesn't matter who wins.
:param target_path: The final target path to rename the temporary path to.
:yields: A temporary path containing the original path with a unique (uuid4) suffix. | Below is the the instruction that describes the task:
### Input:
A contextmanager that yields a temporary path and renames it to a final target path when the
contextmanager exits.
Useful when concurrent processes may attempt to create a file, and it doesn't matter who wins.
:param target_path: The final target path to rename the temporary path to.
:yields: A temporary path containing the original path with a unique (uuid4) suffix.
### Response:
def safe_concurrent_creation(target_path):
"""A contextmanager that yields a temporary path and renames it to a final target path when the
contextmanager exits.
Useful when concurrent processes may attempt to create a file, and it doesn't matter who wins.
:param target_path: The final target path to rename the temporary path to.
:yields: A temporary path containing the original path with a unique (uuid4) suffix.
"""
safe_mkdir_for(target_path)
tmp_path = '{}.tmp.{}'.format(target_path, uuid.uuid4().hex)
try:
yield tmp_path
except Exception:
rm_rf(tmp_path)
raise
else:
if os.path.exists(tmp_path):
safe_concurrent_rename(tmp_path, target_path) |
def export(fn):
""" Export decorator
Please refer to the following SO article for details: https://stackoverflow.com/a/35710527
"""
mod = sys.modules[fn.__module__]
if hasattr(mod, '__all__'):
mod.__all__.append(fn.__name__)
else:
mod.__all__ = [fn.__name__]
return fn | Export decorator
Please refer to the following SO article for details: https://stackoverflow.com/a/35710527 | Below is the the instruction that describes the task:
### Input:
Export decorator
Please refer to the following SO article for details: https://stackoverflow.com/a/35710527
### Response:
def export(fn):
""" Export decorator
Please refer to the following SO article for details: https://stackoverflow.com/a/35710527
"""
mod = sys.modules[fn.__module__]
if hasattr(mod, '__all__'):
mod.__all__.append(fn.__name__)
else:
mod.__all__ = [fn.__name__]
return fn |
def derivatives(self, x, y, Rs, theta_Rs, r_core, center_x=0, center_y=0):
"""
deflection angles
:param x: x coordinate
:param y: y coordinate
:param Rs: scale radius
:param rho0: central core density
:param r_core: core radius
:param center_x:
:param center_y:
:return:
"""
rho0 = self._alpha2rho0(theta_Rs=theta_Rs, Rs=Rs, r_core=r_core)
if Rs < 0.0000001:
Rs = 0.0000001
x_ = x - center_x
y_ = y - center_y
R = np.sqrt(x_ ** 2 + y_ ** 2)
dx, dy = self.coreBurkAlpha(R, Rs, rho0, r_core, x_, y_)
return dx, dy | deflection angles
:param x: x coordinate
:param y: y coordinate
:param Rs: scale radius
:param rho0: central core density
:param r_core: core radius
:param center_x:
:param center_y:
:return: | Below is the the instruction that describes the task:
### Input:
deflection angles
:param x: x coordinate
:param y: y coordinate
:param Rs: scale radius
:param rho0: central core density
:param r_core: core radius
:param center_x:
:param center_y:
:return:
### Response:
def derivatives(self, x, y, Rs, theta_Rs, r_core, center_x=0, center_y=0):
"""
deflection angles
:param x: x coordinate
:param y: y coordinate
:param Rs: scale radius
:param rho0: central core density
:param r_core: core radius
:param center_x:
:param center_y:
:return:
"""
rho0 = self._alpha2rho0(theta_Rs=theta_Rs, Rs=Rs, r_core=r_core)
if Rs < 0.0000001:
Rs = 0.0000001
x_ = x - center_x
y_ = y - center_y
R = np.sqrt(x_ ** 2 + y_ ** 2)
dx, dy = self.coreBurkAlpha(R, Rs, rho0, r_core, x_, y_)
return dx, dy |
def sense(self, *targets, **options):
"""Discover a contactless card or listening device.
.. note:: The :meth:`sense` method is intended for experts
with a good understanding of the commands and
responses exchanged during target activation (the
notion used for commands and responses follows the
NFC Forum Digital Specification). If the greater
level of control is not needed it is recommended to
use the :meth:`connect` method.
All positional arguments build the list of potential *targets*
to discover and must be of type :class:`RemoteTarget`. Keyword
argument *options* may be the number of ``iterations`` of the
sense loop set by *targets* and the ``interval`` between
iterations. The return value is either a :class:`RemoteTarget`
instance or :const:`None`.
>>> import nfc, nfc.clf
>>> clf = nfc.ContactlessFrontend("usb")
>>> target1 = nfc.clf.RemoteTarget("106A")
>>> target2 = nfc.clf.RemoteTarget("212F")
>>> print(clf.sense(target1, target2, iterations=5, interval=0.2))
106A(sdd_res=04497622D93881, sel_res=00, sens_res=4400)
A **Type A Target** is specified with the technology letter
``A`` following the bitrate to be used for the SENS_REQ
command (almost always must the bitrate be 106 kbps). To
discover only a specific Type A target, the NFCID1 (UID) can
be set with a 4, 7, or 10 byte ``sel_req`` attribute (cascade
tags are handled internally).
>>> target = nfc.clf.RemoteTarget("106A")
>>> print(clf.sense(target))
106A sdd_res=04497622D93881 sel_res=00 sens_res=4400
>>> target.sel_req = bytearray.fromhex("04497622D93881")
>>> print(clf.sense(target))
106A sdd_res=04497622D93881 sel_res=00 sens_res=4400
>>> target.sel_req = bytearray.fromhex("04497622")
>>> print(clf.sense(target))
None
A **Type B Target** is specified with the technology letter
``B`` following the bitrate to be used for the SENSB_REQ
command (almost always must the bitrate be 106 kbps). A
specific application family identifier can be set with the
first byte of a ``sensb_req`` attribute (the second byte PARAM
is ignored when it can not be set to local device, 00h is a
safe value in all cases).
>>> target = nfc.clf.RemoteTarget("106B")
>>> print(clf.sense(target))
106B sens_res=50E5DD3DC900000011008185
>>> target.sensb_req = bytearray.fromhex("0000")
>>> print(clf.sense(target))
106B sens_res=50E5DD3DC900000011008185
>>> target.sensb_req = bytearray.fromhex("FF00")
>>> print(clf.sense(target))
None
A **Type F Target** is specified with the technology letter
``F`` following the bitrate to be used for the SENSF_REQ
command (the typically supported bitrates are 212 and 424
kbps). The default SENSF_REQ command allows all targets to
answer, requests system code information, and selects a single
time slot for the SENSF_RES response. This can be changed with
the ``sensf_req`` attribute.
>>> target = nfc.clf.RemoteTarget("212F")
>>> print(clf.sense(target))
212F sensf_res=0101010601B00ADE0B03014B024F4993FF12FC
>>> target.sensf_req = bytearray.fromhex("0012FC0000")
>>> print(clf.sense(target))
212F sensf_res=0101010601B00ADE0B03014B024F4993FF
>>> target.sensf_req = bytearray.fromhex("00ABCD0000")
>>> print(clf.sense(target))
None
An **Active Communication Mode P2P Target** search is selected
with an ``atr_req`` attribute. The choice of bitrate and
modulation type is 106A, 212F, and 424F.
>>> atr = bytearray.fromhex("D4000102030405060708091000000030")
>>> target = clf.sense(nfc.clf.RemoteTarget("106A", atr_req=atr))
>>> if target and target.atr_res: print(target.atr_res.encode("hex"))
d501c023cae6b3182afe3dee0000000e3246666d01011103020013040196
>>> target = clf.sense(nfc.clf.RemoteTarget("424F", atr_req=atr))
>>> if target and target.atr_res: print(target.atr_res.encode("hex"))
d501dc0104f04584e15769700000000e3246666d01011103020013040196
Some drivers must modify the ATR_REQ to cope with hardware
limitations, for example change length reduction value to
reduce the maximum size of target responses. The ATR_REQ that
has been send is given by the ``atr_req`` attribute of the
returned RemoteTarget object.
A **Passive Communication Mode P2P Target** responds to 106A
discovery with bit 6 of SEL_RES set to 1, and to 212F/424F
discovery (when the request code RC is 0 in the SENSF_REQ
command) with an NFCID2 that starts with 01FEh in the
SENSF_RES response. Responses below are from a Nexus 5
configured for NFC-DEP Protocol (SEL_RES bit 6 is set) and
Type 4A Tag (SEL_RES bit 5 is set).
>>> print(clf.sense(nfc.clf.RemoteTarget("106A")))
106A sdd_res=08796BEB sel_res=60 sens_res=0400
>>> sensf_req = bytearray.fromhex("00FFFF0000")
>>> print(clf.sense(nfc.clf.RemoteTarget("424F", sensf_req=sensf_req)))
424F sensf_res=0101FE1444EFB88FD50000000000000000
Errors found in the *targets* argument list raise exceptions
only if exactly one target is given. If multiple targets are
provided, any target that is not supported or has invalid
attributes is just ignored (but is logged as a debug message).
**Exceptions**
* :exc:`~exceptions.IOError` (ENODEV) when a local contacless
communication device has not been opened or communication
with the local device is no longer possible.
* :exc:`nfc.clf.UnsupportedTargetError` if the single target
supplied as input is not supported by the active driver.
This exception is never raised when :meth:`sense` is called
with multiple targets, those unsupported are then silently
ignored.
"""
def sense_tta(target):
if target.sel_req and len(target.sel_req) not in (4, 7, 10):
raise ValueError("sel_req must be 4, 7, or 10 byte")
target = self.device.sense_tta(target)
log.debug("found %s", target)
if target and len(target.sens_res) != 2:
error = "SENS Response Format Error (wrong length)"
log.debug(error)
raise ProtocolError(error)
if target and target.sens_res[0] & 0b00011111 == 0:
if target.sens_res[1] & 0b00001111 != 0b1100:
error = "SENS Response Data Error (T1T config)"
log.debug(error)
raise ProtocolError(error)
if not target.rid_res:
error = "RID Response Error (no response received)"
log.debug(error)
raise ProtocolError(error)
if len(target.rid_res) != 6:
error = "RID Response Format Error (wrong length)"
log.debug(error)
raise ProtocolError(error)
if target.rid_res[0] >> 4 != 0b0001:
error = "RID Response Data Error (invalid HR0)"
log.debug(error)
raise ProtocolError(error)
return target
def sense_ttb(target):
return self.device.sense_ttb(target)
def sense_ttf(target):
return self.device.sense_ttf(target)
def sense_dep(target):
if len(target.atr_req) < 16:
raise ValueError("minimum atr_req length is 16 byte")
if len(target.atr_req) > 64:
raise ValueError("maximum atr_req length is 64 byte")
return self.device.sense_dep(target)
for target in targets:
if not isinstance(target, RemoteTarget):
raise ValueError("invalid target argument type: %r" % target)
with self.lock:
if self.device is None:
raise IOError(errno.ENODEV, os.strerror(errno.ENODEV))
self.target = None # forget captured target
self.device.mute() # deactivate the rf field
for i in xrange(max(1, options.get('iterations', 1))):
started = time.time()
for target in targets:
log.debug("sense {0}".format(target))
try:
if target.atr_req is not None:
self.target = sense_dep(target)
elif target.brty.endswith('A'):
self.target = sense_tta(target)
elif target.brty.endswith('B'):
self.target = sense_ttb(target)
elif target.brty.endswith('F'):
self.target = sense_ttf(target)
else:
info = "unknown technology type in %r"
raise UnsupportedTargetError(info % target.brty)
except UnsupportedTargetError as error:
if len(targets) == 1:
raise error
else:
log.debug(error)
except CommunicationError as error:
log.debug(error)
else:
if self.target is not None:
log.debug("found {0}".format(self.target))
return self.target
if len(targets) > 0:
self.device.mute() # deactivate the rf field
if i < options.get('iterations', 1) - 1:
elapsed = time.time() - started
time.sleep(max(0, options.get('interval', 0.1)-elapsed)) | Discover a contactless card or listening device.
.. note:: The :meth:`sense` method is intended for experts
with a good understanding of the commands and
responses exchanged during target activation (the
notion used for commands and responses follows the
NFC Forum Digital Specification). If the greater
level of control is not needed it is recommended to
use the :meth:`connect` method.
All positional arguments build the list of potential *targets*
to discover and must be of type :class:`RemoteTarget`. Keyword
argument *options* may be the number of ``iterations`` of the
sense loop set by *targets* and the ``interval`` between
iterations. The return value is either a :class:`RemoteTarget`
instance or :const:`None`.
>>> import nfc, nfc.clf
>>> clf = nfc.ContactlessFrontend("usb")
>>> target1 = nfc.clf.RemoteTarget("106A")
>>> target2 = nfc.clf.RemoteTarget("212F")
>>> print(clf.sense(target1, target2, iterations=5, interval=0.2))
106A(sdd_res=04497622D93881, sel_res=00, sens_res=4400)
A **Type A Target** is specified with the technology letter
``A`` following the bitrate to be used for the SENS_REQ
command (almost always must the bitrate be 106 kbps). To
discover only a specific Type A target, the NFCID1 (UID) can
be set with a 4, 7, or 10 byte ``sel_req`` attribute (cascade
tags are handled internally).
>>> target = nfc.clf.RemoteTarget("106A")
>>> print(clf.sense(target))
106A sdd_res=04497622D93881 sel_res=00 sens_res=4400
>>> target.sel_req = bytearray.fromhex("04497622D93881")
>>> print(clf.sense(target))
106A sdd_res=04497622D93881 sel_res=00 sens_res=4400
>>> target.sel_req = bytearray.fromhex("04497622")
>>> print(clf.sense(target))
None
A **Type B Target** is specified with the technology letter
``B`` following the bitrate to be used for the SENSB_REQ
command (almost always must the bitrate be 106 kbps). A
specific application family identifier can be set with the
first byte of a ``sensb_req`` attribute (the second byte PARAM
is ignored when it can not be set to local device, 00h is a
safe value in all cases).
>>> target = nfc.clf.RemoteTarget("106B")
>>> print(clf.sense(target))
106B sens_res=50E5DD3DC900000011008185
>>> target.sensb_req = bytearray.fromhex("0000")
>>> print(clf.sense(target))
106B sens_res=50E5DD3DC900000011008185
>>> target.sensb_req = bytearray.fromhex("FF00")
>>> print(clf.sense(target))
None
A **Type F Target** is specified with the technology letter
``F`` following the bitrate to be used for the SENSF_REQ
command (the typically supported bitrates are 212 and 424
kbps). The default SENSF_REQ command allows all targets to
answer, requests system code information, and selects a single
time slot for the SENSF_RES response. This can be changed with
the ``sensf_req`` attribute.
>>> target = nfc.clf.RemoteTarget("212F")
>>> print(clf.sense(target))
212F sensf_res=0101010601B00ADE0B03014B024F4993FF12FC
>>> target.sensf_req = bytearray.fromhex("0012FC0000")
>>> print(clf.sense(target))
212F sensf_res=0101010601B00ADE0B03014B024F4993FF
>>> target.sensf_req = bytearray.fromhex("00ABCD0000")
>>> print(clf.sense(target))
None
An **Active Communication Mode P2P Target** search is selected
with an ``atr_req`` attribute. The choice of bitrate and
modulation type is 106A, 212F, and 424F.
>>> atr = bytearray.fromhex("D4000102030405060708091000000030")
>>> target = clf.sense(nfc.clf.RemoteTarget("106A", atr_req=atr))
>>> if target and target.atr_res: print(target.atr_res.encode("hex"))
d501c023cae6b3182afe3dee0000000e3246666d01011103020013040196
>>> target = clf.sense(nfc.clf.RemoteTarget("424F", atr_req=atr))
>>> if target and target.atr_res: print(target.atr_res.encode("hex"))
d501dc0104f04584e15769700000000e3246666d01011103020013040196
Some drivers must modify the ATR_REQ to cope with hardware
limitations, for example change length reduction value to
reduce the maximum size of target responses. The ATR_REQ that
has been send is given by the ``atr_req`` attribute of the
returned RemoteTarget object.
A **Passive Communication Mode P2P Target** responds to 106A
discovery with bit 6 of SEL_RES set to 1, and to 212F/424F
discovery (when the request code RC is 0 in the SENSF_REQ
command) with an NFCID2 that starts with 01FEh in the
SENSF_RES response. Responses below are from a Nexus 5
configured for NFC-DEP Protocol (SEL_RES bit 6 is set) and
Type 4A Tag (SEL_RES bit 5 is set).
>>> print(clf.sense(nfc.clf.RemoteTarget("106A")))
106A sdd_res=08796BEB sel_res=60 sens_res=0400
>>> sensf_req = bytearray.fromhex("00FFFF0000")
>>> print(clf.sense(nfc.clf.RemoteTarget("424F", sensf_req=sensf_req)))
424F sensf_res=0101FE1444EFB88FD50000000000000000
Errors found in the *targets* argument list raise exceptions
only if exactly one target is given. If multiple targets are
provided, any target that is not supported or has invalid
attributes is just ignored (but is logged as a debug message).
**Exceptions**
* :exc:`~exceptions.IOError` (ENODEV) when a local contacless
communication device has not been opened or communication
with the local device is no longer possible.
* :exc:`nfc.clf.UnsupportedTargetError` if the single target
supplied as input is not supported by the active driver.
This exception is never raised when :meth:`sense` is called
with multiple targets, those unsupported are then silently
ignored. | Below is the instruction that describes the task:
### Input:
Discover a contactless card or listening device.
.. note:: The :meth:`sense` method is intended for experts
with a good understanding of the commands and
responses exchanged during target activation (the
notion used for commands and responses follows the
NFC Forum Digital Specification). If the greater
level of control is not needed it is recommended to
use the :meth:`connect` method.
All positional arguments build the list of potential *targets*
to discover and must be of type :class:`RemoteTarget`. Keyword
argument *options* may be the number of ``iterations`` of the
sense loop set by *targets* and the ``interval`` between
iterations. The return value is either a :class:`RemoteTarget`
instance or :const:`None`.
>>> import nfc, nfc.clf
>>> clf = nfc.ContactlessFrontend("usb")
>>> target1 = nfc.clf.RemoteTarget("106A")
>>> target2 = nfc.clf.RemoteTarget("212F")
>>> print(clf.sense(target1, target2, iterations=5, interval=0.2))
106A(sdd_res=04497622D93881, sel_res=00, sens_res=4400)
A **Type A Target** is specified with the technology letter
``A`` following the bitrate to be used for the SENS_REQ
command (almost always must the bitrate be 106 kbps). To
discover only a specific Type A target, the NFCID1 (UID) can
be set with a 4, 7, or 10 byte ``sel_req`` attribute (cascade
tags are handled internally).
>>> target = nfc.clf.RemoteTarget("106A")
>>> print(clf.sense(target))
106A sdd_res=04497622D93881 sel_res=00 sens_res=4400
>>> target.sel_req = bytearray.fromhex("04497622D93881")
>>> print(clf.sense(target))
106A sdd_res=04497622D93881 sel_res=00 sens_res=4400
>>> target.sel_req = bytearray.fromhex("04497622")
>>> print(clf.sense(target))
None
A **Type B Target** is specified with the technology letter
``B`` following the bitrate to be used for the SENSB_REQ
command (almost always must the bitrate be 106 kbps). A
specific application family identifier can be set with the
first byte of a ``sensb_req`` attribute (the second byte PARAM
is ignored when it can not be set to local device, 00h is a
safe value in all cases).
>>> target = nfc.clf.RemoteTarget("106B")
>>> print(clf.sense(target))
106B sens_res=50E5DD3DC900000011008185
>>> target.sensb_req = bytearray.fromhex("0000")
>>> print(clf.sense(target))
106B sens_res=50E5DD3DC900000011008185
>>> target.sensb_req = bytearray.fromhex("FF00")
>>> print(clf.sense(target))
None
A **Type F Target** is specified with the technology letter
``F`` following the bitrate to be used for the SENSF_REQ
command (the typically supported bitrates are 212 and 424
kbps). The default SENSF_REQ command allows all targets to
answer, requests system code information, and selects a single
time slot for the SENSF_RES response. This can be changed with
the ``sensf_req`` attribute.
>>> target = nfc.clf.RemoteTarget("212F")
>>> print(clf.sense(target))
212F sensf_res=0101010601B00ADE0B03014B024F4993FF12FC
>>> target.sensf_req = bytearray.fromhex("0012FC0000")
>>> print(clf.sense(target))
212F sensf_res=0101010601B00ADE0B03014B024F4993FF
>>> target.sensf_req = bytearray.fromhex("00ABCD0000")
>>> print(clf.sense(target))
None
An **Active Communication Mode P2P Target** search is selected
with an ``atr_req`` attribute. The choice of bitrate and
modulation type is 106A, 212F, and 424F.
>>> atr = bytearray.fromhex("D4000102030405060708091000000030")
>>> target = clf.sense(nfc.clf.RemoteTarget("106A", atr_req=atr))
>>> if target and target.atr_res: print(target.atr_res.encode("hex"))
d501c023cae6b3182afe3dee0000000e3246666d01011103020013040196
>>> target = clf.sense(nfc.clf.RemoteTarget("424F", atr_req=atr))
>>> if target and target.atr_res: print(target.atr_res.encode("hex"))
d501dc0104f04584e15769700000000e3246666d01011103020013040196
Some drivers must modify the ATR_REQ to cope with hardware
limitations, for example change length reduction value to
reduce the maximum size of target responses. The ATR_REQ that
has been send is given by the ``atr_req`` attribute of the
returned RemoteTarget object.
A **Passive Communication Mode P2P Target** responds to 106A
discovery with bit 6 of SEL_RES set to 1, and to 212F/424F
discovery (when the request code RC is 0 in the SENSF_REQ
command) with an NFCID2 that starts with 01FEh in the
SENSF_RES response. Responses below are from a Nexus 5
configured for NFC-DEP Protocol (SEL_RES bit 6 is set) and
Type 4A Tag (SEL_RES bit 5 is set).
>>> print(clf.sense(nfc.clf.RemoteTarget("106A")))
106A sdd_res=08796BEB sel_res=60 sens_res=0400
>>> sensf_req = bytearray.fromhex("00FFFF0000")
>>> print(clf.sense(nfc.clf.RemoteTarget("424F", sensf_req=sensf_req)))
424F sensf_res=0101FE1444EFB88FD50000000000000000
Errors found in the *targets* argument list raise exceptions
only if exactly one target is given. If multiple targets are
provided, any target that is not supported or has invalid
attributes is just ignored (but is logged as a debug message).
**Exceptions**
* :exc:`~exceptions.IOError` (ENODEV) when a local contacless
communication device has not been opened or communication
with the local device is no longer possible.
* :exc:`nfc.clf.UnsupportedTargetError` if the single target
supplied as input is not supported by the active driver.
This exception is never raised when :meth:`sense` is called
with multiple targets, those unsupported are then silently
ignored.
### Response:
def sense(self, *targets, **options):
    """Discover a contactless card or listening device.
    .. note:: The :meth:`sense` method is intended for experts
              with a good understanding of the commands and
              responses exchanged during target activation (the
              notion used for commands and responses follows the
              NFC Forum Digital Specification). If the greater
              level of control is not needed it is recommended to
              use the :meth:`connect` method.
    All positional arguments build the list of potential *targets*
    to discover and must be of type :class:`RemoteTarget`. Keyword
    argument *options* may be the number of ``iterations`` of the
    sense loop set by *targets* and the ``interval`` between
    iterations. The return value is either a :class:`RemoteTarget`
    instance or :const:`None`.
    >>> import nfc, nfc.clf
    >>> clf = nfc.ContactlessFrontend("usb")
    >>> target1 = nfc.clf.RemoteTarget("106A")
    >>> target2 = nfc.clf.RemoteTarget("212F")
    >>> print(clf.sense(target1, target2, iterations=5, interval=0.2))
    106A(sdd_res=04497622D93881, sel_res=00, sens_res=4400)
    A **Type A Target** is specified with the technology letter
    ``A`` following the bitrate to be used for the SENS_REQ
    command (almost always must the bitrate be 106 kbps). To
    discover only a specific Type A target, the NFCID1 (UID) can
    be set with a 4, 7, or 10 byte ``sel_req`` attribute (cascade
    tags are handled internally).
    >>> target = nfc.clf.RemoteTarget("106A")
    >>> print(clf.sense(target))
    106A sdd_res=04497622D93881 sel_res=00 sens_res=4400
    >>> target.sel_req = bytearray.fromhex("04497622D93881")
    >>> print(clf.sense(target))
    106A sdd_res=04497622D93881 sel_res=00 sens_res=4400
    >>> target.sel_req = bytearray.fromhex("04497622")
    >>> print(clf.sense(target))
    None
    A **Type B Target** is specified with the technology letter
    ``B`` following the bitrate to be used for the SENSB_REQ
    command (almost always must the bitrate be 106 kbps). A
    specific application family identifier can be set with the
    first byte of a ``sensb_req`` attribute (the second byte PARAM
    is ignored when it can not be set to local device, 00h is a
    safe value in all cases).
    >>> target = nfc.clf.RemoteTarget("106B")
    >>> print(clf.sense(target))
    106B sens_res=50E5DD3DC900000011008185
    >>> target.sensb_req = bytearray.fromhex("0000")
    >>> print(clf.sense(target))
    106B sens_res=50E5DD3DC900000011008185
    >>> target.sensb_req = bytearray.fromhex("FF00")
    >>> print(clf.sense(target))
    None
    A **Type F Target** is specified with the technology letter
    ``F`` following the bitrate to be used for the SENSF_REQ
    command (the typically supported bitrates are 212 and 424
    kbps). The default SENSF_REQ command allows all targets to
    answer, requests system code information, and selects a single
    time slot for the SENSF_RES response. This can be changed with
    the ``sensf_req`` attribute.
    >>> target = nfc.clf.RemoteTarget("212F")
    >>> print(clf.sense(target))
    212F sensf_res=0101010601B00ADE0B03014B024F4993FF12FC
    >>> target.sensf_req = bytearray.fromhex("0012FC0000")
    >>> print(clf.sense(target))
    212F sensf_res=0101010601B00ADE0B03014B024F4993FF
    >>> target.sensf_req = bytearray.fromhex("00ABCD0000")
    >>> print(clf.sense(target))
    None
    An **Active Communication Mode P2P Target** search is selected
    with an ``atr_req`` attribute. The choice of bitrate and
    modulation type is 106A, 212F, and 424F.
    >>> atr = bytearray.fromhex("D4000102030405060708091000000030")
    >>> target = clf.sense(nfc.clf.RemoteTarget("106A", atr_req=atr))
    >>> if target and target.atr_res: print(target.atr_res.encode("hex"))
    d501c023cae6b3182afe3dee0000000e3246666d01011103020013040196
    >>> target = clf.sense(nfc.clf.RemoteTarget("424F", atr_req=atr))
    >>> if target and target.atr_res: print(target.atr_res.encode("hex"))
    d501dc0104f04584e15769700000000e3246666d01011103020013040196
    Some drivers must modify the ATR_REQ to cope with hardware
    limitations, for example change length reduction value to
    reduce the maximum size of target responses. The ATR_REQ that
    has been sent is given by the ``atr_req`` attribute of the
    returned RemoteTarget object.
    A **Passive Communication Mode P2P Target** responds to 106A
    discovery with bit 6 of SEL_RES set to 1, and to 212F/424F
    discovery (when the request code RC is 0 in the SENSF_REQ
    command) with an NFCID2 that starts with 01FEh in the
    SENSF_RES response. Responses below are from a Nexus 5
    configured for NFC-DEP Protocol (SEL_RES bit 6 is set) and
    Type 4A Tag (SEL_RES bit 5 is set).
    >>> print(clf.sense(nfc.clf.RemoteTarget("106A")))
    106A sdd_res=08796BEB sel_res=60 sens_res=0400
    >>> sensf_req = bytearray.fromhex("00FFFF0000")
    >>> print(clf.sense(nfc.clf.RemoteTarget("424F", sensf_req=sensf_req)))
    424F sensf_res=0101FE1444EFB88FD50000000000000000
    Errors found in the *targets* argument list raise exceptions
    only if exactly one target is given. If multiple targets are
    provided, any target that is not supported or has invalid
    attributes is just ignored (but is logged as a debug message).
    **Exceptions**
    * :exc:`~exceptions.IOError` (ENODEV) when a local contactless
      communication device has not been opened or communication
      with the local device is no longer possible.
    * :exc:`nfc.clf.UnsupportedTargetError` if the single target
      supplied as input is not supported by the active driver.
      This exception is never raised when :meth:`sense` is called
      with multiple targets, those unsupported are then silently
      ignored.
    """
    def sense_tta(target):
        # Technology Type A: validate sel_req length, run driver-level
        # discovery, then sanity-check the SENS_RES coding.
        if target.sel_req and len(target.sel_req) not in (4, 7, 10):
            raise ValueError("sel_req must be 4, 7, or 10 byte")
        target = self.device.sense_tta(target)
        log.debug("found %s", target)
        if target and len(target.sens_res) != 2:
            error = "SENS Response Format Error (wrong length)"
            log.debug(error)
            raise ProtocolError(error)
        if target and target.sens_res[0] & 0b00011111 == 0:
            # Bits 0-4 of SENS_RES byte 0 all zero indicates a Type 1
            # Tag; it must also have answered the RID command with a
            # well-formed 6-byte response and a valid HR0 nibble.
            if target.sens_res[1] & 0b00001111 != 0b1100:
                error = "SENS Response Data Error (T1T config)"
                log.debug(error)
                raise ProtocolError(error)
            if not target.rid_res:
                error = "RID Response Error (no response received)"
                log.debug(error)
                raise ProtocolError(error)
            if len(target.rid_res) != 6:
                error = "RID Response Format Error (wrong length)"
                log.debug(error)
                raise ProtocolError(error)
            if target.rid_res[0] >> 4 != 0b0001:
                error = "RID Response Data Error (invalid HR0)"
                log.debug(error)
                raise ProtocolError(error)
        return target
    def sense_ttb(target):
        # Technology Type B discovery is delegated entirely to the driver.
        return self.device.sense_ttb(target)
    def sense_ttf(target):
        # Technology Type F discovery is delegated entirely to the driver.
        return self.device.sense_ttf(target)
    def sense_dep(target):
        # NFC-DEP: the ATR_REQ length is bounded (16..64 byte).
        if len(target.atr_req) < 16:
            raise ValueError("minimum atr_req length is 16 byte")
        if len(target.atr_req) > 64:
            raise ValueError("maximum atr_req length is 64 byte")
        return self.device.sense_dep(target)
    # Validate argument types up front, before touching the hardware.
    for target in targets:
        if not isinstance(target, RemoteTarget):
            raise ValueError("invalid target argument type: %r" % target)
    with self.lock:
        if self.device is None:
            raise IOError(errno.ENODEV, os.strerror(errno.ENODEV))
        self.target = None  # forget captured target
        self.device.mute()  # deactivate the rf field
        # Poll each requested technology per iteration; first hit wins.
        for i in xrange(max(1, options.get('iterations', 1))):
            started = time.time()
            for target in targets:
                log.debug("sense {0}".format(target))
                try:
                    # An atr_req requests NFC-DEP regardless of brty;
                    # otherwise dispatch on the technology letter.
                    if target.atr_req is not None:
                        self.target = sense_dep(target)
                    elif target.brty.endswith('A'):
                        self.target = sense_tta(target)
                    elif target.brty.endswith('B'):
                        self.target = sense_ttb(target)
                    elif target.brty.endswith('F'):
                        self.target = sense_ttf(target)
                    else:
                        info = "unknown technology type in %r"
                        raise UnsupportedTargetError(info % target.brty)
                except UnsupportedTargetError as error:
                    # Only fatal when the caller asked for exactly one
                    # target; otherwise skip it and keep polling.
                    if len(targets) == 1:
                        raise error
                    else:
                        log.debug(error)
                except CommunicationError as error:
                    log.debug(error)
                else:
                    if self.target is not None:
                        log.debug("found {0}".format(self.target))
                        return self.target
            if len(targets) > 0:
                self.device.mute()  # deactivate the rf field
            if i < options.get('iterations', 1) - 1:
                # Pace the loop: sleep out the remainder of 'interval'.
                elapsed = time.time() - started
                time.sleep(max(0, options.get('interval', 0.1)-elapsed))
    # No target found in any iteration: fall through and return None.
def linesubst(line, variables):
"""
In a string, substitute '{{varname}}' occurrences with the value of
variables['varname'], '\\' being an escaping char...
If at first you don't understand this function, draw its finite state
machine and everything will become crystal clear :)
"""
# trivial no substitution early detection:
if '{{' not in line and '\\' not in line:
return line
st = NORM
out = ""
curvar = ""
for c in line:
if st is NORM:
if c == '{':
st = ONE
elif c == '\\':
st = LIT
else:
out += c
elif st is LIT:
out += c
st = NORM
elif st is ONE:
if c == '{':
st = TWO
elif c == '\\':
out += '{'
st = LIT
else:
out += '{' + c
st = NORM
elif st is TWO:
if c == '\\':
st = TLIT
elif c == '}':
st = TERM
else:
curvar += c
elif st is TLIT:
curvar += c
st = TWO
elif st is TERM:
if c == '}':
if curvar not in variables:
LOG.warning("Unknown variable %r detected, will just be replaced by an empty string", curvar)
else:
LOG.debug("Substitution of {{%s}} by %r", curvar, variables[curvar])
value = variables[curvar]
if isinstance(value, (float, int, long)):
value = str(value)
out += value
curvar = ''
st = NORM
elif c == '\\':
curvar += '}'
st = TLIT
else:
curvar += '}' + c
st = TWO
if st is not NORM:
LOG.warning("st is not NORM at end of line: " + line)
LOG.warning("returned substitution: " + out)
return out | In a string, substitute '{{varname}}' occurrences with the value of
variables['varname'], '\\' being an escaping char...
If at first you don't understand this function, draw its finite state
machine and everything will become crystal clear :) | Below is the instruction that describes the task:
### Input:
In a string, substitute '{{varname}}' occurrences with the value of
variables['varname'], '\\' being an escaping char...
If at first you don't understand this function, draw its finite state
machine and everything will become crystal clear :)
### Response:
def linesubst(line, variables):
    """
    In a string, substitute '{{varname}}' occurrences with the value of
    variables['varname'], '\\' being an escaping char...
    If at first you don't understand this function, draw its finite state
    machine and everything will become crystal clear :)

    :param line: input string, possibly containing ``{{varname}}`` markers
    :param variables: mapping of variable names to replacement values
        (numeric values are converted with ``str()``)
    :return: the line with substitutions applied; unknown variables are
        replaced by an empty string (a warning is logged)
    """
    # FSM states (module-level constants -- presumably sentinel objects,
    # compared with 'is'):
    #   NORM - plain text              LIT  - char after '\' in plain text
    #   ONE  - just saw '{'            TWO  - inside '{{', reading a name
    #   TLIT - char after '\' in name  TERM - saw '}' inside a name
    # trivial no substitution early detection:
    if '{{' not in line and '\\' not in line:
        return line
    st = NORM
    out = ""      # output accumulator
    curvar = ""   # variable name collected between '{{' and '}}'
    for c in line:
        if st is NORM:
            if c == '{':
                st = ONE
            elif c == '\\':
                st = LIT
            else:
                out += c
        elif st is LIT:
            # escaped character in plain text: emit verbatim
            out += c
            st = NORM
        elif st is ONE:
            if c == '{':
                st = TWO
            elif c == '\\':
                # lone '{' followed by an escape: the '{' was literal
                out += '{'
                st = LIT
            else:
                # lone '{' not followed by '{': both chars are literal
                out += '{' + c
                st = NORM
        elif st is TWO:
            if c == '\\':
                st = TLIT
            elif c == '}':
                st = TERM
            else:
                curvar += c
        elif st is TLIT:
            # escaped character inside a variable name
            curvar += c
            st = TWO
        elif st is TERM:
            if c == '}':
                # complete '{{curvar}}' marker: substitute now
                if curvar not in variables:
                    LOG.warning("Unknown variable %r detected, will just be replaced by an empty string", curvar)
                else:
                    LOG.debug("Substitution of {{%s}} by %r", curvar, variables[curvar])
                    value = variables[curvar]
                    if isinstance(value, (float, int, long)):  # 'long': Python 2 code
                        value = str(value)
                    out += value
                curvar = ''
                st = NORM
            elif c == '\\':
                # single '}' inside a name, then an escape
                curvar += '}'
                st = TLIT
            else:
                # single '}' was part of the variable name
                curvar += '}' + c
                st = TWO
    if st is not NORM:
        # unterminated '{{...' or trailing escape: collected text is dropped
        LOG.warning("st is not NORM at end of line: " + line)
        LOG.warning("returned substitution: " + out)
    return out
def blink1(switch_off=True, gamma=None, white_point=None):
"""Context manager which automatically shuts down the Blink(1)
after use.
:param switch_off: turn blink(1) off when existing context
:param gamma: set gamma curve (as tuple)
:param white_point: set white point (as tuple)
"""
b1 = Blink1(gamma=gamma, white_point=white_point)
yield b1
if switch_off:
b1.off()
b1.close() | Context manager which automatically shuts down the Blink(1)
after use.
:param switch_off: turn blink(1) off when existing context
:param gamma: set gamma curve (as tuple)
:param white_point: set white point (as tuple) | Below is the instruction that describes the task:
### Input:
Context manager which automatically shuts down the Blink(1)
after use.
:param switch_off: turn blink(1) off when existing context
:param gamma: set gamma curve (as tuple)
:param white_point: set white point (as tuple)
### Response:
def blink1(switch_off=True, gamma=None, white_point=None):
    """Context manager which automatically shuts down the Blink(1)
    after use.

    The device is now released even if the managed block raises:
    previously an exception propagating through ``yield`` skipped both
    the switch-off and ``close()``, leaking the open device handle.

    :param switch_off: turn blink(1) off when exiting the context
    :param gamma: set gamma curve (as tuple)
    :param white_point: set white point (as tuple)
    """
    # NOTE(review): intended for use with @contextlib.contextmanager --
    # the decorator is not visible in this excerpt; confirm at def site.
    b1 = Blink1(gamma=gamma, white_point=white_point)
    try:
        yield b1
    finally:
        # Always release the device, even on error in the managed block.
        if switch_off:
            b1.off()
        b1.close()
def _set_session_role(self, v, load=False):
"""
Setter method for session_role, mapped from YANG variable /mpls_state/rsvp/sessions/session_role (session-role)
If this variable is read-only (config: false) in the
source YANG file, then _set_session_role is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_session_role() directly.
YANG Description: If this session role is ingress, egress or transit
"""
parent = getattr(self, "_parent", None)
if parent is not None and load is False:
raise AttributeError("Cannot set keys directly when" +
" within an instantiated list")
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'session-role-ingress': {'value': 1}, u'session-role-transit': {'value': 3}, u'session-role-unspecified': {'value': 0}, u'session-role-egress': {'value': 2}},), is_leaf=True, yang_name="session-role", rest_name="session-role", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='session-role', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """session_role must be of a type compatible with session-role""",
'defined-type': "brocade-mpls-operational:session-role",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'session-role-ingress': {'value': 1}, u'session-role-transit': {'value': 3}, u'session-role-unspecified': {'value': 0}, u'session-role-egress': {'value': 2}},), is_leaf=True, yang_name="session-role", rest_name="session-role", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='session-role', is_config=False)""",
})
self.__session_role = t
if hasattr(self, '_set'):
self._set() | Setter method for session_role, mapped from YANG variable /mpls_state/rsvp/sessions/session_role (session-role)
If this variable is read-only (config: false) in the
source YANG file, then _set_session_role is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_session_role() directly.
YANG Description: If this session role is ingress, egress or transit | Below is the instruction that describes the task:
### Input:
Setter method for session_role, mapped from YANG variable /mpls_state/rsvp/sessions/session_role (session-role)
If this variable is read-only (config: false) in the
source YANG file, then _set_session_role is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_session_role() directly.
YANG Description: If this session role is ingress, egress or transit
### Response:
def _set_session_role(self, v, load=False):
    """
    Setter method for session_role, mapped from YANG variable /mpls_state/rsvp/sessions/session_role (session-role)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_session_role is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_session_role() directly.
    YANG Description: If this session role is ingress, egress or transit
    """
    # Generated pyangbind setter: validates 'v' against the YANG typedef and
    # stores the wrapped value on this instance.
    parent = getattr(self, "_parent", None)
    # Key leaves may not be assigned directly on an entry that already lives
    # inside a list; 'load=True' bypasses the guard when deserialising state.
    if parent is not None and load is False:
        raise AttributeError("Cannot set keys directly when" +
            " within an instantiated list")
    # If the value carries its own union/typedef wrapper, unwrap it first.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Coerce 'v' into the restricted enumeration type for session-role;
        # a value outside the four permitted identities raises below.
        t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'session-role-ingress': {'value': 1}, u'session-role-transit': {'value': 3}, u'session-role-unspecified': {'value': 0}, u'session-role-egress': {'value': 2}},), is_leaf=True, yang_name="session-role", rest_name="session-role", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='session-role', is_config=False)
    except (TypeError, ValueError):
        # Re-raise as a structured error describing the expected type.
        raise ValueError({
            'error-string': """session_role must be of a type compatible with session-role""",
            'defined-type': "brocade-mpls-operational:session-role",
            'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'session-role-ingress': {'value': 1}, u'session-role-transit': {'value': 3}, u'session-role-unspecified': {'value': 0}, u'session-role-egress': {'value': 2}},), is_leaf=True, yang_name="session-role", rest_name="session-role", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='session-role', is_config=False)""",
        })
    self.__session_role = t
    # Notify the parent container, when present, that a child changed.
    if hasattr(self, '_set'):
        self._set()
async def status(self, *args, **kwargs):
"""
Get task status
Get task status structure from `taskId`
This method gives output: ``v1/task-status-response.json#``
This method is ``stable``
"""
return await self._makeApiCall(self.funcinfo["status"], *args, **kwargs) | Get task status
Get task status structure from `taskId`
This method gives output: ``v1/task-status-response.json#``
This method is ``stable`` | Below is the instruction that describes the task:
### Input:
Get task status
Get task status structure from `taskId`
This method gives output: ``v1/task-status-response.json#``
This method is ``stable``
### Response:
async def status(self, *args, **kwargs):
    """
    Get task status

    Fetches the task status structure for a `taskId`.
    Output schema: ``v1/task-status-response.json#``
    Stability level: ``stable``
    """
    endpoint = self.funcinfo["status"]
    return await self._makeApiCall(endpoint, *args, **kwargs)
def make_caches(driver, specs, catdir=None, cache_dir=None, storage_options={}):
"""
Creates Cache objects from the cache_specs provided in the catalog yaml file
Parameters
----------
driver: str
Name of the plugin that can load catalog entry
specs: list
Specification for caching the data source.
"""
if specs is None:
return []
return [registry.get(spec['type'], FileCache)(
driver, spec, catdir=catdir, cache_dir=cache_dir,
storage_options=storage_options)
for spec in specs] | Creates Cache objects from the cache_specs provided in the catalog yaml file
Parameters
----------
driver: str
Name of the plugin that can load catalog entry
specs: list
Specification for caching the data source. | Below is the instruction that describes the task:
### Input:
Creates Cache objects from the cache_specs provided in the catalog yaml file
Parameters
----------
driver: str
Name of the plugin that can load catalog entry
specs: list
Specification for caching the data source.
### Response:
def make_caches(driver, specs, catdir=None, cache_dir=None, storage_options=None):
    """
    Creates Cache objects from the cache_specs provided in the catalog yaml file

    Parameters
    ----------
    driver: str
        Name of the plugin that can load catalog entry
    specs: list or None
        Specification for caching the data source; each entry is a dict
        with at least a 'type' key selecting the cache class.
    catdir: str, optional
        Location of the catalog, forwarded to each cache.
    cache_dir: str, optional
        Directory in which cached files are stored.
    storage_options: dict, optional
        Backend storage options forwarded to each cache. ``None`` (the
        default) is treated as an empty dict; the previous
        ``storage_options={}`` default was a shared mutable default.

    Returns
    -------
    list
        One cache instance per spec; empty when ``specs`` is None.
    """
    if specs is None:
        return []
    if storage_options is None:
        storage_options = {}
    # Unknown cache types fall back to FileCache.
    return [registry.get(spec['type'], FileCache)(
        driver, spec, catdir=catdir, cache_dir=cache_dir,
        storage_options=storage_options)
        for spec in specs]
def range(*args, prefix: str):
"""Returns a range of NamedQubits.
The range returned starts with the prefix, and followed by a qubit for
each number in the range, e.g.:
NamedQubit.range(3, prefix="a") -> ["a1", "a2", "a3]
NamedQubit.range(2, 4, prefix="a") -> ["a2", "a3]
Args:
*args: Args to be passed to Python's standard range function.
prefix: A prefix for constructed NamedQubits.
Returns:
A list of NamedQubits.
"""
return [NamedQubit(prefix + str(i)) for i in range(*args)] | Returns a range of NamedQubits.
The range returned starts with the prefix, and followed by a qubit for
each number in the range, e.g.:
NamedQubit.range(3, prefix="a") -> ["a1", "a2", "a3]
NamedQubit.range(2, 4, prefix="a") -> ["a2", "a3]
Args:
*args: Args to be passed to Python's standard range function.
prefix: A prefix for constructed NamedQubits.
Returns:
A list of NamedQubits. | Below is the instruction that describes the task:
### Input:
Returns a range of NamedQubits.
The range returned starts with the prefix, and followed by a qubit for
each number in the range, e.g.:
NamedQubit.range(3, prefix="a") -> ["a1", "a2", "a3]
NamedQubit.range(2, 4, prefix="a") -> ["a2", "a3]
Args:
*args: Args to be passed to Python's standard range function.
prefix: A prefix for constructed NamedQubits.
Returns:
A list of NamedQubits.
### Response:
def range(*args, prefix: str):
    """Returns a range of NamedQubits.

    NOTE(review): in the original source this is a method of ``NamedQubit``,
    so the inner call to ``range`` resolves to the builtin; as a free
    module-level function it would shadow the builtin and recurse.

    The range returned starts with the prefix, and followed by a qubit for
    each number in the range, e.g.:
        NamedQubit.range(3, prefix="a") -> ["a0", "a1", "a2"]
        NamedQubit.range(2, 4, prefix="a") -> ["a2", "a3"]
    Args:
        *args: Args to be passed to Python's standard range function.
        prefix: A prefix for constructed NamedQubits.
    Returns:
        A list of NamedQubits.
    """
    # Builtin range drives the numbering; each name is prefix + index.
    return [NamedQubit(prefix + str(i)) for i in range(*args)]
def pipe_wait(popens):
"""
Given an array of Popen objects returned by the
pipe method, wait for all processes to terminate
and return the array with their return values.
Taken from http://www.enricozini.org/2009/debian/python-pipes/
"""
# Avoid mutating the passed copy
popens = copy.copy(popens)
results = [0] * len(popens)
while popens:
last = popens.pop(-1)
results[len(popens)] = last.wait()
return results | Given an array of Popen objects returned by the
pipe method, wait for all processes to terminate
and return the array with their return values.
Taken from http://www.enricozini.org/2009/debian/python-pipes/ | Below is the instruction that describes the task:
### Input:
Given an array of Popen objects returned by the
pipe method, wait for all processes to terminate
and return the array with their return values.
Taken from http://www.enricozini.org/2009/debian/python-pipes/
### Response:
def pipe_wait(popens):
    """
    Wait for every process in *popens* (an array of Popen objects returned
    by the pipe method) to terminate and return their exit statuses, in
    the same order as the input.
    Taken from http://www.enricozini.org/2009/debian/python-pipes/
    """
    results = [0] * len(popens)
    # Wait on the processes from last to first, mirroring the original
    # pop-from-the-end implementation; the input list is never mutated.
    for idx in range(len(popens) - 1, -1, -1):
        results[idx] = popens[idx].wait()
    return results
def _ReadDataTypeDefinitionWithMembers(
self, definitions_registry, definition_values,
data_type_definition_class, definition_name, supports_conditions=False):
"""Reads a data type definition with members.
Args:
definitions_registry (DataTypeDefinitionsRegistry): data type definitions
registry.
definition_values (dict[str, object]): definition values.
data_type_definition_class (str): data type definition class.
definition_name (str): name of the definition.
supports_conditions (Optional[bool]): True if conditions are supported
by the data type definition.
Returns:
StringDefinition: string data type definition.
Raises:
DefinitionReaderError: if the definitions values are missing or if
the format is incorrect.
"""
members = definition_values.get('members', None)
if not members:
error_message = 'missing members'
raise errors.DefinitionReaderError(definition_name, error_message)
supported_definition_values = (
self._SUPPORTED_DEFINITION_VALUES_STORAGE_DATA_TYPE_WITH_MEMBERS)
definition_object = self._ReadDataTypeDefinition(
definitions_registry, definition_values, data_type_definition_class,
definition_name, supported_definition_values)
attributes = definition_values.get('attributes', None)
if attributes:
unsupported_attributes = set(attributes.keys()).difference(
self._SUPPORTED_ATTRIBUTES_STORAGE_DATA_TYPE)
if unsupported_attributes:
error_message = 'unsupported attributes: {0:s}'.format(
', '.join(unsupported_attributes))
raise errors.DefinitionReaderError(definition_name, error_message)
byte_order = attributes.get('byte_order', definitions.BYTE_ORDER_NATIVE)
if byte_order not in definitions.BYTE_ORDERS:
error_message = 'unsupported byte-order attribute: {0!s}'.format(
byte_order)
raise errors.DefinitionReaderError(definition_name, error_message)
definition_object.byte_order = byte_order
for member in members:
section = member.get('section', None)
if section:
member_section_definition = data_types.MemberSectionDefinition(section)
definition_object.AddSectionDefinition(member_section_definition)
else:
member_data_type_definition = self._ReadMemberDataTypeDefinitionMember(
definitions_registry, member, definition_object.name,
supports_conditions=supports_conditions)
definition_object.AddMemberDefinition(member_data_type_definition)
return definition_object | Reads a data type definition with members.
Args:
definitions_registry (DataTypeDefinitionsRegistry): data type definitions
registry.
definition_values (dict[str, object]): definition values.
data_type_definition_class (str): data type definition class.
definition_name (str): name of the definition.
supports_conditions (Optional[bool]): True if conditions are supported
by the data type definition.
Returns:
StringDefinition: string data type definition.
Raises:
DefinitionReaderError: if the definitions values are missing or if
the format is incorrect. | Below is the instruction that describes the task:
### Input:
Reads a data type definition with members.
Args:
definitions_registry (DataTypeDefinitionsRegistry): data type definitions
registry.
definition_values (dict[str, object]): definition values.
data_type_definition_class (str): data type definition class.
definition_name (str): name of the definition.
supports_conditions (Optional[bool]): True if conditions are supported
by the data type definition.
Returns:
StringDefinition: string data type definition.
Raises:
DefinitionReaderError: if the definitions values are missing or if
the format is incorrect.
### Response:
def _ReadDataTypeDefinitionWithMembers(
    self, definitions_registry, definition_values,
    data_type_definition_class, definition_name, supports_conditions=False):
  """Reads a data type definition with members.

  Args:
    definitions_registry (DataTypeDefinitionsRegistry): data type definitions
        registry.
    definition_values (dict[str, object]): definition values.
    data_type_definition_class (str): data type definition class.
    definition_name (str): name of the definition.
    supports_conditions (Optional[bool]): True if conditions are supported
        by the data type definition.

  Returns:
    DataTypeDefinition: data type definition with members. (NOTE(review):
        the original docstring said "StringDefinition", which looks like a
        copy-paste error given the class is parameterized.)

  Raises:
    DefinitionReaderError: if the definitions values are missing or if
        the format is incorrect.
  """
  members = definition_values.get('members', None)
  if not members:
    error_message = 'missing members'
    raise errors.DefinitionReaderError(definition_name, error_message)
  supported_definition_values = (
      self._SUPPORTED_DEFINITION_VALUES_STORAGE_DATA_TYPE_WITH_MEMBERS)
  # Build the base definition object first; members are attached below.
  definition_object = self._ReadDataTypeDefinition(
      definitions_registry, definition_values, data_type_definition_class,
      definition_name, supported_definition_values)
  attributes = definition_values.get('attributes', None)
  if attributes:
    # Reject attribute keys this kind of data type does not understand.
    unsupported_attributes = set(attributes.keys()).difference(
        self._SUPPORTED_ATTRIBUTES_STORAGE_DATA_TYPE)
    if unsupported_attributes:
      error_message = 'unsupported attributes: {0:s}'.format(
          ', '.join(unsupported_attributes))
      raise errors.DefinitionReaderError(definition_name, error_message)
    byte_order = attributes.get('byte_order', definitions.BYTE_ORDER_NATIVE)
    if byte_order not in definitions.BYTE_ORDERS:
      error_message = 'unsupported byte-order attribute: {0!s}'.format(
          byte_order)
      raise errors.DefinitionReaderError(definition_name, error_message)
    definition_object.byte_order = byte_order
  for member in members:
    # A member entry is either a section marker or an actual member
    # data type definition.
    section = member.get('section', None)
    if section:
      member_section_definition = data_types.MemberSectionDefinition(section)
      definition_object.AddSectionDefinition(member_section_definition)
    else:
      member_data_type_definition = self._ReadMemberDataTypeDefinitionMember(
          definitions_registry, member, definition_object.name,
          supports_conditions=supports_conditions)
      definition_object.AddMemberDefinition(member_data_type_definition)
  return definition_object
def create_user(username, password, permissions, users=None):
'''
Create user accounts
CLI Example:
.. code-block:: bash
salt dell drac.create_user [USERNAME] [PASSWORD] [PRIVILEGES]
salt dell drac.create_user diana secret login,test_alerts,clear_logs
DRAC Privileges
* login : Login to iDRAC
* drac : Configure iDRAC
* user_management : Configure Users
* clear_logs : Clear Logs
* server_control_commands : Execute Server Control Commands
* console_redirection : Access Console Redirection
* virtual_media : Access Virtual Media
* test_alerts : Test Alerts
* debug_commands : Execute Debug Commands
'''
_uids = set()
if users is None:
users = list_users()
if username in users:
log.warning('\'%s\' already exists', username)
return False
for idx in six.iterkeys(users):
_uids.add(users[idx]['index'])
uid = sorted(list(set(range(2, 12)) - _uids), reverse=True).pop()
# Create user accountvfirst
if not __execute_cmd('config -g cfgUserAdmin -o \
cfgUserAdminUserName -i {0} {1}'.format(uid, username)):
delete_user(username, uid)
return False
# Configure users permissions
if not set_permissions(username, permissions, uid):
log.warning('unable to set user permissions')
delete_user(username, uid)
return False
# Configure users password
if not change_password(username, password, uid):
log.warning('unable to set user password')
delete_user(username, uid)
return False
# Enable users admin
if not __execute_cmd('config -g cfgUserAdmin -o \
cfgUserAdminEnable -i {0} 1'.format(uid)):
delete_user(username, uid)
return False
return True | Create user accounts
CLI Example:
.. code-block:: bash
salt dell drac.create_user [USERNAME] [PASSWORD] [PRIVILEGES]
salt dell drac.create_user diana secret login,test_alerts,clear_logs
DRAC Privileges
* login : Login to iDRAC
* drac : Configure iDRAC
* user_management : Configure Users
* clear_logs : Clear Logs
* server_control_commands : Execute Server Control Commands
* console_redirection : Access Console Redirection
* virtual_media : Access Virtual Media
* test_alerts : Test Alerts
* debug_commands : Execute Debug Commands | Below is the instruction that describes the task:
### Input:
Create user accounts
CLI Example:
.. code-block:: bash
salt dell drac.create_user [USERNAME] [PASSWORD] [PRIVILEGES]
salt dell drac.create_user diana secret login,test_alerts,clear_logs
DRAC Privileges
* login : Login to iDRAC
* drac : Configure iDRAC
* user_management : Configure Users
* clear_logs : Clear Logs
* server_control_commands : Execute Server Control Commands
* console_redirection : Access Console Redirection
* virtual_media : Access Virtual Media
* test_alerts : Test Alerts
* debug_commands : Execute Debug Commands
### Response:
def create_user(username, password, permissions, users=None):
    '''
    Create user accounts
    CLI Example:
    .. code-block:: bash
        salt dell drac.create_user [USERNAME] [PASSWORD] [PRIVILEGES]
        salt dell drac.create_user diana secret login,test_alerts,clear_logs
    DRAC Privileges
      * login : Login to iDRAC
      * drac : Configure iDRAC
      * user_management : Configure Users
      * clear_logs : Clear Logs
      * server_control_commands : Execute Server Control Commands
      * console_redirection : Access Console Redirection
      * virtual_media : Access Virtual Media
      * test_alerts : Test Alerts
      * debug_commands : Execute Debug Commands
    '''
    _uids = set()
    if users is None:
        users = list_users()
    if username in users:
        log.warning('\'%s\' already exists', username)
        return False
    # Collect the user indexes already in use so a free slot can be picked.
    for idx in six.iterkeys(users):
        _uids.add(users[idx]['index'])
    # Slots 2-11 are assignable; reverse-sort + pop selects the lowest
    # free index. NOTE(review): raises IndexError when all slots are
    # taken — confirm that is the intended failure mode.
    uid = sorted(list(set(range(2, 12)) - _uids), reverse=True).pop()
    # Create the user account first
    if not __execute_cmd('config -g cfgUserAdmin -o \
                 cfgUserAdminUserName -i {0} {1}'.format(uid, username)):
        delete_user(username, uid)
        return False
    # Configure users permissions
    if not set_permissions(username, permissions, uid):
        log.warning('unable to set user permissions')
        delete_user(username, uid)
        return False
    # Configure users password
    if not change_password(username, password, uid):
        log.warning('unable to set user password')
        delete_user(username, uid)
        return False
    # Enable users admin
    if not __execute_cmd('config -g cfgUserAdmin -o \
                  cfgUserAdminEnable -i {0} 1'.format(uid)):
        delete_user(username, uid)
        return False
    return True
def list(self, filter_title=None, filter_ids=None, page=None):
"""
:type filter_title: str
:param filter_title: Filter by dashboard title
:type filter_ids: list of ints
:param filter_ids: Filter by dashboard ids
:type page: int
:param page: Pagination index
:rtype: dict
:return: The JSON response of the API, with an additional 'page' key
if there are paginated results
::
{
"dashboards": [
{
"id": "integer",
"title": "string",
"description": "string",
"icon": "string",
"created_at": "time",
"updated_at": "time",
"visibility": "string",
"editable": "string",
"ui_url": "string",
"api_url": "string",
"owner_email": "string",
"filter": {
"event_types": ["string"],
"attributes": ["string"]
}
}
],
"pages": {
"last": {
"url": "https://api.newrelic.com/v2/dashboards.json?page=1&per_page=100",
"rel": "last"
},
"next": {
"url": "https://api.newrelic.com/v2/dashboards.json?page=1&per_page=100",
"rel": "next"
}
}
}
"""
filters = [
'filter[title]={0}'.format(filter_title) if filter_title else None,
'filter[ids]={0}'.format(','.join([str(dash_id) for dash_id in filter_ids])) if filter_ids else None,
'page={0}'.format(page) if page else None
]
return self._get(
url='{0}dashboards.json'.format(self.URL),
headers=self.headers,
params=self.build_param_string(filters)
) | :type filter_title: str
:param filter_title: Filter by dashboard title
:type filter_ids: list of ints
:param filter_ids: Filter by dashboard ids
:type page: int
:param page: Pagination index
:rtype: dict
:return: The JSON response of the API, with an additional 'page' key
if there are paginated results
::
{
"dashboards": [
{
"id": "integer",
"title": "string",
"description": "string",
"icon": "string",
"created_at": "time",
"updated_at": "time",
"visibility": "string",
"editable": "string",
"ui_url": "string",
"api_url": "string",
"owner_email": "string",
"filter": {
"event_types": ["string"],
"attributes": ["string"]
}
}
],
"pages": {
"last": {
"url": "https://api.newrelic.com/v2/dashboards.json?page=1&per_page=100",
"rel": "last"
},
"next": {
"url": "https://api.newrelic.com/v2/dashboards.json?page=1&per_page=100",
"rel": "next"
}
}
} | Below is the instruction that describes the task:
### Input:
:type filter_title: str
:param filter_title: Filter by dashboard title
:type filter_ids: list of ints
:param filter_ids: Filter by dashboard ids
:type page: int
:param page: Pagination index
:rtype: dict
:return: The JSON response of the API, with an additional 'page' key
if there are paginated results
::
{
"dashboards": [
{
"id": "integer",
"title": "string",
"description": "string",
"icon": "string",
"created_at": "time",
"updated_at": "time",
"visibility": "string",
"editable": "string",
"ui_url": "string",
"api_url": "string",
"owner_email": "string",
"filter": {
"event_types": ["string"],
"attributes": ["string"]
}
}
],
"pages": {
"last": {
"url": "https://api.newrelic.com/v2/dashboards.json?page=1&per_page=100",
"rel": "last"
},
"next": {
"url": "https://api.newrelic.com/v2/dashboards.json?page=1&per_page=100",
"rel": "next"
}
}
}
### Response:
def list(self, filter_title=None, filter_ids=None, page=None):
    """
    List dashboards, optionally filtered and paginated.

    :type filter_title: str
    :param filter_title: Filter by dashboard title
    :type filter_ids: list of ints
    :param filter_ids: Filter by dashboard ids
    :type page: int
    :param page: Pagination index
    :rtype: dict
    :return: The JSON response of the API, with an additional 'page' key
        if there are paginated results
    ::
        {
            "dashboards": [
                {
                    "id": "integer",
                    "title": "string",
                    "description": "string",
                    "icon": "string",
                    "created_at": "time",
                    "updated_at": "time",
                    "visibility": "string",
                    "editable": "string",
                    "ui_url": "string",
                    "api_url": "string",
                    "owner_email": "string",
                    "filter": {
                        "event_types": ["string"],
                        "attributes": ["string"]
                    }
                }
            ],
            "pages": {
                "last": {
                    "url": "https://api.newrelic.com/v2/dashboards.json?page=1&per_page=100",
                    "rel": "last"
                },
                "next": {
                    "url": "https://api.newrelic.com/v2/dashboards.json?page=1&per_page=100",
                    "rel": "next"
                }
            }
        }
    """
    # Build the 'filter[ids]=1,2,...' entry separately; absent filters
    # stay as None entries, which build_param_string is expected to drop.
    ids_entry = None
    if filter_ids:
        ids_entry = 'filter[ids]={0}'.format(
            ','.join(str(dash_id) for dash_id in filter_ids))
    filters = [
        'filter[title]={0}'.format(filter_title) if filter_title else None,
        ids_entry,
        'page={0}'.format(page) if page else None,
    ]
    return self._get(
        url='{0}dashboards.json'.format(self.URL),
        headers=self.headers,
        params=self.build_param_string(filters),
    )
def _characteristics_discovered(self, service):
"""Called when GATT characteristics have been discovered."""
# Characteristics for the specified service were discovered. Update
# set of discovered services and signal when all have been discovered.
self._discovered_services.add(service)
if self._discovered_services >= set(self._peripheral.services()):
# Found all the services characteristics, finally time to fire the
# service discovery complete event.
self._discovered.set() | Called when GATT characteristics have been discovered. | Below is the the instruction that describes the task:
### Input:
Called when GATT characteristics have been discovered.
### Response:
def _characteristics_discovered(self, service):
"""Called when GATT characteristics have been discovered."""
# Characteristics for the specified service were discovered. Update
# set of discovered services and signal when all have been discovered.
self._discovered_services.add(service)
if self._discovered_services >= set(self._peripheral.services()):
# Found all the services characteristics, finally time to fire the
# service discovery complete event.
self._discovered.set() |
def update(self, db):
"""update(db)
Update this database with all resources entries in the resource
database DB.
"""
self.lock.acquire()
update_db(self.db, db.db)
self.lock.release() | update(db)
Update this database with all resources entries in the resource
database DB. | Below is the instruction that describes the task:
### Input:
update(db)
Update this database with all resources entries in the resource
database DB.
### Response:
def update(self, db):
    """update(db)
    Update this database with all resources entries in the resource
    database DB.

    The lock is released in a ``finally`` block so an exception raised by
    ``update_db`` no longer leaves the lock held.
    """
    self.lock.acquire()
    try:
        update_db(self.db, db.db)
    finally:
        self.lock.release()
def serialize(exc):
""" Serialize `self.exc` into a data dictionary representing it.
"""
return {
'exc_type': type(exc).__name__,
'exc_path': get_module_path(type(exc)),
'exc_args': list(map(safe_for_serialization, exc.args)),
'value': safe_for_serialization(exc),
} | Serialize `self.exc` into a data dictionary representing it. | Below is the the instruction that describes the task:
### Input:
Serialize `self.exc` into a data dictionary representing it.
### Response:
def serialize(exc):
    """ Serialize the exception *exc* into a data dictionary representing it.
    """
    exc_cls = type(exc)
    data = {}
    data['exc_type'] = exc_cls.__name__
    data['exc_path'] = get_module_path(exc_cls)
    data['exc_args'] = [safe_for_serialization(arg) for arg in exc.args]
    data['value'] = safe_for_serialization(exc)
    return data
def tf_loss_per_instance(self, states, internals, actions, terminal, reward,
next_states, next_internals, update, reference=None):
"""
Creates the TensorFlow operations for calculating the loss per batch instance.
Args:
states: Dict of state tensors.
internals: Dict of prior internal state tensors.
actions: Dict of action tensors.
terminal: Terminal boolean tensor.
reward: Reward tensor.
next_states: Dict of successor state tensors.
next_internals: List of posterior internal state tensors.
update: Boolean tensor indicating whether this call happens during an update.
reference: Optional reference tensor(s), in case of a comparative loss.
Returns:
Loss per instance tensor.
"""
raise NotImplementedError | Creates the TensorFlow operations for calculating the loss per batch instance.
Args:
states: Dict of state tensors.
internals: Dict of prior internal state tensors.
actions: Dict of action tensors.
terminal: Terminal boolean tensor.
reward: Reward tensor.
next_states: Dict of successor state tensors.
next_internals: List of posterior internal state tensors.
update: Boolean tensor indicating whether this call happens during an update.
reference: Optional reference tensor(s), in case of a comparative loss.
Returns:
Loss per instance tensor. | Below is the instruction that describes the task:
### Input:
Creates the TensorFlow operations for calculating the loss per batch instance.
Args:
states: Dict of state tensors.
internals: Dict of prior internal state tensors.
actions: Dict of action tensors.
terminal: Terminal boolean tensor.
reward: Reward tensor.
next_states: Dict of successor state tensors.
next_internals: List of posterior internal state tensors.
update: Boolean tensor indicating whether this call happens during an update.
reference: Optional reference tensor(s), in case of a comparative loss.
Returns:
Loss per instance tensor.
### Response:
def tf_loss_per_instance(self, states, internals, actions, terminal, reward,
                         next_states, next_internals, update, reference=None):
    """
    Creates the TensorFlow operations for calculating the loss per batch instance.
    Args:
        states: Dict of state tensors.
        internals: Dict of prior internal state tensors.
        actions: Dict of action tensors.
        terminal: Terminal boolean tensor.
        reward: Reward tensor.
        next_states: Dict of successor state tensors.
        next_internals: List of posterior internal state tensors.
        update: Boolean tensor indicating whether this call happens during an update.
        reference: Optional reference tensor(s), in case of a comparative loss.
    Returns:
        Loss per instance tensor.
    """
    # Abstract hook: concrete model subclasses must override this to
    # define their per-instance loss computation.
    raise NotImplementedError
def assign_component_value(cli, transform_path, component_type, name, value):
"""
Requests status on whether a scene is loaded or not.
:param cli:
:param transform_path: The path of the transform where the component resides
:param component_type: The C# type name of the component GetComponent(type)
:param name: The field or property name.
:param value: The value to assign (String | Number | Boolean)
:return: bool
"""
message_payload = {
"transform_path": transform_path,
"component_type": component_type,
"name": name,
"value": value
}
msg = message.Message("assign.unity.component.value", message_payload)
cli.send_message(msg)
response = cli.read_message()
verify_response(response) | Requests status on whether a scene is loaded or not.
:param cli:
:param transform_path: The path of the transform where the component resides
:param component_type: The C# type name of the component GetComponent(type)
:param name: The field or property name.
:param value: The value to assign (String | Number | Boolean)
:return: bool | Below is the instruction that describes the task:
### Input:
Requests status on whether a scene is loaded or not.
:param cli:
:param transform_path: The path of the transform where the component resides
:param component_type: The C# type name of the component GetComponent(type)
:param name: The field or property name.
:param value: The value to assign (String | Number | Boolean)
:return: bool
### Response:
def assign_component_value(cli, transform_path, component_type, name, value):
    """
    Assign a value to a field or property of a Unity component.

    The previous docstring ("Requests status on whether a scene is loaded
    or not") was copy-pasted from another function and misdescribed this
    one; the message type sent is ``assign.unity.component.value``.

    :param cli: Client connection used to exchange messages.
    :param transform_path: The path of the transform where the component resides
    :param component_type: The C# type name of the component GetComponent(type)
    :param name: The field or property name.
    :param value: The value to assign (String | Number | Boolean)
    :return: None; ``verify_response`` raises when the reply is an error.
    """
    message_payload = {
        "transform_path": transform_path,
        "component_type": component_type,
        "name": name,
        "value": value
    }
    msg = message.Message("assign.unity.component.value", message_payload)
    cli.send_message(msg)
    response = cli.read_message()
    verify_response(response)
def unregister_file(path, pkg=None, conn=None): # pylint: disable=W0612
'''
Unregister a file from the package database
'''
close = False
if conn is None:
close = True
conn = init()
conn.execute('DELETE FROM files WHERE path=?', (path, ))
if close:
conn.close() | Unregister a file from the package database | Below is the the instruction that describes the task:
### Input:
Unregister a file from the package database
### Response:
def unregister_file(path, pkg=None, conn=None):  # pylint: disable=W0612
    '''
    Unregister a file from the package database

    path
        Path of the file to delete from the ``files`` table.
    pkg
        Unused; kept for interface compatibility.
    conn
        Optional open database connection. When omitted, a connection is
        opened via ``init()`` and is now closed even if the DELETE raises
        (the original leaked it on error).
    '''
    close = conn is None
    if close:
        conn = init()
    try:
        conn.execute('DELETE FROM files WHERE path=?', (path, ))
    finally:
        # Only close connections we opened ourselves; callers manage the
        # lifetime of a connection they pass in.
        if close:
            conn.close()
def initialize(self):
"""See :meth:`pymlab.sensors.Device.initialize` for more information.
Calls `initialize()` on all devices connected to the bus.
"""
Device.initialize(self)
for child in iter(self.children.values()):
child.initialize() | See :meth:`pymlab.sensors.Device.initialize` for more information.
Calls `initialize()` on all devices connected to the bus. | Below is the instruction that describes the task:
### Input:
See :meth:`pymlab.sensors.Device.initialize` for more information.
Calls `initialize()` on all devices connected to the bus.
### Response:
def initialize(self):
    """See :meth:`pymlab.sensors.Device.initialize` for more information.
    Calls `initialize()` on all devices connected to the bus.
    """
    # Initialize this device itself first...
    Device.initialize(self)
    # ...then recursively initialize every attached child device.
    for device in self.children.values():
        device.initialize()
def team_profiles(self, team):
"""
Get team's social media profiles linked on their TBA page.
:param team: Team to get data on.
:return: List of Profile objects.
"""
return [Profile(raw) for raw in self._get('team/%s/social_media' % self.team_key(team))] | Get team's social media profiles linked on their TBA page.
:param team: Team to get data on.
:return: List of Profile objects. | Below is the instruction that describes the task:
### Input:
Get team's social media profiles linked on their TBA page.
:param team: Team to get data on.
:return: List of Profile objects.
### Response:
def team_profiles(self, team):
    """
    Get team's social media profiles linked on their TBA page.
    :param team: Team to get data on.
    :return: List of Profile objects.
    """
    endpoint = 'team/%s/social_media' % self.team_key(team)
    return [Profile(item) for item in self._get(endpoint)]
def stylize(text, styles, reset=True):
"""conveniently styles your text as and resets ANSI codes at its end."""
terminator = attr("reset") if reset else ""
return "{}{}{}".format("".join(styles), text, terminator) | conveniently styles your text as and resets ANSI codes at its end. | Below is the the instruction that describes the task:
### Input:
conveniently styles your text as and resets ANSI codes at its end.
### Response:
def stylize(text, styles, reset=True):
    """Apply the given ANSI style codes to *text*.

    By default a reset code is appended so the styles do not bleed into
    subsequent output; pass ``reset=False`` to leave the styles open.
    """
    suffix = attr("reset") if reset else ""
    return "%s%s%s" % ("".join(styles), text, suffix)
def _status_query(query, hostname, enumerate=None, service=None):
'''
Send query along to Nagios.
'''
config = _config()
data = None
params = {
'hostname': hostname,
'query': query,
}
ret = {
'result': False
}
if enumerate:
params['formatoptions'] = 'enumerate'
if service:
params['servicedescription'] = service
if config['username'] and config['password'] is not None:
auth = (config['username'], config['password'],)
else:
auth = None
try:
result = salt.utils.http.query(
config['url'],
method='GET',
params=params,
decode=True,
data=data,
text=True,
status=True,
header_dict={},
auth=auth,
backend='requests',
opts=__opts__,
)
except ValueError:
ret['error'] = 'Please ensure Nagios is running.'
ret['result'] = False
return ret
if result.get('status', None) == salt.ext.six.moves.http_client.OK:
try:
ret['json_data'] = result['dict']
ret['result'] = True
except ValueError:
ret['error'] = 'Please ensure Nagios is running.'
elif result.get('status', None) == salt.ext.six.moves.http_client.UNAUTHORIZED:
ret['error'] = 'Authentication failed. Please check the configuration.'
elif result.get('status', None) == salt.ext.six.moves.http_client.NOT_FOUND:
ret['error'] = 'URL {0} was not found.'.format(config['url'])
else:
ret['error'] = 'Results: {0}'.format(result.text)
return ret | Send query along to Nagios. | Below is the the instruction that describes the task:
### Input:
Send a status query along to Nagios.
### Response:
def _status_query(query, hostname, enumerate=None, service=None):
    '''
    Send query along to Nagios.

    query: name of the Nagios status query (sent as the ``query`` URL param).
    hostname: host the query applies to.
    enumerate: when truthy, adds ``formatoptions=enumerate`` to the request.
    service: optional service description to narrow the query.

    Returns a dict with at least ``result`` (bool); on success also
    ``json_data`` (the decoded response), on failure an ``error`` message.
    '''
    config = _config()
    data = None
    params = {
        'hostname': hostname,
        'query': query,
    }
    ret = {
        'result': False
    }
    if enumerate:
        params['formatoptions'] = 'enumerate'
    if service:
        params['servicedescription'] = service
    # NOTE(review): asymmetric checks -- username is tested for truthiness
    # but password only against None; confirm this matches the intended
    # "credentials configured" semantics.
    if config['username'] and config['password'] is not None:
        auth = (config['username'], config['password'],)
    else:
        auth = None
    try:
        result = salt.utils.http.query(
            config['url'],
            method='GET',
            params=params,
            decode=True,
            data=data,
            text=True,
            status=True,
            header_dict={},
            auth=auth,
            backend='requests',
            opts=__opts__,
        )
    except ValueError:
        # Endpoint unreachable / undecodable -- treated as "Nagios not running".
        ret['error'] = 'Please ensure Nagios is running.'
        ret['result'] = False
        return ret
    if result.get('status', None) == salt.ext.six.moves.http_client.OK:
        try:
            # NOTE(review): a missing 'dict' key would raise KeyError, not
            # ValueError, so this handler likely never fires -- verify.
            ret['json_data'] = result['dict']
            ret['result'] = True
        except ValueError:
            ret['error'] = 'Please ensure Nagios is running.'
    elif result.get('status', None) == salt.ext.six.moves.http_client.UNAUTHORIZED:
        ret['error'] = 'Authentication failed. Please check the configuration.'
    elif result.get('status', None) == salt.ext.six.moves.http_client.NOT_FOUND:
        ret['error'] = 'URL {0} was not found.'.format(config['url'])
    else:
        # Any other HTTP status: surface the raw response text.
        ret['error'] = 'Results: {0}'.format(result.text)
    return ret
def _get_relationships(model):
"""
Gets the necessary relationships for the resource
by inspecting the sqlalchemy model for relationships.
:param DeclarativeMeta model: The SQLAlchemy ORM model.
:return: A tuple of Relationship/ListRelationship instances
corresponding to the relationships on the Model.
:rtype: tuple
"""
relationships = []
for name, relationship in inspect(model).relationships.items():
class_ = relationship.mapper.class_
if relationship.uselist:
rel = ListRelationship(name, relation=class_.__name__)
else:
rel = Relationship(name, relation=class_.__name__)
relationships.append(rel)
return tuple(relationships) | Gets the necessary relationships for the resource
by inspecting the sqlalchemy model for relationships.
:param DeclarativeMeta model: The SQLAlchemy ORM model.
:return: A tuple of Relationship/ListRelationship instances
corresponding to the relationships on the Model.
:rtype: tuple | Below is the the instruction that describes the task:
### Input:
Gets the necessary relationships for the resource
by inspecting the sqlalchemy model for relationships.
:param DeclarativeMeta model: The SQLAlchemy ORM model.
:return: A tuple of Relationship/ListRelationship instances
corresponding to the relationships on the Model.
:rtype: tuple
### Response:
def _get_relationships(model):
    """
    Gets the necessary relationships for the resource
    by inspecting the sqlalchemy model for relationships.

    :param DeclarativeMeta model: The SQLAlchemy ORM model.
    :return: A tuple of Relationship/ListRelationship instances
        corresponding to the relationships on the Model.
    :rtype: tuple
    """
    def build(rel_name, rel):
        # Collection relationships (uselist) map to the list variant.
        factory = ListRelationship if rel.uselist else Relationship
        return factory(rel_name, relation=rel.mapper.class_.__name__)

    return tuple(build(rel_name, rel)
                 for rel_name, rel in inspect(model).relationships.items())
async def revoke(self):
"""Removes all access rights for this user from the controller.
"""
await self.controller.revoke(self.username)
self._user_info.access = '' | Removes all access rights for this user from the controller. | Below is the the instruction that describes the task:
### Input:
Removes all access rights for this user from the controller.
### Response:
async def revoke(self):
    """Removes all access rights for this user from the controller.

    Delegates to the controller's ``revoke`` for this username, then
    clears the locally cached access level to match.
    """
    await self.controller.revoke(self.username)
    # Mirror the controller-side change in the cached user info.
    self._user_info.access = ''
def x10_housecode(self):
"""Emit the X10 house code."""
housecode = None
if self.is_x10:
housecode = insteonplm.utils.byte_to_housecode(self.addr[1])
return housecode | Emit the X10 house code. | Below is the the instruction that describes the task:
### Input:
Emit the X10 house code.
### Response:
def x10_housecode(self):
    """Emit the X10 house code, or None for non-X10 devices."""
    if not self.is_x10:
        return None
    # Second byte of the address encodes the house code for X10 devices.
    return insteonplm.utils.byte_to_housecode(self.addr[1])
def add_resource(self, transaction, parent_resource, lp):
"""
Render a POST on a new resource.
:param transaction: the transaction
:param parent_resource: the parent of the resource
:param lp: the location_path attribute of the resource
:return: the response
"""
method = getattr(parent_resource, "render_POST", None)
try:
resource = method(request=transaction.request)
except NotImplementedError:
try:
method = getattr(parent_resource, "render_POST_advanced", None)
ret = method(request=transaction.request, response=transaction.response)
if isinstance(ret, tuple) and len(ret) == 2 and isinstance(ret[1], Response) \
and isinstance(ret[0], Resource):
# Advanced handler
resource, response = ret
resource.path = lp
resource.changed = True
self._parent.root[resource.path] = resource
transaction.resource = resource
transaction.response = response
if transaction.response.code is None:
transaction.response.code = defines.Codes.CREATED.number
return transaction
elif isinstance(ret, tuple) and len(ret) == 3 and isinstance(ret[1], Response) \
and isinstance(ret[0], Resource):
# Advanced handler separate
resource, response, callback = ret
ret = self._handle_separate_advanced(transaction, callback)
if not isinstance(ret, tuple) or \
not (isinstance(ret[0], Resource) and isinstance(ret[1], Response)): # pragma: no cover
transaction.response.code = defines.Codes.INTERNAL_SERVER_ERROR.number
return transaction
resource, response = ret
resource.path = lp
resource.changed = True
self._parent.root[resource.path] = resource
transaction.resource = resource
transaction.response = response
if transaction.response.code is None:
transaction.response.code = defines.Codes.CREATED.number
return transaction
else:
raise NotImplementedError
except NotImplementedError:
transaction.response.code = defines.Codes.METHOD_NOT_ALLOWED.number
return transaction
if isinstance(resource, Resource):
pass
elif isinstance(resource, tuple) and len(resource) == 2:
resource, callback = resource
resource = self._handle_separate(transaction, callback)
if not isinstance(resource, Resource): # pragma: no cover
transaction.response.code = defines.Codes.INTERNAL_SERVER_ERROR.number
return transaction
else: # pragma: no cover
# Handle error
transaction.response.code = defines.Codes.INTERNAL_SERVER_ERROR.number
return transaction
resource.path = lp
if resource.etag is not None:
transaction.response.etag = resource.etag
transaction.response.location_path = resource.path
if resource.location_query is not None and len(resource.location_query) > 0:
transaction.response.location_query = resource.location_query
transaction.response.code = defines.Codes.CREATED.number
transaction.response.payload = None
assert (isinstance(resource, Resource))
if resource.etag is not None:
transaction.response.etag = resource.etag
if resource.max_age is not None:
transaction.response.max_age = resource.max_age
resource.changed = True
transaction.resource = resource
self._parent.root[resource.path] = resource
return transaction | Render a POST on a new resource.
:param transaction: the transaction
:param parent_resource: the parent of the resource
:param lp: the location_path attribute of the resource
:return: the response | Below is the the instruction that describes the task:
### Input:
Render a POST on a new resource.
:param transaction: the transaction
:param parent_resource: the parent of the resource
:param lp: the location_path attribute of the resource
:return: the response
### Response:
def add_resource(self, transaction, parent_resource, lp):
    """
    Render a POST on a new resource.

    :param transaction: the transaction
    :param parent_resource: the parent of the resource
    :param lp: the location_path attribute of the resource
    :return: the response
    """
    # Try the plain handler first; handlers needing the response object
    # raise NotImplementedError and are retried via the "advanced" API.
    method = getattr(parent_resource, "render_POST", None)
    try:
        resource = method(request=transaction.request)
    except NotImplementedError:
        try:
            method = getattr(parent_resource, "render_POST_advanced", None)
            ret = method(request=transaction.request, response=transaction.response)
            if isinstance(ret, tuple) and len(ret) == 2 and isinstance(ret[1], Response) \
                    and isinstance(ret[0], Resource):
                # Advanced handler: (resource, response) returned directly.
                resource, response = ret
                resource.path = lp
                resource.changed = True
                self._parent.root[resource.path] = resource
                transaction.resource = resource
                transaction.response = response
                if transaction.response.code is None:
                    transaction.response.code = defines.Codes.CREATED.number
                return transaction
            elif isinstance(ret, tuple) and len(ret) == 3 and isinstance(ret[1], Response) \
                    and isinstance(ret[0], Resource):
                # Advanced handler separate: third element is a callback for
                # a delayed (separate) CoAP response.
                resource, response, callback = ret
                ret = self._handle_separate_advanced(transaction, callback)
                if not isinstance(ret, tuple) or \
                        not (isinstance(ret[0], Resource) and isinstance(ret[1], Response)):  # pragma: no cover
                    transaction.response.code = defines.Codes.INTERNAL_SERVER_ERROR.number
                    return transaction
                resource, response = ret
                resource.path = lp
                resource.changed = True
                self._parent.root[resource.path] = resource
                transaction.resource = resource
                transaction.response = response
                if transaction.response.code is None:
                    transaction.response.code = defines.Codes.CREATED.number
                return transaction
            else:
                raise NotImplementedError
        except NotImplementedError:
            # Neither handler implemented: 4.05 Method Not Allowed.
            transaction.response.code = defines.Codes.METHOD_NOT_ALLOWED.number
            return transaction
    # Plain handler result: either a Resource, or (resource, callback)
    # for a separate (delayed) response.
    if isinstance(resource, Resource):
        pass
    elif isinstance(resource, tuple) and len(resource) == 2:
        resource, callback = resource
        resource = self._handle_separate(transaction, callback)
        if not isinstance(resource, Resource):  # pragma: no cover
            transaction.response.code = defines.Codes.INTERNAL_SERVER_ERROR.number
            return transaction
    else:  # pragma: no cover
        # Handle error
        transaction.response.code = defines.Codes.INTERNAL_SERVER_ERROR.number
        return transaction
    # Register the new resource under the requested location path and
    # populate the 2.01 Created response options.
    resource.path = lp
    if resource.etag is not None:
        transaction.response.etag = resource.etag
    transaction.response.location_path = resource.path
    if resource.location_query is not None and len(resource.location_query) > 0:
        transaction.response.location_query = resource.location_query
    transaction.response.code = defines.Codes.CREATED.number
    transaction.response.payload = None
    assert (isinstance(resource, Resource))
    # NOTE(review): the etag is assigned twice (here and above) -- looks
    # redundant; confirm before simplifying.
    if resource.etag is not None:
        transaction.response.etag = resource.etag
    if resource.max_age is not None:
        transaction.response.max_age = resource.max_age
    resource.changed = True
    transaction.resource = resource
    self._parent.root[resource.path] = resource
    return transaction
def _parse(self, stream, context, path):
"""Parse stream to find a given byte string."""
start = stream.tell()
read_bytes = ""
if self.max_length:
read_bytes = stream.read(self.max_length)
else:
read_bytes = stream.read()
skip = read_bytes.find(self.find) + len(self.find)
stream.seek(start + skip)
return skip | Parse stream to find a given byte string. | Below is the the instruction that describes the task:
### Input:
Parse stream to find a given byte string.
### Response:
def _parse(self, stream, context, path):
"""Parse stream to find a given byte string."""
start = stream.tell()
read_bytes = ""
if self.max_length:
read_bytes = stream.read(self.max_length)
else:
read_bytes = stream.read()
skip = read_bytes.find(self.find) + len(self.find)
stream.seek(start + skip)
return skip |
def escape( x, lb=False ):
"""
Ensure a string does not contain HTML-reserved characters (including
double quotes)
Optionally also insert a linebreak if the string is too long
"""
# Insert a linebreak? Roughly around the middle of the string,
if lb:
l = len(x)
if l >= 10:
l >>= 1 # middle of the string
s1 = x.find( ' ', l ) # first ws to the right
s2 = x.rfind( ' ', 0, l ) # first ws to the left
if s2 > 0:
s = s2 if s1<0 or l-s1 > s2-l else s1
x = x[:s] + '\\n' + x[s+1:]
elif s1 > 0:
x = x[:s1] + '\\n' + x[s1+1:]
# Escape HTML reserved characters
return x.replace("&", "&").replace("<", "<").replace(">", ">").replace('"', """) | Ensure a string does not contain HTML-reserved characters (including
double quotes)
Optionally also insert a linebreak if the string is too long | Below is the the instruction that describes the task:
### Input:
Ensure a string does not contain HTML-reserved characters (including
double quotes)
Optionally also insert a linebreak if the string is too long
### Response:
def escape(x, lb=False):
    """
    Ensure a string does not contain HTML-reserved characters (including
    double quotes).

    Optionally also insert a linebreak (a literal backslash-n sequence,
    as used in e.g. graphviz labels) near the middle of the string.
    """
    if lb:
        half = len(x)
        if half >= 10:
            half >>= 1  # aim for the middle of the string
            right = x.find(' ', half)      # first whitespace to the right
            left = x.rfind(' ', 0, half)   # first whitespace to the left
            if left > 0:
                cut = left if right < 0 or half - right > left - half else right
                x = x[:cut] + '\\n' + x[cut + 1:]
            elif right > 0:
                x = x[:right] + '\\n' + x[right + 1:]
    # '&' must be escaped first so the other entities are not double-escaped.
    return (x.replace("&", "&amp;")
             .replace("<", "&lt;")
             .replace(">", "&gt;")
             .replace('"', "&quot;"))
def competition_list_files(self, competition):
""" list files for competition
Parameters
==========
competition: the name of the competition
"""
competition_list_files_result = self.process_response(
self.competitions_data_list_files_with_http_info(id=competition))
return [File(f) for f in competition_list_files_result] | list files for competition
Parameters
==========
competition: the name of the competition | Below is the the instruction that describes the task:
### Input:
list files for competition
Parameters
==========
competition: the name of the competition
### Response:
def competition_list_files(self, competition):
    """ list files for competition

    Parameters
    ==========
    competition: the name of the competition
    """
    raw = self.process_response(
        self.competitions_data_list_files_with_http_info(id=competition))
    return [File(entry) for entry in raw]
def EMAIL_REQUIRED(self):
"""
The user is required to hand over an e-mail address when signing up
"""
from allauth.account import app_settings as account_settings
return self._setting("EMAIL_REQUIRED", account_settings.EMAIL_REQUIRED) | The user is required to hand over an e-mail address when signing up | Below is the the instruction that describes the task:
### Input:
The user is required to hand over an e-mail address when signing up
### Response:
def EMAIL_REQUIRED(self):
    """
    The user is required to hand over an e-mail address when signing up.

    Falls back to the ``account`` app's EMAIL_REQUIRED setting when no
    provider-specific override is configured.
    """
    # Imported lazily here (not at module level), presumably to avoid an
    # import cycle between the socialaccount and account apps -- confirm.
    from allauth.account import app_settings as account_settings
    return self._setting("EMAIL_REQUIRED", account_settings.EMAIL_REQUIRED)
def is_metaseries(self):
"""Page contains MDS MetaSeries metadata in ImageDescription tag."""
if self.index > 1 or self.software != 'MetaSeries':
return False
d = self.description
return d.startswith('<MetaData>') and d.endswith('</MetaData>') | Page contains MDS MetaSeries metadata in ImageDescription tag. | Below is the the instruction that describes the task:
### Input:
Page contains MDS MetaSeries metadata in ImageDescription tag.
### Response:
def is_metaseries(self):
    """Page contains MDS MetaSeries metadata in ImageDescription tag."""
    eligible = self.index <= 1 and self.software == 'MetaSeries'
    if not eligible:
        return False
    desc = self.description
    return desc.startswith('<MetaData>') and desc.endswith('</MetaData>')
def encode_scopes(scopes, use_quote=False):
"""
Creates a string out of a list of scopes.
:param scopes: A list of scopes
:param use_quote: Boolean flag indicating whether the string should be quoted
:return: Scopes as a string
"""
scopes_as_string = Scope.separator.join(scopes)
if use_quote:
return quote(scopes_as_string)
return scopes_as_string | Creates a string out of a list of scopes.
:param scopes: A list of scopes
:param use_quote: Boolean flag indicating whether the string should be quoted
:return: Scopes as a string | Below is the the instruction that describes the task:
### Input:
Creates a string out of a list of scopes.
:param scopes: A list of scopes
:param use_quote: Boolean flag indicating whether the string should be quoted
:return: Scopes as a string
### Response:
def encode_scopes(scopes, use_quote=False):
    """
    Creates a string out of a list of scopes.

    :param scopes: A list of scopes
    :param use_quote: Boolean flag indicating whether the string should be quoted
    :return: Scopes as a string
    """
    joined = Scope.separator.join(scopes)
    return quote(joined) if use_quote else joined
def load():
'''
Return all of the data in the minion datastore
CLI Example:
.. code-block:: bash
salt '*' data.load
'''
serial = salt.payload.Serial(__opts__)
try:
datastore_path = os.path.join(__opts__['cachedir'], 'datastore')
with salt.utils.files.fopen(datastore_path, 'rb') as rfh:
return serial.loads(rfh.read())
except (IOError, OSError, NameError):
return {} | Return all of the data in the minion datastore
CLI Example:
.. code-block:: bash
salt '*' data.load | Below is the the instruction that describes the task:
### Input:
Return all of the data in the minion datastore
CLI Example:
.. code-block:: bash
salt '*' data.load
### Response:
def load():
    '''
    Return all of the data in the minion datastore

    CLI Example:

    .. code-block:: bash

        salt '*' data.load
    '''
    serial = salt.payload.Serial(__opts__)
    try:
        datastore_path = os.path.join(__opts__['cachedir'], 'datastore')
        with salt.utils.files.fopen(datastore_path, 'rb') as rfh:
            return serial.loads(rfh.read())
    except (IOError, OSError, NameError):
        # Missing/unreadable datastore file means "no data yet" -- return an
        # empty mapping rather than raise. NOTE(review): NameError in this
        # tuple is unusual; verify it is intentional.
        return {}
def _from_dict(cls, _dict):
"""Initialize a ValueCollection object from a json dictionary."""
args = {}
if 'values' in _dict:
args['values'] = [
Value._from_dict(x) for x in (_dict.get('values'))
]
else:
raise ValueError(
'Required property \'values\' not present in ValueCollection JSON'
)
if 'pagination' in _dict:
args['pagination'] = Pagination._from_dict(_dict.get('pagination'))
else:
raise ValueError(
'Required property \'pagination\' not present in ValueCollection JSON'
)
return cls(**args) | Initialize a ValueCollection object from a json dictionary. | Below is the the instruction that describes the task:
### Input:
Initialize a ValueCollection object from a json dictionary.
### Response:
def _from_dict(cls, _dict):
    """Initialize a ValueCollection object from a json dictionary."""
    # Validate and convert 'values' before even checking 'pagination',
    # preserving the original error ordering.
    if 'values' not in _dict:
        raise ValueError(
            'Required property \'values\' not present in ValueCollection JSON'
        )
    values = [Value._from_dict(item) for item in _dict.get('values')]
    if 'pagination' not in _dict:
        raise ValueError(
            'Required property \'pagination\' not present in ValueCollection JSON'
        )
    pagination = Pagination._from_dict(_dict.get('pagination'))
    return cls(values=values, pagination=pagination)
def load_mnist():
'''Load the MNIST digits dataset.'''
mnist = skdata.mnist.dataset.MNIST()
mnist.meta # trigger download if needed.
def arr(n, dtype):
arr = mnist.arrays[n]
return arr.reshape((len(arr), -1)).astype(dtype)
train_images = arr('train_images', np.float32) / 128 - 1
train_labels = arr('train_labels', np.uint8)
return ((train_images[:50000], train_labels[:50000, 0]),
(train_images[50000:], train_labels[50000:, 0])) | Load the MNIST digits dataset. | Below is the the instruction that describes the task:
### Input:
Load the MNIST digits dataset.
### Response:
def load_mnist():
    '''Load the MNIST digits dataset.

    Returns ((train_images, train_labels), (valid_images, valid_labels)):
    the first 50000 examples form the training split, the remaining ones
    the validation split. Pixels are scaled from 0..255 to roughly [-1, 1).
    '''
    mnist = skdata.mnist.dataset.MNIST()
    mnist.meta # trigger download if needed.
    def arr(n, dtype):
        # Flatten each image into a 1-D feature vector.
        arr = mnist.arrays[n]
        return arr.reshape((len(arr), -1)).astype(dtype)
    # Map byte pixel values into [-1, 1).
    train_images = arr('train_images', np.float32) / 128 - 1
    train_labels = arr('train_labels', np.uint8)
    return ((train_images[:50000], train_labels[:50000, 0]),
            (train_images[50000:], train_labels[50000:, 0]))
def about_axis(cls, center, angle, axis, invert=False):
"""Create transformation that represents a rotation about an axis
Arguments:
| ``center`` -- Point on the axis
| ``angle`` -- Rotation angle
| ``axis`` -- Rotation axis
| ``invert`` -- When True, an inversion rotation is constructed
[default=False]
"""
return Translation(center) * \
Rotation.from_properties(angle, axis, invert) * \
Translation(-center) | Create transformation that represents a rotation about an axis
Arguments:
| ``center`` -- Point on the axis
| ``angle`` -- Rotation angle
| ``axis`` -- Rotation axis
| ``invert`` -- When True, an inversion rotation is constructed
[default=False] | Below is the the instruction that describes the task:
### Input:
Create transformation that represents a rotation about an axis
Arguments:
| ``center`` -- Point on the axis
| ``angle`` -- Rotation angle
| ``axis`` -- Rotation axis
| ``invert`` -- When True, an inversion rotation is constructed
[default=False]
### Response:
def about_axis(cls, center, angle, axis, invert=False):
    """Create transformation that represents a rotation about an axis

    Arguments:
        | ``center`` -- Point on the axis
        | ``angle`` -- Rotation angle
        | ``axis`` -- Rotation axis
        | ``invert`` -- When True, an inversion rotation is constructed
          [default=False]
    """
    # Conjugate the rotation with translations so the rotation axis passes
    # through ``center``: shift to the origin, rotate, shift back.
    move_back = Translation(center)
    rotate = Rotation.from_properties(angle, axis, invert)
    move_to_origin = Translation(-center)
    return move_back * rotate * move_to_origin
def array(self, name):
"""
Returns the array of tables with the given name.
"""
if name in self._navigable:
if isinstance(self._navigable[name], (list, tuple)):
return self[name]
else:
raise NoArrayFoundError
else:
return ArrayOfTables(toml_file=self, name=name) | Returns the array of tables with the given name. | Below is the the instruction that describes the task:
### Input:
Returns the array of tables with the given name.
### Response:
def array(self, name):
    """
    Returns the array of tables with the given name.

    Unknown names yield a fresh ArrayOfTables bound to this file; a known
    name that is not list-like raises NoArrayFoundError.
    """
    if name not in self._navigable:
        return ArrayOfTables(toml_file=self, name=name)
    if not isinstance(self._navigable[name], (list, tuple)):
        raise NoArrayFoundError
    return self[name]
def try_eval_metadata(metadata, name):
"""Evaluate given metadata to a python object, if possible"""
value = metadata[name]
if not isinstance(value, (str, unicode)):
return
if (value.startswith('"') and value.endswith('"')) or (value.startswith("'") and value.endswith("'")):
if name in ['active', 'magic_args', 'language']:
metadata[name] = value[1:-1]
return
if value.startswith('c(') and value.endswith(')'):
value = '[' + value[2:-1] + ']'
elif value.startswith('list(') and value.endswith(')'):
value = '[' + value[5:-1] + ']'
try:
metadata[name] = ast.literal_eval(value)
except (SyntaxError, ValueError):
return | Evaluate given metadata to a python object, if possible | Below is the the instruction that describes the task:
### Input:
Evaluate given metadata to a python object, if possible
### Response:
def try_eval_metadata(metadata, name):
    """Evaluate given metadata to a python object, if possible.

    Mutates ``metadata[name]`` in place; leaves it untouched when the value
    is not a string or cannot be parsed as a literal. R-style ``c(...)`` and
    ``list(...)`` vectors are rewritten to Python list syntax first.
    """
    value = metadata[name]
    # NOTE: ``unicode`` implies Python 2 (or a compatibility alias defined
    # elsewhere in this module).
    if not isinstance(value, (str, unicode)):
        return
    if (value.startswith('"') and value.endswith('"')) or (value.startswith("'") and value.endswith("'")):
        # Quoted strings: only a known set of keys gets unquoted to bare str.
        if name in ['active', 'magic_args', 'language']:
            metadata[name] = value[1:-1]
        return
    # Translate R vector/list syntax into a Python list literal.
    if value.startswith('c(') and value.endswith(')'):
        value = '[' + value[2:-1] + ']'
    elif value.startswith('list(') and value.endswith(')'):
        value = '[' + value[5:-1] + ']'
    try:
        metadata[name] = ast.literal_eval(value)
    except (SyntaxError, ValueError):
        # Not a valid literal -- keep the original string value.
        return
def min_percent(self, value: float) -> 'Size':
"""Set the minimum percentage of free space to use."""
raise_not_number(value)
self.minimum = '{}%'.format(value)
return self | Set the minimum percentage of free space to use. | Below is the the instruction that describes the task:
### Input:
Set the minimum percentage of free space to use.
### Response:
def min_percent(self, value: float) -> 'Size':
    """Set the minimum percentage of free space to use."""
    raise_not_number(value)
    self.minimum = str(value) + '%'
    return self
def sets(self):
"""Get list of sets."""
if self.cache:
return self.cache.get(
self.app.config['OAISERVER_CACHE_KEY']) | Get list of sets. | Below is the the instruction that describes the task:
### Input:
Get list of sets.
### Response:
def sets(self):
    """Get list of sets.

    Reads the cached sets stored under the OAISERVER_CACHE_KEY config key.
    Note: implicitly returns None when no cache backend is configured.
    """
    if self.cache:
        return self.cache.get(
            self.app.config['OAISERVER_CACHE_KEY'])
def can_create_replica_without_replication_connection(self):
""" go through the replication methods to see if there are ones
that does not require a working replication connection.
"""
replica_methods = self._create_replica_methods
return any(self.replica_method_can_work_without_replication_connection(method) for method in replica_methods) | go through the replication methods to see if there are ones
that does not require a working replication connection. | Below is the the instruction that describes the task:
### Input:
go through the replication methods to see if there are ones
that does not require a working replication connection.
### Response:
def can_create_replica_without_replication_connection(self):
    """ go through the replication methods to see if there are ones
    that does not require a working replication connection.
    """
    for method in self._create_replica_methods:
        if self.replica_method_can_work_without_replication_connection(method):
            return True
    return False
def validate_cmd_response_int(name, got, expected):
"""
Check that some value returned in the response to a command matches what
we put in the request (the command).
"""
if got != expected:
raise(pyhsm.exception.YHSM_Error("Bad %s in response (got %i, expected %i)" \
% (name, got, expected)))
return got | Check that some value returned in the response to a command matches what
we put in the request (the command). | Below is the the instruction that describes the task:
### Input:
Check that some value returned in the response to a command matches what
we put in the request (the command).
### Response:
def validate_cmd_response_int(name, got, expected):
    """
    Check that some value returned in the response to a command matches what
    we put in the request (the command).

    Returns ``got`` on success, raises YHSM_Error on mismatch.
    """
    if got == expected:
        return got
    raise pyhsm.exception.YHSM_Error(
        "Bad %s in response (got %i, expected %i)" % (name, got, expected))
def quote_datetime(self, value):
"""
Force the quote_datetime to always be a datetime
:param value:
:return:
"""
if value:
if isinstance(value, type_check):
self._quote_datetime = parse(value)
elif isinstance(value, datetime.datetime):
self._quote_datetime = value | Force the quote_datetime to always be a datetime
:param value:
:return: | Below is the the instruction that describes the task:
### Input:
Force the quote_datetime to always be a datetime
:param value:
:return:
### Response:
def quote_datetime(self, value):
    """
    Force the quote_datetime to always be a datetime

    :param value: a datetime, or a string parseable into one
    :return:
    """
    # Falsy values (None, '') are ignored and leave the current value as-is.
    if value:
        # ``type_check`` is defined elsewhere in this module -- presumably
        # the string types (str/unicode); TODO confirm.
        if isinstance(value, type_check):
            self._quote_datetime = parse(value)
        elif isinstance(value, datetime.datetime):
            self._quote_datetime = value
        # Any other type is silently ignored.
def sendrpc(self, argv=[]):
self._aArgv += argv
_operation = ''
try:
self._cParams.parser(self._aArgv, self._dOptions)
# set rpc operation handler
while self._cParams.get('operation') not in rpc.operations.keys():
self._cParams.set('operation', raw_input("Enter RPC Operation:\n%s:" % rpc.operations.keys()))
_operation = self._cParams.get('operation')
self._hRpcOper = rpc.operations[_operation](opts=self._dOptions)
# input missing operation parameters
self._hRpcOper.fill(params=self._cParams.get())
send_msg = self._hRpcOper.readmsg(self._cParams.get())
self._cParams.set('messageid', self._cParams.get('messageid')+1)
self._hConn.sendmsg(send_msg)
self._cParams.set('sendmsg', send_msg)
recv_msg = self._hConn.recvmsg()
self._cParams.set('recvmsg', recv_msg)
self._hRpcOper.parsemsg(self._cParams.get())
self._hRpcOper.writemsg(self._cParams.get())
# reset operation params
self._cParams.reset()
except:
if _operation != 'close-session':
print 'BNClient: Call sendrpc%s fail' % (' <'+_operation+'>' if len(_operation) else '')
sys.exit()
""" end of function exchgmsg """ | end of function exchgmsg | Below is the the instruction that describes the task:
### Input:
end of function exchgmsg
### Response:
def sendrpc(self, argv=[]):
    """Build, send and process one RPC over the session.

    Python 2 code (``print`` statement, ``raw_input``). NOTE(review): the
    mutable default argument ``argv=[]`` is shared across calls and is also
    accumulated into ``self._aArgv`` below -- verify this is intentional.
    """
    self._aArgv += argv
    _operation = ''
    try:
        self._cParams.parser(self._aArgv, self._dOptions)
        # set rpc operation handler; prompt until a known operation is given
        while self._cParams.get('operation') not in rpc.operations.keys():
            self._cParams.set('operation', raw_input("Enter RPC Operation:\n%s:" % rpc.operations.keys()))
        _operation = self._cParams.get('operation')
        self._hRpcOper = rpc.operations[_operation](opts=self._dOptions)
        # input missing operation parameters
        self._hRpcOper.fill(params=self._cParams.get())
        send_msg = self._hRpcOper.readmsg(self._cParams.get())
        self._cParams.set('messageid', self._cParams.get('messageid')+1)
        self._hConn.sendmsg(send_msg)
        self._cParams.set('sendmsg', send_msg)
        recv_msg = self._hConn.recvmsg()
        self._cParams.set('recvmsg', recv_msg)
        self._hRpcOper.parsemsg(self._cParams.get())
        self._hRpcOper.writemsg(self._cParams.get())
        # reset operation params
        self._cParams.reset()
    except:
        # NOTE(review): bare except swallows every error (including
        # KeyboardInterrupt) and then exits the process; only the
        # 'close-session' operation fails silently.
        if _operation != 'close-session':
            print 'BNClient: Call sendrpc%s fail' % (' <'+_operation+'>' if len(_operation) else '')
        sys.exit()
    """ end of function exchgmsg """
def create_page(self, content, class_, record_keeper=None, logger=None):
'''
Robust as possible
Attempts to create the page
If any of the functions used to attach content to the page
fail, keep going, keep a record of those errors in a context dict
return the page and the context dict in a tuple
'''
fields, nested_fields = separate_fields(content)
foreign_id = content.pop('id')
# remove unwanted fields
if 'latest_revision_created_at' in content:
content.pop('latest_revision_created_at')
page = class_(**fields)
# create functions to attach attributes
function_args_mapping = (
# add_section_time
(add_json_dump, ("time", nested_fields, page)),
# add_tags
(add_list_of_things, ("tags", nested_fields, page)),
# add_metadata_tags
(add_list_of_things, ("metadata_tags", nested_fields, page)),
# attach_image
(attach_image, ("image", nested_fields, page, record_keeper)),
# attach_social_media_image
(attach_image, ("social_media_image", nested_fields,
page, record_keeper)),
# attach_banner_image
(attach_image, ("banner", nested_fields, page, record_keeper)),
)
for mapping in function_args_mapping:
function = mapping[0]
_args = mapping[1]
try:
function(*_args)
except Exception as e:
if logger:
logger.log(
ERROR,
"Failed to create page content",
{
"foreign_page_id": foreign_id,
"exception": e,
"function": function.__name__,
})
# Handle content in nested_fields
body = add_stream_fields(nested_fields, page)
# body has not been added as it contains reference to pages
if body:
record_keeper.article_bodies[foreign_id] = body
# Handle relationships in nested_fields
if record_keeper:
record_relation_functions = [
record_keeper.record_nav_tags,
record_keeper.record_recommended_articles,
record_keeper.record_reaction_questions,
record_keeper.record_related_sections,
record_keeper.record_section_tags,
record_keeper.record_banner_page_link,
]
for function in record_relation_functions:
try:
function(nested_fields, foreign_id)
except Exception as e:
if logger:
logger.log(
ERROR,
"Failed to record content",
{
"foreign_page_id": foreign_id,
"exception": e,
"function": function.__name__,
})
return page | Robust as possible
Attempts to create the page
If any of the functions used to attach content to the page
fail, keep going, keep a record of those errors in a context dict
return the page and the context dict in a tuple | Below is the the instruction that describes the task:
### Input:
Robust as possible
Attempts to create the page
If any of the functions used to attach content to the page
fail, keep going, keep a record of those errors in a context dict
return the page and the context dict in a tuple
### Response:
def create_page(self, content, class_, record_keeper=None, logger=None):
'''
Robust as possible
Attempts to create the page
If any of the functions used to attach content to the page
fail, keep going, keep a record of those errors in a context dict
return the page and the context dict in a tuple
'''
fields, nested_fields = separate_fields(content)
foreign_id = content.pop('id')
# remove unwanted fields
if 'latest_revision_created_at' in content:
content.pop('latest_revision_created_at')
page = class_(**fields)
# create functions to attach attributes
function_args_mapping = (
# add_section_time
(add_json_dump, ("time", nested_fields, page)),
# add_tags
(add_list_of_things, ("tags", nested_fields, page)),
# add_metadata_tags
(add_list_of_things, ("metadata_tags", nested_fields, page)),
# attach_image
(attach_image, ("image", nested_fields, page, record_keeper)),
# attach_social_media_image
(attach_image, ("social_media_image", nested_fields,
page, record_keeper)),
# attach_banner_image
(attach_image, ("banner", nested_fields, page, record_keeper)),
)
for mapping in function_args_mapping:
function = mapping[0]
_args = mapping[1]
try:
function(*_args)
except Exception as e:
if logger:
logger.log(
ERROR,
"Failed to create page content",
{
"foreign_page_id": foreign_id,
"exception": e,
"function": function.__name__,
})
# Handle content in nested_fields
body = add_stream_fields(nested_fields, page)
# body has not been added as it contains reference to pages
if body:
record_keeper.article_bodies[foreign_id] = body
# Handle relationships in nested_fields
if record_keeper:
record_relation_functions = [
record_keeper.record_nav_tags,
record_keeper.record_recommended_articles,
record_keeper.record_reaction_questions,
record_keeper.record_related_sections,
record_keeper.record_section_tags,
record_keeper.record_banner_page_link,
]
for function in record_relation_functions:
try:
function(nested_fields, foreign_id)
except Exception as e:
if logger:
logger.log(
ERROR,
"Failed to record content",
{
"foreign_page_id": foreign_id,
"exception": e,
"function": function.__name__,
})
return page |
def get_common_register(start, end):
"""Get the register most commonly used in accessing structs.
Access to is considered for every opcode that accesses memory
in an offset from a register::
mov eax, [ebx + 5]
For every access, the struct-referencing registers, in this case
`ebx`, are counted. The most used one is returned.
Args:
start: The adderss to start at
end: The address to finish at
"""
registers = defaultdict(int)
for line in lines(start, end):
insn = line.insn
for operand in insn.operands:
if not operand.type.has_phrase:
continue
if not operand.base:
continue
register_name = operand.base
registers[register_name] += 1
return max(registers.iteritems(), key=operator.itemgetter(1))[0] | Get the register most commonly used in accessing structs.
Access to is considered for every opcode that accesses memory
in an offset from a register::
mov eax, [ebx + 5]
For every access, the struct-referencing registers, in this case
`ebx`, are counted. The most used one is returned.
Args:
start: The adderss to start at
end: The address to finish at | Below is the the instruction that describes the task:
### Input:
Get the register most commonly used in accessing structs.
Access to is considered for every opcode that accesses memory
in an offset from a register::
mov eax, [ebx + 5]
For every access, the struct-referencing registers, in this case
`ebx`, are counted. The most used one is returned.
Args:
start: The adderss to start at
end: The address to finish at
### Response:
def get_common_register(start, end):
"""Get the register most commonly used in accessing structs.
Access to is considered for every opcode that accesses memory
in an offset from a register::
mov eax, [ebx + 5]
For every access, the struct-referencing registers, in this case
`ebx`, are counted. The most used one is returned.
Args:
start: The adderss to start at
end: The address to finish at
"""
registers = defaultdict(int)
for line in lines(start, end):
insn = line.insn
for operand in insn.operands:
if not operand.type.has_phrase:
continue
if not operand.base:
continue
register_name = operand.base
registers[register_name] += 1
return max(registers.iteritems(), key=operator.itemgetter(1))[0] |
def _find_first_bigger(self, timestamps, target, lower_bound, upper_bound):
"""
Find the first element in timestamps whose value is bigger than target.
param list values: list of timestamps(epoch number).
param target: target value.
param lower_bound: lower bound for binary search.
param upper_bound: upper bound for binary search.
"""
while lower_bound < upper_bound:
pos = lower_bound + (upper_bound - lower_bound) / 2
if timestamps[pos] > target:
upper_bound = pos
else:
lower_bound = pos + 1
return pos | Find the first element in timestamps whose value is bigger than target.
param list values: list of timestamps(epoch number).
param target: target value.
param lower_bound: lower bound for binary search.
param upper_bound: upper bound for binary search. | Below is the the instruction that describes the task:
### Input:
Find the first element in timestamps whose value is bigger than target.
param list values: list of timestamps(epoch number).
param target: target value.
param lower_bound: lower bound for binary search.
param upper_bound: upper bound for binary search.
### Response:
def _find_first_bigger(self, timestamps, target, lower_bound, upper_bound):
"""
Find the first element in timestamps whose value is bigger than target.
param list values: list of timestamps(epoch number).
param target: target value.
param lower_bound: lower bound for binary search.
param upper_bound: upper bound for binary search.
"""
while lower_bound < upper_bound:
pos = lower_bound + (upper_bound - lower_bound) / 2
if timestamps[pos] > target:
upper_bound = pos
else:
lower_bound = pos + 1
return pos |
def format(obj, options):
"""Return a string representation of the Python object
Args:
obj: The Python object
options: Format options
"""
formatters = {
float_types: lambda x: '{:.{}g}'.format(x, options.digits),
}
for _types, fmtr in formatters.items():
if isinstance(obj, _types):
return fmtr(obj)
try:
if six.PY2 and isinstance(obj, six.string_types):
return str(obj.encode('utf-8'))
return str(obj)
except:
return 'OBJECT' | Return a string representation of the Python object
Args:
obj: The Python object
options: Format options | Below is the the instruction that describes the task:
### Input:
Return a string representation of the Python object
Args:
obj: The Python object
options: Format options
### Response:
def format(obj, options):
"""Return a string representation of the Python object
Args:
obj: The Python object
options: Format options
"""
formatters = {
float_types: lambda x: '{:.{}g}'.format(x, options.digits),
}
for _types, fmtr in formatters.items():
if isinstance(obj, _types):
return fmtr(obj)
try:
if six.PY2 and isinstance(obj, six.string_types):
return str(obj.encode('utf-8'))
return str(obj)
except:
return 'OBJECT' |
def compose_github(projects, data):
""" Compose projects.json for github
:param projects: projects.json
:param data: eclipse JSON
:return: projects.json with github
"""
for p in [project for project in data if len(data[project]['github_repos']) > 0]:
if 'github' not in projects[p]:
projects[p]['github'] = []
urls = [url['url'] for url in data[p]['github_repos'] if
url['url'] not in projects[p]['github']]
projects[p]['github'] += urls
return projects | Compose projects.json for github
:param projects: projects.json
:param data: eclipse JSON
:return: projects.json with github | Below is the the instruction that describes the task:
### Input:
Compose projects.json for github
:param projects: projects.json
:param data: eclipse JSON
:return: projects.json with github
### Response:
def compose_github(projects, data):
""" Compose projects.json for github
:param projects: projects.json
:param data: eclipse JSON
:return: projects.json with github
"""
for p in [project for project in data if len(data[project]['github_repos']) > 0]:
if 'github' not in projects[p]:
projects[p]['github'] = []
urls = [url['url'] for url in data[p]['github_repos'] if
url['url'] not in projects[p]['github']]
projects[p]['github'] += urls
return projects |
def next_token(self):
"""Lexical analyser of the raw input."""
while self.char is not None:
if self.char.isspace():
# The current character is a whitespace
self.whitespace()
continue
elif self.char == '#':
# The current character is `#`
self.advance()
self.comment()
continue
elif self.char.isalpha() or self.char == '_':
# The current character is a letter or `_`
return self._id()
elif self.char == ';':
# The current character is `;`
self.advance()
return Token(Nature.SEMI, ';')
elif self.char == ',':
# The current character is `,`
self.advance()
return Token(Nature.COMMA, ';')
elif self.char.isdigit():
# The current character is a number
return self.number()
elif self.char == '=' and self.peek() == '=':
# The current character is `==`
self.advance()
self.advance()
return Token(Nature.EQ, '==')
elif self.char == '!' and self.peek() == '=':
# The current character is `!=`
self.advance()
self.advance()
return Token(Nature.NE, '!=')
elif self.char == '<' and self.peek() == '=':
# The current character is `<=`
self.advance()
self.advance()
return Token(Nature.LE, '<=')
elif self.char == '>' and self.peek() == '=':
# The current character is `>=`
self.advance()
self.advance()
return Token(Nature.GE, '>=')
elif self.char == '<':
# The current character is `<`
self.advance()
return Token(Nature.LT, '<')
elif self.char == '>':
# The current character is `>`
self.advance()
return Token(Nature.GT, '>')
elif self.char == '=':
# The current character is `=`
self.advance()
return Token(Nature.ASSIGN, '=')
elif self.char == '+':
# The current character is `+`
self.advance()
return Token(Nature.PLUS, '+')
elif self.char == '-':
# The current character is `-`
self.advance()
return Token(Nature.MINUS, '-')
elif self.char == '*':
# The current character is `*`
self.advance()
return Token(Nature.MUL, '*')
elif self.char == '/' and self.peek() == '/':
# The current character is `//`
self.advance()
self.advance()
return Token(Nature.INT_DIV, '//')
elif self.char == '/':
# The current character is `/`
self.advance()
return Token(Nature.DIV, '/')
elif self.char == '(':
# The current character is `(`
self.advance()
return Token(Nature.LPAREN, '(')
elif self.char == ')':
# The current character is `)`
self.advance()
return Token(Nature.RPAREN, ')')
elif self.char == '{':
# The current character is `{`
self.advance()
return Token(Nature.LBRACKET, '{')
elif self.char == '}':
# The current character is `}`
self.advance()
return Token(Nature.RBRACKET, '}')
else:
# The current character is unknown
raise LexicalError(f"Invalid character `{self.char}`.")
# End of raw input
return Token(Nature.EOF, None) | Lexical analyser of the raw input. | Below is the the instruction that describes the task:
### Input:
Lexical analyser of the raw input.
### Response:
def next_token(self):
"""Lexical analyser of the raw input."""
while self.char is not None:
if self.char.isspace():
# The current character is a whitespace
self.whitespace()
continue
elif self.char == '#':
# The current character is `#`
self.advance()
self.comment()
continue
elif self.char.isalpha() or self.char == '_':
# The current character is a letter or `_`
return self._id()
elif self.char == ';':
# The current character is `;`
self.advance()
return Token(Nature.SEMI, ';')
elif self.char == ',':
# The current character is `,`
self.advance()
return Token(Nature.COMMA, ';')
elif self.char.isdigit():
# The current character is a number
return self.number()
elif self.char == '=' and self.peek() == '=':
# The current character is `==`
self.advance()
self.advance()
return Token(Nature.EQ, '==')
elif self.char == '!' and self.peek() == '=':
# The current character is `!=`
self.advance()
self.advance()
return Token(Nature.NE, '!=')
elif self.char == '<' and self.peek() == '=':
# The current character is `<=`
self.advance()
self.advance()
return Token(Nature.LE, '<=')
elif self.char == '>' and self.peek() == '=':
# The current character is `>=`
self.advance()
self.advance()
return Token(Nature.GE, '>=')
elif self.char == '<':
# The current character is `<`
self.advance()
return Token(Nature.LT, '<')
elif self.char == '>':
# The current character is `>`
self.advance()
return Token(Nature.GT, '>')
elif self.char == '=':
# The current character is `=`
self.advance()
return Token(Nature.ASSIGN, '=')
elif self.char == '+':
# The current character is `+`
self.advance()
return Token(Nature.PLUS, '+')
elif self.char == '-':
# The current character is `-`
self.advance()
return Token(Nature.MINUS, '-')
elif self.char == '*':
# The current character is `*`
self.advance()
return Token(Nature.MUL, '*')
elif self.char == '/' and self.peek() == '/':
# The current character is `//`
self.advance()
self.advance()
return Token(Nature.INT_DIV, '//')
elif self.char == '/':
# The current character is `/`
self.advance()
return Token(Nature.DIV, '/')
elif self.char == '(':
# The current character is `(`
self.advance()
return Token(Nature.LPAREN, '(')
elif self.char == ')':
# The current character is `)`
self.advance()
return Token(Nature.RPAREN, ')')
elif self.char == '{':
# The current character is `{`
self.advance()
return Token(Nature.LBRACKET, '{')
elif self.char == '}':
# The current character is `}`
self.advance()
return Token(Nature.RBRACKET, '}')
else:
# The current character is unknown
raise LexicalError(f"Invalid character `{self.char}`.")
# End of raw input
return Token(Nature.EOF, None) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.