code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def _inherit_from(context, uri, calling_uri):
"""called by the _inherit method in template modules to set
up the inheritance chain at the start of a template's
execution."""
if uri is None:
return None
template = _lookup_template(context, uri, calling_uri)
self_ns = context['self']
ih = self_ns
while ih.inherits is not None:
ih = ih.inherits
lclcontext = context._locals({'next': ih})
ih.inherits = TemplateNamespace("self:%s" % template.uri,
lclcontext,
template=template,
populate_self=False)
context._data['parent'] = lclcontext._data['local'] = ih.inherits
callable_ = getattr(template.module, '_mako_inherit', None)
if callable_ is not None:
ret = callable_(template, lclcontext)
if ret:
return ret
gen_ns = getattr(template.module, '_mako_generate_namespaces', None)
if gen_ns is not None:
gen_ns(context)
return (template.callable_, lclcontext) | called by the _inherit method in template modules to set
up the inheritance chain at the start of a template's
execution. | Below is the instruction that describes the task:
### Input:
called by the _inherit method in template modules to set
up the inheritance chain at the start of a template's
execution.
### Response:
def _inherit_from(context, uri, calling_uri):
    """called by the _inherit method in template modules to set
    up the inheritance chain at the start of a template's
    execution."""
    # No parent declared: nothing to chain.
    if uri is None:
        return None
    template = _lookup_template(context, uri, calling_uri)
    self_ns = context['self']
    # Walk to the end of the existing inheritance chain so the new parent
    # namespace is appended after any previously-linked parents.
    ih = self_ns
    while ih.inherits is not None:
        ih = ih.inherits
    lclcontext = context._locals({'next': ih})
    ih.inherits = TemplateNamespace("self:%s" % template.uri,
                                    lclcontext,
                                    template=template,
                                    populate_self=False)
    context._data['parent'] = lclcontext._data['local'] = ih.inherits
    # A module-level _mako_inherit hook may take over; a truthy return value
    # short-circuits the normal (callable, context) result below.
    callable_ = getattr(template.module, '_mako_inherit', None)
    if callable_ is not None:
        ret = callable_(template, lclcontext)
        if ret:
            return ret
    gen_ns = getattr(template.module, '_mako_generate_namespaces', None)
    if gen_ns is not None:
        gen_ns(context)
    return (template.callable_, lclcontext) |
def mute(self):
"""bool: The speaker's mute state.
True if muted, False otherwise.
"""
response = self.renderingControl.GetMute([
('InstanceID', 0),
('Channel', 'Master')
])
mute_state = response['CurrentMute']
return bool(int(mute_state)) | bool: The speaker's mute state.
True if muted, False otherwise. | Below is the instruction that describes the task:
### Input:
bool: The speaker's mute state.
True if muted, False otherwise.
### Response:
def mute(self):
    """bool: The speaker's mute state.
    True if muted, False otherwise.
    """
    # UPnP RenderingControl call; the 'Master' channel covers the whole device.
    response = self.renderingControl.GetMute([
        ('InstanceID', 0),
        ('Channel', 'Master')
    ])
    # CurrentMute comes back as a string ('0'/'1'), hence int() before bool().
    mute_state = response['CurrentMute']
    return bool(int(mute_state)) |
def apply_move(grid, move):
"Try to move: return a new grid, or None if illegal."
p, q = grid
bit = 1 << move
return (q, p | bit) if 0 == (bit & (p | q)) else None | Try to move: return a new grid, or None if illegal. | Below is the the instruction that describes the task:
### Input:
Try to move: return a new grid, or None if illegal.
### Response:
def apply_move(grid, move):
    "Try to move: return a new grid, or None if illegal."
    # grid is a pair of bitmasks; presumably p = side to move, q = opponent,
    # one bit per cell -- TODO confirm against callers.
    p, q = grid
    bit = 1 << move
    # Legal only if the target bit is free in both masks; the returned pair is
    # swapped so the opponent becomes the side to move next.
    return (q, p | bit) if 0 == (bit & (p | q)) else None |
def validate(self):
"""
Error check the attributes of the ActivateRequestPayload object.
"""
if self.unique_identifier is not None:
if not isinstance(self.unique_identifier,
attributes.UniqueIdentifier):
msg = "invalid unique identifier"
raise TypeError(msg) | Error check the attributes of the ActivateRequestPayload object. | Below is the the instruction that describes the task:
### Input:
Error check the attributes of the ActivateRequestPayload object.
### Response:
def validate(self):
    """
    Error check the attributes of the ActivateRequestPayload object.

    :raises TypeError: if unique_identifier is set but is not an
        attributes.UniqueIdentifier instance
    """
    # unique_identifier is optional; when present it must be the structured
    # attribute type, not a bare string/UUID.
    if self.unique_identifier is not None:
        if not isinstance(self.unique_identifier,
                          attributes.UniqueIdentifier):
            msg = "invalid unique identifier"
            raise TypeError(msg) |
def compare_states(self, sl, sr):
"""
Compares two states for similarity.
"""
joint_solver = claripy.Solver()
# make sure the canonicalized constraints are the same
n_map, n_counter, n_canon_constraint = claripy.And(*sr.solver.constraints).canonicalize() #pylint:disable=no-member
u_map, u_counter, u_canon_constraint = claripy.And(*sl.solver.constraints).canonicalize() #pylint:disable=no-member
n_canoner_constraint = sr.solver.simplify(n_canon_constraint)
u_canoner_constraint = sl.solver.simplify(u_canon_constraint)
joint_solver.add((n_canoner_constraint, u_canoner_constraint))
if n_canoner_constraint is not u_canoner_constraint:
self._report_incongruency("Different constraints!")
return False
# get the differences in registers and memory
mem_diff = sr.memory.changed_bytes(sl.memory)
reg_diff = sr.registers.changed_bytes(sl.registers)
# this is only for unicorn
if "UNICORN" in sl.options or "UNICORN" in sr.options:
if sl.arch.name == "X86":
reg_diff -= set(range(40, 52)) #ignore cc psuedoregisters
reg_diff -= set(range(320, 324)) #some other VEX weirdness
reg_diff -= set(range(340, 344)) #ip_at_syscall
elif sl.arch.name == "AMD64":
reg_diff -= set(range(144, 168)) #ignore cc psuedoregisters
# make sure the differences in registers and memory are actually just renamed
# versions of the same ASTs
for diffs,(um,nm) in (
(reg_diff, (sl.registers, sr.registers)),
(mem_diff, (sl.memory, sr.memory)),
):
for i in diffs:
bn = nm.load(i, 1)
bu = um.load(i, 1)
bnc = bn.canonicalize(var_map=n_map, counter=n_counter)[-1]
buc = bu.canonicalize(var_map=u_map, counter=u_counter)[-1]
if bnc is not buc:
self._report_incongruency("Different memory or registers (index %d, values %r and %r)!", i, bn, bu)
return False
# make sure the flags are the same
if sl.arch.name in ("AMD64", "X86", "ARM", "ARMEL", "ARMHF", "AARCH64"):
# pylint: disable=unused-variable
n_bkp = sr.regs.cc_op, sr.regs.cc_dep1, sr.regs.cc_dep2, sr.regs.cc_ndep
u_bkp = sl.regs.cc_op, sl.regs.cc_dep1, sl.regs.cc_dep2, sl.regs.cc_ndep
if sl.arch.name in ('AMD64', 'X86'):
n_flags = sr.regs.eflags.canonicalize(var_map=n_map, counter=n_counter)[-1]
u_flags = sl.regs.eflags.canonicalize(var_map=u_map, counter=u_counter)[-1]
else:
n_flags = sr.regs.flags.canonicalize(var_map=n_map, counter=n_counter)[-1]
u_flags = sl.regs.flags.canonicalize(var_map=u_map, counter=u_counter)[-1]
if n_flags is not u_flags and sl.solver.simplify(n_flags) is not sr.solver.simplify(u_flags):
self._report_incongruency("Different flags!")
return False
return True | Compares two states for similarity. | Below is the the instruction that describes the task:
### Input:
Compares two states for similarity.
### Response:
def compare_states(self, sl, sr):
    """
    Compares two states for similarity.

    :param sl: the "left" state (mapped to the u_* variables below)
    :param sr: the "right" state (mapped to the n_* variables below)
    :return: True if the states appear congruent, False otherwise
    """
    joint_solver = claripy.Solver()
    # make sure the canonicalized constraints are the same
    n_map, n_counter, n_canon_constraint = claripy.And(*sr.solver.constraints).canonicalize() #pylint:disable=no-member
    u_map, u_counter, u_canon_constraint = claripy.And(*sl.solver.constraints).canonicalize() #pylint:disable=no-member
    n_canoner_constraint = sr.solver.simplify(n_canon_constraint)
    u_canoner_constraint = sl.solver.simplify(u_canon_constraint)
    joint_solver.add((n_canoner_constraint, u_canoner_constraint))
    # NOTE(review): the "is"/"is not" comparisons throughout rely on claripy
    # interning structurally-equal ASTs as the same object -- confirm.
    if n_canoner_constraint is not u_canoner_constraint:
        self._report_incongruency("Different constraints!")
        return False
    # get the differences in registers and memory
    mem_diff = sr.memory.changed_bytes(sl.memory)
    reg_diff = sr.registers.changed_bytes(sl.registers)
    # this is only for unicorn
    if "UNICORN" in sl.options or "UNICORN" in sr.options:
        if sl.arch.name == "X86":
            reg_diff -= set(range(40, 52)) #ignore cc psuedoregisters
            reg_diff -= set(range(320, 324)) #some other VEX weirdness
            reg_diff -= set(range(340, 344)) #ip_at_syscall
        elif sl.arch.name == "AMD64":
            reg_diff -= set(range(144, 168)) #ignore cc psuedoregisters
    # make sure the differences in registers and memory are actually just renamed
    # versions of the same ASTs
    for diffs,(um,nm) in (
        (reg_diff, (sl.registers, sr.registers)),
        (mem_diff, (sl.memory, sr.memory)),
    ):
        for i in diffs:
            # compare one byte at a time after renaming variables into the
            # shared canonical namespace
            bn = nm.load(i, 1)
            bu = um.load(i, 1)
            bnc = bn.canonicalize(var_map=n_map, counter=n_counter)[-1]
            buc = bu.canonicalize(var_map=u_map, counter=u_counter)[-1]
            if bnc is not buc:
                self._report_incongruency("Different memory or registers (index %d, values %r and %r)!", i, bn, bu)
                return False
    # make sure the flags are the same
    if sl.arch.name in ("AMD64", "X86", "ARM", "ARMEL", "ARMHF", "AARCH64"):
        # pylint: disable=unused-variable
        n_bkp = sr.regs.cc_op, sr.regs.cc_dep1, sr.regs.cc_dep2, sr.regs.cc_ndep
        u_bkp = sl.regs.cc_op, sl.regs.cc_dep1, sl.regs.cc_dep2, sl.regs.cc_ndep
        if sl.arch.name in ('AMD64', 'X86'):
            n_flags = sr.regs.eflags.canonicalize(var_map=n_map, counter=n_counter)[-1]
            u_flags = sl.regs.eflags.canonicalize(var_map=u_map, counter=u_counter)[-1]
        else:
            n_flags = sr.regs.flags.canonicalize(var_map=n_map, counter=n_counter)[-1]
            u_flags = sl.regs.flags.canonicalize(var_map=u_map, counter=u_counter)[-1]
        if n_flags is not u_flags and sl.solver.simplify(n_flags) is not sr.solver.simplify(u_flags):
            self._report_incongruency("Different flags!")
            return False
    return True |
def _make_metadata_request(self, meta_id, metadata_type=None):
"""
Get the Metadata. The Session initializes with 'COMPACT-DECODED' as the format type. If that returns a DTD error
then we change to the 'STANDARD-XML' format and try again.
:param meta_id: The name of the resource, class, or lookup to get metadata for
:param metadata_type: The RETS metadata type
:return: list
"""
# If this metadata _request has already happened, returned the saved result.
key = '{0!s}:{1!s}'.format(metadata_type, meta_id)
if key in self.metadata_responses and self.cache_metadata:
response = self.metadata_responses[key]
else:
response = self._request(
capability='GetMetadata',
options={
'query': {
'Type': metadata_type,
'ID': meta_id,
'Format': self.metadata_format
}
}
)
self.metadata_responses[key] = response
if self.metadata_format == 'COMPACT-DECODED':
parser = CompactMetadata()
else:
parser = StandardXMLetadata()
try:
return parser.parse(response=response, metadata_type=metadata_type)
except RETSException as e:
# Remove response from cache
self.metadata_responses.pop(key, None)
# If the server responds with an invalid parameter for COMPACT-DECODED, try STANDARD-XML
if self.metadata_format != 'STANDARD-XML' and e.reply_code in ['20513', '20514']:
self.metadata_responses.pop(key, None)
self.metadata_format = 'STANDARD-XML'
return self._make_metadata_request(meta_id=meta_id, metadata_type=metadata_type)
raise RETSException(e.reply_text, e.reply_code) | Get the Metadata. The Session initializes with 'COMPACT-DECODED' as the format type. If that returns a DTD error
then we change to the 'STANDARD-XML' format and try again.
:param meta_id: The name of the resource, class, or lookup to get metadata for
:param metadata_type: The RETS metadata type
:return: list | Below is the instruction that describes the task:
### Input:
Get the Metadata. The Session initializes with 'COMPACT-DECODED' as the format type. If that returns a DTD error
then we change to the 'STANDARD-XML' format and try again.
:param meta_id: The name of the resource, class, or lookup to get metadata for
:param metadata_type: The RETS metadata type
:return: list
### Response:
def _make_metadata_request(self, meta_id, metadata_type=None):
    """
    Get the Metadata. The Session initializes with 'COMPACT-DECODED' as the format type. If that returns a DTD error
    then we change to the 'STANDARD-XML' format and try again.
    :param meta_id: The name of the resource, class, or lookup to get metadata for
    :param metadata_type: The RETS metadata type
    :return: list
    """
    # If this metadata _request has already happened, returned the saved result.
    key = '{0!s}:{1!s}'.format(metadata_type, meta_id)
    if key in self.metadata_responses and self.cache_metadata:
        response = self.metadata_responses[key]
    else:
        response = self._request(
            capability='GetMetadata',
            options={
                'query': {
                    'Type': metadata_type,
                    'ID': meta_id,
                    'Format': self.metadata_format
                }
            }
        )
        # Cache the raw response keyed by type:id for later calls.
        self.metadata_responses[key] = response
    if self.metadata_format == 'COMPACT-DECODED':
        parser = CompactMetadata()
    else:
        # NOTE(review): 'StandardXMLetadata' looks misspelled -- confirm it is
        # the actual class name exported by the parsers module.
        parser = StandardXMLetadata()
    try:
        return parser.parse(response=response, metadata_type=metadata_type)
    except RETSException as e:
        # Remove response from cache
        self.metadata_responses.pop(key, None)
        # If the server responds with an invalid parameter for COMPACT-DECODED, try STANDARD-XML
        # (reply codes 20513/20514) and retry once; the format switch is
        # sticky for the rest of the session.
        if self.metadata_format != 'STANDARD-XML' and e.reply_code in ['20513', '20514']:
            self.metadata_responses.pop(key, None)
            self.metadata_format = 'STANDARD-XML'
            return self._make_metadata_request(meta_id=meta_id, metadata_type=metadata_type)
        raise RETSException(e.reply_text, e.reply_code) |
def get_span_datas(self, span):
"""Extracts a list of SpanData tuples from a span
:rtype: list of opencensus.trace.span_data.SpanData
:return list of SpanData tuples
"""
span_datas = [
span_data_module.SpanData(
name=ss.name,
context=self.span_context,
span_id=ss.span_id,
parent_span_id=ss.parent_span.span_id if
ss.parent_span else None,
attributes=ss.attributes,
start_time=ss.start_time,
end_time=ss.end_time,
child_span_count=len(ss.children),
stack_trace=ss.stack_trace,
time_events=ss.time_events,
links=ss.links,
status=ss.status,
same_process_as_parent_span=ss.same_process_as_parent_span,
span_kind=ss.span_kind
)
for ss in span
]
return span_datas | Extracts a list of SpanData tuples from a span
:rtype: list of opencensus.trace.span_data.SpanData
:return list of SpanData tuples | Below is the instruction that describes the task:
### Input:
Extracts a list of SpanData tuples from a span
:rtype: list of opencensus.trace.span_data.SpanData
:return list of SpanData tuples
### Response:
def get_span_datas(self, span):
    """Extracts a list of SpanData tuples from a span
    :rtype: list of opencensus.trace.span_data.SpanData
    :return list of SpanData tuples
    """
    # NOTE(review): `span` is iterated below, so despite the singular name it
    # must be an iterable yielding span objects -- confirm with callers.
    span_datas = [
        span_data_module.SpanData(
            name=ss.name,
            # All entries share this tracer's span context.
            context=self.span_context,
            span_id=ss.span_id,
            parent_span_id=ss.parent_span.span_id if
            ss.parent_span else None,
            attributes=ss.attributes,
            start_time=ss.start_time,
            end_time=ss.end_time,
            child_span_count=len(ss.children),
            stack_trace=ss.stack_trace,
            time_events=ss.time_events,
            links=ss.links,
            status=ss.status,
            same_process_as_parent_span=ss.same_process_as_parent_span,
            span_kind=ss.span_kind
        )
        for ss in span
    ]
    return span_datas |
def mnist(training):
"""Downloads MNIST and loads it into numpy arrays."""
if training:
data_filename = 'train-images-idx3-ubyte.gz'
labels_filename = 'train-labels-idx1-ubyte.gz'
count = 60000
else:
data_filename = 't10k-images-idx3-ubyte.gz'
labels_filename = 't10k-labels-idx1-ubyte.gz'
count = 10000
data_filename = maybe_download(MNIST_URL, data_filename)
labels_filename = maybe_download(MNIST_URL, labels_filename)
return (mnist_extract_data(data_filename, count),
mnist_extract_labels(labels_filename, count)) | Downloads MNIST and loads it into numpy arrays. | Below is the the instruction that describes the task:
### Input:
Downloads MNIST and loads it into numpy arrays.
### Response:
def mnist(training):
  """Downloads MNIST and loads it into numpy arrays."""
  # Standard MNIST split sizes: 60k training images, 10k test images.
  if training:
    data_filename = 'train-images-idx3-ubyte.gz'
    labels_filename = 'train-labels-idx1-ubyte.gz'
    count = 60000
  else:
    data_filename = 't10k-images-idx3-ubyte.gz'
    labels_filename = 't10k-labels-idx1-ubyte.gz'
    count = 10000
  # maybe_download presumably returns the local path, fetching only if the
  # file is absent -- confirm against its definition.
  data_filename = maybe_download(MNIST_URL, data_filename)
  labels_filename = maybe_download(MNIST_URL, labels_filename)
  return (mnist_extract_data(data_filename, count),
          mnist_extract_labels(labels_filename, count)) |
def median(self, **kwargs):
"""Returns median of each column or row.
Returns:
A new QueryCompiler object containing the median of each column or row.
"""
if self._is_transposed:
kwargs["axis"] = kwargs.get("axis", 0) ^ 1
return self.transpose().median(**kwargs)
# Pandas default is 0 (though not mentioned in docs)
axis = kwargs.get("axis", 0)
func = self._build_mapreduce_func(pandas.DataFrame.median, **kwargs)
return self._full_axis_reduce(axis, func) | Returns median of each column or row.
Returns:
A new QueryCompiler object containing the median of each column or row. | Below is the instruction that describes the task:
### Input:
Returns median of each column or row.
Returns:
A new QueryCompiler object containing the median of each column or row.
### Response:
def median(self, **kwargs):
    """Returns median of each column or row.
    Returns:
        A new QueryCompiler object containing the median of each column or row.
    """
    # Transposed frames delegate to the untransposed representation with the
    # axis flipped (0 <-> 1 via XOR).
    if self._is_transposed:
        kwargs["axis"] = kwargs.get("axis", 0) ^ 1
        return self.transpose().median(**kwargs)
    # Pandas default is 0 (though not mentioned in docs)
    axis = kwargs.get("axis", 0)
    func = self._build_mapreduce_func(pandas.DataFrame.median, **kwargs)
    return self._full_axis_reduce(axis, func) |
def check_extensions(module_name, module_path):
"""
This function checks for extensions to boto modules. It should be called in the
__init__.py file of all boto modules. See:
http://code.google.com/p/boto/wiki/ExtendModules
for details.
"""
option_name = '%s_extend' % module_name
version = config.get('Boto', option_name, None)
if version:
dirname = module_path[0]
path = os.path.join(dirname, version)
if os.path.isdir(path):
log.info('extending module %s with: %s' % (module_name, path))
module_path.insert(0, path) | This function checks for extensions to boto modules. It should be called in the
__init__.py file of all boto modules. See:
http://code.google.com/p/boto/wiki/ExtendModules
for details. | Below is the instruction that describes the task:
### Input:
This function checks for extensions to boto modules. It should be called in the
__init__.py file of all boto modules. See:
http://code.google.com/p/boto/wiki/ExtendModules
for details.
### Response:
def check_extensions(module_name, module_path):
    """
    This function checks for extensions to boto modules. It should be called in the
    __init__.py file of all boto modules. See:
    http://code.google.com/p/boto/wiki/ExtendModules
    for details.
    """
    # Config key is '<module>_extend'; its value names a subdirectory of the
    # module's package that, if present, is prepended to the search path.
    option_name = '%s_extend' % module_name
    version = config.get('Boto', option_name, None)
    if version:
        dirname = module_path[0]
        path = os.path.join(dirname, version)
        if os.path.isdir(path):
            log.info('extending module %s with: %s' % (module_name, path))
            module_path.insert(0, path) |
def delete_variable_group(self, project, group_id):
"""DeleteVariableGroup.
[Preview API] Delete a variable group
:param str project: Project ID or project name
:param int group_id: Id of the variable group.
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if group_id is not None:
route_values['groupId'] = self._serialize.url('group_id', group_id, 'int')
self._send(http_method='DELETE',
location_id='f5b09dd5-9d54-45a1-8b5a-1c8287d634cc',
version='5.1-preview.1',
route_values=route_values) | DeleteVariableGroup.
[Preview API] Delete a variable group
:param str project: Project ID or project name
:param int group_id: Id of the variable group. | Below is the instruction that describes the task:
### Input:
DeleteVariableGroup.
[Preview API] Delete a variable group
:param str project: Project ID or project name
:param int group_id: Id of the variable group.
### Response:
def delete_variable_group(self, project, group_id):
    """DeleteVariableGroup.
    [Preview API] Delete a variable group
    :param str project: Project ID or project name
    :param int group_id: Id of the variable group.
    """
    # Only serialize the route parameters that were actually supplied.
    route_values = {}
    if project is not None:
        route_values['project'] = self._serialize.url('project', project, 'str')
    if group_id is not None:
        route_values['groupId'] = self._serialize.url('group_id', group_id, 'int')
    # Fire-and-forget DELETE; no response body is returned to the caller.
    self._send(http_method='DELETE',
               location_id='f5b09dd5-9d54-45a1-8b5a-1c8287d634cc',
               version='5.1-preview.1',
               route_values=route_values) |
def close(self):
'''
Routines to handle any cleanup before the instance shuts down.
Sockets and filehandles should be closed explicitly, to prevent
leaks.
'''
if self._closing:
return
self._closing = True
for stream in self.streams:
stream.close()
self.streams.clear()
if hasattr(self.sock, 'close'):
self.sock.close() | Routines to handle any cleanup before the instance shuts down.
Sockets and filehandles should be closed explicitly, to prevent
leaks. | Below is the instruction that describes the task:
### Input:
Routines to handle any cleanup before the instance shuts down.
Sockets and filehandles should be closed explicitly, to prevent
leaks.
### Response:
def close(self):
    '''
    Routines to handle any cleanup before the instance shuts down.
    Sockets and filehandles should be closed explicitly, to prevent
    leaks.
    '''
    # Guard makes close() idempotent and safe against re-entrant calls.
    if self._closing:
        return
    self._closing = True
    for stream in self.streams:
        stream.close()
    self.streams.clear()
    # The transport may not expose close(); only call it when available.
    if hasattr(self.sock, 'close'):
        self.sock.close() |
def delete_from_ros(self, service_name='rigid_transforms/rigid_transform_publisher', namespace=None):
"""Removes RigidTransform referencing from_frame and to_frame from ROS publisher.
Note that this may not be this exact transform, but may that references the same frames (order doesn't matter)
Also, note that it may take quite a while for the transform to disappear from rigid_transform_publisher's cache
Requires ROS rigid_transform_publisher service to be running. Assuming autolab_core is installed as a catkin package,
this can be done with: roslaunch autolab_core rigid_transforms.launch
Parameters
----------
service_name : string, optional
RigidTransformPublisher service to interface with. If the RigidTransformPublisher services are started through
rigid_transforms.launch it will be called rigid_transform_publisher
namespace : string, optional
Namespace to prepend to transform_listener_service. If None, current namespace is prepended.
Raises
------
rospy.ServiceException
If service call to rigid_transform_publisher fails
"""
if namespace == None:
service_name = rospy.get_namespace() + service_name
else:
service_name = namespace + service_name
rospy.wait_for_service(service_name, timeout = 10)
publisher = rospy.ServiceProxy(service_name, RigidTransformPublisher)
publisher(0, 0, 0, 0, 0, 0, 0, self.from_frame, self.to_frame, 'delete') | Removes RigidTransform referencing from_frame and to_frame from ROS publisher.
Note that this may not be this exact transform, but may that references the same frames (order doesn't matter)
Also, note that it may take quite a while for the transform to disappear from rigid_transform_publisher's cache
Requires ROS rigid_transform_publisher service to be running. Assuming autolab_core is installed as a catkin package,
this can be done with: roslaunch autolab_core rigid_transforms.launch
Parameters
----------
service_name : string, optional
RigidTransformPublisher service to interface with. If the RigidTransformPublisher services are started through
rigid_transforms.launch it will be called rigid_transform_publisher
namespace : string, optional
Namespace to prepend to transform_listener_service. If None, current namespace is prepended.
Raises
------
rospy.ServiceException
If service call to rigid_transform_publisher fails | Below is the instruction that describes the task:
### Input:
Removes RigidTransform referencing from_frame and to_frame from ROS publisher.
Note that this may not be this exact transform, but may that references the same frames (order doesn't matter)
Also, note that it may take quite a while for the transform to disappear from rigid_transform_publisher's cache
Requires ROS rigid_transform_publisher service to be running. Assuming autolab_core is installed as a catkin package,
this can be done with: roslaunch autolab_core rigid_transforms.launch
Parameters
----------
service_name : string, optional
RigidTransformPublisher service to interface with. If the RigidTransformPublisher services are started through
rigid_transforms.launch it will be called rigid_transform_publisher
namespace : string, optional
Namespace to prepend to transform_listener_service. If None, current namespace is prepended.
Raises
------
rospy.ServiceException
If service call to rigid_transform_publisher fails
### Response:
def delete_from_ros(self, service_name='rigid_transforms/rigid_transform_publisher', namespace=None):
    """Removes RigidTransform referencing from_frame and to_frame from ROS publisher.
    Note that this may not be this exact transform, but may that references the same frames (order doesn't matter)
    Also, note that it may take quite a while for the transform to disappear from rigid_transform_publisher's cache
    Requires ROS rigid_transform_publisher service to be running. Assuming autolab_core is installed as a catkin package,
    this can be done with: roslaunch autolab_core rigid_transforms.launch
    Parameters
    ----------
    service_name : string, optional
        RigidTransformPublisher service to interface with. If the RigidTransformPublisher services are started through
        rigid_transforms.launch it will be called rigid_transform_publisher
    namespace : string, optional
        Namespace to prepend to transform_listener_service. If None, current namespace is prepended.
    Raises
    ------
    rospy.ServiceException
        If service call to rigid_transform_publisher fails
    """
    # Fully qualify the service name with the current (or supplied) namespace.
    if namespace == None:
        service_name = rospy.get_namespace() + service_name
    else:
        service_name = namespace + service_name
    # Fail fast (rospy raises) if the service isn't up within 10 seconds.
    rospy.wait_for_service(service_name, timeout = 10)
    publisher = rospy.ServiceProxy(service_name, RigidTransformPublisher)
    # All-zero transform with mode 'delete' asks the publisher to drop the
    # transform between from_frame and to_frame.
    publisher(0, 0, 0, 0, 0, 0, 0, self.from_frame, self.to_frame, 'delete') |
def export(self, class_name, method_name, export_data=False,
export_dir='.', export_filename='data.json',
export_append_checksum=False, **kwargs):
"""
Port a trained estimator to the syntax of a chosen programming language.
Parameters
----------
:param class_name : string
The name of the class in the returned result.
:param method_name : string
The name of the method in the returned result.
:param export_data : bool, default: False
Whether the model data should be saved or not.
:param export_dir : string, default: '.' (current directory)
The directory where the model data should be saved.
:param export_filename : string, default: 'data.json'
The filename of the exported model data.
:param export_append_checksum : bool, default: False
Whether to append the checksum to the filename or not.
Returns
-------
:return : string
The transpiled algorithm with the defined placeholders.
"""
# Arguments:
self.class_name = class_name
self.method_name = method_name
# Estimator:
est = self.estimator
self.n_features = len(est.sigma_[0])
self.n_classes = len(est.classes_)
temp_type = self.temp('type')
temp_arr = self.temp('arr')
temp_arr_ = self.temp('arr[]')
temp_arr__ = self.temp('arr[][]')
# Create class prior probabilities:
priors = [temp_type.format(self.repr(c)) for c in est.class_prior_]
priors = ', '.join(priors)
self.priors = temp_arr_.format(type='double', name='priors',
values=priors)
# Create sigmas:
sigmas = []
for sigma in est.sigma_:
tmp = [temp_type.format(self.repr(s)) for s in sigma]
tmp = temp_arr.format(', '.join(tmp))
sigmas.append(tmp)
sigmas = ', '.join(sigmas)
self.sigmas = temp_arr__.format(type='double', name='sigmas',
values=sigmas)
# Create thetas:
thetas = []
for theta in est.theta_:
tmp = [temp_type.format(self.repr(t)) for t in theta]
tmp = temp_arr.format(', '.join(tmp))
thetas.append(tmp)
thetas = ', '.join(thetas)
self.thetas = temp_arr__.format(type='double', name='thetas',
values=thetas)
if self.target_method == 'predict':
# Exported:
if export_data and os.path.isdir(export_dir):
self.export_data(export_dir, export_filename,
export_append_checksum)
return self.predict('exported')
# Separated:
return self.predict('separated') | Port a trained estimator to the syntax of a chosen programming language.
Parameters
----------
:param class_name : string
The name of the class in the returned result.
:param method_name : string
The name of the method in the returned result.
:param export_data : bool, default: False
Whether the model data should be saved or not.
:param export_dir : string, default: '.' (current directory)
The directory where the model data should be saved.
:param export_filename : string, default: 'data.json'
The filename of the exported model data.
:param export_append_checksum : bool, default: False
Whether to append the checksum to the filename or not.
Returns
-------
:return : string
The transpiled algorithm with the defined placeholders. | Below is the instruction that describes the task:
### Input:
Port a trained estimator to the syntax of a chosen programming language.
Parameters
----------
:param class_name : string
The name of the class in the returned result.
:param method_name : string
The name of the method in the returned result.
:param export_data : bool, default: False
Whether the model data should be saved or not.
:param export_dir : string, default: '.' (current directory)
The directory where the model data should be saved.
:param export_filename : string, default: 'data.json'
The filename of the exported model data.
:param export_append_checksum : bool, default: False
Whether to append the checksum to the filename or not.
Returns
-------
:return : string
The transpiled algorithm with the defined placeholders.
### Response:
def export(self, class_name, method_name, export_data=False,
           export_dir='.', export_filename='data.json',
           export_append_checksum=False, **kwargs):
    """
    Port a trained estimator to the syntax of a chosen programming language.
    Parameters
    ----------
    :param class_name : string
        The name of the class in the returned result.
    :param method_name : string
        The name of the method in the returned result.
    :param export_data : bool, default: False
        Whether the model data should be saved or not.
    :param export_dir : string, default: '.' (current directory)
        The directory where the model data should be saved.
    :param export_filename : string, default: 'data.json'
        The filename of the exported model data.
    :param export_append_checksum : bool, default: False
        Whether to append the checksum to the filename or not.
    Returns
    -------
    :return : string
        The transpiled algorithm with the defined placeholders.
    """
    # Arguments:
    self.class_name = class_name
    self.method_name = method_name
    # Estimator:
    # NOTE(review): sigma_/theta_/class_prior_ match GaussianNB's fitted
    # attributes (per-class variances/means/priors) -- confirm estimator type.
    est = self.estimator
    self.n_features = len(est.sigma_[0])
    self.n_classes = len(est.classes_)
    # Language-specific code templates for scalars and 1-D/2-D arrays.
    temp_type = self.temp('type')
    temp_arr = self.temp('arr')
    temp_arr_ = self.temp('arr[]')
    temp_arr__ = self.temp('arr[][]')
    # Create class prior probabilities:
    priors = [temp_type.format(self.repr(c)) for c in est.class_prior_]
    priors = ', '.join(priors)
    self.priors = temp_arr_.format(type='double', name='priors',
                                   values=priors)
    # Create sigmas:
    sigmas = []
    for sigma in est.sigma_:
        tmp = [temp_type.format(self.repr(s)) for s in sigma]
        tmp = temp_arr.format(', '.join(tmp))
        sigmas.append(tmp)
    sigmas = ', '.join(sigmas)
    self.sigmas = temp_arr__.format(type='double', name='sigmas',
                                    values=sigmas)
    # Create thetas:
    thetas = []
    for theta in est.theta_:
        tmp = [temp_type.format(self.repr(t)) for t in theta]
        tmp = temp_arr.format(', '.join(tmp))
        thetas.append(tmp)
    thetas = ', '.join(thetas)
    self.thetas = temp_arr__.format(type='double', name='thetas',
                                    values=thetas)
    if self.target_method == 'predict':
        # Exported:
        if export_data and os.path.isdir(export_dir):
            self.export_data(export_dir, export_filename,
                             export_append_checksum)
            return self.predict('exported')
        # Separated:
        return self.predict('separated') |
def legacy_decrypt(jwe, jwk, adata='', validate_claims=True,
expiry_seconds=None):
""" Decrypts a deserialized :class:`~jose.JWE`
:param jwe: An instance of :class:`~jose.JWE`
:param jwk: A `dict` representing the JWK required to decrypt the content
of the :class:`~jose.JWE`.
:param adata: Arbitrary string data used during encryption for additional
authentication.
:param validate_claims: A `bool` indicating whether or not the `exp`, `iat`
and `nbf` claims should be validated. Defaults to
`True`.
:param expiry_seconds: An `int` containing the JWT expiry in seconds, used
when evaluating the `iat` claim. Defaults to `None`,
which disables `iat` claim validation.
:rtype: :class:`~jose.JWT`
:raises: :class:`~jose.Expired` if the JWT has expired
:raises: :class:`~jose.NotYetValid` if the JWT is not yet valid
:raises: :class:`~jose.Error` if there is an error decrypting the JWE
"""
protected_header, encrypted_key, iv, ciphertext, authentication_tag = map(
b64decode_url, jwe)
header = json_decode(protected_header)
alg = header[HEADER_ALG]
enc = header[HEADER_ENC]
# decrypt cek
encryption_key = _decrypt_key(encrypted_key, jwk, alg)
# decrypt body
((_, decipher), _), ((hash_fn, _), mod) = JWA[enc]
version = header.get(_TEMP_VER_KEY)
if version:
plaintext = decipher(ciphertext, encryption_key[-mod.digest_size/2:],
iv)
hash = hash_fn(_jwe_hash_str(ciphertext, iv, adata, version),
encryption_key[:-mod.digest_size/2], mod=mod)
else:
plaintext = decipher(ciphertext, encryption_key[:-mod.digest_size], iv)
hash = hash_fn(_jwe_hash_str(ciphertext, iv, adata, version),
encryption_key[-mod.digest_size:], mod=mod)
if not const_compare(auth_tag(hash), authentication_tag):
raise Error('Mismatched authentication tags')
if HEADER_ZIP in header:
try:
(_, decompress) = COMPRESSION[header[HEADER_ZIP]]
except KeyError:
raise Error('Unsupported compression algorithm: {}'.format(
header[HEADER_ZIP]))
plaintext = decompress(plaintext)
claims = json_decode(plaintext)
try:
del claims[_TEMP_VER_KEY]
except KeyError:
# expected when decrypting legacy tokens
pass
_validate(claims, validate_claims, expiry_seconds)
return JWT(header, claims) | Decrypts a deserialized :class:`~jose.JWE`
:param jwe: An instance of :class:`~jose.JWE`
:param jwk: A `dict` representing the JWK required to decrypt the content
of the :class:`~jose.JWE`.
:param adata: Arbitrary string data used during encryption for additional
authentication.
:param validate_claims: A `bool` indicating whether or not the `exp`, `iat`
and `nbf` claims should be validated. Defaults to
`True`.
:param expiry_seconds: An `int` containing the JWT expiry in seconds, used
when evaluating the `iat` claim. Defaults to `None`,
which disables `iat` claim validation.
:rtype: :class:`~jose.JWT`
:raises: :class:`~jose.Expired` if the JWT has expired
:raises: :class:`~jose.NotYetValid` if the JWT is not yet valid
:raises: :class:`~jose.Error` if there is an error decrypting the JWE | Below is the the instruction that describes the task:
### Input:
Decrypts a deserialized :class:`~jose.JWE`
:param jwe: An instance of :class:`~jose.JWE`
:param jwk: A `dict` representing the JWK required to decrypt the content
of the :class:`~jose.JWE`.
:param adata: Arbitrary string data used during encryption for additional
authentication.
:param validate_claims: A `bool` indicating whether or not the `exp`, `iat`
and `nbf` claims should be validated. Defaults to
`True`.
:param expiry_seconds: An `int` containing the JWT expiry in seconds, used
when evaluating the `iat` claim. Defaults to `None`,
which disables `iat` claim validation.
:rtype: :class:`~jose.JWT`
:raises: :class:`~jose.Expired` if the JWT has expired
:raises: :class:`~jose.NotYetValid` if the JWT is not yet valid
:raises: :class:`~jose.Error` if there is an error decrypting the JWE
### Response:
def legacy_decrypt(jwe, jwk, adata='', validate_claims=True,
expiry_seconds=None):
""" Decrypts a deserialized :class:`~jose.JWE`
:param jwe: An instance of :class:`~jose.JWE`
:param jwk: A `dict` representing the JWK required to decrypt the content
of the :class:`~jose.JWE`.
:param adata: Arbitrary string data used during encryption for additional
authentication.
:param validate_claims: A `bool` indicating whether or not the `exp`, `iat`
and `nbf` claims should be validated. Defaults to
`True`.
:param expiry_seconds: An `int` containing the JWT expiry in seconds, used
when evaluating the `iat` claim. Defaults to `None`,
which disables `iat` claim validation.
:rtype: :class:`~jose.JWT`
:raises: :class:`~jose.Expired` if the JWT has expired
:raises: :class:`~jose.NotYetValid` if the JWT is not yet valid
:raises: :class:`~jose.Error` if there is an error decrypting the JWE
"""
protected_header, encrypted_key, iv, ciphertext, authentication_tag = map(
b64decode_url, jwe)
header = json_decode(protected_header)
alg = header[HEADER_ALG]
enc = header[HEADER_ENC]
# decrypt cek
encryption_key = _decrypt_key(encrypted_key, jwk, alg)
# decrypt body
((_, decipher), _), ((hash_fn, _), mod) = JWA[enc]
version = header.get(_TEMP_VER_KEY)
if version:
plaintext = decipher(ciphertext, encryption_key[-mod.digest_size/2:],
iv)
hash = hash_fn(_jwe_hash_str(ciphertext, iv, adata, version),
encryption_key[:-mod.digest_size/2], mod=mod)
else:
plaintext = decipher(ciphertext, encryption_key[:-mod.digest_size], iv)
hash = hash_fn(_jwe_hash_str(ciphertext, iv, adata, version),
encryption_key[-mod.digest_size:], mod=mod)
if not const_compare(auth_tag(hash), authentication_tag):
raise Error('Mismatched authentication tags')
if HEADER_ZIP in header:
try:
(_, decompress) = COMPRESSION[header[HEADER_ZIP]]
except KeyError:
raise Error('Unsupported compression algorithm: {}'.format(
header[HEADER_ZIP]))
plaintext = decompress(plaintext)
claims = json_decode(plaintext)
try:
del claims[_TEMP_VER_KEY]
except KeyError:
# expected when decrypting legacy tokens
pass
_validate(claims, validate_claims, expiry_seconds)
return JWT(header, claims) |
def list_queues(runas=None, *args):
'''
Returns queue details of the / virtual host
CLI Example:
.. code-block:: bash
salt '*' rabbitmq.list_queues messages consumers
'''
if runas is None and not salt.utils.platform.is_windows():
runas = salt.utils.user.get_user()
cmd = [RABBITMQCTL, 'list_queues', '-q']
cmd.extend(args)
res = __salt__['cmd.run_all'](cmd, reset_system_locale=False, runas=runas, python_shell=False)
_check_response(res)
return _output_to_dict(res['stdout']) | Returns queue details of the / virtual host
CLI Example:
.. code-block:: bash
salt '*' rabbitmq.list_queues messages consumers | Below is the the instruction that describes the task:
### Input:
Returns queue details of the / virtual host
CLI Example:
.. code-block:: bash
salt '*' rabbitmq.list_queues messages consumers
### Response:
def list_queues(runas=None, *args):
'''
Returns queue details of the / virtual host
CLI Example:
.. code-block:: bash
salt '*' rabbitmq.list_queues messages consumers
'''
if runas is None and not salt.utils.platform.is_windows():
runas = salt.utils.user.get_user()
cmd = [RABBITMQCTL, 'list_queues', '-q']
cmd.extend(args)
res = __salt__['cmd.run_all'](cmd, reset_system_locale=False, runas=runas, python_shell=False)
_check_response(res)
return _output_to_dict(res['stdout']) |
def get_field_info(self, field, field_name):
"""
Given an instance of a serializer field, return a dictionary
of metadata about it.
"""
field_info = OrderedDict()
field_info['type'] = self.label_lookup[field]
field_info['required'] = getattr(field, 'required', False)
attrs = [
'label', 'help_text', 'default_value', 'placeholder', 'required',
'min_length', 'max_length', 'min_value', 'max_value', 'many'
]
if getattr(field, 'read_only', False):
return None
for attr in attrs:
value = getattr(field, attr, None)
if value is not None and value != '':
field_info[attr] = force_text(value, strings_only=True)
if 'label' not in field_info:
field_info['label'] = field_name.replace('_', ' ').title()
if hasattr(field, 'view_name'):
list_view = field.view_name.replace('-detail', '-list')
base_url = reverse(list_view, request=self.request)
field_info['type'] = 'select'
field_info['url'] = base_url
if hasattr(field, 'query_params'):
field_info['url'] += '?%s' % urlencode(field.query_params)
field_info['value_field'] = getattr(field, 'value_field', 'url')
field_info['display_name_field'] = getattr(field, 'display_name_field', 'display_name')
if hasattr(field, 'choices') and not hasattr(field, 'queryset'):
field_info['choices'] = [
{
'value': choice_value,
'display_name': force_text(choice_name, strings_only=True)
}
for choice_value, choice_name in field.choices.items()
]
return field_info | Given an instance of a serializer field, return a dictionary
of metadata about it. | Below is the the instruction that describes the task:
### Input:
Given an instance of a serializer field, return a dictionary
of metadata about it.
### Response:
def get_field_info(self, field, field_name):
"""
Given an instance of a serializer field, return a dictionary
of metadata about it.
"""
field_info = OrderedDict()
field_info['type'] = self.label_lookup[field]
field_info['required'] = getattr(field, 'required', False)
attrs = [
'label', 'help_text', 'default_value', 'placeholder', 'required',
'min_length', 'max_length', 'min_value', 'max_value', 'many'
]
if getattr(field, 'read_only', False):
return None
for attr in attrs:
value = getattr(field, attr, None)
if value is not None and value != '':
field_info[attr] = force_text(value, strings_only=True)
if 'label' not in field_info:
field_info['label'] = field_name.replace('_', ' ').title()
if hasattr(field, 'view_name'):
list_view = field.view_name.replace('-detail', '-list')
base_url = reverse(list_view, request=self.request)
field_info['type'] = 'select'
field_info['url'] = base_url
if hasattr(field, 'query_params'):
field_info['url'] += '?%s' % urlencode(field.query_params)
field_info['value_field'] = getattr(field, 'value_field', 'url')
field_info['display_name_field'] = getattr(field, 'display_name_field', 'display_name')
if hasattr(field, 'choices') and not hasattr(field, 'queryset'):
field_info['choices'] = [
{
'value': choice_value,
'display_name': force_text(choice_name, strings_only=True)
}
for choice_value, choice_name in field.choices.items()
]
return field_info |
def calculated_intervals(self, intervals):
"""
Updates the calculated intervals in the database. Performs an upsert
:param intervals: The calculated intervals
:return: None
"""
logging.debug("set calculated intervals")
self.mongo_model.set_calculated_intervals(intervals)
self.save()
self._calculated_intervals = TimeIntervals(intervals) | Updates the calculated intervals in the database. Performs an upsert
:param intervals: The calculated intervals
:return: None | Below is the the instruction that describes the task:
### Input:
Updates the calculated intervals in the database. Performs an upsert
:param intervals: The calculated intervals
:return: None
### Response:
def calculated_intervals(self, intervals):
"""
Updates the calculated intervals in the database. Performs an upsert
:param intervals: The calculated intervals
:return: None
"""
logging.debug("set calculated intervals")
self.mongo_model.set_calculated_intervals(intervals)
self.save()
self._calculated_intervals = TimeIntervals(intervals) |
def query_recent(num=8, **kwargs):
'''
query recent posts.
'''
order_by_create = kwargs.get('order_by_create', False)
kind = kwargs.get('kind', None)
if order_by_create:
if kind:
recent_recs = TabPost.select().where(
(TabPost.kind == kind) & (TabPost.valid == 1)
).order_by(
TabPost.time_create.desc()
).limit(num)
else:
recent_recs = TabPost.select().where(
TabPost.valid == 1
).order_by(
TabPost.time_create.desc()
).limit(num)
else:
if kind:
recent_recs = TabPost.select().where(
(TabPost.kind == kind) & (TabPost.valid == 1)
).order_by(
TabPost.time_update.desc()
).limit(num)
else:
recent_recs = TabPost.select().where(
TabPost.valid == 1
).order_by(
TabPost.time_update.desc()
).limit(num)
return recent_recs | query recent posts. | Below is the the instruction that describes the task:
### Input:
query recent posts.
### Response:
def query_recent(num=8, **kwargs):
'''
query recent posts.
'''
order_by_create = kwargs.get('order_by_create', False)
kind = kwargs.get('kind', None)
if order_by_create:
if kind:
recent_recs = TabPost.select().where(
(TabPost.kind == kind) & (TabPost.valid == 1)
).order_by(
TabPost.time_create.desc()
).limit(num)
else:
recent_recs = TabPost.select().where(
TabPost.valid == 1
).order_by(
TabPost.time_create.desc()
).limit(num)
else:
if kind:
recent_recs = TabPost.select().where(
(TabPost.kind == kind) & (TabPost.valid == 1)
).order_by(
TabPost.time_update.desc()
).limit(num)
else:
recent_recs = TabPost.select().where(
TabPost.valid == 1
).order_by(
TabPost.time_update.desc()
).limit(num)
return recent_recs |
def internal_get_description(dbg, seq, thread_id, frame_id, expression):
''' Fetch the variable description stub from the debug console
'''
try:
frame = dbg.find_frame(thread_id, frame_id)
description = pydevd_console.get_description(frame, thread_id, frame_id, expression)
description = pydevd_xml.make_valid_xml_value(quote(description, '/>_= \t'))
description_xml = '<xml><var name="" type="" value="%s"/></xml>' % description
cmd = dbg.cmd_factory.make_get_description_message(seq, description_xml)
dbg.writer.add_command(cmd)
except:
exc = get_exception_traceback_str()
cmd = dbg.cmd_factory.make_error_message(seq, "Error in fetching description" + exc)
dbg.writer.add_command(cmd) | Fetch the variable description stub from the debug console | Below is the the instruction that describes the task:
### Input:
Fetch the variable description stub from the debug console
### Response:
def internal_get_description(dbg, seq, thread_id, frame_id, expression):
''' Fetch the variable description stub from the debug console
'''
try:
frame = dbg.find_frame(thread_id, frame_id)
description = pydevd_console.get_description(frame, thread_id, frame_id, expression)
description = pydevd_xml.make_valid_xml_value(quote(description, '/>_= \t'))
description_xml = '<xml><var name="" type="" value="%s"/></xml>' % description
cmd = dbg.cmd_factory.make_get_description_message(seq, description_xml)
dbg.writer.add_command(cmd)
except:
exc = get_exception_traceback_str()
cmd = dbg.cmd_factory.make_error_message(seq, "Error in fetching description" + exc)
dbg.writer.add_command(cmd) |
def edit_finished(self):
"""On clean exit, update tab name."""
# Hides editor
self.hide()
if isinstance(self.tab_index, int) and self.tab_index >= 0:
# We are editing a valid tab, update name
tab_text = to_text_string(self.text())
self.main.setTabText(self.tab_index, tab_text)
self.main.sig_change_name.emit(tab_text) | On clean exit, update tab name. | Below is the the instruction that describes the task:
### Input:
On clean exit, update tab name.
### Response:
def edit_finished(self):
"""On clean exit, update tab name."""
# Hides editor
self.hide()
if isinstance(self.tab_index, int) and self.tab_index >= 0:
# We are editing a valid tab, update name
tab_text = to_text_string(self.text())
self.main.setTabText(self.tab_index, tab_text)
self.main.sig_change_name.emit(tab_text) |
def process_class_docstrings(app, what, name, obj, options, lines):
"""
For those classes for which we use ::
:template: autosummary/class_without_autosummary.rst
the documented attributes/methods have to be listed in the class
docstring. However, if one of those lists is empty, we use 'None',
which then generates warnings in sphinx / ugly html output.
This "autodoc-process-docstring" event connector removes that part
from the processed docstring.
"""
if what == "class":
joined = '\n'.join(lines)
templates = [
""".. rubric:: Attributes
.. autosummary::
:toctree:
None
""",
""".. rubric:: Methods
.. autosummary::
:toctree:
None
"""
]
for template in templates:
if template in joined:
joined = joined.replace(template, '')
lines[:] = joined.split('\n') | For those classes for which we use ::
:template: autosummary/class_without_autosummary.rst
the documented attributes/methods have to be listed in the class
docstring. However, if one of those lists is empty, we use 'None',
which then generates warnings in sphinx / ugly html output.
This "autodoc-process-docstring" event connector removes that part
from the processed docstring. | Below is the the instruction that describes the task:
### Input:
For those classes for which we use ::
:template: autosummary/class_without_autosummary.rst
the documented attributes/methods have to be listed in the class
docstring. However, if one of those lists is empty, we use 'None',
which then generates warnings in sphinx / ugly html output.
This "autodoc-process-docstring" event connector removes that part
from the processed docstring.
### Response:
def process_class_docstrings(app, what, name, obj, options, lines):
"""
For those classes for which we use ::
:template: autosummary/class_without_autosummary.rst
the documented attributes/methods have to be listed in the class
docstring. However, if one of those lists is empty, we use 'None',
which then generates warnings in sphinx / ugly html output.
This "autodoc-process-docstring" event connector removes that part
from the processed docstring.
"""
if what == "class":
joined = '\n'.join(lines)
templates = [
""".. rubric:: Attributes
.. autosummary::
:toctree:
None
""",
""".. rubric:: Methods
.. autosummary::
:toctree:
None
"""
]
for template in templates:
if template in joined:
joined = joined.replace(template, '')
lines[:] = joined.split('\n') |
def _add_alpha(self, data, alpha=None):
"""Create an alpha channel and concatenate it to the provided data.
If ``data`` is an integer type then the alpha band will be scaled
to use the smallest (min) value as fully transparent and the largest
(max) value as fully opaque. For float types the alpha band spans
0 to 1.
"""
null_mask = alpha if alpha is not None else self._create_alpha(data)
# if we are using integer data, then alpha needs to be min-int to max-int
# otherwise for floats we want 0 to 1
if np.issubdtype(data.dtype, np.integer):
# xarray sometimes upcasts this calculation, so cast again
null_mask = self._scale_to_dtype(null_mask, data.dtype).astype(data.dtype)
data = xr.concat([data, null_mask], dim="bands")
return data | Create an alpha channel and concatenate it to the provided data.
If ``data`` is an integer type then the alpha band will be scaled
to use the smallest (min) value as fully transparent and the largest
(max) value as fully opaque. For float types the alpha band spans
0 to 1. | Below is the the instruction that describes the task:
### Input:
Create an alpha channel and concatenate it to the provided data.
If ``data`` is an integer type then the alpha band will be scaled
to use the smallest (min) value as fully transparent and the largest
(max) value as fully opaque. For float types the alpha band spans
0 to 1.
### Response:
def _add_alpha(self, data, alpha=None):
"""Create an alpha channel and concatenate it to the provided data.
If ``data`` is an integer type then the alpha band will be scaled
to use the smallest (min) value as fully transparent and the largest
(max) value as fully opaque. For float types the alpha band spans
0 to 1.
"""
null_mask = alpha if alpha is not None else self._create_alpha(data)
# if we are using integer data, then alpha needs to be min-int to max-int
# otherwise for floats we want 0 to 1
if np.issubdtype(data.dtype, np.integer):
# xarray sometimes upcasts this calculation, so cast again
null_mask = self._scale_to_dtype(null_mask, data.dtype).astype(data.dtype)
data = xr.concat([data, null_mask], dim="bands")
return data |
def guess_lexer_using_modeline(text):
"""Guess lexer for given text using Vim modeline.
Returns a tuple of (lexer, accuracy).
"""
lexer, accuracy = None, None
file_type = None
try:
file_type = get_filetype_from_buffer(text)
except: # pragma: nocover
log.traceback(logging.DEBUG)
if file_type is not None:
try:
lexer = get_lexer_by_name(file_type)
except ClassNotFound:
log.traceback(logging.DEBUG)
if lexer is not None:
try:
accuracy = lexer.analyse_text(text)
except: # pragma: nocover
log.traceback(logging.DEBUG)
return lexer, accuracy | Guess lexer for given text using Vim modeline.
Returns a tuple of (lexer, accuracy). | Below is the the instruction that describes the task:
### Input:
Guess lexer for given text using Vim modeline.
Returns a tuple of (lexer, accuracy).
### Response:
def guess_lexer_using_modeline(text):
"""Guess lexer for given text using Vim modeline.
Returns a tuple of (lexer, accuracy).
"""
lexer, accuracy = None, None
file_type = None
try:
file_type = get_filetype_from_buffer(text)
except: # pragma: nocover
log.traceback(logging.DEBUG)
if file_type is not None:
try:
lexer = get_lexer_by_name(file_type)
except ClassNotFound:
log.traceback(logging.DEBUG)
if lexer is not None:
try:
accuracy = lexer.analyse_text(text)
except: # pragma: nocover
log.traceback(logging.DEBUG)
return lexer, accuracy |
def get_paths(self, x1, y1, x2, y2, panel_params, coord, ax):
"""
Compute paths that create the arrow heads
Parameters
----------
x1, y1, x2, y2 : array_like
List of points that define the tails of the arrows.
The arrow heads will be at x1, y1. If you need them
at x2, y2 reverse the input.
Returns
-------
out : list of Path
Paths that create arrow heads
"""
Path = mpath.Path
# Create reusable lists of vertices and codes
# arrowhead path has 3 vertices (Nones),
# plus dummy vertex for the STOP code
verts = [None, None, None,
(0, 0)]
# codes list remains the same after initialization
codes = [Path.MOVETO, Path.LINETO, Path.LINETO,
Path.STOP]
# Slices into the vertices list
slc = slice(0, 3)
# We need the plot dimensions so that we can
# compute scaling factors
fig = ax.get_figure()
width, height = fig.get_size_inches()
ranges = coord.range(panel_params)
width_ = np.ptp(ranges.x)
height_ = np.ptp(ranges.y)
# scaling factors to prevent skewed arrowheads
lx = self.length * width_/width
ly = self.length * height_/height
# angle in radians
a = self.angle * np.pi / 180
# direction of arrow head
xdiff, ydiff = x2 - x1, y2 - y1
rotations = np.arctan2(ydiff/ly, xdiff/lx)
# Arrow head vertices
v1x = x1 + lx * np.cos(rotations + a)
v1y = y1 + ly * np.sin(rotations + a)
v2x = x1 + lx * np.cos(rotations - a)
v2y = y1 + ly * np.sin(rotations - a)
# create a path for each arrow head
paths = []
for t in zip(v1x, v1y, x1, y1, v2x, v2y):
verts[slc] = [t[:2], t[2:4], t[4:]]
paths.append(Path(verts, codes))
return paths | Compute paths that create the arrow heads
Parameters
----------
x1, y1, x2, y2 : array_like
List of points that define the tails of the arrows.
The arrow heads will be at x1, y1. If you need them
at x2, y2 reverse the input.
Returns
-------
out : list of Path
Paths that create arrow heads | Below is the the instruction that describes the task:
### Input:
Compute paths that create the arrow heads
Parameters
----------
x1, y1, x2, y2 : array_like
List of points that define the tails of the arrows.
The arrow heads will be at x1, y1. If you need them
at x2, y2 reverse the input.
Returns
-------
out : list of Path
Paths that create arrow heads
### Response:
def get_paths(self, x1, y1, x2, y2, panel_params, coord, ax):
"""
Compute paths that create the arrow heads
Parameters
----------
x1, y1, x2, y2 : array_like
List of points that define the tails of the arrows.
The arrow heads will be at x1, y1. If you need them
at x2, y2 reverse the input.
Returns
-------
out : list of Path
Paths that create arrow heads
"""
Path = mpath.Path
# Create reusable lists of vertices and codes
# arrowhead path has 3 vertices (Nones),
# plus dummy vertex for the STOP code
verts = [None, None, None,
(0, 0)]
# codes list remains the same after initialization
codes = [Path.MOVETO, Path.LINETO, Path.LINETO,
Path.STOP]
# Slices into the vertices list
slc = slice(0, 3)
# We need the plot dimensions so that we can
# compute scaling factors
fig = ax.get_figure()
width, height = fig.get_size_inches()
ranges = coord.range(panel_params)
width_ = np.ptp(ranges.x)
height_ = np.ptp(ranges.y)
# scaling factors to prevent skewed arrowheads
lx = self.length * width_/width
ly = self.length * height_/height
# angle in radians
a = self.angle * np.pi / 180
# direction of arrow head
xdiff, ydiff = x2 - x1, y2 - y1
rotations = np.arctan2(ydiff/ly, xdiff/lx)
# Arrow head vertices
v1x = x1 + lx * np.cos(rotations + a)
v1y = y1 + ly * np.sin(rotations + a)
v2x = x1 + lx * np.cos(rotations - a)
v2y = y1 + ly * np.sin(rotations - a)
# create a path for each arrow head
paths = []
for t in zip(v1x, v1y, x1, y1, v2x, v2y):
verts[slc] = [t[:2], t[2:4], t[4:]]
paths.append(Path(verts, codes))
return paths |
def update_license_file(data_dir):
"""Update NLPIR license file if it is out-of-date or missing.
:param str data_dir: The NLPIR data directory that houses the license.
:returns bool: Whether or not an update occurred.
"""
license_file = os.path.join(data_dir, LICENSE_FILENAME)
temp_dir = tempfile.mkdtemp()
gh_license_filename = os.path.join(temp_dir, LICENSE_FILENAME)
try:
_, headers = urlretrieve(LICENSE_URL, gh_license_filename)
except IOError as e:
# Python 2 uses the unhelpful IOError for this. Re-raise as the more
# appropriate URLError.
raise URLError(e.strerror)
with open(gh_license_filename, 'rb') as f:
github_license = f.read()
try:
with open(license_file, 'rb') as f:
current_license = f.read()
except (IOError, OSError):
current_license = b''
github_digest = hashlib.sha256(github_license).hexdigest()
current_digest = hashlib.sha256(current_license).hexdigest()
if github_digest == current_digest:
return False
shutil.copyfile(gh_license_filename, license_file)
shutil.rmtree(temp_dir, ignore_errors=True)
return True | Update NLPIR license file if it is out-of-date or missing.
:param str data_dir: The NLPIR data directory that houses the license.
:returns bool: Whether or not an update occurred. | Below is the the instruction that describes the task:
### Input:
Update NLPIR license file if it is out-of-date or missing.
:param str data_dir: The NLPIR data directory that houses the license.
:returns bool: Whether or not an update occurred.
### Response:
def update_license_file(data_dir):
"""Update NLPIR license file if it is out-of-date or missing.
:param str data_dir: The NLPIR data directory that houses the license.
:returns bool: Whether or not an update occurred.
"""
license_file = os.path.join(data_dir, LICENSE_FILENAME)
temp_dir = tempfile.mkdtemp()
gh_license_filename = os.path.join(temp_dir, LICENSE_FILENAME)
try:
_, headers = urlretrieve(LICENSE_URL, gh_license_filename)
except IOError as e:
# Python 2 uses the unhelpful IOError for this. Re-raise as the more
# appropriate URLError.
raise URLError(e.strerror)
with open(gh_license_filename, 'rb') as f:
github_license = f.read()
try:
with open(license_file, 'rb') as f:
current_license = f.read()
except (IOError, OSError):
current_license = b''
github_digest = hashlib.sha256(github_license).hexdigest()
current_digest = hashlib.sha256(current_license).hexdigest()
if github_digest == current_digest:
return False
shutil.copyfile(gh_license_filename, license_file)
shutil.rmtree(temp_dir, ignore_errors=True)
return True |
def split_into_segments(data):
"""Slices JPEG meta data into a list from JPEG binary data.
"""
if data[0:2] != b"\xff\xd8":
raise InvalidImageDataError("Given data isn't JPEG.")
head = 2
segments = [b"\xff\xd8"]
while 1:
if data[head: head + 2] == b"\xff\xda":
segments.append(data[head:])
break
else:
length = struct.unpack(">H", data[head + 2: head + 4])[0]
endPoint = head + length + 2
seg = data[head: endPoint]
segments.append(seg)
head = endPoint
if (head >= len(data)):
raise InvalidImageDataError("Wrong JPEG data.")
return segments | Slices JPEG meta data into a list from JPEG binary data. | Below is the the instruction that describes the task:
### Input:
Slices JPEG meta data into a list from JPEG binary data.
### Response:
def split_into_segments(data):
"""Slices JPEG meta data into a list from JPEG binary data.
"""
if data[0:2] != b"\xff\xd8":
raise InvalidImageDataError("Given data isn't JPEG.")
head = 2
segments = [b"\xff\xd8"]
while 1:
if data[head: head + 2] == b"\xff\xda":
segments.append(data[head:])
break
else:
length = struct.unpack(">H", data[head + 2: head + 4])[0]
endPoint = head + length + 2
seg = data[head: endPoint]
segments.append(seg)
head = endPoint
if (head >= len(data)):
raise InvalidImageDataError("Wrong JPEG data.")
return segments |
def get_acl(request):
"""Returns the ACL for the given content identified by ``uuid``."""
uuid_ = request.matchdict['uuid']
with db_connect() as db_conn:
with db_conn.cursor() as cursor:
cursor.execute("""\
SELECT TRUE FROM document_controls WHERE uuid = %s""", (uuid_,))
try:
# Check that it exists
cursor.fetchone()[0]
except TypeError:
raise httpexceptions.HTTPNotFound()
cursor.execute("""\
SELECT row_to_json(combined_rows) FROM (
SELECT uuid, user_id AS uid, permission
FROM document_acl AS acl
WHERE uuid = %s
ORDER BY user_id ASC, permission ASC
) as combined_rows""", (uuid_,))
acl = [r[0] for r in cursor.fetchall()]
return acl | Returns the ACL for the given content identified by ``uuid``. | Below is the the instruction that describes the task:
### Input:
Returns the ACL for the given content identified by ``uuid``.
### Response:
def get_acl(request):
"""Returns the ACL for the given content identified by ``uuid``."""
uuid_ = request.matchdict['uuid']
with db_connect() as db_conn:
with db_conn.cursor() as cursor:
cursor.execute("""\
SELECT TRUE FROM document_controls WHERE uuid = %s""", (uuid_,))
try:
# Check that it exists
cursor.fetchone()[0]
except TypeError:
raise httpexceptions.HTTPNotFound()
cursor.execute("""\
SELECT row_to_json(combined_rows) FROM (
SELECT uuid, user_id AS uid, permission
FROM document_acl AS acl
WHERE uuid = %s
ORDER BY user_id ASC, permission ASC
) as combined_rows""", (uuid_,))
acl = [r[0] for r in cursor.fetchall()]
return acl |
def rest_put(self, url, params=None, headers=None, auth=None, verify=True, cert=None):
"""
Perform a PUT request to url with optional authentication
"""
res = requests.put(url, params=params, headers=headers, auth=auth, verify=verify,
cert=cert)
return res.text, res.status_code | Perform a PUT request to url with optional authentication | Below is the the instruction that describes the task:
### Input:
Perform a PUT request to url with optional authentication
### Response:
def rest_put(self, url, params=None, headers=None, auth=None, verify=True, cert=None):
"""
Perform a PUT request to url with optional authentication
"""
res = requests.put(url, params=params, headers=headers, auth=auth, verify=verify,
cert=cert)
return res.text, res.status_code |
def weather_at_places(self, pattern, searchtype, limit=None):
"""
Queries the OWM Weather API for the currently observed weather in all the
locations whose name is matching the specified text search parameters.
A twofold search can be issued: *'accurate'* (exact matching) and
*'like'* (matches names that are similar to the supplied pattern).
:param pattern: the string pattern (not a regex) to be searched for the
toponym
:type pattern: str
:param searchtype: the search mode to be used, must be *'accurate'* for
an exact matching or *'like'* for a likelihood matching
:type: searchtype: str
:param limit: the maximum number of *Observation* items in the returned
list (default is ``None``, which stands for any number of items)
:param limit: int or ``None``
:returns: a list of *Observation* objects or ``None`` if no weather
data is available
:raises: *ParseResponseException* when OWM Weather API responses' data
cannot be parsed, *APICallException* when OWM Weather API can not be
reached, *ValueError* when bad value is supplied for the search
type or the maximum number of items retrieved
"""
assert isinstance(pattern, str), "'pattern' must be a str"
assert isinstance(searchtype, str), "'searchtype' must be a str"
if searchtype != "accurate" and searchtype != "like":
raise ValueError("'searchtype' value must be 'accurate' or 'like'")
if limit is not None:
assert isinstance(limit, int), "'limit' must be an int or None"
if limit < 1:
raise ValueError("'limit' must be None or greater than zero")
params = {'q': pattern, 'type': searchtype, 'lang': self._language}
if limit is not None:
# fix for OWM 2.5 API bug!
params['cnt'] = limit - 1
uri = http_client.HttpClient.to_url(FIND_OBSERVATIONS_URL,
self._API_key,
self._subscription_type,
self._use_ssl)
_, json_data = self._wapi.cacheable_get_json(uri, params=params)
return self._parsers['observation_list'].parse_JSON(json_data) | Queries the OWM Weather API for the currently observed weather in all the
locations whose name is matching the specified text search parameters.
A twofold search can be issued: *'accurate'* (exact matching) and
*'like'* (matches names that are similar to the supplied pattern).
:param pattern: the string pattern (not a regex) to be searched for the
toponym
:type pattern: str
:param searchtype: the search mode to be used, must be *'accurate'* for
an exact matching or *'like'* for a likelihood matching
:type searchtype: str
:param limit: the maximum number of *Observation* items in the returned
list (default is ``None``, which stands for any number of items)
:param limit: int or ``None``
:returns: a list of *Observation* objects or ``None`` if no weather
data is available
:raises: *ParseResponseException* when OWM Weather API responses' data
cannot be parsed, *APICallException* when OWM Weather API can not be
reached, *ValueError* when bad value is supplied for the search
type or the maximum number of items retrieved | Below is the instruction that describes the task:
### Input:
Queries the OWM Weather API for the currently observed weather in all the
locations whose name is matching the specified text search parameters.
A twofold search can be issued: *'accurate'* (exact matching) and
*'like'* (matches names that are similar to the supplied pattern).
:param pattern: the string pattern (not a regex) to be searched for the
toponym
:type pattern: str
:param searchtype: the search mode to be used, must be *'accurate'* for
an exact matching or *'like'* for a likelihood matching
:type searchtype: str
:param limit: the maximum number of *Observation* items in the returned
list (default is ``None``, which stands for any number of items)
:param limit: int or ``None``
:returns: a list of *Observation* objects or ``None`` if no weather
data is available
:raises: *ParseResponseException* when OWM Weather API responses' data
cannot be parsed, *APICallException* when OWM Weather API can not be
reached, *ValueError* when bad value is supplied for the search
type or the maximum number of items retrieved
### Response:
def weather_at_places(self, pattern, searchtype, limit=None):
"""
Queries the OWM Weather API for the currently observed weather in all the
locations whose name is matching the specified text search parameters.
A twofold search can be issued: *'accurate'* (exact matching) and
*'like'* (matches names that are similar to the supplied pattern).
:param pattern: the string pattern (not a regex) to be searched for the
toponym
:type pattern: str
:param searchtype: the search mode to be used, must be *'accurate'* for
an exact matching or *'like'* for a likelihood matching
:type searchtype: str
:param limit: the maximum number of *Observation* items in the returned
list (default is ``None``, which stands for any number of items)
:param limit: int or ``None``
:returns: a list of *Observation* objects or ``None`` if no weather
data is available
:raises: *ParseResponseException* when OWM Weather API responses' data
cannot be parsed, *APICallException* when OWM Weather API can not be
reached, *ValueError* when bad value is supplied for the search
type or the maximum number of items retrieved
"""
assert isinstance(pattern, str), "'pattern' must be a str"
assert isinstance(searchtype, str), "'searchtype' must be a str"
if searchtype != "accurate" and searchtype != "like":
raise ValueError("'searchtype' value must be 'accurate' or 'like'")
if limit is not None:
assert isinstance(limit, int), "'limit' must be an int or None"
if limit < 1:
raise ValueError("'limit' must be None or greater than zero")
params = {'q': pattern, 'type': searchtype, 'lang': self._language}
if limit is not None:
# fix for OWM 2.5 API bug!
params['cnt'] = limit - 1
uri = http_client.HttpClient.to_url(FIND_OBSERVATIONS_URL,
self._API_key,
self._subscription_type,
self._use_ssl)
_, json_data = self._wapi.cacheable_get_json(uri, params=params)
return self._parsers['observation_list'].parse_JSON(json_data) |
def _observe_block(self, change):
""" A change handler for the 'objects' list of the Include.
If the object is initialized objects which are removed will be
unparented and objects which are added will be reparented. Old
objects will be destroyed if the 'destroy_old' flag is True.
"""
if self.is_initialized:
if change['type'] == 'update':
old_block = change['oldvalue']
old_block.parent.remove_children(old_block,self.children)
new_block = change['value']
new_block.parent.insert_children(new_block, self.children) | A change handler for the 'objects' list of the Include.
If the object is initialized objects which are removed will be
unparented and objects which are added will be reparented. Old
objects will be destroyed if the 'destroy_old' flag is True. | Below is the instruction that describes the task:
### Input:
A change handler for the 'objects' list of the Include.
If the object is initialized objects which are removed will be
unparented and objects which are added will be reparented. Old
objects will be destroyed if the 'destroy_old' flag is True.
### Response:
def _observe_block(self, change):
""" A change handler for the 'objects' list of the Include.
If the object is initialized objects which are removed will be
unparented and objects which are added will be reparented. Old
objects will be destroyed if the 'destroy_old' flag is True.
"""
if self.is_initialized:
if change['type'] == 'update':
old_block = change['oldvalue']
old_block.parent.remove_children(old_block,self.children)
new_block = change['value']
new_block.parent.insert_children(new_block, self.children) |
def update(self, status=values.unset, announce_url=values.unset,
announce_method=values.unset):
"""
Update the ConferenceInstance
:param ConferenceInstance.UpdateStatus status: The new status of the resource
:param unicode announce_url: The URL we should call to announce something into the conference
:param unicode announce_method: The HTTP method used to call announce_url
:returns: Updated ConferenceInstance
:rtype: twilio.rest.api.v2010.account.conference.ConferenceInstance
"""
data = values.of({'Status': status, 'AnnounceUrl': announce_url, 'AnnounceMethod': announce_method, })
payload = self._version.update(
'POST',
self._uri,
data=data,
)
return ConferenceInstance(
self._version,
payload,
account_sid=self._solution['account_sid'],
sid=self._solution['sid'],
) | Update the ConferenceInstance
:param ConferenceInstance.UpdateStatus status: The new status of the resource
:param unicode announce_url: The URL we should call to announce something into the conference
:param unicode announce_method: The HTTP method used to call announce_url
:returns: Updated ConferenceInstance
:rtype: twilio.rest.api.v2010.account.conference.ConferenceInstance | Below is the instruction that describes the task:
### Input:
Update the ConferenceInstance
:param ConferenceInstance.UpdateStatus status: The new status of the resource
:param unicode announce_url: The URL we should call to announce something into the conference
:param unicode announce_method: he HTTP method used to call announce_url
:returns: Updated ConferenceInstance
:rtype: twilio.rest.api.v2010.account.conference.ConferenceInstance
### Response:
def update(self, status=values.unset, announce_url=values.unset,
announce_method=values.unset):
"""
Update the ConferenceInstance
:param ConferenceInstance.UpdateStatus status: The new status of the resource
:param unicode announce_url: The URL we should call to announce something into the conference
:param unicode announce_method: he HTTP method used to call announce_url
:returns: Updated ConferenceInstance
:rtype: twilio.rest.api.v2010.account.conference.ConferenceInstance
"""
data = values.of({'Status': status, 'AnnounceUrl': announce_url, 'AnnounceMethod': announce_method, })
payload = self._version.update(
'POST',
self._uri,
data=data,
)
return ConferenceInstance(
self._version,
payload,
account_sid=self._solution['account_sid'],
sid=self._solution['sid'],
) |
def _memoize_function(f, name, cache_scope=_CS_FOREVER):
"""
Wraps a function for memoization and ties its cache into the
Orca caching system.
Parameters
----------
f : function
name : str
Name of injectable.
cache_scope : {'step', 'iteration', 'forever'}, optional
Scope for which to cache data. Default is to cache forever
(or until manually cleared). 'iteration' caches data for each
complete iteration of the pipeline, 'step' caches data for
a single step of the pipeline.
"""
cache = {}
@wraps(f)
def wrapper(*args, **kwargs):
try:
cache_key = (
args or None, frozenset(kwargs.items()) if kwargs else None)
in_cache = cache_key in cache
except TypeError:
raise TypeError(
'function arguments must be hashable for memoization')
if _CACHING and in_cache:
return cache[cache_key]
else:
result = f(*args, **kwargs)
cache[cache_key] = result
return result
wrapper.__wrapped__ = f
wrapper.cache = cache
wrapper.clear_cached = lambda: cache.clear()
_MEMOIZED[name] = CacheItem(name, wrapper, cache_scope)
return wrapper | Wraps a function for memoization and ties its cache into the
Orca caching system.
Parameters
----------
f : function
name : str
Name of injectable.
cache_scope : {'step', 'iteration', 'forever'}, optional
Scope for which to cache data. Default is to cache forever
(or until manually cleared). 'iteration' caches data for each
complete iteration of the pipeline, 'step' caches data for
a single step of the pipeline. | Below is the instruction that describes the task:
### Input:
Wraps a function for memoization and ties its cache into the
Orca caching system.
Parameters
----------
f : function
name : str
Name of injectable.
cache_scope : {'step', 'iteration', 'forever'}, optional
Scope for which to cache data. Default is to cache forever
(or until manually cleared). 'iteration' caches data for each
complete iteration of the pipeline, 'step' caches data for
a single step of the pipeline.
### Response:
def _memoize_function(f, name, cache_scope=_CS_FOREVER):
"""
Wraps a function for memoization and ties its cache into the
Orca caching system.
Parameters
----------
f : function
name : str
Name of injectable.
cache_scope : {'step', 'iteration', 'forever'}, optional
Scope for which to cache data. Default is to cache forever
(or until manually cleared). 'iteration' caches data for each
complete iteration of the pipeline, 'step' caches data for
a single step of the pipeline.
"""
cache = {}
@wraps(f)
def wrapper(*args, **kwargs):
try:
cache_key = (
args or None, frozenset(kwargs.items()) if kwargs else None)
in_cache = cache_key in cache
except TypeError:
raise TypeError(
'function arguments must be hashable for memoization')
if _CACHING and in_cache:
return cache[cache_key]
else:
result = f(*args, **kwargs)
cache[cache_key] = result
return result
wrapper.__wrapped__ = f
wrapper.cache = cache
wrapper.clear_cached = lambda: cache.clear()
_MEMOIZED[name] = CacheItem(name, wrapper, cache_scope)
return wrapper |
def true_false_returns(func):
"""Executes function, if error returns False, else True
:param func: function to call
:return: True iff ok, else False
"""
@functools.wraps(func)
def _execute(*args, **kwargs):
"""Executes function, if error returns False, else True
:param args: args of function
:param kwargs: extra args of function
:param *args: args
:param **kwargs: extra args
:return: True iff ok, else False
"""
try:
func(*args, **kwargs)
return True
except:
return False
return _execute | Executes function, if error returns False, else True
:param func: function to call
:return: True iff ok, else False | Below is the instruction that describes the task:
### Input:
Executes function, if error returns False, else True
:param func: function to call
:return: True iff ok, else False
### Response:
def true_false_returns(func):
"""Executes function, if error returns False, else True
:param func: function to call
:return: True iff ok, else False
"""
@functools.wraps(func)
def _execute(*args, **kwargs):
"""Executes function, if error returns False, else True
:param args: args of function
:param kwargs: extra args of function
:param *args: args
:param **kwargs: extra args
:return: True iff ok, else False
"""
try:
func(*args, **kwargs)
return True
except:
return False
return _execute |
def add_custom_aggregation(self, agg, name=None):
"""
Takes in an es_dsl Aggregation object and adds it to the aggregation dict.
Can be used to add custom aggregations such as moving averages
:param agg: aggregation to be added to the es_dsl search object
:param name: name of the aggregation object (optional)
:returns: self, which allows the method to be chainable with the other methods
"""
agg_name = name if name else 'custom_agg'
self.aggregations[agg_name] = agg
return self | Takes in an es_dsl Aggregation object and adds it to the aggregation dict.
Can be used to add custom aggregations such as moving averages
:param agg: aggregation to be added to the es_dsl search object
:param name: name of the aggregation object (optional)
:returns: self, which allows the method to be chainable with the other methods | Below is the instruction that describes the task:
### Input:
Takes in an es_dsl Aggregation object and adds it to the aggregation dict.
Can be used to add custom aggregations such as moving averages
:param agg: aggregation to be added to the es_dsl search object
:param name: name of the aggregation object (optional)
:returns: self, which allows the method to be chainable with the other methods
### Response:
def add_custom_aggregation(self, agg, name=None):
"""
Takes in an es_dsl Aggregation object and adds it to the aggregation dict.
Can be used to add custom aggregations such as moving averages
:param agg: aggregation to be added to the es_dsl search object
:param name: name of the aggregation object (optional)
:returns: self, which allows the method to be chainable with the other methods
"""
agg_name = name if name else 'custom_agg'
self.aggregations[agg_name] = agg
return self |
def convert_halo_to_array_form(self, halo):
"""
Converts the :samp:`{halo}` argument to a :samp:`({self}.array_shape.size, 2)`
shaped array.
:type halo: :samp:`None`, :obj:`int`, :samp:`self.array_shape.size` length sequence
of :samp:`int` or :samp:`(self.array_shape.size, 2)` shaped array
of :samp:`int`
:param halo: Halo to be converted to :samp:`(len(self.array_shape), 2)` shaped array form.
:rtype: :obj:`numpy.ndarray`
:return: A :samp:`(len(self.array_shape), 2)` shaped array of :obj:`numpy.int64` elements.
"""
return convert_halo_to_array_form(halo=halo, ndim=len(self.array_shape)) | Converts the :samp:`{halo}` argument to a :samp:`({self}.array_shape.size, 2)`
shaped array.
:type halo: :samp:`None`, :obj:`int`, :samp:`self.array_shape.size` length sequence
of :samp:`int` or :samp:`(self.array_shape.size, 2)` shaped array
of :samp:`int`
:param halo: Halo to be converted to :samp:`(len(self.array_shape), 2)` shaped array form.
:rtype: :obj:`numpy.ndarray`
:return: A :samp:`(len(self.array_shape), 2)` shaped array of :obj:`numpy.int64` elements. | Below is the instruction that describes the task:
### Input:
Converts the :samp:`{halo}` argument to a :samp:`({self}.array_shape.size, 2)`
shaped array.
:type halo: :samp:`None`, :obj:`int`, :samp:`self.array_shape.size` length sequence
of :samp:`int` or :samp:`(self.array_shape.size, 2)` shaped array
of :samp:`int`
:param halo: Halo to be converted to :samp:`(len(self.array_shape), 2)` shaped array form.
:rtype: :obj:`numpy.ndarray`
:return: A :samp:`(len(self.array_shape), 2)` shaped array of :obj:`numpy.int64` elements.
### Response:
def convert_halo_to_array_form(self, halo):
"""
Converts the :samp:`{halo}` argument to a :samp:`({self}.array_shape.size, 2)`
shaped array.
:type halo: :samp:`None`, :obj:`int`, :samp:`self.array_shape.size` length sequence
of :samp:`int` or :samp:`(self.array_shape.size, 2)` shaped array
of :samp:`int`
:param halo: Halo to be converted to :samp:`(len(self.array_shape), 2)` shaped array form.
:rtype: :obj:`numpy.ndarray`
:return: A :samp:`(len(self.array_shape), 2)` shaped array of :obj:`numpy.int64` elements.
"""
return convert_halo_to_array_form(halo=halo, ndim=len(self.array_shape)) |
def collect_participants(bids_dir, participant_label=None, strict=False,
bids_validate=True):
"""
List the participants under the BIDS root and checks that participants
designated with the participant_label argument exist in that folder.
Returns the list of participants to be finally processed.
Requesting all subjects in a BIDS directory root:
>>> collect_participants(str(datadir / 'ds114'), bids_validate=False)
['01', '02', '03', '04', '05', '06', '07', '08', '09', '10']
Requesting two subjects, given their IDs:
>>> collect_participants(str(datadir / 'ds114'), participant_label=['02', '04'],
... bids_validate=False)
['02', '04']
Requesting two subjects, given their IDs (works with 'sub-' prefixes):
>>> collect_participants(str(datadir / 'ds114'), participant_label=['sub-02', 'sub-04'],
... bids_validate=False)
['02', '04']
Requesting two subjects, but one does not exist:
>>> collect_participants(str(datadir / 'ds114'), participant_label=['02', '14'],
... bids_validate=False)
['02']
>>> collect_participants(
... str(datadir / 'ds114'), participant_label=['02', '14'],
... strict=True, bids_validate=False) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
fmriprep.utils.bids.BIDSError:
...
"""
if isinstance(bids_dir, BIDSLayout):
layout = bids_dir
else:
layout = BIDSLayout(str(bids_dir), validate=bids_validate)
all_participants = set(layout.get_subjects())
# Error: bids_dir does not contain subjects
if not all_participants:
raise BIDSError(
'Could not find participants. Please make sure the BIDS data '
'structure is present and correct. Datasets can be validated online '
'using the BIDS Validator (http://bids-standard.github.io/bids-validator/).\n'
'If you are using Docker for Mac or Docker for Windows, you '
'may need to adjust your "File sharing" preferences.', bids_dir)
# No --participant-label was set, return all
if not participant_label:
return sorted(all_participants)
if isinstance(participant_label, str):
participant_label = [participant_label]
# Drop sub- prefixes
participant_label = [sub[4:] if sub.startswith('sub-') else sub for sub in participant_label]
# Remove duplicates
participant_label = sorted(set(participant_label))
# Remove labels not found
found_label = sorted(set(participant_label) & all_participants)
if not found_label:
raise BIDSError('Could not find participants [{}]'.format(
', '.join(participant_label)), bids_dir)
# Warn if some IDs were not found
notfound_label = sorted(set(participant_label) - all_participants)
if notfound_label:
exc = BIDSError('Some participants were not found: {}'.format(
', '.join(notfound_label)), bids_dir)
if strict:
raise exc
warnings.warn(exc.msg, BIDSWarning)
return found_label | List the participants under the BIDS root and checks that participants
designated with the participant_label argument exist in that folder.
Returns the list of participants to be finally processed.
Requesting all subjects in a BIDS directory root:
>>> collect_participants(str(datadir / 'ds114'), bids_validate=False)
['01', '02', '03', '04', '05', '06', '07', '08', '09', '10']
Requesting two subjects, given their IDs:
>>> collect_participants(str(datadir / 'ds114'), participant_label=['02', '04'],
... bids_validate=False)
['02', '04']
Requesting two subjects, given their IDs (works with 'sub-' prefixes):
>>> collect_participants(str(datadir / 'ds114'), participant_label=['sub-02', 'sub-04'],
... bids_validate=False)
['02', '04']
Requesting two subjects, but one does not exist:
>>> collect_participants(str(datadir / 'ds114'), participant_label=['02', '14'],
... bids_validate=False)
['02']
>>> collect_participants(
... str(datadir / 'ds114'), participant_label=['02', '14'],
... strict=True, bids_validate=False) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
fmriprep.utils.bids.BIDSError:
... | Below is the instruction that describes the task:
### Input:
List the participants under the BIDS root and checks that participants
designated with the participant_label argument exist in that folder.
Returns the list of participants to be finally processed.
Requesting all subjects in a BIDS directory root:
>>> collect_participants(str(datadir / 'ds114'), bids_validate=False)
['01', '02', '03', '04', '05', '06', '07', '08', '09', '10']
Requesting two subjects, given their IDs:
>>> collect_participants(str(datadir / 'ds114'), participant_label=['02', '04'],
... bids_validate=False)
['02', '04']
Requesting two subjects, given their IDs (works with 'sub-' prefixes):
>>> collect_participants(str(datadir / 'ds114'), participant_label=['sub-02', 'sub-04'],
... bids_validate=False)
['02', '04']
Requesting two subjects, but one does not exist:
>>> collect_participants(str(datadir / 'ds114'), participant_label=['02', '14'],
... bids_validate=False)
['02']
>>> collect_participants(
... str(datadir / 'ds114'), participant_label=['02', '14'],
... strict=True, bids_validate=False) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
fmriprep.utils.bids.BIDSError:
...
### Response:
def collect_participants(bids_dir, participant_label=None, strict=False,
bids_validate=True):
"""
List the participants under the BIDS root and checks that participants
designated with the participant_label argument exist in that folder.
Returns the list of participants to be finally processed.
Requesting all subjects in a BIDS directory root:
>>> collect_participants(str(datadir / 'ds114'), bids_validate=False)
['01', '02', '03', '04', '05', '06', '07', '08', '09', '10']
Requesting two subjects, given their IDs:
>>> collect_participants(str(datadir / 'ds114'), participant_label=['02', '04'],
... bids_validate=False)
['02', '04']
Requesting two subjects, given their IDs (works with 'sub-' prefixes):
>>> collect_participants(str(datadir / 'ds114'), participant_label=['sub-02', 'sub-04'],
... bids_validate=False)
['02', '04']
Requesting two subjects, but one does not exist:
>>> collect_participants(str(datadir / 'ds114'), participant_label=['02', '14'],
... bids_validate=False)
['02']
>>> collect_participants(
... str(datadir / 'ds114'), participant_label=['02', '14'],
... strict=True, bids_validate=False) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
fmriprep.utils.bids.BIDSError:
...
"""
if isinstance(bids_dir, BIDSLayout):
layout = bids_dir
else:
layout = BIDSLayout(str(bids_dir), validate=bids_validate)
all_participants = set(layout.get_subjects())
# Error: bids_dir does not contain subjects
if not all_participants:
raise BIDSError(
'Could not find participants. Please make sure the BIDS data '
'structure is present and correct. Datasets can be validated online '
'using the BIDS Validator (http://bids-standard.github.io/bids-validator/).\n'
'If you are using Docker for Mac or Docker for Windows, you '
'may need to adjust your "File sharing" preferences.', bids_dir)
# No --participant-label was set, return all
if not participant_label:
return sorted(all_participants)
if isinstance(participant_label, str):
participant_label = [participant_label]
# Drop sub- prefixes
participant_label = [sub[4:] if sub.startswith('sub-') else sub for sub in participant_label]
# Remove duplicates
participant_label = sorted(set(participant_label))
# Remove labels not found
found_label = sorted(set(participant_label) & all_participants)
if not found_label:
raise BIDSError('Could not find participants [{}]'.format(
', '.join(participant_label)), bids_dir)
# Warn if some IDs were not found
notfound_label = sorted(set(participant_label) - all_participants)
if notfound_label:
exc = BIDSError('Some participants were not found: {}'.format(
', '.join(notfound_label)), bids_dir)
if strict:
raise exc
warnings.warn(exc.msg, BIDSWarning)
return found_label |
def _findMatchingBracket(self, bracket, qpart, block, columnIndex):
"""Find matching bracket for the bracket.
Return (block, columnIndex) or (None, None)
Raise _TimeoutException, if time is over
"""
if bracket in self._START_BRACKETS:
charsGenerator = self._iterateDocumentCharsForward(block, columnIndex + 1)
else:
charsGenerator = self._iterateDocumentCharsBackward(block, columnIndex)
depth = 1
oposite = self._OPOSITE_BRACKET[bracket]
for block, columnIndex, char in charsGenerator:
if qpart.isCode(block, columnIndex):
if char == oposite:
depth -= 1
if depth == 0:
return block, columnIndex
elif char == bracket:
depth += 1
else:
return None, None | Find matching bracket for the bracket.
Return (block, columnIndex) or (None, None)
Raise _TimeoutException, if time is over | Below is the instruction that describes the task:
### Input:
Find matching bracket for the bracket.
Return (block, columnIndex) or (None, None)
Raise _TimeoutException, if time is over
### Response:
def _findMatchingBracket(self, bracket, qpart, block, columnIndex):
"""Find matching bracket for the bracket.
Return (block, columnIndex) or (None, None)
Raise _TimeoutException, if time is over
"""
if bracket in self._START_BRACKETS:
charsGenerator = self._iterateDocumentCharsForward(block, columnIndex + 1)
else:
charsGenerator = self._iterateDocumentCharsBackward(block, columnIndex)
depth = 1
oposite = self._OPOSITE_BRACKET[bracket]
for block, columnIndex, char in charsGenerator:
if qpart.isCode(block, columnIndex):
if char == oposite:
depth -= 1
if depth == 0:
return block, columnIndex
elif char == bracket:
depth += 1
else:
return None, None |
def merge(self, commit_message=github.GithubObject.NotSet, commit_title=github.GithubObject.NotSet, merge_method=github.GithubObject.NotSet, sha=github.GithubObject.NotSet):
"""
:calls: `PUT /repos/:owner/:repo/pulls/:number/merge <http://developer.github.com/v3/pulls>`_
:param commit_message: string
:rtype: :class:`github.PullRequestMergeStatus.PullRequestMergeStatus`
"""
assert commit_message is github.GithubObject.NotSet or isinstance(commit_message, (str, unicode)), commit_message
assert commit_title is github.GithubObject.NotSet or isinstance(commit_title, (str, unicode)), commit_title
assert merge_method is github.GithubObject.NotSet or isinstance(merge_method, (str, unicode)), merge_method
assert sha is github.GithubObject.NotSet or isinstance(sha, (str, unicode)), sha
post_parameters = dict()
if commit_message is not github.GithubObject.NotSet:
post_parameters["commit_message"] = commit_message
if commit_title is not github.GithubObject.NotSet:
post_parameters["commit_title"] = commit_title
if merge_method is not github.GithubObject.NotSet:
post_parameters["merge_method"] = merge_method
if sha is not github.GithubObject.NotSet:
post_parameters["sha"] = sha
headers, data = self._requester.requestJsonAndCheck(
"PUT",
self.url + "/merge",
input=post_parameters
)
return github.PullRequestMergeStatus.PullRequestMergeStatus(self._requester, headers, data, completed=True) | :calls: `PUT /repos/:owner/:repo/pulls/:number/merge <http://developer.github.com/v3/pulls>`_
:param commit_message: string
:rtype: :class:`github.PullRequestMergeStatus.PullRequestMergeStatus` | Below is the instruction that describes the task:
### Input:
:calls: `PUT /repos/:owner/:repo/pulls/:number/merge <http://developer.github.com/v3/pulls>`_
:param commit_message: string
:rtype: :class:`github.PullRequestMergeStatus.PullRequestMergeStatus`
### Response:
def merge(self, commit_message=github.GithubObject.NotSet, commit_title=github.GithubObject.NotSet, merge_method=github.GithubObject.NotSet, sha=github.GithubObject.NotSet):
"""
:calls: `PUT /repos/:owner/:repo/pulls/:number/merge <http://developer.github.com/v3/pulls>`_
:param commit_message: string
:rtype: :class:`github.PullRequestMergeStatus.PullRequestMergeStatus`
"""
assert commit_message is github.GithubObject.NotSet or isinstance(commit_message, (str, unicode)), commit_message
assert commit_title is github.GithubObject.NotSet or isinstance(commit_title, (str, unicode)), commit_title
assert merge_method is github.GithubObject.NotSet or isinstance(merge_method, (str, unicode)), merge_method
assert sha is github.GithubObject.NotSet or isinstance(sha, (str, unicode)), sha
post_parameters = dict()
if commit_message is not github.GithubObject.NotSet:
post_parameters["commit_message"] = commit_message
if commit_title is not github.GithubObject.NotSet:
post_parameters["commit_title"] = commit_title
if merge_method is not github.GithubObject.NotSet:
post_parameters["merge_method"] = merge_method
if sha is not github.GithubObject.NotSet:
post_parameters["sha"] = sha
headers, data = self._requester.requestJsonAndCheck(
"PUT",
self.url + "/merge",
input=post_parameters
)
return github.PullRequestMergeStatus.PullRequestMergeStatus(self._requester, headers, data, completed=True) |
def is_LaTeX(flist,env,abspath):
"""Scan a file list to decide if it's TeX- or LaTeX-flavored."""
# We need to scan files that are included in case the
# \documentclass command is in them.
# get path list from both env['TEXINPUTS'] and env['ENV']['TEXINPUTS']
savedpath = modify_env_var(env, 'TEXINPUTS', abspath)
paths = env['ENV']['TEXINPUTS']
if SCons.Util.is_List(paths):
pass
else:
# Split at os.pathsep to convert into absolute path
paths = paths.split(os.pathsep)
# now that we have the path list restore the env
if savedpath is _null:
try:
del env['ENV']['TEXINPUTS']
except KeyError:
pass # was never set
else:
env['ENV']['TEXINPUTS'] = savedpath
if Verbose:
print("is_LaTeX search path ",paths)
print("files to search :",flist)
# Now that we have the search path and file list, check each one
for f in flist:
if Verbose:
print(" checking for Latex source ",str(f))
content = f.get_text_contents()
if LaTeX_re.search(content):
if Verbose:
print("file %s is a LaTeX file" % str(f))
return 1
if Verbose:
print("file %s is not a LaTeX file" % str(f))
# now find included files
inc_files = [ ]
inc_files.extend( include_re.findall(content) )
if Verbose:
print("files included by '%s': "%str(f),inc_files)
# inc_files is list of file names as given. need to find them
# using TEXINPUTS paths.
# search the included files
for src in inc_files:
srcNode = FindFile(src,['.tex','.ltx','.latex'],paths,env,requireExt=False)
# make this a list since is_LaTeX takes a list.
fileList = [srcNode,]
if Verbose:
print("FindFile found ",srcNode)
if srcNode is not None:
file_test = is_LaTeX(fileList, env, abspath)
# return on first file that finds latex is needed.
if file_test:
return file_test
if Verbose:
print(" done scanning ",str(f))
return 0 | Scan a file list to decide if it's TeX- or LaTeX-flavored. | Below is the instruction that describes the task:
### Input:
Scan a file list to decide if it's TeX- or LaTeX-flavored.
### Response:
def is_LaTeX(flist,env,abspath):
    """Scan a file list to decide if it's TeX- or LaTeX-flavored."""
    # We need to scan files that are included in case the
    # \documentclass command is in them.
    # get path list from both env['TEXINPUTS'] and env['ENV']['TEXINPUTS']
    savedpath = modify_env_var(env, 'TEXINPUTS', abspath)
    paths = env['ENV']['TEXINPUTS']
    if SCons.Util.is_List(paths):
        pass
    else:
        # Split at os.pathsep to convert into absolute path
        paths = paths.split(os.pathsep)
    # now that we have the path list restore the env
    if savedpath is _null:
        # _null sentinel: TEXINPUTS did not exist before modify_env_var ran
        try:
            del env['ENV']['TEXINPUTS']
        except KeyError:
            pass # was never set
    else:
        env['ENV']['TEXINPUTS'] = savedpath
    if Verbose:
        print("is_LaTeX search path ",paths)
        print("files to search :",flist)
    # Now that we have the search path and file list, check each one
    for f in flist:
        if Verbose:
            print(" checking for Latex source ",str(f))
        content = f.get_text_contents()
        if LaTeX_re.search(content):
            if Verbose:
                print("file %s is a LaTeX file" % str(f))
            return 1
        if Verbose:
            print("file %s is not a LaTeX file" % str(f))
        # now find included files
        inc_files = [ ]
        inc_files.extend( include_re.findall(content) )
        if Verbose:
            print("files included by '%s': "%str(f),inc_files)
        # inc_files is list of file names as given. need to find them
        # using TEXINPUTS paths.
        # search the included files
        for src in inc_files:
            srcNode = FindFile(src,['.tex','.ltx','.latex'],paths,env,requireExt=False)
            # make this a list since is_LaTeX takes a list.
            fileList = [srcNode,]
            if Verbose:
                print("FindFile found ",srcNode)
            if srcNode is not None:
                # NOTE(review): recursion depth is unbounded; assumes no
                # circular \include chains -- confirm upstream guarantees this.
                file_test = is_LaTeX(fileList, env, abspath)
                # return on first file that finds latex is needed.
                if file_test:
                    return file_test
        if Verbose:
            print(" done scanning ",str(f))
    return 0
def parse_packets(self, binary_packets):
    """
    Parses binary packets and return a list of parsed packets.
    DOES NOT CLOSE tshark. It must be closed manually by calling close() when you're done
    working with it.
    """
    if not binary_packets:
        raise ValueError("Must supply at least one packet")
    parsed_packets = []
    if not self._current_tshark:
        # Lazily start the tshark subprocess on first use.
        self.eventloop.run_until_complete(self._get_tshark_process())
    for binary_packet in binary_packets:
        self._write_packet(binary_packet)
    def callback(pkt):
        # Accumulate parsed packets; stop the capture once every
        # input packet has been accounted for.
        parsed_packets.append(pkt)
        if len(parsed_packets) == len(binary_packets):
            raise StopCapture()
    self.eventloop.run_until_complete(self._get_parsed_packet_from_tshark(callback))
    return parsed_packets | Parses binary packets and return a list of parsed packets.
DOES NOT CLOSE tshark. It must be closed manually by calling close() when you're done
working with it. | Below is the instruction that describes the task:
### Input:
Parses binary packets and return a list of parsed packets.
DOES NOT CLOSE tshark. It must be closed manually by calling close() when you're done
working with it.
### Response:
def parse_packets(self, binary_packets):
    """
    Parses binary packets and return a list of parsed packets.
    DOES NOT CLOSE tshark. It must be closed manually by calling close() when you're done
    working with it.
    """
    if not binary_packets:
        raise ValueError("Must supply at least one packet")
    if not self._current_tshark:
        # Spawn the tshark subprocess lazily on first use.
        self.eventloop.run_until_complete(self._get_tshark_process())
    results = []
    expected = len(binary_packets)
    for raw_packet in binary_packets:
        self._write_packet(raw_packet)
    def _collect(parsed):
        # Gather each parsed packet; once all inputs are accounted
        # for, abort the capture loop.
        results.append(parsed)
        if len(results) == expected:
            raise StopCapture()
    self.eventloop.run_until_complete(self._get_parsed_packet_from_tshark(_collect))
    return results
def _malloc(self, sim_size):
    """
    Handler for any libc `malloc` SimProcedure call. If the heap has faithful support for `malloc`, it ought to be
    implemented in a `malloc` function (as opposed to the `_malloc` function).
    :param sim_size: the amount of memory (in bytes) to be allocated
    """
    # Abstract hook: concrete heap implementations must override this.
    raise NotImplementedError("%s not implemented for %s" % (self._malloc.__func__.__name__,
self.__class__.__name__)) | Handler for any libc `malloc` SimProcedure call. If the heap has faithful support for `malloc`, it ought to be
implemented in a `malloc` function (as opposed to the `_malloc` function).
:param sim_size: the amount of memory (in bytes) to be allocated | Below is the instruction that describes the task:
### Input:
Handler for any libc `malloc` SimProcedure call. If the heap has faithful support for `malloc`, it ought to be
implemented in a `malloc` function (as opposed to the `_malloc` function).
:param sim_size: the amount of memory (in bytes) to be allocated
### Response:
def _malloc(self, sim_size):
    """
    Handler for any libc `malloc` SimProcedure call. If the heap has faithful support for `malloc`, it ought to be
    implemented in a `malloc` function (as opposed to the `_malloc` function).
    :param sim_size: the amount of memory (in bytes) to be allocated
    """
    # Abstract hook: concrete heap implementations must override this.
    method_name = self._malloc.__func__.__name__
    class_name = self.__class__.__name__
    raise NotImplementedError("%s not implemented for %s" % (method_name, class_name))
def _protobuf_value_type(value):
    """Returns the type of the google.protobuf.Value message as an api.DataType.
    Returns None if the type of 'value' is not one of the types supported in
    api_pb2.DataType.
    Args:
      value: google.protobuf.Value message.
    """
    # Probe the populated oneof field; only float64, string and bool
    # have a corresponding api_pb2 data type.
    if value.HasField("number_value"):
        return api_pb2.DATA_TYPE_FLOAT64
    if value.HasField("string_value"):
        return api_pb2.DATA_TYPE_STRING
    if value.HasField("bool_value"):
        return api_pb2.DATA_TYPE_BOOL
    return None | Returns the type of the google.protobuf.Value message as an api.DataType.
Returns None if the type of 'value' is not one of the types supported in
api_pb2.DataType.
Args:
value: google.protobuf.Value message. | Below is the instruction that describes the task:
### Input:
Returns the type of the google.protobuf.Value message as an api.DataType.
Returns None if the type of 'value' is not one of the types supported in
api_pb2.DataType.
Args:
value: google.protobuf.Value message.
### Response:
def _protobuf_value_type(value):
    """Returns the type of the google.protobuf.Value message as an api.DataType.
    Returns None if the type of 'value' is not one of the types supported in
    api_pb2.DataType.
    Args:
      value: google.protobuf.Value message.
    """
    # Only float64, string and bool map onto api_pb2 data types.
    field_to_type = (
        ("number_value", api_pb2.DATA_TYPE_FLOAT64),
        ("string_value", api_pb2.DATA_TYPE_STRING),
        ("bool_value", api_pb2.DATA_TYPE_BOOL),
    )
    for field_name, data_type in field_to_type:
        if value.HasField(field_name):
            return data_type
    return None
def patch(f):
    '''Adds method f to the Dataset class'''
    # Register f in Dataset.__hidden__ under its own name and return it
    # unchanged, so it can be applied as a decorator.
    name = f.__name__
    Dataset.__hidden__[name] = f
    return f | Adds method f to the Dataset class | Below is the the instruction that describes the task:
### Input:
Adds method f to the Dataset class
### Response:
def patch(f):
    '''Adds method f to the Dataset class'''
    # Record f under its own name and hand it back unchanged, so this
    # works as a plain decorator.
    Dataset.__hidden__[f.__name__] = f
    return f
def load_models(context: mx.context.Context,
                max_input_len: Optional[int],
                beam_size: int,
                batch_size: int,
                model_folders: List[str],
                checkpoints: Optional[List[int]] = None,
                softmax_temperature: Optional[float] = None,
                max_output_length_num_stds: int = C.DEFAULT_NUM_STD_MAX_OUTPUT_LENGTH,
                decoder_return_logit_inputs: bool = False,
                cache_output_layer_w_b: bool = False,
                forced_max_output_len: Optional[int] = None,
                override_dtype: Optional[str] = None,
                output_scores: bool = False,
                sampling: bool = False) -> Tuple[List[InferenceModel],
                                                 List[vocab.Vocab],
                                                 vocab.Vocab]:
    """
    Loads a list of models for inference.
    :param context: MXNet context to bind modules to.
    :param max_input_len: Maximum input length.
    :param beam_size: Beam size.
    :param batch_size: Batch size.
    :param model_folders: List of model folders to load models from.
    :param checkpoints: List of checkpoints to use for each model in model_folders. Use None to load best checkpoint.
    :param softmax_temperature: Optional parameter to control steepness of softmax distribution.
    :param max_output_length_num_stds: Number of standard deviations to add to mean target-source length ratio
           to compute maximum output length.
    :param decoder_return_logit_inputs: Model decoders return inputs to logit computation instead of softmax over target
           vocabulary. Used when logits/softmax are handled separately.
    :param cache_output_layer_w_b: Models cache weights and biases for logit computation as NumPy arrays (used with
           restrict lexicon).
    :param forced_max_output_len: An optional overwrite of the maximum output length.
    :param override_dtype: Overrides dtype of encoder and decoder defined at training time to a different one.
    :param output_scores: Whether the scores will be needed as outputs. If True, scores will be normalized, negative
           log probabilities. If False, scores will be negative, raw logit activations if decoding with beam size 1
           and a single model.
    :param sampling: True if the model is sampling instead of doing normal topk().
    :return: List of models, source vocabulary, target vocabulary, source factor vocabularies.
    """
    logger.info("Loading %d model(s) from %s ...", len(model_folders), model_folders)
    load_time_start = time.time()
    models = []  # type: List[InferenceModel]
    source_vocabs = []  # type: List[List[vocab.Vocab]]
    target_vocabs = []  # type: List[vocab.Vocab]
    if checkpoints is None:
        # One None per model means "use the best checkpoint" for each folder.
        checkpoints = [None] * len(model_folders)
    else:
        utils.check_condition(len(checkpoints) == len(model_folders), "Must provide checkpoints for each model")
    skip_softmax = False
    # performance tweak: skip softmax for a single model, decoding with beam size 1, when not sampling and no scores are required in output.
    if len(model_folders) == 1 and beam_size == 1 and not output_scores and not sampling:
        skip_softmax = True
        logger.info("Enabled skipping softmax for a single model and greedy decoding.")
    # Load vocabularies, config and parameters for every model folder.
    for model_folder, checkpoint in zip(model_folders, checkpoints):
        model_source_vocabs = vocab.load_source_vocabs(model_folder)
        model_target_vocab = vocab.load_target_vocab(model_folder)
        source_vocabs.append(model_source_vocabs)
        target_vocabs.append(model_target_vocab)
        model_version = utils.load_version(os.path.join(model_folder, C.VERSION_NAME))
        logger.info("Model version: %s", model_version)
        utils.check_version(model_version)
        model_config = model.SockeyeModel.load_config(os.path.join(model_folder, C.CONFIG_NAME))
        logger.info("Disabling dropout layers for performance reasons")
        model_config.disable_dropout()
        if override_dtype is not None:
            model_config.config_encoder.dtype = override_dtype
            model_config.config_decoder.dtype = override_dtype
            if override_dtype == C.DTYPE_FP16:
                logger.warning('Experimental feature \'override_dtype=float16\' has been used. '
                               'This feature may be removed or change its behaviour in future. '
                               'DO NOT USE IT IN PRODUCTION!')
        if checkpoint is None:
            params_fname = os.path.join(model_folder, C.PARAMS_BEST_NAME)
        else:
            params_fname = os.path.join(model_folder, C.PARAMS_NAME % checkpoint)
        inference_model = InferenceModel(config=model_config,
                                         params_fname=params_fname,
                                         context=context,
                                         beam_size=beam_size,
                                         softmax_temperature=softmax_temperature,
                                         decoder_return_logit_inputs=decoder_return_logit_inputs,
                                         cache_output_layer_w_b=cache_output_layer_w_b,
                                         skip_softmax=skip_softmax)
        utils.check_condition(inference_model.num_source_factors == len(model_source_vocabs),
                              "Number of loaded source vocabularies (%d) does not match "
                              "number of source factors for model '%s' (%d)" % (len(model_source_vocabs), model_folder,
                                                                                inference_model.num_source_factors))
        models.append(inference_model)
    # Ensemble sanity checks: all models must share identical vocabularies
    # and agree on source-side EOS handling.
    utils.check_condition(vocab.are_identical(*target_vocabs), "Target vocabulary ids do not match")
    first_model_vocabs = source_vocabs[0]
    for fi in range(len(first_model_vocabs)):
        utils.check_condition(vocab.are_identical(*[source_vocabs[i][fi] for i in range(len(source_vocabs))]),
                              "Source vocabulary ids do not match. Factor %d" % fi)
    source_with_eos = models[0].source_with_eos
    utils.check_condition(all(source_with_eos == m.source_with_eos for m in models),
                          "All models must agree on using source-side EOS symbols or not. "
                          "Did you try combining models trained with different versions?")
    # set a common max_output length for all models.
    max_input_len, get_max_output_length = models_max_input_output_length(models,
                                                                          max_output_length_num_stds,
                                                                          max_input_len,
                                                                          forced_max_output_len=forced_max_output_len)
    for inference_model in models:
        inference_model.initialize(batch_size, max_input_len, get_max_output_length)
    load_time = time.time() - load_time_start
    logger.info("%d model(s) loaded in %.4fs", len(models), load_time)
    return models, source_vocabs[0], target_vocabs[0] | Loads a list of models for inference.
:param context: MXNet context to bind modules to.
:param max_input_len: Maximum input length.
:param beam_size: Beam size.
:param batch_size: Batch size.
:param model_folders: List of model folders to load models from.
:param checkpoints: List of checkpoints to use for each model in model_folders. Use None to load best checkpoint.
:param softmax_temperature: Optional parameter to control steepness of softmax distribution.
:param max_output_length_num_stds: Number of standard deviations to add to mean target-source length ratio
to compute maximum output length.
:param decoder_return_logit_inputs: Model decoders return inputs to logit computation instead of softmax over target
vocabulary. Used when logits/softmax are handled separately.
:param cache_output_layer_w_b: Models cache weights and biases for logit computation as NumPy arrays (used with
restrict lexicon).
:param forced_max_output_len: An optional overwrite of the maximum output length.
:param override_dtype: Overrides dtype of encoder and decoder defined at training time to a different one.
:param output_scores: Whether the scores will be needed as outputs. If True, scores will be normalized, negative
log probabilities. If False, scores will be negative, raw logit activations if decoding with beam size 1
and a single model.
:param sampling: True if the model is sampling instead of doing normal topk().
:return: List of models, source vocabulary, target vocabulary, source factor vocabularies. | Below is the instruction that describes the task:
### Input:
Loads a list of models for inference.
:param context: MXNet context to bind modules to.
:param max_input_len: Maximum input length.
:param beam_size: Beam size.
:param batch_size: Batch size.
:param model_folders: List of model folders to load models from.
:param checkpoints: List of checkpoints to use for each model in model_folders. Use None to load best checkpoint.
:param softmax_temperature: Optional parameter to control steepness of softmax distribution.
:param max_output_length_num_stds: Number of standard deviations to add to mean target-source length ratio
to compute maximum output length.
:param decoder_return_logit_inputs: Model decoders return inputs to logit computation instead of softmax over target
vocabulary. Used when logits/softmax are handled separately.
:param cache_output_layer_w_b: Models cache weights and biases for logit computation as NumPy arrays (used with
restrict lexicon).
:param forced_max_output_len: An optional overwrite of the maximum output length.
:param override_dtype: Overrides dtype of encoder and decoder defined at training time to a different one.
:param output_scores: Whether the scores will be needed as outputs. If True, scores will be normalized, negative
log probabilities. If False, scores will be negative, raw logit activations if decoding with beam size 1
and a single model.
:param sampling: True if the model is sampling instead of doing normal topk().
:return: List of models, source vocabulary, target vocabulary, source factor vocabularies.
### Response:
def load_models(context: mx.context.Context,
                max_input_len: Optional[int],
                beam_size: int,
                batch_size: int,
                model_folders: List[str],
                checkpoints: Optional[List[int]] = None,
                softmax_temperature: Optional[float] = None,
                max_output_length_num_stds: int = C.DEFAULT_NUM_STD_MAX_OUTPUT_LENGTH,
                decoder_return_logit_inputs: bool = False,
                cache_output_layer_w_b: bool = False,
                forced_max_output_len: Optional[int] = None,
                override_dtype: Optional[str] = None,
                output_scores: bool = False,
                sampling: bool = False) -> Tuple[List[InferenceModel],
                                                 List[vocab.Vocab],
                                                 vocab.Vocab]:
    """
    Loads a list of models for inference.
    :param context: MXNet context to bind modules to.
    :param max_input_len: Maximum input length.
    :param beam_size: Beam size.
    :param batch_size: Batch size.
    :param model_folders: List of model folders to load models from.
    :param checkpoints: List of checkpoints to use for each model in model_folders. Use None to load best checkpoint.
    :param softmax_temperature: Optional parameter to control steepness of softmax distribution.
    :param max_output_length_num_stds: Number of standard deviations to add to mean target-source length ratio
           to compute maximum output length.
    :param decoder_return_logit_inputs: Model decoders return inputs to logit computation instead of softmax over target
           vocabulary. Used when logits/softmax are handled separately.
    :param cache_output_layer_w_b: Models cache weights and biases for logit computation as NumPy arrays (used with
           restrict lexicon).
    :param forced_max_output_len: An optional overwrite of the maximum output length.
    :param override_dtype: Overrides dtype of encoder and decoder defined at training time to a different one.
    :param output_scores: Whether the scores will be needed as outputs. If True, scores will be normalized, negative
           log probabilities. If False, scores will be negative, raw logit activations if decoding with beam size 1
           and a single model.
    :param sampling: True if the model is sampling instead of doing normal topk().
    :return: List of models, source vocabulary, target vocabulary, source factor vocabularies.
    """
    logger.info("Loading %d model(s) from %s ...", len(model_folders), model_folders)
    load_time_start = time.time()
    models = []  # type: List[InferenceModel]
    source_vocabs = []  # type: List[List[vocab.Vocab]]
    target_vocabs = []  # type: List[vocab.Vocab]
    if checkpoints is None:
        # One None per model means "use the best checkpoint" for each folder.
        checkpoints = [None] * len(model_folders)
    else:
        utils.check_condition(len(checkpoints) == len(model_folders), "Must provide checkpoints for each model")
    skip_softmax = False
    # performance tweak: skip softmax for a single model, decoding with beam size 1, when not sampling and no scores are required in output.
    if len(model_folders) == 1 and beam_size == 1 and not output_scores and not sampling:
        skip_softmax = True
        logger.info("Enabled skipping softmax for a single model and greedy decoding.")
    # Load vocabularies, config and parameters for every model folder.
    for model_folder, checkpoint in zip(model_folders, checkpoints):
        model_source_vocabs = vocab.load_source_vocabs(model_folder)
        model_target_vocab = vocab.load_target_vocab(model_folder)
        source_vocabs.append(model_source_vocabs)
        target_vocabs.append(model_target_vocab)
        model_version = utils.load_version(os.path.join(model_folder, C.VERSION_NAME))
        logger.info("Model version: %s", model_version)
        utils.check_version(model_version)
        model_config = model.SockeyeModel.load_config(os.path.join(model_folder, C.CONFIG_NAME))
        logger.info("Disabling dropout layers for performance reasons")
        model_config.disable_dropout()
        if override_dtype is not None:
            model_config.config_encoder.dtype = override_dtype
            model_config.config_decoder.dtype = override_dtype
            if override_dtype == C.DTYPE_FP16:
                logger.warning('Experimental feature \'override_dtype=float16\' has been used. '
                               'This feature may be removed or change its behaviour in future. '
                               'DO NOT USE IT IN PRODUCTION!')
        if checkpoint is None:
            params_fname = os.path.join(model_folder, C.PARAMS_BEST_NAME)
        else:
            params_fname = os.path.join(model_folder, C.PARAMS_NAME % checkpoint)
        inference_model = InferenceModel(config=model_config,
                                         params_fname=params_fname,
                                         context=context,
                                         beam_size=beam_size,
                                         softmax_temperature=softmax_temperature,
                                         decoder_return_logit_inputs=decoder_return_logit_inputs,
                                         cache_output_layer_w_b=cache_output_layer_w_b,
                                         skip_softmax=skip_softmax)
        utils.check_condition(inference_model.num_source_factors == len(model_source_vocabs),
                              "Number of loaded source vocabularies (%d) does not match "
                              "number of source factors for model '%s' (%d)" % (len(model_source_vocabs), model_folder,
                                                                                inference_model.num_source_factors))
        models.append(inference_model)
    # Ensemble sanity checks: all models must share identical vocabularies
    # and agree on source-side EOS handling.
    utils.check_condition(vocab.are_identical(*target_vocabs), "Target vocabulary ids do not match")
    first_model_vocabs = source_vocabs[0]
    for fi in range(len(first_model_vocabs)):
        utils.check_condition(vocab.are_identical(*[source_vocabs[i][fi] for i in range(len(source_vocabs))]),
                              "Source vocabulary ids do not match. Factor %d" % fi)
    source_with_eos = models[0].source_with_eos
    utils.check_condition(all(source_with_eos == m.source_with_eos for m in models),
                          "All models must agree on using source-side EOS symbols or not. "
                          "Did you try combining models trained with different versions?")
    # set a common max_output length for all models.
    max_input_len, get_max_output_length = models_max_input_output_length(models,
                                                                          max_output_length_num_stds,
                                                                          max_input_len,
                                                                          forced_max_output_len=forced_max_output_len)
    for inference_model in models:
        inference_model.initialize(batch_size, max_input_len, get_max_output_length)
    load_time = time.time() - load_time_start
    logger.info("%d model(s) loaded in %.4fs", len(models), load_time)
    return models, source_vocabs[0], target_vocabs[0]
def calc_gamma_from_energy_autocorrelation_fit(self, GammaGuess=None, silent=False, MakeFig=True, show_fig=True):
    """
    Calculates the total damping, i.e. Gamma, by calculating the energy each
    point in time. This energy array is then used for the autocorrelation.
    The autocorrelation is fitted with an exponential relaxation function and
    the function returns the parameters with errors.
    Parameters
    ----------
    GammaGuess : float, optional
        Initial guess for BigGamma (in radians)
    silent : bool, optional
        Whether it prints the values fitted or is silent.
    MakeFig : bool, optional
        Whether to construct and return the figure object showing
        the fitting. defaults to True
    show_fig : bool, optional
        Whether to show the figure object when it has been created.
        defaults to True
    Returns
    -------
    Gamma : ufloat
        Big Gamma, the total damping in radians
    fig : matplotlib.figure.Figure object
        The figure object created showing the autocorrelation
        of the data with the fit
    ax : matplotlib.axes.Axes object
        The axes object created showing the autocorrelation
        of the data with the fit
    """
    # Energy proxy: omega^2 * x^2 plus the squared finite-difference
    # velocity of the signal (the [:-1] matches the diff'd length).
    autocorrelation = calc_autocorrelation(self.voltage[:-1]**2*self.OmegaTrap.n**2+(_np.diff(self.voltage)*self.SampleFreq)**2)
    time = self.time.get_array()[:len(autocorrelation)]
    if GammaGuess==None:
        # Crude initial estimate from the slope over the first few lags.
        Gamma_Initial = (time[4]-time[0])/(autocorrelation[0]-autocorrelation[4])
    else:
        Gamma_Initial = GammaGuess
    if MakeFig == True:
        Params, ParamsErr, fig, ax = fit_autocorrelation(
            autocorrelation, time, Gamma_Initial, MakeFig=MakeFig, show_fig=show_fig)
    else:
        Params, ParamsErr, _ , _ = fit_autocorrelation(
            autocorrelation, time, Gamma_Initial, MakeFig=MakeFig, show_fig=show_fig)
    if silent == False:
        print("\n")
        print(
            "Big Gamma: {} +- {}% ".format(Params[0], ParamsErr[0] / Params[0] * 100))
    Gamma = _uncertainties.ufloat(Params[0], ParamsErr[0])
    if MakeFig == True:
        return Gamma, fig, ax
    else:
        return Gamma, None, None | Calculates the total damping, i.e. Gamma, by calculating the energy each
point in time. This energy array is then used for the autocorrleation.
The autocorrelation is fitted with an exponential relaxation function and
the function returns the parameters with errors.
Parameters
----------
GammaGuess : float, optional
Inital guess for BigGamma (in radians)
silent : bool, optional
Whether it prints the values fitted or is silent.
MakeFig : bool, optional
Whether to construct and return the figure object showing
the fitting. defaults to True
show_fig : bool, optional
Whether to show the figure object when it has been created.
defaults to True
Returns
-------
Gamma : ufloat
Big Gamma, the total damping in radians
fig : matplotlib.figure.Figure object
The figure object created showing the autocorrelation
of the data with the fit
ax : matplotlib.axes.Axes object
The axes object created showing the autocorrelation
of the data with the fit | Below is the instruction that describes the task:
### Input:
Calculates the total damping, i.e. Gamma, by calculating the energy each
point in time. This energy array is then used for the autocorrleation.
The autocorrelation is fitted with an exponential relaxation function and
the function returns the parameters with errors.
Parameters
----------
GammaGuess : float, optional
Inital guess for BigGamma (in radians)
silent : bool, optional
Whether it prints the values fitted or is silent.
MakeFig : bool, optional
Whether to construct and return the figure object showing
the fitting. defaults to True
show_fig : bool, optional
Whether to show the figure object when it has been created.
defaults to True
Returns
-------
Gamma : ufloat
Big Gamma, the total damping in radians
fig : matplotlib.figure.Figure object
The figure object created showing the autocorrelation
of the data with the fit
ax : matplotlib.axes.Axes object
The axes object created showing the autocorrelation
of the data with the fit
### Response:
def calc_gamma_from_energy_autocorrelation_fit(self, GammaGuess=None, silent=False, MakeFig=True, show_fig=True):
    """
    Calculates the total damping, i.e. Gamma, by calculating the energy each
    point in time. This energy array is then used for the autocorrelation.
    The autocorrelation is fitted with an exponential relaxation function and
    the function returns the parameters with errors.
    Parameters
    ----------
    GammaGuess : float, optional
        Initial guess for BigGamma (in radians)
    silent : bool, optional
        Whether it prints the values fitted or is silent.
    MakeFig : bool, optional
        Whether to construct and return the figure object showing
        the fitting. defaults to True
    show_fig : bool, optional
        Whether to show the figure object when it has been created.
        defaults to True
    Returns
    -------
    Gamma : ufloat
        Big Gamma, the total damping in radians
    fig : matplotlib.figure.Figure object
        The figure object created showing the autocorrelation
        of the data with the fit
    ax : matplotlib.axes.Axes object
        The axes object created showing the autocorrelation
        of the data with the fit
    """
    # Energy proxy: omega^2 * x^2 plus the squared finite-difference
    # velocity of the signal (the [:-1] matches the diff'd length).
    autocorrelation = calc_autocorrelation(self.voltage[:-1]**2*self.OmegaTrap.n**2+(_np.diff(self.voltage)*self.SampleFreq)**2)
    time = self.time.get_array()[:len(autocorrelation)]
    if GammaGuess==None:
        # Crude initial estimate from the slope over the first few lags.
        Gamma_Initial = (time[4]-time[0])/(autocorrelation[0]-autocorrelation[4])
    else:
        Gamma_Initial = GammaGuess
    if MakeFig == True:
        Params, ParamsErr, fig, ax = fit_autocorrelation(
            autocorrelation, time, Gamma_Initial, MakeFig=MakeFig, show_fig=show_fig)
    else:
        Params, ParamsErr, _ , _ = fit_autocorrelation(
            autocorrelation, time, Gamma_Initial, MakeFig=MakeFig, show_fig=show_fig)
    if silent == False:
        print("\n")
        print(
            "Big Gamma: {} +- {}% ".format(Params[0], ParamsErr[0] / Params[0] * 100))
    Gamma = _uncertainties.ufloat(Params[0], ParamsErr[0])
    if MakeFig == True:
        return Gamma, fig, ax
    else:
        return Gamma, None, None
def _check_worktree_support(failhard=True):
    '''
    Ensure that we don't try to operate on worktrees in git < 2.5.0.
    '''
    git_version = version(versioninfo=False)
    # failhard=True raises on unsupported git; otherwise just report False.
    if _LooseVersion(git_version) < _LooseVersion('2.5.0'):
        if failhard:
            raise CommandExecutionError(
                'Worktrees are only supported in git 2.5.0 and newer '
                '(detected git version: ' + git_version + ')'
            )
        return False
    return True | Ensure that we don't try to operate on worktrees in git < 2.5.0. | Below is the the instruction that describes the task:
### Input:
Ensure that we don't try to operate on worktrees in git < 2.5.0.
### Response:
def _check_worktree_support(failhard=True):
    '''
    Ensure that we don't try to operate on worktrees in git < 2.5.0.
    '''
    git_version = version(versioninfo=False)
    # Guard clause: modern-enough git means worktrees are supported.
    if _LooseVersion(git_version) >= _LooseVersion('2.5.0'):
        return True
    # failhard=True raises on unsupported git; otherwise just report False.
    if failhard:
        raise CommandExecutionError(
            'Worktrees are only supported in git 2.5.0 and newer '
            '(detected git version: ' + git_version + ')'
        )
    return False
def on_trial_complete(self,
                      trial_id,
                      result=None,
                      error=False,
                      early_terminated=False):
    """Passes the result to SigOpt unless early terminated or errored.
    If a trial fails, it will be reported as a failed Observation, telling
    the optimizer that the Suggestion led to a metric failure, which
    updates the feasible region and improves parameter recommendation.
    Creates SigOpt Observation object for trial.
    """
    if result:
        self.conn.experiments(self.experiment.id).observations().create(
            suggestion=self._live_trial_mapping[trial_id].id,
            value=result[self._reward_attr],
        )
        # Update the experiment object
        self.experiment = self.conn.experiments(self.experiment.id).fetch()
    elif error or early_terminated:
        # Reports a failed Observation
        self.conn.experiments(self.experiment.id).observations().create(
            failed=True, suggestion=self._live_trial_mapping[trial_id].id)
    # The trial is finished either way; drop its suggestion mapping.
    del self._live_trial_mapping[trial_id] | Passes the result to SigOpt unless early terminated or errored.
If a trial fails, it will be reported as a failed Observation, telling
the optimizer that the Suggestion led to a metric failure, which
updates the feasible region and improves parameter recommendation.
Creates SigOpt Observation object for trial. | Below is the instruction that describes the task:
### Input:
Passes the result to SigOpt unless early terminated or errored.
If a trial fails, it will be reported as a failed Observation, telling
the optimizer that the Suggestion led to a metric failure, which
updates the feasible region and improves parameter recommendation.
Creates SigOpt Observation object for trial.
### Response:
def on_trial_complete(self,
                      trial_id,
                      result=None,
                      error=False,
                      early_terminated=False):
    """Passes the result to SigOpt unless early terminated or errored.
    If a trial fails, it will be reported as a failed Observation, telling
    the optimizer that the Suggestion led to a metric failure, which
    updates the feasible region and improves parameter recommendation.
    Creates SigOpt Observation object for trial.
    """
    if result:
        self.conn.experiments(self.experiment.id).observations().create(
            suggestion=self._live_trial_mapping[trial_id].id,
            value=result[self._reward_attr],
        )
        # Update the experiment object
        self.experiment = self.conn.experiments(self.experiment.id).fetch()
    elif error or early_terminated:
        # Reports a failed Observation
        self.conn.experiments(self.experiment.id).observations().create(
            failed=True, suggestion=self._live_trial_mapping[trial_id].id)
    # The trial is finished either way; drop its suggestion mapping.
    del self._live_trial_mapping[trial_id]
def sixlowpan_fragment(packet, datagram_tag=1):
    """Split a packet into different links to transmit as 6lowpan packets.
    Usage example:
    >>> ipv6 = ..... (very big packet)
    >>> pkts = sixlowpan_fragment(ipv6, datagram_tag=0x17)
    >>> send = [Dot15d4()/Dot15d4Data()/x for x in pkts]
    >>> wireshark(send)
    """
    if not packet.haslayer(IPv6):
        raise Exception("SixLoWPAN only fragments IPv6 packets !")
    str_packet = raw(packet[IPv6])
    # Packets that already fit in one link-layer frame need no fragmenting.
    if len(str_packet) <= MAX_SIZE:
        return [packet]
    def chunks(l, n):
        return [l[i:i + n] for i in range(0, len(l), n)]
    new_packet = chunks(str_packet, MAX_SIZE)
    new_packet[0] = LoWPANFragmentationFirst(datagramTag=datagram_tag, datagramSize=len(str_packet)) / new_packet[0] # noqa: E501
    i = 1
    # Subsequent fragments carry their offset in 8-byte units.
    while i < len(new_packet):
        new_packet[i] = LoWPANFragmentationSubsequent(datagramTag=datagram_tag, datagramSize=len(str_packet), datagramOffset=MAX_SIZE // 8 * i) / new_packet[i] # noqa: E501
        i += 1
    return new_packet | Split a packet into different links to transmit as 6lowpan packets.
Usage example:
>>> ipv6 = ..... (very big packet)
>>> pkts = sixlowpan_fragment(ipv6, datagram_tag=0x17)
>>> send = [Dot15d4()/Dot15d4Data()/x for x in pkts]
>>> wireshark(send) | Below is the instruction that describes the task:
### Input:
Split a packet into different links to transmit as 6lowpan packets.
Usage example:
>>> ipv6 = ..... (very big packet)
>>> pkts = sixlowpan_fragment(ipv6, datagram_tag=0x17)
>>> send = [Dot15d4()/Dot15d4Data()/x for x in pkts]
>>> wireshark(send)
### Response:
def sixlowpan_fragment(packet, datagram_tag=1):
"""Split a packet into different links to transmit as 6lowpan packets.
Usage example:
>>> ipv6 = ..... (very big packet)
>>> pkts = sixlowpan_fragment(ipv6, datagram_tag=0x17)
>>> send = [Dot15d4()/Dot15d4Data()/x for x in pkts]
>>> wireshark(send)
"""
if not packet.haslayer(IPv6):
raise Exception("SixLoWPAN only fragments IPv6 packets !")
str_packet = raw(packet[IPv6])
if len(str_packet) <= MAX_SIZE:
return [packet]
def chunks(l, n):
return [l[i:i + n] for i in range(0, len(l), n)]
new_packet = chunks(str_packet, MAX_SIZE)
new_packet[0] = LoWPANFragmentationFirst(datagramTag=datagram_tag, datagramSize=len(str_packet)) / new_packet[0] # noqa: E501
i = 1
while i < len(new_packet):
new_packet[i] = LoWPANFragmentationSubsequent(datagramTag=datagram_tag, datagramSize=len(str_packet), datagramOffset=MAX_SIZE // 8 * i) / new_packet[i] # noqa: E501
i += 1
return new_packet |
def set_similarity_limit(self, value):
''' setter '''
if isinstance(value, float) is False:
raise TypeError("__similarity_limit must be float.")
self.__similarity_limit = value | setter | Below is the the instruction that describes the task:
### Input:
setter
### Response:
def set_similarity_limit(self, value):
''' setter '''
if isinstance(value, float) is False:
raise TypeError("__similarity_limit must be float.")
self.__similarity_limit = value |
def reset_all(self):
"""
Resets all parameters to None
"""
for item in self.inputs:
setattr(self, "_%s" % item, None)
self.stack = [] | Resets all parameters to None | Below is the the instruction that describes the task:
### Input:
Resets all parameters to None
### Response:
def reset_all(self):
"""
Resets all parameters to None
"""
for item in self.inputs:
setattr(self, "_%s" % item, None)
self.stack = [] |
def merge_pairs(data, two_files, merged_out, revcomp, merge):
"""
Merge PE reads. Takes in a list of unmerged files [r1, r2] and the
filehandle to write merged data to, and it returns the number of reads
that were merged (overlapping). If merge==0 then only concat pairs (nnnn),
no merging in vsearch.
Parameters
-----------
two_files (tuple):
A list or tuple of the [r1, r2] files to be merged.
merged_out (str):
A string file handle for the merged data to be written to.
revcomp (bool):
Whether or not to revcomp the R2s.
merge (bool):
Whether or not to perform vsearch merging. If not then reads are simply
concatenated with a 'nnnn' separator.
Returns
--------
If merge is on then the func will return the number of pairs
successfully merged, else it returns -1.
"""
LOGGER.debug("Entering merge_pairs()")
## Return the number of merged pairs
nmerged = -1
## Check input files from inside list-tuple [(r1, r2)]
for fhandle in two_files[0]:
if not os.path.exists(fhandle):
raise IPyradWarningExit("""
Attempting to merge a file that doesn't exist - {}""".format(fhandle))
## If it already exists, clean up the old merged file
if os.path.exists(merged_out):
os.remove(merged_out)
## if merge then catch nonmerged in a separate file
if merge:
nonmerged1 = tempfile.NamedTemporaryFile(mode='wb',
dir=data.dirs.edits,
suffix="_nonmerged_R1_.fastq").name
nonmerged2 = tempfile.NamedTemporaryFile(mode='wb',
dir=data.dirs.edits,
suffix="_nonmerged_R2_.fastq").name
## if not merging then the nonmerged reads will come from the normal edits
else:
nonmerged1 = two_files[0][0]
nonmerged2 = two_files[0][1]
## get the maxn and minlen values
try:
maxn = sum(data.paramsdict['max_low_qual_bases'])
except TypeError:
maxn = data.paramsdict['max_low_qual_bases']
minlen = str(max(32, data.paramsdict["filter_min_trim_len"]))
## we need to gunzip the files if they are zipped (at least for now)
if merge and two_files[0][0].endswith(".gz"):
LOGGER.info("gunzipping pairs")
tmp1 = os.path.splitext(two_files[0][0])[0]+".tmp1"
tmp2 = os.path.splitext(two_files[0][1])[0]+".tmp2"
out1 = open(tmp1, 'w')
out2 = open(tmp2, 'w')
gun1 = sps.Popen(["gunzip", "-c", two_files[0][0]],
stderr=sps.STDOUT, stdout=out1, close_fds=True)
gun2 = sps.Popen(["gunzip", "-c", two_files[0][1]],
stderr=sps.STDOUT, stdout=out2, close_fds=True)
_ = gun1.communicate()
_ = gun2.communicate()
out1.close()
out2.close()
else:
tmp1 = two_files[0][0]
tmp2 = two_files[0][1]
try:
## If we are actually mergeing and not just joining then do vsearch
if merge:
## create tmp files with high quality scores and with R2 oriented
cmd = [ipyrad.bins.vsearch,
"--fastq_mergepairs", tmp1,
"--reverse", tmp2,
"--fastqout", merged_out,
"--fastqout_notmerged_fwd", nonmerged1,
"--fastqout_notmerged_rev", nonmerged2,
"--fasta_width", "0",
"--fastq_minmergelen", minlen,
"--fastq_maxns", str(maxn),
"--fastq_minovlen", "20",
"--fastq_maxdiffs", "4",
"--label_suffix", "_m1",
"--fastq_qmax", "1000",
"--threads", "2",
"--fastq_allowmergestagger"]
LOGGER.debug("merge cmd: %s", " ".join(cmd))
proc = sps.Popen(cmd, stderr=sps.STDOUT, stdout=sps.PIPE)
try:
res = proc.communicate()[0]
except KeyboardInterrupt:
proc.kill()
if proc.returncode:
LOGGER.error("Error: %s %s", cmd, res)
## remove temp files
rmfiles = [os.path.splitext(two_files[0][0])[0]+".tmp1",
os.path.splitext(two_files[0][1])[0]+".tmp2",
nonmerged1, nonmerged2]
for rmfile in rmfiles:
if os.path.exists(rmfile):
os.remove(rmfile)
raise IPyradWarningExit("Error merge pairs:\n %s\n%s", cmd, res)
## record how many read pairs were merged
with open(merged_out, 'r') as tmpf:
#nmerged = len(tmpf.readlines()) // 4
nmerged = sum(1 for i in tmpf.readlines()) // 4
## Combine the unmerged pairs and append to the merge file
with open(merged_out, 'ab') as combout:
## read in paired end read files 4 lines at a time
if nonmerged1.endswith(".gz"):
fr1 = gzip.open(nonmerged1, 'rb')
else:
fr1 = open(nonmerged1, 'rb')
quart1 = itertools.izip(*[iter(fr1)]*4)
if nonmerged2.endswith(".gz"):
fr2 = gzip.open(nonmerged2, 'rb')
else:
fr2 = open(nonmerged2, 'rb')
quart2 = itertools.izip(*[iter(fr2)]*4)
quarts = itertools.izip(quart1, quart2)
## a list to store until writing
writing = []
counts = 0
## iterate until done
while 1:
try:
read1s, read2s = quarts.next()
except StopIteration:
break
if revcomp:
writing.append("".join([
read1s[0],
read1s[1].strip() + "nnnn" + \
comp(read2s[1].strip()[::-1]) + "\n",
read1s[2],
read1s[3].strip() + "nnnn" + \
read2s[3].strip()[::-1] + "\n",
]))
else:
writing.append("".join([
read1s[0],
read1s[1].strip() + "nnnn" + \
read2s[1],
read1s[2],
read1s[3].strip() + "nnnn" + \
read2s[3],
]))
counts += 1
if not counts % 10:
combout.write("".join(writing)) #+"\n")
writing = []
if writing:
combout.write("".join(writing))
## close handles
fr1.close()
fr2.close()
combout.close()
except Exception as inst:
LOGGER.error("Exception in merge_pairs - {}".format(inst))
raise
## No matter what happens please clean up the temp files.
finally:
## if merged then delete the nonmerge tmp files
if merge:
## remove temp files
rmfiles = [nonmerged1, nonmerged2,
os.path.splitext(two_files[0][0])[0]+".tmp1",
os.path.splitext(two_files[0][1])[0]+".tmp2"]
for rmfile in rmfiles:
if os.path.exists(rmfile):
os.remove(rmfile)
return nmerged | Merge PE reads. Takes in a list of unmerged files [r1, r2] and the
filehandle to write merged data to, and it returns the number of reads
that were merged (overlapping). If merge==0 then only concat pairs (nnnn),
no merging in vsearch.
Parameters
-----------
two_files (tuple):
A list or tuple of the [r1, r2] files to be merged.
merged_out (str):
A string file handle for the merged data to be written to.
revcomp (bool):
Whether or not to revcomp the R2s.
merge (bool):
Whether or not to perform vsearch merging. If not then reads are simply
concatenated with a 'nnnn' separator.
Returns
--------
If merge is on then the func will return the number of pairs
successfully merged, else it returns -1. | Below is the the instruction that describes the task:
### Input:
Merge PE reads. Takes in a list of unmerged files [r1, r2] and the
filehandle to write merged data to, and it returns the number of reads
that were merged (overlapping). If merge==0 then only concat pairs (nnnn),
no merging in vsearch.
Parameters
-----------
two_files (tuple):
A list or tuple of the [r1, r2] files to be merged.
merged_out (str):
A string file handle for the merged data to be written to.
revcomp (bool):
Whether or not to revcomp the R2s.
merge (bool):
Whether or not to perform vsearch merging. If not then reads are simply
concatenated with a 'nnnn' separator.
Returns
--------
If merge is on then the func will return the number of pairs
successfully merged, else it returns -1.
### Response:
def merge_pairs(data, two_files, merged_out, revcomp, merge):
"""
Merge PE reads. Takes in a list of unmerged files [r1, r2] and the
filehandle to write merged data to, and it returns the number of reads
that were merged (overlapping). If merge==0 then only concat pairs (nnnn),
no merging in vsearch.
Parameters
-----------
two_files (tuple):
A list or tuple of the [r1, r2] files to be merged.
merged_out (str):
A string file handle for the merged data to be written to.
revcomp (bool):
Whether or not to revcomp the R2s.
merge (bool):
Whether or not to perform vsearch merging. If not then reads are simply
concatenated with a 'nnnn' separator.
Returns
--------
If merge is on then the func will return the number of pairs
successfully merged, else it returns -1.
"""
LOGGER.debug("Entering merge_pairs()")
## Return the number of merged pairs
nmerged = -1
## Check input files from inside list-tuple [(r1, r2)]
for fhandle in two_files[0]:
if not os.path.exists(fhandle):
raise IPyradWarningExit("""
Attempting to merge a file that doesn't exist - {}""".format(fhandle))
## If it already exists, clean up the old merged file
if os.path.exists(merged_out):
os.remove(merged_out)
## if merge then catch nonmerged in a separate file
if merge:
nonmerged1 = tempfile.NamedTemporaryFile(mode='wb',
dir=data.dirs.edits,
suffix="_nonmerged_R1_.fastq").name
nonmerged2 = tempfile.NamedTemporaryFile(mode='wb',
dir=data.dirs.edits,
suffix="_nonmerged_R2_.fastq").name
## if not merging then the nonmerged reads will come from the normal edits
else:
nonmerged1 = two_files[0][0]
nonmerged2 = two_files[0][1]
## get the maxn and minlen values
try:
maxn = sum(data.paramsdict['max_low_qual_bases'])
except TypeError:
maxn = data.paramsdict['max_low_qual_bases']
minlen = str(max(32, data.paramsdict["filter_min_trim_len"]))
## we need to gunzip the files if they are zipped (at least for now)
if merge and two_files[0][0].endswith(".gz"):
LOGGER.info("gunzipping pairs")
tmp1 = os.path.splitext(two_files[0][0])[0]+".tmp1"
tmp2 = os.path.splitext(two_files[0][1])[0]+".tmp2"
out1 = open(tmp1, 'w')
out2 = open(tmp2, 'w')
gun1 = sps.Popen(["gunzip", "-c", two_files[0][0]],
stderr=sps.STDOUT, stdout=out1, close_fds=True)
gun2 = sps.Popen(["gunzip", "-c", two_files[0][1]],
stderr=sps.STDOUT, stdout=out2, close_fds=True)
_ = gun1.communicate()
_ = gun2.communicate()
out1.close()
out2.close()
else:
tmp1 = two_files[0][0]
tmp2 = two_files[0][1]
try:
## If we are actually mergeing and not just joining then do vsearch
if merge:
## create tmp files with high quality scores and with R2 oriented
cmd = [ipyrad.bins.vsearch,
"--fastq_mergepairs", tmp1,
"--reverse", tmp2,
"--fastqout", merged_out,
"--fastqout_notmerged_fwd", nonmerged1,
"--fastqout_notmerged_rev", nonmerged2,
"--fasta_width", "0",
"--fastq_minmergelen", minlen,
"--fastq_maxns", str(maxn),
"--fastq_minovlen", "20",
"--fastq_maxdiffs", "4",
"--label_suffix", "_m1",
"--fastq_qmax", "1000",
"--threads", "2",
"--fastq_allowmergestagger"]
LOGGER.debug("merge cmd: %s", " ".join(cmd))
proc = sps.Popen(cmd, stderr=sps.STDOUT, stdout=sps.PIPE)
try:
res = proc.communicate()[0]
except KeyboardInterrupt:
proc.kill()
if proc.returncode:
LOGGER.error("Error: %s %s", cmd, res)
## remove temp files
rmfiles = [os.path.splitext(two_files[0][0])[0]+".tmp1",
os.path.splitext(two_files[0][1])[0]+".tmp2",
nonmerged1, nonmerged2]
for rmfile in rmfiles:
if os.path.exists(rmfile):
os.remove(rmfile)
raise IPyradWarningExit("Error merge pairs:\n %s\n%s", cmd, res)
## record how many read pairs were merged
with open(merged_out, 'r') as tmpf:
#nmerged = len(tmpf.readlines()) // 4
nmerged = sum(1 for i in tmpf.readlines()) // 4
## Combine the unmerged pairs and append to the merge file
with open(merged_out, 'ab') as combout:
## read in paired end read files 4 lines at a time
if nonmerged1.endswith(".gz"):
fr1 = gzip.open(nonmerged1, 'rb')
else:
fr1 = open(nonmerged1, 'rb')
quart1 = itertools.izip(*[iter(fr1)]*4)
if nonmerged2.endswith(".gz"):
fr2 = gzip.open(nonmerged2, 'rb')
else:
fr2 = open(nonmerged2, 'rb')
quart2 = itertools.izip(*[iter(fr2)]*4)
quarts = itertools.izip(quart1, quart2)
## a list to store until writing
writing = []
counts = 0
## iterate until done
while 1:
try:
read1s, read2s = quarts.next()
except StopIteration:
break
if revcomp:
writing.append("".join([
read1s[0],
read1s[1].strip() + "nnnn" + \
comp(read2s[1].strip()[::-1]) + "\n",
read1s[2],
read1s[3].strip() + "nnnn" + \
read2s[3].strip()[::-1] + "\n",
]))
else:
writing.append("".join([
read1s[0],
read1s[1].strip() + "nnnn" + \
read2s[1],
read1s[2],
read1s[3].strip() + "nnnn" + \
read2s[3],
]))
counts += 1
if not counts % 10:
combout.write("".join(writing)) #+"\n")
writing = []
if writing:
combout.write("".join(writing))
## close handles
fr1.close()
fr2.close()
combout.close()
except Exception as inst:
LOGGER.error("Exception in merge_pairs - {}".format(inst))
raise
## No matter what happens please clean up the temp files.
finally:
## if merged then delete the nonmerge tmp files
if merge:
## remove temp files
rmfiles = [nonmerged1, nonmerged2,
os.path.splitext(two_files[0][0])[0]+".tmp1",
os.path.splitext(two_files[0][1])[0]+".tmp2"]
for rmfile in rmfiles:
if os.path.exists(rmfile):
os.remove(rmfile)
return nmerged |
def one_hot2indices(one_hots):
"""
Convert an iterable of one-hot encoded targets to a list of indices.
Parameters
----------
one_hot : list
Returns
-------
indices : list
Examples
--------
>>> one_hot2indices([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
[0, 1, 2]
>>> one_hot2indices([[1, 0], [1, 0], [0, 1]])
[0, 0, 1]
"""
indices = []
for one_hot in one_hots:
indices.append(argmax(one_hot))
return indices | Convert an iterable of one-hot encoded targets to a list of indices.
Parameters
----------
one_hot : list
Returns
-------
indices : list
Examples
--------
>>> one_hot2indices([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
[0, 1, 2]
>>> one_hot2indices([[1, 0], [1, 0], [0, 1]])
[0, 0, 1] | Below is the the instruction that describes the task:
### Input:
Convert an iterable of one-hot encoded targets to a list of indices.
Parameters
----------
one_hot : list
Returns
-------
indices : list
Examples
--------
>>> one_hot2indices([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
[0, 1, 2]
>>> one_hot2indices([[1, 0], [1, 0], [0, 1]])
[0, 0, 1]
### Response:
def one_hot2indices(one_hots):
"""
Convert an iterable of one-hot encoded targets to a list of indices.
Parameters
----------
one_hot : list
Returns
-------
indices : list
Examples
--------
>>> one_hot2indices([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
[0, 1, 2]
>>> one_hot2indices([[1, 0], [1, 0], [0, 1]])
[0, 0, 1]
"""
indices = []
for one_hot in one_hots:
indices.append(argmax(one_hot))
return indices |
async def close(self):
"""
Close this request, send all data. You can still run other operations in the handler.
"""
if not self._sendHeaders:
self._startResponse()
if self.inputstream is not None:
self.inputstream.close(self.connection.scheduler)
if self.outputstream is not None:
await self.flush(True)
if hasattr(self, 'session') and self.session:
self.session.unlock() | Close this request, send all data. You can still run other operations in the handler. | Below is the the instruction that describes the task:
### Input:
Close this request, send all data. You can still run other operations in the handler.
### Response:
async def close(self):
"""
Close this request, send all data. You can still run other operations in the handler.
"""
if not self._sendHeaders:
self._startResponse()
if self.inputstream is not None:
self.inputstream.close(self.connection.scheduler)
if self.outputstream is not None:
await self.flush(True)
if hasattr(self, 'session') and self.session:
self.session.unlock() |
def load_config(data, *models, **kwargs):
'''
Generate and load the config on the device using the OpenConfig or IETF
models and device profiles.
data
Dictionary structured with respect to the models referenced.
models
A list of models to be used when generating the config.
profiles: ``None``
Use certain profiles to generate the config.
If not specified, will use the platform default profile(s).
test: ``False``
Dry run? If set as ``True``, will apply the config, discard
and return the changes. Default: ``False`` and will commit
the changes on the device.
commit: ``True``
Commit? Default: ``True``.
debug: ``False``
Debug mode. Will insert a new key under the output dictionary,
as ``loaded_config`` containing the raw configuration loaded on the device.
replace: ``False``
Should replace the config with the new generate one?
CLI Example:
.. code-block:: bash
salt '*' napalm_yang.load_config {} models.openconfig_interfaces test=True debug=True
Output Example:
.. code-block:: jinja
device1:
----------
already_configured:
False
comment:
diff:
[edit interfaces ge-0/0/0]
- mtu 1400;
[edit interfaces ge-0/0/0 unit 0 family inet]
- dhcp;
[edit interfaces lo0]
- unit 0 {
- description lo0.0;
- }
+ unit 1 {
+ description "new loopback";
+ }
loaded_config:
<configuration>
<interfaces replace="replace">
<interface>
<name>ge-0/0/0</name>
<unit>
<name>0</name>
<family>
<inet/>
</family>
<description>ge-0/0/0.0</description>
</unit>
<description>management interface</description>
</interface>
<interface>
<name>ge-0/0/1</name>
<disable/>
<description>ge-0/0/1</description>
</interface>
<interface>
<name>ae0</name>
<unit>
<name>0</name>
<vlan-id>100</vlan-id>
<family>
<inet>
<address>
<name>192.168.100.1/24</name>
</address>
<address>
<name>172.20.100.1/24</name>
</address>
</inet>
</family>
<description>a description</description>
</unit>
<vlan-tagging/>
<unit>
<name>1</name>
<vlan-id>1</vlan-id>
<family>
<inet>
<address>
<name>192.168.101.1/24</name>
</address>
</inet>
</family>
<disable/>
<description>ae0.1</description>
</unit>
<vlan-tagging/>
<unit>
<name>2</name>
<vlan-id>2</vlan-id>
<family>
<inet>
<address>
<name>192.168.102.1/24</name>
</address>
</inet>
</family>
<description>ae0.2</description>
</unit>
<vlan-tagging/>
</interface>
<interface>
<name>lo0</name>
<unit>
<name>1</name>
<description>new loopback</description>
</unit>
<description>lo0</description>
</interface>
</interfaces>
</configuration>
result:
True
'''
if isinstance(models, tuple) and isinstance(models[0], list):
models = models[0]
config = get_config(data, *models, **kwargs)
test = kwargs.pop('test', False)
debug = kwargs.pop('debug', False)
commit = kwargs.pop('commit', True)
replace = kwargs.pop('replace', False)
return __salt__['net.load_config'](text=config,
test=test,
debug=debug,
commit=commit,
replace=replace,
inherit_napalm_device=napalm_device) | Generate and load the config on the device using the OpenConfig or IETF
models and device profiles.
data
Dictionary structured with respect to the models referenced.
models
A list of models to be used when generating the config.
profiles: ``None``
Use certain profiles to generate the config.
If not specified, will use the platform default profile(s).
test: ``False``
Dry run? If set as ``True``, will apply the config, discard
and return the changes. Default: ``False`` and will commit
the changes on the device.
commit: ``True``
Commit? Default: ``True``.
debug: ``False``
Debug mode. Will insert a new key under the output dictionary,
as ``loaded_config`` containing the raw configuration loaded on the device.
replace: ``False``
Should replace the config with the new generate one?
CLI Example:
.. code-block:: bash
salt '*' napalm_yang.load_config {} models.openconfig_interfaces test=True debug=True
Output Example:
.. code-block:: jinja
device1:
----------
already_configured:
False
comment:
diff:
[edit interfaces ge-0/0/0]
- mtu 1400;
[edit interfaces ge-0/0/0 unit 0 family inet]
- dhcp;
[edit interfaces lo0]
- unit 0 {
- description lo0.0;
- }
+ unit 1 {
+ description "new loopback";
+ }
loaded_config:
<configuration>
<interfaces replace="replace">
<interface>
<name>ge-0/0/0</name>
<unit>
<name>0</name>
<family>
<inet/>
</family>
<description>ge-0/0/0.0</description>
</unit>
<description>management interface</description>
</interface>
<interface>
<name>ge-0/0/1</name>
<disable/>
<description>ge-0/0/1</description>
</interface>
<interface>
<name>ae0</name>
<unit>
<name>0</name>
<vlan-id>100</vlan-id>
<family>
<inet>
<address>
<name>192.168.100.1/24</name>
</address>
<address>
<name>172.20.100.1/24</name>
</address>
</inet>
</family>
<description>a description</description>
</unit>
<vlan-tagging/>
<unit>
<name>1</name>
<vlan-id>1</vlan-id>
<family>
<inet>
<address>
<name>192.168.101.1/24</name>
</address>
</inet>
</family>
<disable/>
<description>ae0.1</description>
</unit>
<vlan-tagging/>
<unit>
<name>2</name>
<vlan-id>2</vlan-id>
<family>
<inet>
<address>
<name>192.168.102.1/24</name>
</address>
</inet>
</family>
<description>ae0.2</description>
</unit>
<vlan-tagging/>
</interface>
<interface>
<name>lo0</name>
<unit>
<name>1</name>
<description>new loopback</description>
</unit>
<description>lo0</description>
</interface>
</interfaces>
</configuration>
result:
True | Below is the the instruction that describes the task:
### Input:
Generate and load the config on the device using the OpenConfig or IETF
models and device profiles.
data
Dictionary structured with respect to the models referenced.
models
A list of models to be used when generating the config.
profiles: ``None``
Use certain profiles to generate the config.
If not specified, will use the platform default profile(s).
test: ``False``
Dry run? If set as ``True``, will apply the config, discard
and return the changes. Default: ``False`` and will commit
the changes on the device.
commit: ``True``
Commit? Default: ``True``.
debug: ``False``
Debug mode. Will insert a new key under the output dictionary,
as ``loaded_config`` containing the raw configuration loaded on the device.
replace: ``False``
Should replace the config with the new generate one?
CLI Example:
.. code-block:: bash
salt '*' napalm_yang.load_config {} models.openconfig_interfaces test=True debug=True
Output Example:
.. code-block:: jinja
device1:
----------
already_configured:
False
comment:
diff:
[edit interfaces ge-0/0/0]
- mtu 1400;
[edit interfaces ge-0/0/0 unit 0 family inet]
- dhcp;
[edit interfaces lo0]
- unit 0 {
- description lo0.0;
- }
+ unit 1 {
+ description "new loopback";
+ }
loaded_config:
<configuration>
<interfaces replace="replace">
<interface>
<name>ge-0/0/0</name>
<unit>
<name>0</name>
<family>
<inet/>
</family>
<description>ge-0/0/0.0</description>
</unit>
<description>management interface</description>
</interface>
<interface>
<name>ge-0/0/1</name>
<disable/>
<description>ge-0/0/1</description>
</interface>
<interface>
<name>ae0</name>
<unit>
<name>0</name>
<vlan-id>100</vlan-id>
<family>
<inet>
<address>
<name>192.168.100.1/24</name>
</address>
<address>
<name>172.20.100.1/24</name>
</address>
</inet>
</family>
<description>a description</description>
</unit>
<vlan-tagging/>
<unit>
<name>1</name>
<vlan-id>1</vlan-id>
<family>
<inet>
<address>
<name>192.168.101.1/24</name>
</address>
</inet>
</family>
<disable/>
<description>ae0.1</description>
</unit>
<vlan-tagging/>
<unit>
<name>2</name>
<vlan-id>2</vlan-id>
<family>
<inet>
<address>
<name>192.168.102.1/24</name>
</address>
</inet>
</family>
<description>ae0.2</description>
</unit>
<vlan-tagging/>
</interface>
<interface>
<name>lo0</name>
<unit>
<name>1</name>
<description>new loopback</description>
</unit>
<description>lo0</description>
</interface>
</interfaces>
</configuration>
result:
True
### Response:
def load_config(data, *models, **kwargs):
'''
Generate and load the config on the device using the OpenConfig or IETF
models and device profiles.
data
Dictionary structured with respect to the models referenced.
models
A list of models to be used when generating the config.
profiles: ``None``
Use certain profiles to generate the config.
If not specified, will use the platform default profile(s).
test: ``False``
Dry run? If set as ``True``, will apply the config, discard
and return the changes. Default: ``False`` and will commit
the changes on the device.
commit: ``True``
Commit? Default: ``True``.
debug: ``False``
Debug mode. Will insert a new key under the output dictionary,
as ``loaded_config`` containing the raw configuration loaded on the device.
replace: ``False``
Should replace the config with the new generate one?
CLI Example:
.. code-block:: bash
salt '*' napalm_yang.load_config {} models.openconfig_interfaces test=True debug=True
Output Example:
.. code-block:: jinja
device1:
----------
already_configured:
False
comment:
diff:
[edit interfaces ge-0/0/0]
- mtu 1400;
[edit interfaces ge-0/0/0 unit 0 family inet]
- dhcp;
[edit interfaces lo0]
- unit 0 {
- description lo0.0;
- }
+ unit 1 {
+ description "new loopback";
+ }
loaded_config:
<configuration>
<interfaces replace="replace">
<interface>
<name>ge-0/0/0</name>
<unit>
<name>0</name>
<family>
<inet/>
</family>
<description>ge-0/0/0.0</description>
</unit>
<description>management interface</description>
</interface>
<interface>
<name>ge-0/0/1</name>
<disable/>
<description>ge-0/0/1</description>
</interface>
<interface>
<name>ae0</name>
<unit>
<name>0</name>
<vlan-id>100</vlan-id>
<family>
<inet>
<address>
<name>192.168.100.1/24</name>
</address>
<address>
<name>172.20.100.1/24</name>
</address>
</inet>
</family>
<description>a description</description>
</unit>
<vlan-tagging/>
<unit>
<name>1</name>
<vlan-id>1</vlan-id>
<family>
<inet>
<address>
<name>192.168.101.1/24</name>
</address>
</inet>
</family>
<disable/>
<description>ae0.1</description>
</unit>
<vlan-tagging/>
<unit>
<name>2</name>
<vlan-id>2</vlan-id>
<family>
<inet>
<address>
<name>192.168.102.1/24</name>
</address>
</inet>
</family>
<description>ae0.2</description>
</unit>
<vlan-tagging/>
</interface>
<interface>
<name>lo0</name>
<unit>
<name>1</name>
<description>new loopback</description>
</unit>
<description>lo0</description>
</interface>
</interfaces>
</configuration>
result:
True
'''
if isinstance(models, tuple) and isinstance(models[0], list):
models = models[0]
config = get_config(data, *models, **kwargs)
test = kwargs.pop('test', False)
debug = kwargs.pop('debug', False)
commit = kwargs.pop('commit', True)
replace = kwargs.pop('replace', False)
return __salt__['net.load_config'](text=config,
test=test,
debug=debug,
commit=commit,
replace=replace,
inherit_napalm_device=napalm_device) |
def handle_onchain_secretreveal(
target_state: TargetTransferState,
state_change: ContractReceiveSecretReveal,
channel_state: NettingChannelState,
) -> TransitionResult[TargetTransferState]:
""" Validates and handles a ContractReceiveSecretReveal state change. """
valid_secret = is_valid_secret_reveal(
state_change=state_change,
transfer_secrethash=target_state.transfer.lock.secrethash,
secret=state_change.secret,
)
if valid_secret:
channel.register_onchain_secret(
channel_state=channel_state,
secret=state_change.secret,
secrethash=state_change.secrethash,
secret_reveal_block_number=state_change.block_number,
)
target_state.state = TargetTransferState.ONCHAIN_UNLOCK
target_state.secret = state_change.secret
return TransitionResult(target_state, list()) | Validates and handles a ContractReceiveSecretReveal state change. | Below is the the instruction that describes the task:
### Input:
Validates and handles a ContractReceiveSecretReveal state change.
### Response:
def handle_onchain_secretreveal(
target_state: TargetTransferState,
state_change: ContractReceiveSecretReveal,
channel_state: NettingChannelState,
) -> TransitionResult[TargetTransferState]:
""" Validates and handles a ContractReceiveSecretReveal state change. """
valid_secret = is_valid_secret_reveal(
state_change=state_change,
transfer_secrethash=target_state.transfer.lock.secrethash,
secret=state_change.secret,
)
if valid_secret:
channel.register_onchain_secret(
channel_state=channel_state,
secret=state_change.secret,
secrethash=state_change.secrethash,
secret_reveal_block_number=state_change.block_number,
)
target_state.state = TargetTransferState.ONCHAIN_UNLOCK
target_state.secret = state_change.secret
return TransitionResult(target_state, list()) |
def download_and_calibrate(img_id=None, overwrite=False, recalibrate=False, **kwargs):
"""Download and calibrate one or more image ids, in parallel.
Parameters
----------
img_id : str or io.PathManager, optional
If more than one item is in img_id, a parallel process is started
overwrite: bool, optional
If the pm.cubepath exists, this switch controls if it is being overwritten.
Default: False
"""
if isinstance(img_id, io.PathManager):
pm = img_id
else:
# get a PathManager object that knows where your data is or should be
logger.debug("Creating Pathmanager object")
pm = io.PathManager(img_id)
if not pm.raw_image.exists() or overwrite is True:
logger.debug("Downloading file %s" % pm.img_id)
download_file_id(pm.img_id)
pm = io.PathManager(img_id) # refresh, to get proper PDS version id.
else:
logger.info("Found ")
if not (pm.cubepath.exists() and pm.undestriped.exists()) or overwrite is True:
calib = pipeline.Calibrator(img_id, **kwargs)
calib.standard_calib()
else:
print("All files exist. Use overwrite=True to redownload and calibrate.") | Download and calibrate one or more image ids, in parallel.
Parameters
----------
img_id : str or io.PathManager, optional
If more than one item is in img_id, a parallel process is started
overwrite: bool, optional
If the pm.cubepath exists, this switch controls if it is being overwritten.
Default: False | Below is the the instruction that describes the task:
### Input:
Download and calibrate one or more image ids, in parallel.
Parameters
----------
img_id : str or io.PathManager, optional
If more than one item is in img_id, a parallel process is started
overwrite: bool, optional
If the pm.cubepath exists, this switch controls if it is being overwritten.
Default: False
### Response:
def download_and_calibrate(img_id=None, overwrite=False, recalibrate=False, **kwargs):
"""Download and calibrate one or more image ids, in parallel.
Parameters
----------
img_id : str or io.PathManager, optional
If more than one item is in img_id, a parallel process is started
overwrite: bool, optional
If the pm.cubepath exists, this switch controls if it is being overwritten.
Default: False
"""
if isinstance(img_id, io.PathManager):
pm = img_id
else:
# get a PathManager object that knows where your data is or should be
logger.debug("Creating Pathmanager object")
pm = io.PathManager(img_id)
if not pm.raw_image.exists() or overwrite is True:
logger.debug("Downloading file %s" % pm.img_id)
download_file_id(pm.img_id)
pm = io.PathManager(img_id) # refresh, to get proper PDS version id.
else:
logger.info("Found ")
if not (pm.cubepath.exists() and pm.undestriped.exists()) or overwrite is True:
calib = pipeline.Calibrator(img_id, **kwargs)
calib.standard_calib()
else:
print("All files exist. Use overwrite=True to redownload and calibrate.") |
def last_name(languages=None):
"""
return a random last name
>>> from mock import patch
>>> with patch('%s._get_lastnames' % __name__, lambda *args: ['aaa']):
... last_name()
'Aaa'
>>> with patch('%s.get_lastnames' % __name__, lambda lang: ['%s_lastname'% lang]):
... last_name(['it'])
'It_Lastname'
"""
choices = []
languages = languages or ['en']
for lang in languages:
samples = _get_lastnames(lang)
choices.extend(samples)
return random.choice(choices).title() | return a random last name
>>> from mock import patch
>>> with patch('%s._get_lastnames' % __name__, lambda *args: ['aaa']):
... last_name()
'Aaa'
>>> with patch('%s.get_lastnames' % __name__, lambda lang: ['%s_lastname'% lang]):
... last_name(['it'])
'It_Lastname' | Below is the the instruction that describes the task:
### Input:
return a random last name
>>> from mock import patch
>>> with patch('%s._get_lastnames' % __name__, lambda *args: ['aaa']):
... last_name()
'Aaa'
>>> with patch('%s.get_lastnames' % __name__, lambda lang: ['%s_lastname'% lang]):
... last_name(['it'])
'It_Lastname'
### Response:
def last_name(languages=None):
"""
return a random last name
>>> from mock import patch
>>> with patch('%s._get_lastnames' % __name__, lambda *args: ['aaa']):
... last_name()
'Aaa'
>>> with patch('%s.get_lastnames' % __name__, lambda lang: ['%s_lastname'% lang]):
... last_name(['it'])
'It_Lastname'
"""
choices = []
languages = languages or ['en']
for lang in languages:
samples = _get_lastnames(lang)
choices.extend(samples)
return random.choice(choices).title() |
def refresh(self, line=None):
"""Refreshes progress bar."""
# Just go away if it is locked. Will update next time
if not self._lock.acquire(False):
return
if line is None:
line = self._line
if sys.stdout.isatty() and line is not None:
self._writeln(line)
self._line = line
self._lock.release() | Refreshes progress bar. | Below is the instruction that describes the task:
### Input:
Refreshes progress bar.
### Response:
def refresh(self, line=None):
"""Refreshes progress bar."""
# Just go away if it is locked. Will update next time
if not self._lock.acquire(False):
return
if line is None:
line = self._line
if sys.stdout.isatty() and line is not None:
self._writeln(line)
self._line = line
self._lock.release() |
def time_step(z,Ns,t_step,Nstep):
"""
Create a one sample per symbol signal containing a phase rotation
step Nsymb into the waveform.
:param z: complex baseband signal after matched filter
:param Ns: number of sample per symbol
:param t_step: in samples relative to Ns
:param Nstep: symbol sample location where the step turns on
:return: the one sample per symbol signal containing the phase step
Mark Wickert July 2014
"""
z_step = np.hstack((z[:Ns*Nstep], z[(Ns*Nstep+t_step):], np.zeros(t_step)))
return z_step | Create a one sample per symbol signal containing a phase rotation
step Nsymb into the waveform.
:param z: complex baseband signal after matched filter
:param Ns: number of sample per symbol
:param t_step: in samples relative to Ns
:param Nstep: symbol sample location where the step turns on
:return: the one sample per symbol signal containing the phase step
Mark Wickert July 2014 | Below is the instruction that describes the task:
### Input:
Create a one sample per symbol signal containing a phase rotation
step Nsymb into the waveform.
:param z: complex baseband signal after matched filter
:param Ns: number of sample per symbol
:param t_step: in samples relative to Ns
:param Nstep: symbol sample location where the step turns on
:return: the one sample per symbol signal containing the phase step
Mark Wickert July 2014
### Response:
def time_step(z,Ns,t_step,Nstep):
"""
Create a one sample per symbol signal containing a phase rotation
step Nsymb into the waveform.
:param z: complex baseband signal after matched filter
:param Ns: number of sample per symbol
:param t_step: in samples relative to Ns
:param Nstep: symbol sample location where the step turns on
:return: the one sample per symbol signal containing the phase step
Mark Wickert July 2014
"""
z_step = np.hstack((z[:Ns*Nstep], z[(Ns*Nstep+t_step):], np.zeros(t_step)))
return z_step |
def addlayer(self, name, srs, geomType):
"""
add a layer to the vector layer
Parameters
----------
name: str
the layer name
srs: int, str or :osgeo:class:`osr.SpatialReference`
the spatial reference system. See :func:`spatialist.auxil.crsConvert` for options.
geomType: int
an OGR well-known binary data type.
See `Module ogr <https://gdal.org/python/osgeo.ogr-module.html>`_.
Returns
-------
"""
self.vector.CreateLayer(name, srs, geomType)
self.init_layer() | add a layer to the vector layer
Parameters
----------
name: str
the layer name
srs: int, str or :osgeo:class:`osr.SpatialReference`
the spatial reference system. See :func:`spatialist.auxil.crsConvert` for options.
geomType: int
an OGR well-known binary data type.
See `Module ogr <https://gdal.org/python/osgeo.ogr-module.html>`_.
Returns
------- | Below is the instruction that describes the task:
### Input:
add a layer to the vector layer
Parameters
----------
name: str
the layer name
srs: int, str or :osgeo:class:`osr.SpatialReference`
the spatial reference system. See :func:`spatialist.auxil.crsConvert` for options.
geomType: int
an OGR well-known binary data type.
See `Module ogr <https://gdal.org/python/osgeo.ogr-module.html>`_.
Returns
-------
### Response:
def addlayer(self, name, srs, geomType):
"""
add a layer to the vector layer
Parameters
----------
name: str
the layer name
srs: int, str or :osgeo:class:`osr.SpatialReference`
the spatial reference system. See :func:`spatialist.auxil.crsConvert` for options.
geomType: int
an OGR well-known binary data type.
See `Module ogr <https://gdal.org/python/osgeo.ogr-module.html>`_.
Returns
-------
"""
self.vector.CreateLayer(name, srs, geomType)
self.init_layer() |
def _list_view(self, model, **kwargs):
"""
:param model:
:param fields_convert_map: it's different from ListView
:param kwargs:
:return:
"""
from uliweb import request
#add download fields process
fields = kwargs.pop('fields', None)
meta = kwargs.pop('meta', 'Table')
if 'download' in request.GET:
if 'download_fields' in kwargs:
fields = kwargs.pop('download_fields', fields)
if 'download_meta' in kwargs:
meta = kwargs.pop('download_meta')
else:
if hasattr(model, 'Download'):
meta = 'Download'
else:
meta = meta
view = functions.ListView(model, fields=fields, meta=meta, **kwargs)
return view | :param model:
:param fields_convert_map: it's different from ListView
:param kwargs:
:return: | Below is the instruction that describes the task:
### Input:
:param model:
:param fields_convert_map: it's different from ListView
:param kwargs:
:return:
### Response:
def _list_view(self, model, **kwargs):
"""
:param model:
:param fields_convert_map: it's different from ListView
:param kwargs:
:return:
"""
from uliweb import request
#add download fields process
fields = kwargs.pop('fields', None)
meta = kwargs.pop('meta', 'Table')
if 'download' in request.GET:
if 'download_fields' in kwargs:
fields = kwargs.pop('download_fields', fields)
if 'download_meta' in kwargs:
meta = kwargs.pop('download_meta')
else:
if hasattr(model, 'Download'):
meta = 'Download'
else:
meta = meta
view = functions.ListView(model, fields=fields, meta=meta, **kwargs)
return view |
def read_text_file(filename, encoding="utf-8"):
"""
Reads a file under python3 with encoding (default UTF-8).
Also works under python2, without encoding.
Uses the EAFP (https://docs.python.org/2/glossary.html#term-eafp)
principle.
"""
try:
with open(filename, 'r', encoding) as f:
r = f.read()
except TypeError:
with open(filename, 'r') as f:
r = f.read()
return r | Reads a file under python3 with encoding (default UTF-8).
Also works under python2, without encoding.
Uses the EAFP (https://docs.python.org/2/glossary.html#term-eafp)
principle. | Below is the instruction that describes the task:
### Input:
Reads a file under python3 with encoding (default UTF-8).
Also works under python2, without encoding.
Uses the EAFP (https://docs.python.org/2/glossary.html#term-eafp)
principle.
### Response:
def read_text_file(filename, encoding="utf-8"):
"""
Reads a file under python3 with encoding (default UTF-8).
Also works under python2, without encoding.
Uses the EAFP (https://docs.python.org/2/glossary.html#term-eafp)
principle.
"""
try:
with open(filename, 'r', encoding) as f:
r = f.read()
except TypeError:
with open(filename, 'r') as f:
r = f.read()
return r |
def parse_oxi_states(self, data):
"""
Parse oxidation states from data dictionary
"""
try:
oxi_states = {
data["_atom_type_symbol"][i]:
str2float(data["_atom_type_oxidation_number"][i])
for i in range(len(data["_atom_type_symbol"]))}
# attempt to strip oxidation state from _atom_type_symbol
# in case the label does not contain an oxidation state
for i, symbol in enumerate(data["_atom_type_symbol"]):
oxi_states[re.sub(r"\d?[\+,\-]?$", "", symbol)] = \
str2float(data["_atom_type_oxidation_number"][i])
except (ValueError, KeyError):
oxi_states = None
return oxi_states | Parse oxidation states from data dictionary | Below is the instruction that describes the task:
### Input:
Parse oxidation states from data dictionary
### Response:
def parse_oxi_states(self, data):
"""
Parse oxidation states from data dictionary
"""
try:
oxi_states = {
data["_atom_type_symbol"][i]:
str2float(data["_atom_type_oxidation_number"][i])
for i in range(len(data["_atom_type_symbol"]))}
# attempt to strip oxidation state from _atom_type_symbol
# in case the label does not contain an oxidation state
for i, symbol in enumerate(data["_atom_type_symbol"]):
oxi_states[re.sub(r"\d?[\+,\-]?$", "", symbol)] = \
str2float(data["_atom_type_oxidation_number"][i])
except (ValueError, KeyError):
oxi_states = None
return oxi_states |
def positive_int(val):
"""Parse `val` into a positive integer."""
if isinstance(val, float):
raise ValueError('"{}" must not be a float'.format(val))
val = int(val)
if val >= 0:
return val
raise ValueError('"{}" must be positive'.format(val)) | Parse `val` into a positive integer. | Below is the instruction that describes the task:
### Input:
Parse `val` into a positive integer.
### Response:
def positive_int(val):
"""Parse `val` into a positive integer."""
if isinstance(val, float):
raise ValueError('"{}" must not be a float'.format(val))
val = int(val)
if val >= 0:
return val
raise ValueError('"{}" must be positive'.format(val)) |
def calculate_file_access_time(workflow_workspace):
"""Calculate access times of files in workspace."""
access_times = {}
for subdir, dirs, files in os.walk(workflow_workspace):
for file in files:
file_path = os.path.join(subdir, file)
access_times[file_path] = os.stat(file_path).st_atime
return access_times | Calculate access times of files in workspace. | Below is the instruction that describes the task:
### Input:
Calculate access times of files in workspace.
### Response:
def calculate_file_access_time(workflow_workspace):
"""Calculate access times of files in workspace."""
access_times = {}
for subdir, dirs, files in os.walk(workflow_workspace):
for file in files:
file_path = os.path.join(subdir, file)
access_times[file_path] = os.stat(file_path).st_atime
return access_times |
def pp_options_list(keys, width=80, _print=False):
""" Builds a concise listing of available options, grouped by prefix """
from textwrap import wrap
from itertools import groupby
def pp(name, ks):
pfx = '- ' + name + '.[' if name else ''
ls = wrap(
', '.join(ks),
width,
initial_indent=pfx,
subsequent_indent=' ',
break_long_words=False,
)
if ls and ls[-1] and name:
ls[-1] = ls[-1] + ']'
return ls
ls = []
singles = [x for x in sorted(keys) if x.find('.') < 0]
if singles:
ls += pp('', singles)
keys = [x for x in keys if x.find('.') >= 0]
for k, g in groupby(sorted(keys), lambda x: x[: x.rfind('.')]):
ks = [x[len(k) + 1 :] for x in list(g)]
ls += pp(k, ks)
s = '\n'.join(ls)
if _print:
print(s)
else:
return s | Builds a concise listing of available options, grouped by prefix | Below is the instruction that describes the task:
### Input:
Builds a concise listing of available options, grouped by prefix
### Response:
def pp_options_list(keys, width=80, _print=False):
""" Builds a concise listing of available options, grouped by prefix """
from textwrap import wrap
from itertools import groupby
def pp(name, ks):
pfx = '- ' + name + '.[' if name else ''
ls = wrap(
', '.join(ks),
width,
initial_indent=pfx,
subsequent_indent=' ',
break_long_words=False,
)
if ls and ls[-1] and name:
ls[-1] = ls[-1] + ']'
return ls
ls = []
singles = [x for x in sorted(keys) if x.find('.') < 0]
if singles:
ls += pp('', singles)
keys = [x for x in keys if x.find('.') >= 0]
for k, g in groupby(sorted(keys), lambda x: x[: x.rfind('.')]):
ks = [x[len(k) + 1 :] for x in list(g)]
ls += pp(k, ks)
s = '\n'.join(ls)
if _print:
print(s)
else:
return s |
def propagate(p0, angle, d, deg=True, bearing=False, r=r_earth_mean):
"""
Given an initial point and angle, move distance d along the surface
Parameters
----------
p0 : point-like (or array of point-like) [lon, lat] objects
angle : float (or array of float)
bearing. Note that by default, 0 degrees is due East increasing
clockwise so that 90 degrees is due North. See the bearing flag
to change the meaning of this angle
d : float (or array of float)
distance to move. The units of d should be consistent with input r
deg : bool, optional (default True)
Whether both p0 and angle are specified in degrees. The output
points will also match the value of this flag.
bearing : bool, optional (default False)
Indicates whether to interpret the input angle as the classical
definition of bearing.
r : float, optional (default r_earth_mean)
radius of the sphere
Reference
---------
http://www.movable-type.co.uk/scripts/latlong.html - Destination
Note: Spherical earth model. By default uses radius of 6371.0 km.
"""
single, (p0, angle, d) = _to_arrays((p0, 2), (angle, 1), (d, 1))
if deg:
p0 = np.radians(p0)
angle = np.radians(angle)
if not bearing:
angle = np.pi / 2.0 - angle
lon0, lat0 = p0[:,0], p0[:,1]
angd = d / r
lat1 = arcsin(sin(lat0) * cos(angd) + cos(lat0) * sin(angd) * cos(angle))
a = sin(angle) * sin(angd) * cos(lat0)
b = cos(angd) - sin(lat0) * sin(lat1)
lon1 = lon0 + arctan2(a, b)
p1 = np.column_stack([lon1, lat1])
if deg:
p1 = np.degrees(p1)
if single:
p1 = p1[0]
return p1 | Given an initial point and angle, move distance d along the surface
Parameters
----------
p0 : point-like (or array of point-like) [lon, lat] objects
angle : float (or array of float)
bearing. Note that by default, 0 degrees is due East increasing
clockwise so that 90 degrees is due North. See the bearing flag
to change the meaning of this angle
d : float (or array of float)
distance to move. The units of d should be consistent with input r
deg : bool, optional (default True)
Whether both p0 and angle are specified in degrees. The output
points will also match the value of this flag.
bearing : bool, optional (default False)
Indicates whether to interpret the input angle as the classical
definition of bearing.
r : float, optional (default r_earth_mean)
radius of the sphere
Reference
---------
http://www.movable-type.co.uk/scripts/latlong.html - Destination
Note: Spherical earth model. By default uses radius of 6371.0 km. | Below is the instruction that describes the task:
### Input:
Given an initial point and angle, move distance d along the surface
Parameters
----------
p0 : point-like (or array of point-like) [lon, lat] objects
angle : float (or array of float)
bearing. Note that by default, 0 degrees is due East increasing
clockwise so that 90 degrees is due North. See the bearing flag
to change the meaning of this angle
d : float (or array of float)
distance to move. The units of d should be consistent with input r
deg : bool, optional (default True)
Whether both p0 and angle are specified in degrees. The output
points will also match the value of this flag.
bearing : bool, optional (default False)
Indicates whether to interpret the input angle as the classical
definition of bearing.
r : float, optional (default r_earth_mean)
radius of the sphere
Reference
---------
http://www.movable-type.co.uk/scripts/latlong.html - Destination
Note: Spherical earth model. By default uses radius of 6371.0 km.
### Response:
def propagate(p0, angle, d, deg=True, bearing=False, r=r_earth_mean):
"""
Given an initial point and angle, move distance d along the surface
Parameters
----------
p0 : point-like (or array of point-like) [lon, lat] objects
angle : float (or array of float)
bearing. Note that by default, 0 degrees is due East increasing
clockwise so that 90 degrees is due North. See the bearing flag
to change the meaning of this angle
d : float (or array of float)
distance to move. The units of d should be consistent with input r
deg : bool, optional (default True)
Whether both p0 and angle are specified in degrees. The output
points will also match the value of this flag.
bearing : bool, optional (default False)
Indicates whether to interpret the input angle as the classical
definition of bearing.
r : float, optional (default r_earth_mean)
radius of the sphere
Reference
---------
http://www.movable-type.co.uk/scripts/latlong.html - Destination
Note: Spherical earth model. By default uses radius of 6371.0 km.
"""
single, (p0, angle, d) = _to_arrays((p0, 2), (angle, 1), (d, 1))
if deg:
p0 = np.radians(p0)
angle = np.radians(angle)
if not bearing:
angle = np.pi / 2.0 - angle
lon0, lat0 = p0[:,0], p0[:,1]
angd = d / r
lat1 = arcsin(sin(lat0) * cos(angd) + cos(lat0) * sin(angd) * cos(angle))
a = sin(angle) * sin(angd) * cos(lat0)
b = cos(angd) - sin(lat0) * sin(lat1)
lon1 = lon0 + arctan2(a, b)
p1 = np.column_stack([lon1, lat1])
if deg:
p1 = np.degrees(p1)
if single:
p1 = p1[0]
return p1 |
def defined_namespace_keywords(self) -> Set[str]: # noqa: D401
"""The set of all keywords defined as namespaces in this graph."""
return set(self.namespace_pattern) | set(self.namespace_url) | The set of all keywords defined as namespaces in this graph. | Below is the instruction that describes the task:
### Input:
The set of all keywords defined as namespaces in this graph.
### Response:
def defined_namespace_keywords(self) -> Set[str]: # noqa: D401
"""The set of all keywords defined as namespaces in this graph."""
return set(self.namespace_pattern) | set(self.namespace_url) |
def convert_sum(
params, w_name, scope_name, inputs, layers, weights, names
):
"""
Convert sum.
Args:
params: dictionary with layer parameters
w_name: name prefix in state_dict
scope_name: pytorch scope name
inputs: pytorch node inputs
layers: dictionary with keras tensors
weights: pytorch state_dict
names: use short names for keras layers
"""
print('Converting Sum ...')
def target_layer(x):
import keras.backend as K
return K.sum(x)
lambda_layer = keras.layers.Lambda(target_layer)
layers[scope_name] = lambda_layer(layers[inputs[0]]) | Convert sum.
Args:
params: dictionary with layer parameters
w_name: name prefix in state_dict
scope_name: pytorch scope name
inputs: pytorch node inputs
layers: dictionary with keras tensors
weights: pytorch state_dict
names: use short names for keras layers | Below is the instruction that describes the task:
### Input:
Convert sum.
Args:
params: dictionary with layer parameters
w_name: name prefix in state_dict
scope_name: pytorch scope name
inputs: pytorch node inputs
layers: dictionary with keras tensors
weights: pytorch state_dict
names: use short names for keras layers
### Response:
def convert_sum(
params, w_name, scope_name, inputs, layers, weights, names
):
"""
Convert sum.
Args:
params: dictionary with layer parameters
w_name: name prefix in state_dict
scope_name: pytorch scope name
inputs: pytorch node inputs
layers: dictionary with keras tensors
weights: pytorch state_dict
names: use short names for keras layers
"""
print('Converting Sum ...')
def target_layer(x):
import keras.backend as K
return K.sum(x)
lambda_layer = keras.layers.Lambda(target_layer)
layers[scope_name] = lambda_layer(layers[inputs[0]]) |
def load_images(input_dir, batch_shape):
"""Read png images from input directory in batches.
Args:
input_dir: input directory
batch_shape: shape of minibatch array, i.e. [batch_size, height, width, 3]
Yields:
filenames: list file names without path of each image
Length of this list could be less than batch_size, in this case only
first few images of the result are elements of the minibatch.
images: array with all images from this batch
"""
images = np.zeros(batch_shape)
filenames = []
idx = 0
batch_size = batch_shape[0]
for filepath in tf.gfile.Glob(os.path.join(input_dir, '*.png')):
with tf.gfile.Open(filepath) as f:
image = np.array(Image.open(f).convert('RGB')).astype(np.float) / 255.0
# Images for inception classifier are normalized to be in [-1, 1] interval.
images[idx, :, :, :] = image * 2.0 - 1.0
filenames.append(os.path.basename(filepath))
idx += 1
if idx == batch_size:
yield filenames, images
filenames = []
images = np.zeros(batch_shape)
idx = 0
if idx > 0:
yield filenames, images | Read png images from input directory in batches.
Args:
input_dir: input directory
batch_shape: shape of minibatch array, i.e. [batch_size, height, width, 3]
Yields:
filenames: list file names without path of each image
Length of this list could be less than batch_size, in this case only
first few images of the result are elements of the minibatch.
images: array with all images from this batch | Below is the instruction that describes the task:
### Input:
Read png images from input directory in batches.
Args:
input_dir: input directory
batch_shape: shape of minibatch array, i.e. [batch_size, height, width, 3]
Yields:
filenames: list file names without path of each image
Length of this list could be less than batch_size, in this case only
first few images of the result are elements of the minibatch.
images: array with all images from this batch
### Response:
def load_images(input_dir, batch_shape):
"""Read png images from input directory in batches.
Args:
input_dir: input directory
batch_shape: shape of minibatch array, i.e. [batch_size, height, width, 3]
Yields:
filenames: list file names without path of each image
Length of this list could be less than batch_size, in this case only
first few images of the result are elements of the minibatch.
images: array with all images from this batch
"""
images = np.zeros(batch_shape)
filenames = []
idx = 0
batch_size = batch_shape[0]
for filepath in tf.gfile.Glob(os.path.join(input_dir, '*.png')):
with tf.gfile.Open(filepath) as f:
image = np.array(Image.open(f).convert('RGB')).astype(np.float) / 255.0
# Images for inception classifier are normalized to be in [-1, 1] interval.
images[idx, :, :, :] = image * 2.0 - 1.0
filenames.append(os.path.basename(filepath))
idx += 1
if idx == batch_size:
yield filenames, images
filenames = []
images = np.zeros(batch_shape)
idx = 0
if idx > 0:
yield filenames, images |
def _get_all_property_mappings(encoder: MappingJSONEncoder, property_mappings: Iterable[JsonPropertyMapping],
superclasses: Tuple[PropertyMapper]) -> List[JsonPropertyMapping]:
"""
Gets all of the property mappings from the given property mapper, considering the property mappings for self and the
property mappings defined by the superclass.
:param encoder: `self` when bound as class method
:param property_mappings: mappings defined for the given encoder, excluding mappings defined by superclasses
:param superclasses: superclasses of the given encoder. Property mappers in later superclasses may override the
effects of property mappers defined by superclasses closer to the start of the list
:return: all of the property mappings for the given encoder
"""
mappings = []
for superclass in superclasses:
super_mappings = superclass._get_property_mappings(superclass)
mappings.extend(super_mappings)
# Add property mappings of own to end of the mappings list
mappings.extend(property_mappings)
# Note: It is very difficult to cull all property mappers that target the same properties, leaving only the ones
# from the lowest class in the hierarchy. This is because such mappers may be encoded as functions. Given that such
# overloading is unlikely to be used much and the cost of doing a mapping and then mapping again over the top of it
# will likely be small, there will be no attempt of such a cull.
return mappings | Gets all of the property mappings from the given property mapper, considering the property mappings for self and the
property mappings defined by the superclass.
:param encoder: `self` when bound as class method
:param property_mappings: mappings defined for the given encoder, excluding mappings defined by superclasses
:param superclasses: superclasses of the given encoder. Property mappers in later superclasses may override the
effects of property mappers defined by superclasses closer to the start of the list
:return: all of the property mappings for the given encoder | Below is the instruction that describes the task:
### Input:
Gets all of the property mappings from the given property mapper, considering the property mappings for self and the
property mappings defined by the superclass.
:param encoder: `self` when bound as class method
:param property_mappings: mappings defined for the given encoder, excluding mappings defined by superclasses
:param superclasses: superclasses of the given encoder. Property mappers in later superclasses may override the
effects of property mappers defined by superclasses closer to the start of the list
:return: all of the property mappings for the given encoder
### Response:
def _get_all_property_mappings(encoder: MappingJSONEncoder, property_mappings: Iterable[JsonPropertyMapping],
superclasses: Tuple[PropertyMapper]) -> List[JsonPropertyMapping]:
"""
Gets all of the property mappings from the given property mapper, considering the property mappings for self and the
property mappings defined by the superclass.
:param encoder: `self` when bound as class method
:param property_mappings: mappings defined for the given encoder, excluding mappings defined by superclasses
:param superclasses: superclasses of the given encoder. Property mappers in later superclasses may override the
effects of property mappers defined by superclasses closer to the start of the list
:return: all of the property mappings for the given encoder
"""
mappings = []
for superclass in superclasses:
super_mappings = superclass._get_property_mappings(superclass)
mappings.extend(super_mappings)
# Add property mappings of own to end of the mappings list
mappings.extend(property_mappings)
# Note: It is very difficult to cull all property mappers that target the same properties, leaving only the ones
# from the lowest class in the hierarchy. This is because such mappers may be encoded as functions. Given that such
# overloading is unlikely to be used much and the cost of doing a mapping and then mapping again over the top of it
# will likely be small, there will be no attempt of such a cull.
return mappings |
def set_published_date(self, published_date):
"""Sets the published date.
arg: published_date (osid.calendaring.DateTime): the new
published date
raise: InvalidArgument - ``published_date`` is invalid
raise: NoAccess - ``Metadata.isReadOnly()`` is ``true``
raise: NullArgument - ``published_date`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.assessment.AssessmentOfferedForm.set_start_time_template
if self.get_published_date_metadata().is_read_only():
raise errors.NoAccess()
if not self._is_valid_date_time(
published_date,
self.get_published_date_metadata()):
raise errors.InvalidArgument()
self._my_map['publishedDate'] = published_date | Sets the published date.
arg: published_date (osid.calendaring.DateTime): the new
published date
raise: InvalidArgument - ``published_date`` is invalid
raise: NoAccess - ``Metadata.isReadOnly()`` is ``true``
raise: NullArgument - ``published_date`` is ``null``
*compliance: mandatory -- This method must be implemented.* | Below is the instruction that describes the task:
### Input:
Sets the published date.
arg: published_date (osid.calendaring.DateTime): the new
published date
raise: InvalidArgument - ``published_date`` is invalid
raise: NoAccess - ``Metadata.isReadOnly()`` is ``true``
raise: NullArgument - ``published_date`` is ``null``
*compliance: mandatory -- This method must be implemented.*
### Response:
def set_published_date(self, published_date):
"""Sets the published date.
arg: published_date (osid.calendaring.DateTime): the new
published date
raise: InvalidArgument - ``published_date`` is invalid
raise: NoAccess - ``Metadata.isReadOnly()`` is ``true``
raise: NullArgument - ``published_date`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.assessment.AssessmentOfferedForm.set_start_time_template
if self.get_published_date_metadata().is_read_only():
raise errors.NoAccess()
if not self._is_valid_date_time(
published_date,
self.get_published_date_metadata()):
raise errors.InvalidArgument()
self._my_map['publishedDate'] = published_date |
def NextToken(self):
"""Fetch the next token by trying to match any of the regexes in order."""
current_state = self.state
for token in self.tokens:
# Does the rule apply to us?
if not token.state_regex.match(current_state):
continue
# Try to match the rule
m = token.regex.match(self.buffer)
if not m:
continue
# The match consumes the data off the buffer (the handler can put it back
# if it likes)
# TODO: using joins might be more efficient here.
self.processed_buffer += self.buffer[:m.end()]
self.buffer = self.buffer[m.end():]
self.processed += m.end()
next_state = token.next_state
for action in token.actions:
# Is there a callback to handle this action?
callback = getattr(self, action, self.Default)
# Allow a callback to skip other callbacks.
try:
possible_next_state = callback(string=m.group(0), match=m)
if possible_next_state == self._CONTINUE_STATE:
continue
# Override the state from the Token
elif possible_next_state:
next_state = possible_next_state
except errors.ParseError as exception:
self.Error(exception)
# Update the next state
if next_state:
self.state = next_state
return token
# Check that we are making progress - if we are too full, we assume we are
# stuck.
self.Error('Expected {0:s}'.format(self.state))
self.processed_buffer += self.buffer[:1]
self.buffer = self.buffer[1:]
return self._ERROR_TOKEN | Fetch the next token by trying to match any of the regexes in order. | Below is the instruction that describes the task:
### Input:
Fetch the next token by trying to match any of the regexes in order.
### Response:
def NextToken(self):
"""Fetch the next token by trying to match any of the regexes in order."""
current_state = self.state
for token in self.tokens:
# Does the rule apply to us?
if not token.state_regex.match(current_state):
continue
# Try to match the rule
m = token.regex.match(self.buffer)
if not m:
continue
# The match consumes the data off the buffer (the handler can put it back
# if it likes)
# TODO: using joins might be more efficient here.
self.processed_buffer += self.buffer[:m.end()]
self.buffer = self.buffer[m.end():]
self.processed += m.end()
next_state = token.next_state
for action in token.actions:
# Is there a callback to handle this action?
callback = getattr(self, action, self.Default)
# Allow a callback to skip other callbacks.
try:
possible_next_state = callback(string=m.group(0), match=m)
if possible_next_state == self._CONTINUE_STATE:
continue
# Override the state from the Token
elif possible_next_state:
next_state = possible_next_state
except errors.ParseError as exception:
self.Error(exception)
# Update the next state
if next_state:
self.state = next_state
return token
# Check that we are making progress - if we are too full, we assume we are
# stuck.
self.Error('Expected {0:s}'.format(self.state))
self.processed_buffer += self.buffer[:1]
self.buffer = self.buffer[1:]
return self._ERROR_TOKEN |
def response_delay_text(records):
    """
    The response delay of the user within a conversation (in seconds)
    The following sequence of messages defines conversations (``I`` for an
    incoming text, ``O`` for an outgoing text, ``-`` for a one minute
    delay): ::
    I-O--I----O, we have a 60 seconds response delay and a 240 seconds response delay
    O--O---I--O, we have a 1200 seconds response delay
    I--II---I-I, we don't have a response delay. The user hasn't answered
    For this user, the distribution of response delays will be ``[60, 240, 60]``
    Notes
    -----
    See :ref:`Using bandicoot <conversations-label>` for a definition of
    conversations. A conversation is a series of text messages each sent no
    more than an hour after the previous one, so a response delay can never
    exceed one hour.
    """
    # Group the records per correspondent; conversations are per-contact.
    by_correspondent = defaultdict(list)
    for record in records:
        by_correspondent[record.correspondent_id].append(record)

    def _delays(recs):
        # An incoming message immediately followed by an outgoing one within
        # the same conversation is a response; yield the gap in seconds.
        for conv in _conversations(recs):
            for previous, current in pairwise(conv):
                if current.direction == 'out' and previous.direction == 'in':
                    yield (current.datetime - previous.datetime).total_seconds()

    delays = [delay
              for recs in by_correspondent.values()
              for delay in _delays(recs)
              if delay > 0]
    return summary_stats(delays)
The following sequence of messages defines conversations (``I`` for an
incoming text, ``O`` for an outgoing text, ``-`` for a one minute
delay): ::
I-O--I----O, we have a 60 seconds response delay and a 240 seconds response delay
O--O---I--O, we have a 1200 seconds response delay
I--II---I-I, we don't have a response delay. The user hasn't answered
For this user, the distribution of response delays will be ``[60, 240, 60]``
Notes
-----
See :ref:`Using bandicoot <conversations-label>` for a definition of
conversations. Conversation are defined to be a series of text messages each
sent no more than an hour after the previous. The response delay can thus
not be greater than one hour. | Below is the the instruction that describes the task:
### Input:
The response delay of the user within a conversation (in seconds)
The following sequence of messages defines conversations (``I`` for an
incoming text, ``O`` for an outgoing text, ``-`` for a one minute
delay): ::
I-O--I----O, we have a 60 seconds response delay and a 240 seconds response delay
O--O---I--O, we have a 1200 seconds response delay
I--II---I-I, we don't have a response delay. The user hasn't answered
For this user, the distribution of response delays will be ``[60, 240, 60]``
Notes
-----
See :ref:`Using bandicoot <conversations-label>` for a definition of
conversations. Conversation are defined to be a series of text messages each
sent no more than an hour after the previous. The response delay can thus
not be greater than one hour.
### Response:
def response_delay_text(records):
"""
The response delay of the user within a conversation (in seconds)
The following sequence of messages defines conversations (``I`` for an
incoming text, ``O`` for an outgoing text, ``-`` for a one minute
delay): ::
I-O--I----O, we have a 60 seconds response delay and a 240 seconds response delay
O--O---I--O, we have a 1200 seconds response delay
I--II---I-I, we don't have a response delay. The user hasn't answered
For this user, the distribution of response delays will be ``[60, 240, 60]``
Notes
-----
See :ref:`Using bandicoot <conversations-label>` for a definition of
conversations. Conversation are defined to be a series of text messages each
sent no more than an hour after the previous. The response delay can thus
not be greater than one hour.
"""
interactions = defaultdict(list)
for r in records:
interactions[r.correspondent_id].append(r)
def _response_delay(grouped):
ts = ((b.datetime - a.datetime).total_seconds()
for conv in _conversations(grouped)
for a, b in pairwise(conv)
if b.direction == 'out' and a.direction == 'in')
return ts
delays = [r for i in interactions.values() for r in _response_delay(i)
if r > 0]
return summary_stats(delays) |
def _convert_flat_to_nest(self, params):
    """
    Convert a structure in the form of::
    {'foo.1.bar': 'value',
    'foo.2.baz': 'value'}
    to::
    {'foo': {'1': {'bar': 'value'},
    '2': {'baz': 'value'}}}
    This is intended for use both during parsing of HTTP arguments like
    'foo.1.bar=value' and when dealing with schema declarations that look
    like 'foo.n.bar'.
    This is the inverse of L{_convert_nest_to_flat}.
    """
    result = {}
    # NOTE: .iteritems() means this code targets Python 2.
    for k, v in params.iteritems():
        # Walk (and lazily build) the nested dicts one dotted segment at a
        # time, starting from the result root.
        last = result
        segments = k.split('.')
        for index, item in enumerate(segments):
            if index == len(segments) - 1:
                # Last segment: store the actual value.
                newd = v
            else:
                # Intermediate segment: we need a nested dict here.
                newd = {}
            if not isinstance(last, dict):
                # A previous key already stored a scalar where this key
                # needs a nested dict (e.g. 'foo' given, then 'foo.bar').
                raise InconsistentParameterError(k)
            if type(last.get(item)) is dict and type(newd) is not dict:
                # The inverse clash: a nested dict already exists where
                # this key wants to store a scalar value.
                raise InconsistentParameterError(k)
            # setdefault keeps whatever is already there, so keys sharing a
            # prefix reuse the same nested dict.
            last = last.setdefault(item, newd)
    return result | Convert a structure in the form of::
{'foo.1.bar': 'value',
'foo.2.baz': 'value'}
to::
{'foo': {'1': {'bar': 'value'},
'2': {'baz': 'value'}}}
This is intended for use both during parsing of HTTP arguments like
'foo.1.bar=value' and when dealing with schema declarations that look
like 'foo.n.bar'.
This is the inverse of L{_convert_nest_to_flat}. | Below is the the instruction that describes the task:
### Input:
Convert a structure in the form of::
{'foo.1.bar': 'value',
'foo.2.baz': 'value'}
to::
{'foo': {'1': {'bar': 'value'},
'2': {'baz': 'value'}}}
This is intended for use both during parsing of HTTP arguments like
'foo.1.bar=value' and when dealing with schema declarations that look
like 'foo.n.bar'.
This is the inverse of L{_convert_nest_to_flat}.
### Response:
def _convert_flat_to_nest(self, params):
"""
Convert a structure in the form of::
{'foo.1.bar': 'value',
'foo.2.baz': 'value'}
to::
{'foo': {'1': {'bar': 'value'},
'2': {'baz': 'value'}}}
This is intended for use both during parsing of HTTP arguments like
'foo.1.bar=value' and when dealing with schema declarations that look
like 'foo.n.bar'.
This is the inverse of L{_convert_nest_to_flat}.
"""
result = {}
for k, v in params.iteritems():
last = result
segments = k.split('.')
for index, item in enumerate(segments):
if index == len(segments) - 1:
newd = v
else:
newd = {}
if not isinstance(last, dict):
raise InconsistentParameterError(k)
if type(last.get(item)) is dict and type(newd) is not dict:
raise InconsistentParameterError(k)
last = last.setdefault(item, newd)
return result |
def put_multiple(self, task_args_kwargs_list):
    """put a list of tasks and their arguments
    This method can be used to put multiple tasks at once. Calling
    this method once with multiple tasks can be much faster than
    calling `put()` multiple times.
    Parameters
    ----------
    task_args_kwargs_list : list
        Each item is either a dict with the key ``task`` (required) and
        the optional keys ``args`` and ``kwargs``, or a bare task object,
        which is packaged with empty ``args``/``kwargs``.
    Returns
    -------
    list
        A list of task IDs, or ``None`` if the drop box is not open.
    """
    if not self.isopen:
        # Nothing can be submitted while the drop box is closed; warn and
        # return None rather than raising.
        logger = logging.getLogger(__name__)
        logger.warning('the drop box is not open')
        return
    packages = [ ]
    for t in task_args_kwargs_list:
        try:
            task = t['task']
            args = t.get('args', ())
            kwargs = t.get('kwargs', {})
            package = TaskPackage(task=task, args=args, kwargs=kwargs)
        except TypeError:
            # t is not a dict (t['task'] raised TypeError): treat the item
            # itself as the task with no arguments.
            package = TaskPackage(task=t, args=(), kwargs={})
        packages.append(package)
    return self.dropbox.put_multiple(packages) | put a list of tasks and their arguments
This method can be used to put multiple tasks at once. Calling
this method once with multiple tasks can be much faster than
calling `put()` multiple times.
Parameters
----------
task_args_kwargs_list : list
A list of lists with three items that can be parameters of
`put()`, i.e., `task`, `args`, `kwargs`.
Returns
-------
list
A list of task IDs. | Below is the the instruction that describes the task:
### Input:
put a list of tasks and their arguments
This method can be used to put multiple tasks at once. Calling
this method once with multiple tasks can be much faster than
calling `put()` multiple times.
Parameters
----------
task_args_kwargs_list : list
A list of lists with three items that can be parameters of
`put()`, i.e., `task`, `args`, `kwargs`.
Returns
-------
list
A list of task IDs.
### Response:
def put_multiple(self, task_args_kwargs_list):
"""put a list of tasks and their arguments
This method can be used to put multiple tasks at once. Calling
this method once with multiple tasks can be much faster than
calling `put()` multiple times.
Parameters
----------
task_args_kwargs_list : list
A list of lists with three items that can be parameters of
`put()`, i.e., `task`, `args`, `kwargs`.
Returns
-------
list
A list of task IDs.
"""
if not self.isopen:
logger = logging.getLogger(__name__)
logger.warning('the drop box is not open')
return
packages = [ ]
for t in task_args_kwargs_list:
try:
task = t['task']
args = t.get('args', ())
kwargs = t.get('kwargs', {})
package = TaskPackage(task=task, args=args, kwargs=kwargs)
except TypeError:
package = TaskPackage(task=t, args=(), kwargs={})
packages.append(package)
return self.dropbox.put_multiple(packages) |
def _refresh_db_conditional(saltenv, **kwargs):
    '''
    Internal use only in this module, has a different set of defaults and
    returns True or False. And supports checking the age of the existing
    generated metadata db, as well as ensuring the metadata db exists to
    begin with.
    Args:
        saltenv (str): Salt environment
    Kwargs:
        force (bool):
            Force a refresh if the minimum age has been reached. Default is
            False.
        failhard (bool):
            If ``True``, an error will be raised if any repo SLS files
            failed to process.
    Returns:
        bool: True if fetched or the cache is up to date, False to indicate
        an issue
    :codeauthor: Damon Atkins <https://github.com/damon-atkins>
    '''
    force = salt.utils.data.is_true(kwargs.pop('force', False))
    failhard = salt.utils.data.is_true(kwargs.pop('failhard', False))
    expired_max = __opts__['winrepo_cache_expire_max']
    expired_min = __opts__['winrepo_cache_expire_min']
    repo_details = _get_repo_details(saltenv)
    # Honor the minimum cache age: a forced refresh is ignored while the
    # metadata is younger than winrepo_cache_expire_min.
    if force and expired_min > 0 and repo_details.winrepo_age < expired_min:
        log.info(
            'Refresh skipped, age of winrepo metadata in seconds (%s) is less '
            'than winrepo_cache_expire_min (%s)',
            repo_details.winrepo_age, expired_min
        )
        force = False
    # winrepo_age is -1 if the repo db does not exist yet
    refresh = (force
               or repo_details.winrepo_age == -1
               or repo_details.winrepo_age > expired_max)
    if not refresh:
        log.debug(
            'Using existing pkg metadata db for saltenv \'%s\' (age is %s)',
            saltenv, datetime.timedelta(seconds=repo_details.winrepo_age)
        )
        return True
    if repo_details.winrepo_age == -1:
        # no repo meta db
        log.debug(
            'No winrepo.p cache file for saltenv \'%s\', creating one now',
            saltenv
        )
    results = refresh_db(saltenv=saltenv, verbose=False, failhard=failhard)
    try:
        # Return True if there were no failed winrepo SLS files, and False if
        # failures were reported.
        return not bool(results.get('failed', 0))
    except AttributeError:
        # refresh_db returned something without .get(); treat as a failure.
        return False
returns True or False. And supports checking the age of the existing
generated metadata db, as well as ensure metadata db exists to begin with
Args:
saltenv (str): Salt environment
Kwargs:
force (bool):
Force a refresh if the minimum age has been reached. Default is
False.
failhard (bool):
If ``True``, an error will be raised if any repo SLS files failed to
process.
Returns:
bool: True Fetched or Cache uptodate, False to indicate an issue
:codeauthor: Damon Atkins <https://github.com/damon-atkins> | Below is the the instruction that describes the task:
### Input:
Internal use only in this module, has a different set of defaults and
returns True or False. And supports checking the age of the existing
generated metadata db, as well as ensure metadata db exists to begin with
Args:
saltenv (str): Salt environment
Kwargs:
force (bool):
Force a refresh if the minimum age has been reached. Default is
False.
failhard (bool):
If ``True``, an error will be raised if any repo SLS files failed to
process.
Returns:
bool: True Fetched or Cache uptodate, False to indicate an issue
:codeauthor: Damon Atkins <https://github.com/damon-atkins>
### Response:
def _refresh_db_conditional(saltenv, **kwargs):
'''
Internal use only in this module, has a different set of defaults and
returns True or False. And supports checking the age of the existing
generated metadata db, as well as ensure metadata db exists to begin with
Args:
saltenv (str): Salt environment
Kwargs:
force (bool):
Force a refresh if the minimum age has been reached. Default is
False.
failhard (bool):
If ``True``, an error will be raised if any repo SLS files failed to
process.
Returns:
bool: True Fetched or Cache uptodate, False to indicate an issue
:codeauthor: Damon Atkins <https://github.com/damon-atkins>
'''
force = salt.utils.data.is_true(kwargs.pop('force', False))
failhard = salt.utils.data.is_true(kwargs.pop('failhard', False))
expired_max = __opts__['winrepo_cache_expire_max']
expired_min = __opts__['winrepo_cache_expire_min']
repo_details = _get_repo_details(saltenv)
# Skip force if age less than minimum age
if force and expired_min > 0 and repo_details.winrepo_age < expired_min:
log.info(
'Refresh skipped, age of winrepo metadata in seconds (%s) is less '
'than winrepo_cache_expire_min (%s)',
repo_details.winrepo_age, expired_min
)
force = False
# winrepo_age is -1 if repo db does not exist
refresh = True if force \
or repo_details.winrepo_age == -1 \
or repo_details.winrepo_age > expired_max \
else False
if not refresh:
log.debug(
'Using existing pkg metadata db for saltenv \'%s\' (age is %s)',
saltenv, datetime.timedelta(seconds=repo_details.winrepo_age)
)
return True
if repo_details.winrepo_age == -1:
# no repo meta db
log.debug(
'No winrepo.p cache file for saltenv \'%s\', creating one now',
saltenv
)
results = refresh_db(saltenv=saltenv, verbose=False, failhard=failhard)
try:
# Return True if there were no failed winrepo SLS files, and False if
# failures were reported.
return not bool(results.get('failed', 0))
except AttributeError:
return False |
def get_usage(self):
    """Parse /proc/stat timings and compute usage per CPU.

    Busy time is the total minus the idle and iowait fields (indices 3
    and 4); the fields are USER_HZ counts, see ``man 5 proc``.
    """
    usage = {}
    for name, fields in self.get_cpu_timings().items():
        total = sum(fields)
        busy = total - sum(fields[3:5])
        usage['usage_' + name] = self.calculate_usage(name, total, busy)
    # keep the legacy aggregate key for backward compatibility
    usage['usage'] = usage['usage_cpu']
    return usage
(more specific USER_HZ see man 5 proc for further informations ) | Below is the the instruction that describes the task:
### Input:
parses /proc/stat and calculates total and busy time
(more specifically USER_HZ; see man 5 proc for further information)
### Response:
def get_usage(self):
"""
parses /proc/stat and calcualtes total and busy time
(more specific USER_HZ see man 5 proc for further informations )
"""
usage = {}
for cpu, timings in self.get_cpu_timings().items():
cpu_total = sum(timings)
del timings[3:5]
cpu_busy = sum(timings)
cpu_usage = self.calculate_usage(cpu, cpu_total, cpu_busy)
usage['usage_' + cpu] = cpu_usage
# for backward compatibility
usage['usage'] = usage['usage_cpu']
return usage |
def sort(self, key, start=None, num=None, by=None, get=None,
         desc=False, alpha=False, store=None, groups=False):
    '''Sort and return the list, set or sorted set at ``key``.
    ``start`` and ``num`` allow for paging through the sorted data
    ``by`` allows using an external key to weight and sort the items.
    Use an "*" to indicate where in the key the item value is located
    ``get`` allows for returning items from external keys rather than the
    sorted data itself. Use an "*" to indicate where in the key
    the item value is located
    ``desc`` allows for reversing the sort
    ``alpha`` allows for sorting lexicographically rather than numerically
    ``store`` allows for storing the result of the sort into
    the key ``store``
    ``groups`` if set to True and if ``get`` contains at least two
    elements, sort will return a list of tuples, each containing the
    values fetched from the arguments to ``get``.
    '''
    # The LIMIT clause needs both an offset and a count.
    if ((start is not None and num is None) or
        (num is not None and start is None)):
        raise CommandError("``start`` and ``num`` must both be specified")
    # Assemble the SORT command arguments in the order redis expects:
    # key [BY ...] [LIMIT ...] [GET ...]* [DESC] [ALPHA] [STORE ...]
    pieces = [key]
    if by is not None:
        pieces.append('BY')
        pieces.append(by)
    if start is not None and num is not None:
        pieces.append('LIMIT')
        pieces.append(start)
        pieces.append(num)
    if get is not None:
        # If get is a string assume we want to get a single value.
        # Otherwise assume it's an iterable and we want to get multiple
        # values. We can't just iterate blindly because strings are
        # iterable.
        if isinstance(get, str):
            pieces.append('GET')
            pieces.append(get)
        else:
            for g in get:
                pieces.append('GET')
                pieces.append(g)
    if desc:
        pieces.append('DESC')
    if alpha:
        pieces.append('ALPHA')
    if store is not None:
        pieces.append('STORE')
        pieces.append(store)
    if groups:
        if not get or isinstance(get, str) or len(get) < 2:
            raise CommandError('when using "groups" the "get" argument '
                               'must be specified and contain at least '
                               'two keys')
    # 'groups' tells the response parser to chunk the flat reply into
    # len(get)-sized tuples.
    options = {'groups': len(get) if groups else None}
    return self.execute_command('SORT', *pieces, **options) | Sort and return the list, set or sorted set at ``key``.
``start`` and ``num`` allow for paging through the sorted data
``by`` allows using an external key to weight and sort the items.
Use an "*" to indicate where in the key the item value is located
``get`` allows for returning items from external keys rather than the
sorted data itself. Use an "*" to indicate where int he key
the item value is located
``desc`` allows for reversing the sort
``alpha`` allows for sorting lexicographically rather than numerically
``store`` allows for storing the result of the sort into
the key ``store``
``groups`` if set to True and if ``get`` contains at least two
elements, sort will return a list of tuples, each containing the
values fetched from the arguments to ``get``. | Below is the the instruction that describes the task:
### Input:
Sort and return the list, set or sorted set at ``key``.
``start`` and ``num`` allow for paging through the sorted data
``by`` allows using an external key to weight and sort the items.
Use an "*" to indicate where in the key the item value is located
``get`` allows for returning items from external keys rather than the
sorted data itself. Use an "*" to indicate where in the key
the item value is located
``desc`` allows for reversing the sort
``alpha`` allows for sorting lexicographically rather than numerically
``store`` allows for storing the result of the sort into
the key ``store``
``groups`` if set to True and if ``get`` contains at least two
elements, sort will return a list of tuples, each containing the
values fetched from the arguments to ``get``.
### Response:
def sort(self, key, start=None, num=None, by=None, get=None,
desc=False, alpha=False, store=None, groups=False):
'''Sort and return the list, set or sorted set at ``key``.
``start`` and ``num`` allow for paging through the sorted data
``by`` allows using an external key to weight and sort the items.
Use an "*" to indicate where in the key the item value is located
``get`` allows for returning items from external keys rather than the
sorted data itself. Use an "*" to indicate where int he key
the item value is located
``desc`` allows for reversing the sort
``alpha`` allows for sorting lexicographically rather than numerically
``store`` allows for storing the result of the sort into
the key ``store``
``groups`` if set to True and if ``get`` contains at least two
elements, sort will return a list of tuples, each containing the
values fetched from the arguments to ``get``.
'''
if ((start is not None and num is None) or
(num is not None and start is None)):
raise CommandError("``start`` and ``num`` must both be specified")
pieces = [key]
if by is not None:
pieces.append('BY')
pieces.append(by)
if start is not None and num is not None:
pieces.append('LIMIT')
pieces.append(start)
pieces.append(num)
if get is not None:
# If get is a string assume we want to get a single value.
# Otherwise assume it's an interable and we want to get multiple
# values. We can't just iterate blindly because strings are
# iterable.
if isinstance(get, str):
pieces.append('GET')
pieces.append(get)
else:
for g in get:
pieces.append('GET')
pieces.append(g)
if desc:
pieces.append('DESC')
if alpha:
pieces.append('ALPHA')
if store is not None:
pieces.append('STORE')
pieces.append(store)
if groups:
if not get or isinstance(get, str) or len(get) < 2:
raise CommandError('when using "groups" the "get" argument '
'must be specified and contain at least '
'two keys')
options = {'groups': len(get) if groups else None}
return self.execute_command('SORT', *pieces, **options) |
def bip32_serialize(rawtuple):
    """
    Serialize a BIP32 extended key tuple to its Base58Check string form.
    Derived from code from pybitcointools (https://github.com/vbuterin/pybitcointools)
    by Vitalik Buterin
    """
    # rawtuple fields: (version bytes, depth, parent fingerprint,
    # child index, chain code, key material).
    vbytes, depth, fingerprint, i, chaincode, key = rawtuple
    # Child index packed into 4 bytes (presumably big-endian per BIP32 --
    # confirm against the project's `encode` helper).
    i = encode(i, 256, 4)
    # Round-trip the chain code through an int to a fixed 32-byte value.
    chaincode = encode(hash_to_int(chaincode), 256, 32)
    # Private keys get a leading zero pad byte and drop their trailing byte;
    # public keys are used as-is. NOTE(review): assumes a private `key`
    # carries one extra trailing byte -- verify against callers.
    keydata = b'\x00' +key[:-1] if vbytes in PRIVATE else key
    bindata = vbytes + from_int_to_byte(depth % 256) + fingerprint + i + chaincode + keydata
    # Append a 4-byte double-SHA256 checksum, then Base58-encode (Base58Check).
    return changebase(bindata + bin_dbl_sha256(bindata)[:4], 256, 58) | Derived from code from pybitcointools (https://github.com/vbuterin/pybitcointools)
by Vitalik Buterin | Below is the the instruction that describes the task:
### Input:
Derived from code from pybitcointools (https://github.com/vbuterin/pybitcointools)
by Vitalik Buterin
### Response:
def bip32_serialize(rawtuple):
"""
Derived from code from pybitcointools (https://github.com/vbuterin/pybitcointools)
by Vitalik Buterin
"""
vbytes, depth, fingerprint, i, chaincode, key = rawtuple
i = encode(i, 256, 4)
chaincode = encode(hash_to_int(chaincode), 256, 32)
keydata = b'\x00' +key[:-1] if vbytes in PRIVATE else key
bindata = vbytes + from_int_to_byte(depth % 256) + fingerprint + i + chaincode + keydata
return changebase(bindata + bin_dbl_sha256(bindata)[:4], 256, 58) |
def level(self, level, time=0):
    """(Helper) Set light to the requested brightness level.

    Levels <= 0 turn the light off, levels >= 98 turn it fully on, and
    anything in between issues a dim command with the given ramp time.
    """
    if level <= 0:
        message = pf_encode(self._index)
    elif level >= 98:
        message = pn_encode(self._index)
    else:
        message = pc_encode(self._index, 9, level, time)
    self._elk.send(message)
### Input:
(Helper) Set light to specified level
### Response:
def level(self, level, time=0):
"""(Helper) Set light to specified level"""
if level <= 0:
self._elk.send(pf_encode(self._index))
elif level >= 98:
self._elk.send(pn_encode(self._index))
else:
self._elk.send(pc_encode(self._index, 9, level, time)) |
def full_import(handler, host=None, core_name=None, options=None, extra=None):
    '''
    MASTER ONLY
    Submits an import command to the specified handler using specified options.
    This command can only be run if the minion is configured with
    solr.type=master
    handler : str
        The name of the data import handler.
    host : str (None)
        The solr host to query. __opts__['host'] is default.
    core : str (None)
        The core the handler belongs to.
    options : dict (__opts__)
        A list of options such as clean, optimize commit, verbose, and
        pause_replication. leave blank to use __opts__ defaults. options will
        be merged with __opts__
    extra : dict ([])
        Extra name value pairs to pass to the handler. e.g. ["name=value"]
    Return : dict<str,obj>::
        {'success':boolean, 'data':dict, 'errors':list, 'warnings':list}
    CLI Example:
    .. code-block:: bash
        salt '*' solr.full_import dataimport None music {'clean':True}
    '''
    options = {} if options is None else options
    extra = [] if extra is None else extra
    if not _is_master():
        err = ['solr.full_import can only be called on "master" minions']
        return _get_return_dict(False, errors=err)
    if _get_none_or_value(core_name) is None and _check_for_cores():
        err = ['No core specified when minion is configured as "multi-core".']
        # Pass the error list via the ``errors`` keyword for consistency with
        # every other call site (positionally it would land in ``data``).
        return _get_return_dict(False, errors=err)
    resp = _pre_index_check(handler, host, core_name)
    if not resp['success']:
        return resp
    options = _merge_options(options)
    if options['clean']:
        # A clean import rebuilds the index, so pause replication first to
        # keep slaves from pulling a partially built index.
        resp = set_replication_enabled(False, host=host, core_name=core_name)
        if not resp['success']:
            errors = ['Failed to set the replication status on the master.']
            return _get_return_dict(False, errors=errors)
    params = ['command=full-import']
    for key, val in six.iteritems(options):
        params.append('&{0}={1}'.format(key, val))
    url = _format_url(handler, host=host, core_name=core_name,
                      extra=params + extra)
    return _http_request(url)
Submits an import command to the specified handler using specified options.
This command can only be run if the minion is configured with
solr.type=master
handler : str
The name of the data import handler.
host : str (None)
The solr host to query. __opts__['host'] is default.
core : str (None)
The core the handler belongs to.
options : dict (__opts__)
A list of options such as clean, optimize commit, verbose, and
pause_replication. leave blank to use __opts__ defaults. options will
be merged with __opts__
extra : dict ([])
Extra name value pairs to pass to the handler. e.g. ["name=value"]
Return : dict<str,obj>::
{'success':boolean, 'data':dict, 'errors':list, 'warnings':list}
CLI Example:
.. code-block:: bash
salt '*' solr.full_import dataimport None music {'clean':True} | Below is the the instruction that describes the task:
### Input:
MASTER ONLY
Submits an import command to the specified handler using specified options.
This command can only be run if the minion is configured with
solr.type=master
handler : str
The name of the data import handler.
host : str (None)
The solr host to query. __opts__['host'] is default.
core : str (None)
The core the handler belongs to.
options : dict (__opts__)
A list of options such as clean, optimize commit, verbose, and
pause_replication. leave blank to use __opts__ defaults. options will
be merged with __opts__
extra : dict ([])
Extra name value pairs to pass to the handler. e.g. ["name=value"]
Return : dict<str,obj>::
{'success':boolean, 'data':dict, 'errors':list, 'warnings':list}
CLI Example:
.. code-block:: bash
salt '*' solr.full_import dataimport None music {'clean':True}
### Response:
def full_import(handler, host=None, core_name=None, options=None, extra=None):
'''
MASTER ONLY
Submits an import command to the specified handler using specified options.
This command can only be run if the minion is configured with
solr.type=master
handler : str
The name of the data import handler.
host : str (None)
The solr host to query. __opts__['host'] is default.
core : str (None)
The core the handler belongs to.
options : dict (__opts__)
A list of options such as clean, optimize commit, verbose, and
pause_replication. leave blank to use __opts__ defaults. options will
be merged with __opts__
extra : dict ([])
Extra name value pairs to pass to the handler. e.g. ["name=value"]
Return : dict<str,obj>::
{'success':boolean, 'data':dict, 'errors':list, 'warnings':list}
CLI Example:
.. code-block:: bash
salt '*' solr.full_import dataimport None music {'clean':True}
'''
options = {} if options is None else options
extra = [] if extra is None else extra
if not _is_master():
err = ['solr.full_import can only be called on "master" minions']
return _get_return_dict(False, errors=err)
if _get_none_or_value(core_name) is None and _check_for_cores():
err = ['No core specified when minion is configured as "multi-core".']
return _get_return_dict(False, err)
resp = _pre_index_check(handler, host, core_name)
if not resp['success']:
return resp
options = _merge_options(options)
if options['clean']:
resp = set_replication_enabled(False, host=host, core_name=core_name)
if not resp['success']:
errors = ['Failed to set the replication status on the master.']
return _get_return_dict(False, errors=errors)
params = ['command=full-import']
for key, val in six.iteritems(options):
params.append('&{0}={1}'.format(key, val))
url = _format_url(handler, host=host, core_name=core_name,
extra=params + extra)
return _http_request(url) |
def create_token(user):
    """Build and return a JWT for *user*.

    When refresh is enabled, record the original issued-at timestamp
    (``orig_iat``) so the token can later be refreshed.
    """
    payload = jwt_payload_handler(user)
    if api_settings.JWT_ALLOW_REFRESH:
        payload['orig_iat'] = timegm(datetime.utcnow().utctimetuple())
    return jwt_encode_handler(payload)
### Input:
Create token.
### Response:
def create_token(user):
"""
Create token.
"""
payload = jwt_payload_handler(user)
if api_settings.JWT_ALLOW_REFRESH:
payload['orig_iat'] = timegm(
datetime.utcnow().utctimetuple()
)
# Return values
token = jwt_encode_handler(payload)
return token |
def after_request(self, region, endpoint_name, method_name, url, response):
    """
    Called after a response is received and before it is returned to the user.
    :param string region: the region of this request
    :param string endpoint_name: the name of the endpoint that was requested
    :param string method_name: the name of the method that was requested
    :param url: The url that was requested
    :param response: the response received. This is a response from the Requests library
    """
    # Give every registered limiter a chance to update its counters from
    # this response before the caller sees it.
    limiter_args = (region, endpoint_name, method_name, response)
    for rate_limiter in self._limiters:
        rate_limiter.update_limiter(*limiter_args)
    return response
:param string region: the region of this request
:param string endpoint_name: the name of the endpoint that was requested
:param string method_name: the name of the method that was requested
:param url: The url that was requested
:param response: the response received. This is a response from the Requests library | Below is the the instruction that describes the task:
### Input:
Called after a response is received and before it is returned to the user.
:param string region: the region of this request
:param string endpoint_name: the name of the endpoint that was requested
:param string method_name: the name of the method that was requested
:param url: The url that was requested
:param response: the response received. This is a response from the Requests library
### Response:
def after_request(self, region, endpoint_name, method_name, url, response):
"""
Called after a response is received and before it is returned to the user.
:param string region: the region of this request
:param string endpoint_name: the name of the endpoint that was requested
:param string method_name: the name of the method that was requested
:param url: The url that was requested
:param response: the response received. This is a response from the Requests library
"""
for limiter in self._limiters:
limiter.update_limiter(region, endpoint_name, method_name, response)
return response |
def cache(self):
    """Lazily fetch and memoize the Graph API representation of this resource."""
    cached = self._cache
    if not cached:
        cached = self.graph.get('%s' % self.id)
        self._cache = cached
    return cached
### Input:
Query or return the Graph API representation of this resource.
### Response:
def cache(self):
"""Query or return the Graph API representation of this resource."""
if not self._cache:
self._cache = self.graph.get('%s' % self.id)
return self._cache |
def site_prefixed(path):
    """*Mockup*: prepend the application prefix to *path* when required."""
    path = '' if path is None else path
    use_prefix = settings.DEBUG and hasattr(settings, 'APP_NAME')
    prefix = '/%s' % settings.APP_NAME if use_prefix else ''
    if path:
        # A concrete path was given (rather than a bare prefix meant to sit
        # in front of static urls, i.e. {{'pricing'|site_prefixed}} instead
        # of {{''|site_prefixed}}{{ASSET_URL}}), so join with a separator
        # and drop at most one leading slash from the path.
        prefix += '/'
        if path.startswith('/'):
            path = path[1:]
    return urljoin(prefix, path)
### Input:
*Mockup*: adds the path prefix when required.
### Response:
def site_prefixed(path):
"""
*Mockup*: adds the path prefix when required.
"""
if path is None:
path = ''
if settings.DEBUG and hasattr(settings, 'APP_NAME'):
path_prefix = '/%s' % settings.APP_NAME
else:
path_prefix = ''
if path:
# We have an actual path instead of generating a prefix that will
# be placed in front of static urls (ie. {{'pricing'|site_prefixed}}
# instead of {{''|site_prefixed}}{{ASSET_URL}}).
path_prefix += '/'
if path.startswith('/'):
path = path[1:]
return urljoin(path_prefix, path) |
def load(self, callables_fname):
r"""
Load traced modules information from a `JSON <http://www.json.org/>`_ file.
The loaded module information is merged with any existing module information
:param callables_fname: File name
:type callables_fname: :ref:`FileNameExists`
:raises:
* OSError (File *[fname]* could not be found)
* RuntimeError (Argument \\`callables_fname\\` is not valid)
"""
# Validate file name
_validate_fname(callables_fname)
if not os.path.exists(callables_fname):
raise OSError("File {0} could not be found".format(callables_fname))
with open(callables_fname, "r") as fobj:
fdict = json.load(fobj)
if sys.hexversion < 0x03000000: # pragma: no cover
fdict = _unicode_to_ascii(fdict)
self._callables_db.update(fdict["_callables_db"])
# Reverse the tuple-to-string conversion that the save method
# does due to the fact that JSON keys need to be strings and the
# keys of the reverse callable dictionary are tuples where the first
# item is a file name and the second item is the starting line of the
# callable within that file (dictionary value)
rdict = {}
for key, value in fdict["_reverse_callables_db"].items():
tokens = key[1:-1].split(",")
key = tokens[0].strip()[1:-1]
if platform.system().lower() == "windows": # pragma: no cover
while True:
tmp = key
key = key.replace("\\\\", "\\")
if tmp == key:
break
rdict[(key, int(tokens[1]))] = value
self._reverse_callables_db.update(rdict)
self._modules_dict.update(fdict["_modules_dict"])
self._fnames.update(fdict["_fnames"])
self._module_names.extend(fdict["_module_names"])
self._class_names.extend(fdict["_class_names"])
self._module_names = sorted(list(set(self._module_names)))
self._class_names = sorted(list(set(self._class_names))) | r"""
Load traced modules information from a `JSON <http://www.json.org/>`_ file.
The loaded module information is merged with any existing module information
:param callables_fname: File name
:type callables_fname: :ref:`FileNameExists`
:raises:
* OSError (File *[fname]* could not be found)
* RuntimeError (Argument \\`callables_fname\\` is not valid) | Below is the the instruction that describes the task:
### Input:
r"""
Load traced modules information from a `JSON <http://www.json.org/>`_ file.
The loaded module information is merged with any existing module information
:param callables_fname: File name
:type callables_fname: :ref:`FileNameExists`
:raises:
* OSError (File *[fname]* could not be found)
* RuntimeError (Argument \\`callables_fname\\` is not valid)
### Response:
def load(self, callables_fname):
r"""
Load traced modules information from a `JSON <http://www.json.org/>`_ file.
The loaded module information is merged with any existing module information
:param callables_fname: File name
:type callables_fname: :ref:`FileNameExists`
:raises:
* OSError (File *[fname]* could not be found)
* RuntimeError (Argument \\`callables_fname\\` is not valid)
"""
# Validate file name
_validate_fname(callables_fname)
if not os.path.exists(callables_fname):
raise OSError("File {0} could not be found".format(callables_fname))
with open(callables_fname, "r") as fobj:
fdict = json.load(fobj)
if sys.hexversion < 0x03000000: # pragma: no cover
fdict = _unicode_to_ascii(fdict)
self._callables_db.update(fdict["_callables_db"])
# Reverse the tuple-to-string conversion that the save method
# does due to the fact that JSON keys need to be strings and the
# keys of the reverse callable dictionary are tuples where the first
# item is a file name and the second item is the starting line of the
# callable within that file (dictionary value)
rdict = {}
for key, value in fdict["_reverse_callables_db"].items():
tokens = key[1:-1].split(",")
key = tokens[0].strip()[1:-1]
if platform.system().lower() == "windows": # pragma: no cover
while True:
tmp = key
key = key.replace("\\\\", "\\")
if tmp == key:
break
rdict[(key, int(tokens[1]))] = value
self._reverse_callables_db.update(rdict)
self._modules_dict.update(fdict["_modules_dict"])
self._fnames.update(fdict["_fnames"])
self._module_names.extend(fdict["_module_names"])
self._class_names.extend(fdict["_class_names"])
self._module_names = sorted(list(set(self._module_names)))
self._class_names = sorted(list(set(self._class_names))) |
def _guess_x_kmeans(self, y_desired, **kwargs):
"""Provide an initial guesses for a probable x from y"""
k = kwargs.get('k', self.k)
_, indexes = self.fmodel.dataset.nn_y(y_desired, k=k)
X = np.array([self.fmodel.get_x(i) for i in indexes])
if np.sum(X) == 0.:
centroids = [self.fmodel.get_x(indexes[0])]
else:
try:
centroids, _ = kmeans2(X, 2)
except np.linalg.linalg.LinAlgError:
centroids = [self.fmodel.get_x(indexes[0])]
return centroids | Provide an initial guesses for a probable x from y | Below is the the instruction that describes the task:
### Input:
Provide an initial guesses for a probable x from y
### Response:
def _guess_x_kmeans(self, y_desired, **kwargs):
"""Provide an initial guesses for a probable x from y"""
k = kwargs.get('k', self.k)
_, indexes = self.fmodel.dataset.nn_y(y_desired, k=k)
X = np.array([self.fmodel.get_x(i) for i in indexes])
if np.sum(X) == 0.:
centroids = [self.fmodel.get_x(indexes[0])]
else:
try:
centroids, _ = kmeans2(X, 2)
except np.linalg.linalg.LinAlgError:
centroids = [self.fmodel.get_x(indexes[0])]
return centroids |
def split_bottom_up(lower, upper, __fval=None, **fval):
"""This call un-links an association that was made using bind_bottom_up.
Have a look at help(bind_bottom_up)
"""
if __fval is not None:
fval.update(__fval)
def do_filter(params, cls):
params_is_invalid = any(
k not in params or params[k] != v for k, v in six.iteritems(fval)
)
return cls != upper or params_is_invalid
lower.payload_guess = [x for x in lower.payload_guess if do_filter(*x)] | This call un-links an association that was made using bind_bottom_up.
Have a look at help(bind_bottom_up) | Below is the the instruction that describes the task:
### Input:
This call un-links an association that was made using bind_bottom_up.
Have a look at help(bind_bottom_up)
### Response:
def split_bottom_up(lower, upper, __fval=None, **fval):
"""This call un-links an association that was made using bind_bottom_up.
Have a look at help(bind_bottom_up)
"""
if __fval is not None:
fval.update(__fval)
def do_filter(params, cls):
params_is_invalid = any(
k not in params or params[k] != v for k, v in six.iteritems(fval)
)
return cls != upper or params_is_invalid
lower.payload_guess = [x for x in lower.payload_guess if do_filter(*x)] |
def _check_acl(self, acl_no, network, netmask):
"""Check a ACL config exists in the running config.
:param acl_no: access control list (ACL) number
:param network: network which this ACL permits
:param netmask: netmask of the network
:return:
"""
exp_cfg_lines = ['ip access-list standard ' + str(acl_no),
' permit ' + str(network) + ' ' + str(netmask)]
ios_cfg = self._get_running_config()
parse = HTParser(ios_cfg)
acls_raw = parse.find_children(exp_cfg_lines[0])
if acls_raw:
if exp_cfg_lines[1] in acls_raw:
return True
LOG.error("Mismatch in ACL configuration for %s", acl_no)
return False
LOG.debug("%s is not present in config", acl_no)
return False | Check a ACL config exists in the running config.
:param acl_no: access control list (ACL) number
:param network: network which this ACL permits
:param netmask: netmask of the network
:return: | Below is the the instruction that describes the task:
### Input:
Check a ACL config exists in the running config.
:param acl_no: access control list (ACL) number
:param network: network which this ACL permits
:param netmask: netmask of the network
:return:
### Response:
def _check_acl(self, acl_no, network, netmask):
"""Check a ACL config exists in the running config.
:param acl_no: access control list (ACL) number
:param network: network which this ACL permits
:param netmask: netmask of the network
:return:
"""
exp_cfg_lines = ['ip access-list standard ' + str(acl_no),
' permit ' + str(network) + ' ' + str(netmask)]
ios_cfg = self._get_running_config()
parse = HTParser(ios_cfg)
acls_raw = parse.find_children(exp_cfg_lines[0])
if acls_raw:
if exp_cfg_lines[1] in acls_raw:
return True
LOG.error("Mismatch in ACL configuration for %s", acl_no)
return False
LOG.debug("%s is not present in config", acl_no)
return False |
def symlink(source, target, isfile=True):
"""Creates a symlink at target *file* pointing to source.
:arg isfile: when True, if symlinking is disabled in the global config, the file
is copied instead with fortpy.utility.copyfile; otherwise fortpy.utility.copy
is used and the target is considered a directory.
"""
from fortpy.code import config
from os import path
if config.symlink:
from os import symlink, remove
if path.isfile(target) or path.islink(target):
remove(target)
elif path.isdir(target):
msg.warn("Cannot auto-delete directory '{}' for symlinking.".format(target))
return
symlink(source, target)
else:
msg.info(" COPY: {}".format(source))
if isfile:
copyfile(source, target)
else:
copy(source, target) | Creates a symlink at target *file* pointing to source.
:arg isfile: when True, if symlinking is disabled in the global config, the file
is copied instead with fortpy.utility.copyfile; otherwise fortpy.utility.copy
is used and the target is considered a directory. | Below is the the instruction that describes the task:
### Input:
Creates a symlink at target *file* pointing to source.
:arg isfile: when True, if symlinking is disabled in the global config, the file
is copied instead with fortpy.utility.copyfile; otherwise fortpy.utility.copy
is used and the target is considered a directory.
### Response:
def symlink(source, target, isfile=True):
"""Creates a symlink at target *file* pointing to source.
:arg isfile: when True, if symlinking is disabled in the global config, the file
is copied instead with fortpy.utility.copyfile; otherwise fortpy.utility.copy
is used and the target is considered a directory.
"""
from fortpy.code import config
from os import path
if config.symlink:
from os import symlink, remove
if path.isfile(target) or path.islink(target):
remove(target)
elif path.isdir(target):
msg.warn("Cannot auto-delete directory '{}' for symlinking.".format(target))
return
symlink(source, target)
else:
msg.info(" COPY: {}".format(source))
if isfile:
copyfile(source, target)
else:
copy(source, target) |
def color(self, c=False):
"""
Set/get actor's color.
If None is passed as input, will use colors from active scalars.
Same as `c()`.
"""
if c is False:
return np.array(self.GetProperty().GetColor())
elif c is None:
self.GetMapper().ScalarVisibilityOn()
return self
else:
self.GetMapper().ScalarVisibilityOff()
self.GetProperty().SetColor(colors.getColor(c))
return self | Set/get actor's color.
If None is passed as input, will use colors from active scalars.
Same as `c()`. | Below is the the instruction that describes the task:
### Input:
Set/get actor's color.
If None is passed as input, will use colors from active scalars.
Same as `c()`.
### Response:
def color(self, c=False):
"""
Set/get actor's color.
If None is passed as input, will use colors from active scalars.
Same as `c()`.
"""
if c is False:
return np.array(self.GetProperty().GetColor())
elif c is None:
self.GetMapper().ScalarVisibilityOn()
return self
else:
self.GetMapper().ScalarVisibilityOff()
self.GetProperty().SetColor(colors.getColor(c))
return self |
def parse_tibiacom_content(content, *, html_class="BoxContent", tag="div", builder="lxml"):
"""Parses HTML content from Tibia.com into a BeautifulSoup object.
Parameters
----------
content: :class:`str`
The raw HTML content from Tibia.com
html_class: :class:`str`
The HTML class of the parsed element. The default value is ``BoxContent``.
tag: :class:`str`
The HTML tag select. The default value is ``div``.
builder: :class:`str`
The builder to use. The default value is ``lxml``.
Returns
-------
:class:`bs4.BeautifulSoup`, optional
The parsed content.
"""
return bs4.BeautifulSoup(content.replace('ISO-8859-1', 'utf-8'), builder,
parse_only=bs4.SoupStrainer(tag, class_=html_class)) | Parses HTML content from Tibia.com into a BeautifulSoup object.
Parameters
----------
content: :class:`str`
The raw HTML content from Tibia.com
html_class: :class:`str`
The HTML class of the parsed element. The default value is ``BoxContent``.
tag: :class:`str`
The HTML tag select. The default value is ``div``.
builder: :class:`str`
The builder to use. The default value is ``lxml``.
Returns
-------
:class:`bs4.BeautifulSoup`, optional
The parsed content. | Below is the the instruction that describes the task:
### Input:
Parses HTML content from Tibia.com into a BeautifulSoup object.
Parameters
----------
content: :class:`str`
The raw HTML content from Tibia.com
html_class: :class:`str`
The HTML class of the parsed element. The default value is ``BoxContent``.
tag: :class:`str`
The HTML tag select. The default value is ``div``.
builder: :class:`str`
The builder to use. The default value is ``lxml``.
Returns
-------
:class:`bs4.BeautifulSoup`, optional
The parsed content.
### Response:
def parse_tibiacom_content(content, *, html_class="BoxContent", tag="div", builder="lxml"):
"""Parses HTML content from Tibia.com into a BeautifulSoup object.
Parameters
----------
content: :class:`str`
The raw HTML content from Tibia.com
html_class: :class:`str`
The HTML class of the parsed element. The default value is ``BoxContent``.
tag: :class:`str`
The HTML tag select. The default value is ``div``.
builder: :class:`str`
The builder to use. The default value is ``lxml``.
Returns
-------
:class:`bs4.BeautifulSoup`, optional
The parsed content.
"""
return bs4.BeautifulSoup(content.replace('ISO-8859-1', 'utf-8'), builder,
parse_only=bs4.SoupStrainer(tag, class_=html_class)) |
def labels(
self, include_missing=False, include_transforms=False, include_cat_ids=False
):
"""Return list of str labels for the elements of this dimension.
Returns a list of (label, element_id) pairs if *include_cat_ids* is
True. The `element_id` value in the second position of the pair is
None for subtotal items (which don't have an element-id).
"""
# TODO: Having an alternate return type triggered by a flag-parameter
# (`include_cat_ids` in this case) is poor practice. Using flags like
# that effectively squashes what should be two methods into one.
# Either get rid of the need for that alternate return value type or
# create a separate method for it.
elements = self.all_elements if include_missing else self.valid_elements
include_subtotals = include_transforms and self.dimension_type != DT.CA_SUBVAR
# ---items are elements or subtotals, interleaved in display order---
interleaved_items = tuple(self._iter_interleaved_items(elements))
labels = list(
item.label
for item in interleaved_items
if include_subtotals or not item.is_insertion
)
if include_cat_ids:
element_ids = tuple(
None if item.is_insertion else item.element_id
for item in interleaved_items
if include_subtotals or not item.is_insertion
)
return list(zip(labels, element_ids))
return labels | Return list of str labels for the elements of this dimension.
Returns a list of (label, element_id) pairs if *include_cat_ids* is
True. The `element_id` value in the second position of the pair is
None for subtotal items (which don't have an element-id). | Below is the the instruction that describes the task:
### Input:
Return list of str labels for the elements of this dimension.
Returns a list of (label, element_id) pairs if *include_cat_ids* is
True. The `element_id` value in the second position of the pair is
None for subtotal items (which don't have an element-id).
### Response:
def labels(
self, include_missing=False, include_transforms=False, include_cat_ids=False
):
"""Return list of str labels for the elements of this dimension.
Returns a list of (label, element_id) pairs if *include_cat_ids* is
True. The `element_id` value in the second position of the pair is
None for subtotal items (which don't have an element-id).
"""
# TODO: Having an alternate return type triggered by a flag-parameter
# (`include_cat_ids` in this case) is poor practice. Using flags like
# that effectively squashes what should be two methods into one.
# Either get rid of the need for that alternate return value type or
# create a separate method for it.
elements = self.all_elements if include_missing else self.valid_elements
include_subtotals = include_transforms and self.dimension_type != DT.CA_SUBVAR
# ---items are elements or subtotals, interleaved in display order---
interleaved_items = tuple(self._iter_interleaved_items(elements))
labels = list(
item.label
for item in interleaved_items
if include_subtotals or not item.is_insertion
)
if include_cat_ids:
element_ids = tuple(
None if item.is_insertion else item.element_id
for item in interleaved_items
if include_subtotals or not item.is_insertion
)
return list(zip(labels, element_ids))
return labels |
def traverse_layout(root, callback):
"""
Tree walker and invokes the callback as it
traverse pdf object tree
"""
callback(root)
if isinstance(root, collections.Iterable):
for child in root:
traverse_layout(child, callback) | Tree walker and invokes the callback as it
traverse pdf object tree | Below is the the instruction that describes the task:
### Input:
Tree walker and invokes the callback as it
traverse pdf object tree
### Response:
def traverse_layout(root, callback):
"""
Tree walker and invokes the callback as it
traverse pdf object tree
"""
callback(root)
if isinstance(root, collections.Iterable):
for child in root:
traverse_layout(child, callback) |
def include_from_file(records, handle):
"""
Filter the records, keeping only sequences whose ID is contained in the
handle.
"""
ids = set(i.strip() for i in handle)
for record in records:
if record.id.strip() in ids:
yield record | Filter the records, keeping only sequences whose ID is contained in the
handle. | Below is the the instruction that describes the task:
### Input:
Filter the records, keeping only sequences whose ID is contained in the
handle.
### Response:
def include_from_file(records, handle):
"""
Filter the records, keeping only sequences whose ID is contained in the
handle.
"""
ids = set(i.strip() for i in handle)
for record in records:
if record.id.strip() in ids:
yield record |
def write_error(self, status_code, **kwargs):
"""
Handle Exceptions from the server. Formats the HTML into readable form
"""
reason = self._reason
if self.settings.get("serve_traceback") and "exc_info" in kwargs:
error = []
for line in traceback.format_exception(*kwargs["exc_info"]):
error.append(line)
else:
error = None
data = {'_traceback': error, 'message': reason, 'code': status_code}
content = self.render_exception(**data)
self.write(content) | Handle Exceptions from the server. Formats the HTML into readable form | Below is the the instruction that describes the task:
### Input:
Handle Exceptions from the server. Formats the HTML into readable form
### Response:
def write_error(self, status_code, **kwargs):
"""
Handle Exceptions from the server. Formats the HTML into readable form
"""
reason = self._reason
if self.settings.get("serve_traceback") and "exc_info" in kwargs:
error = []
for line in traceback.format_exception(*kwargs["exc_info"]):
error.append(line)
else:
error = None
data = {'_traceback': error, 'message': reason, 'code': status_code}
content = self.render_exception(**data)
self.write(content) |
def get_project(self) -> str:
""" Get the ihc project and make sure controller is ready before"""
with IHCController._mutex:
if self._project is None:
if self.client.get_state() != IHCSTATE_READY:
ready = self.client.wait_for_state_change(IHCSTATE_READY,
10)
if ready != IHCSTATE_READY:
return None
self._project = self.client.get_project()
return self._project | Get the ihc project and make sure controller is ready before | Below is the the instruction that describes the task:
### Input:
Get the ihc project and make sure controller is ready before
### Response:
def get_project(self) -> str:
""" Get the ihc project and make sure controller is ready before"""
with IHCController._mutex:
if self._project is None:
if self.client.get_state() != IHCSTATE_READY:
ready = self.client.wait_for_state_change(IHCSTATE_READY,
10)
if ready != IHCSTATE_READY:
return None
self._project = self.client.get_project()
return self._project |
def get_parent(self, el, no_iframe=False):
"""Get parent."""
parent = el.parent
if no_iframe and parent is not None and self.is_iframe(parent):
parent = None
return parent | Get parent. | Below is the the instruction that describes the task:
### Input:
Get parent.
### Response:
def get_parent(self, el, no_iframe=False):
"""Get parent."""
parent = el.parent
if no_iframe and parent is not None and self.is_iframe(parent):
parent = None
return parent |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.