code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def _shifted_reg_next(reg, direct, num=1):
"""
Creates a shifted 'next' property for shifted (left or right) register.\n
Use: `myReg.next = shifted_reg_next(myReg, 'l', 4)`
:param string direct: direction of shift, either 'l' or 'r'
:param int num: number of shifts
:return: Register containing reg's (shifted) next state
"""
if direct == 'l':
if num >= len(reg):
return 0
else:
return pyrtl.concat(reg, pyrtl.Const(0, num))
elif direct == 'r':
if num >= len(reg):
return 0
else:
return reg[num:]
else:
raise pyrtl.PyrtlError("direction must be specified with 'direct'"
"parameter as either 'l' or 'r'") | Creates a shifted 'next' property for shifted (left or right) register.\n
Use: `myReg.next = shifted_reg_next(myReg, 'l', 4)`
:param string direct: direction of shift, either 'l' or 'r'
:param int num: number of shifts
:return: Register containing reg's (shifted) next state | Below is the instruction that describes the task:
### Input:
Creates a shifted 'next' property for shifted (left or right) register.\n
Use: `myReg.next = shifted_reg_next(myReg, 'l', 4)`
:param string direct: direction of shift, either 'l' or 'r'
:param int num: number of shifts
:return: Register containing reg's (shifted) next state
### Response:
def _shifted_reg_next(reg, direct, num=1):
"""
Creates a shifted 'next' property for shifted (left or right) register.\n
Use: `myReg.next = shifted_reg_next(myReg, 'l', 4)`
:param string direct: direction of shift, either 'l' or 'r'
:param int num: number of shifts
:return: Register containing reg's (shifted) next state
"""
if direct == 'l':
if num >= len(reg):
return 0
else:
return pyrtl.concat(reg, pyrtl.Const(0, num))
elif direct == 'r':
if num >= len(reg):
return 0
else:
return reg[num:]
else:
raise pyrtl.PyrtlError("direction must be specified with 'direct'"
"parameter as either 'l' or 'r'") |
def _get_spot_history(ctx, instance_type):
"""
Returns list of 1,000 most recent spot market data points represented as SpotPriceHistory
objects. Note: The most recent object/data point will be first in the list.
:rtype: list[SpotPriceHistory]
"""
one_week_ago = datetime.datetime.now() - datetime.timedelta(days=7)
spot_data = ctx.ec2.get_spot_price_history(start_time=one_week_ago.isoformat(),
instance_type=instance_type,
product_description="Linux/UNIX")
spot_data.sort(key=attrgetter("timestamp"), reverse=True)
return spot_data | Returns list of 1,000 most recent spot market data points represented as SpotPriceHistory
objects. Note: The most recent object/data point will be first in the list.
:rtype: list[SpotPriceHistory] | Below is the instruction that describes the task:
### Input:
Returns list of 1,000 most recent spot market data points represented as SpotPriceHistory
objects. Note: The most recent object/data point will be first in the list.
:rtype: list[SpotPriceHistory]
### Response:
def _get_spot_history(ctx, instance_type):
    """
    Return spot market price points from the last week as SpotPriceHistory
    objects, sorted so the most recent data point comes first.

    :rtype: list[SpotPriceHistory]
    """
    window_start = datetime.datetime.now() - datetime.timedelta(days=7)
    price_points = ctx.ec2.get_spot_price_history(
        start_time=window_start.isoformat(),
        instance_type=instance_type,
        product_description="Linux/UNIX")
    # In-place sort, newest first.
    price_points.sort(key=attrgetter("timestamp"), reverse=True)
    return price_points
def get_standard_package(self, server_id, is_virt=True):
"""Retrieves the standard firewall package for the virtual server.
:param int server_id: The ID of the server to create the firewall for
:param bool is_virt: True if the ID provided is for a virtual server,
False for a server
:returns: A dictionary containing the standard virtual server firewall
package
"""
firewall_port_speed = self._get_fwl_port_speed(server_id, is_virt)
_value = "%s%s" % (firewall_port_speed, "Mbps Hardware Firewall")
_filter = {'items': {'description': utils.query_filter(_value)}}
return self.prod_pkg.getItems(id=0, filter=_filter) | Retrieves the standard firewall package for the virtual server.
:param int server_id: The ID of the server to create the firewall for
:param bool is_virt: True if the ID provided is for a virtual server,
False for a server
:returns: A dictionary containing the standard virtual server firewall
package | Below is the instruction that describes the task:
### Input:
Retrieves the standard firewall package for the virtual server.
:param int server_id: The ID of the server to create the firewall for
:param bool is_virt: True if the ID provided is for a virtual server,
False for a server
:returns: A dictionary containing the standard virtual server firewall
package
### Response:
def get_standard_package(self, server_id, is_virt=True):
    """Retrieves the standard firewall package for the virtual server.

    :param int server_id: The ID of the server to create the firewall for
    :param bool is_virt: True if the ID provided is for a virtual server,
                         False for a server
    :returns: A dictionary containing the standard virtual server firewall
              package
    """
    # The product item is looked up by its description, which embeds the
    # port speed of the target server.
    port_speed = self._get_fwl_port_speed(server_id, is_virt)
    description = "%sMbps Hardware Firewall" % port_speed
    item_filter = {'items': {'description': utils.query_filter(description)}}
    return self.prod_pkg.getItems(id=0, filter=item_filter)
def next(self):
"""
Handles the next debug event.
@see: L{cont}, L{dispatch}, L{wait}, L{stop}
@raise WindowsError: Raises an exception on error.
If the wait operation causes an error, debugging is stopped
(meaning all debugees are either killed or detached from).
If the event dispatching causes an error, the event is still
continued before returning. This may happen, for example, if the
event handler raises an exception nobody catches.
"""
try:
event = self.wait()
except Exception:
self.stop()
raise
try:
self.dispatch()
finally:
self.cont() | Handles the next debug event.
@see: L{cont}, L{dispatch}, L{wait}, L{stop}
@raise WindowsError: Raises an exception on error.
If the wait operation causes an error, debugging is stopped
(meaning all debugees are either killed or detached from).
If the event dispatching causes an error, the event is still
continued before returning. This may happen, for example, if the
event handler raises an exception nobody catches. | Below is the instruction that describes the task:
### Input:
Handles the next debug event.
@see: L{cont}, L{dispatch}, L{wait}, L{stop}
@raise WindowsError: Raises an exception on error.
If the wait operation causes an error, debugging is stopped
(meaning all debugees are either killed or detached from).
If the event dispatching causes an error, the event is still
continued before returning. This may happen, for example, if the
event handler raises an exception nobody catches.
### Response:
def next(self):
    """
    Handles the next debug event.

    @see: L{cont}, L{dispatch}, L{wait}, L{stop}

    @raise WindowsError: Raises an exception on error.
        If the wait operation causes an error, debugging is stopped
        (meaning all debugees are either killed or detached from).
        If the event dispatching causes an error, the event is still
        continued before returning. This may happen, for example, if the
        event handler raises an exception nobody catches.
    """
    # wait() presumably records the incoming event internally, since
    # dispatch() and cont() take no arguments -- the return value was
    # previously bound to an unused local, which is dropped here.
    try:
        self.wait()
    except Exception:
        # If we cannot even wait for an event, shut debugging down
        # before propagating the error.
        self.stop()
        raise
    try:
        self.dispatch()
    finally:
        # Always continue the event, even if the handler raised.
        self.cont()
def from_entry_dict(cls, entry_dict):
"""
This is a "constructor" for the LogEntry class.
:param entry_dict: A dict we get from the REST API
:return: An instance of LogEntry.
"""
# Debug helper
# https://circleci.com/gh/andresriancho/w3af-api-docker/30
try:
_type = entry_dict['type']
_id = entry_dict['id']
_time = entry_dict['time']
message = entry_dict['message']
severity = entry_dict['severity']
except KeyError:
msg = ('Missing expected log entry attribute. Log entry'
' object is:\n\n%s')
raise APIException(msg % json.dumps(entry_dict, indent=4))
return cls(_type, message, _time, severity, _id) | This is a "constructor" for the LogEntry class.
:param entry_dict: A dict we get from the REST API
:return: An instance of LogEntry. | Below is the instruction that describes the task:
### Input:
This is a "constructor" for the LogEntry class.
:param entry_dict: A dict we get from the REST API
:return: An instance of LogEntry.
### Response:
def from_entry_dict(cls, entry_dict):
    """
    Alternate constructor that builds a LogEntry from a REST API dict.

    :param entry_dict: A dict we get from the REST API
    :return: An instance of LogEntry.
    """
    # Debug helper
    # https://circleci.com/gh/andresriancho/w3af-api-docker/30
    required = ('type', 'id', 'time', 'message', 'severity')
    try:
        fields = {name: entry_dict[name] for name in required}
    except KeyError:
        msg = ('Missing expected log entry attribute. Log entry'
               ' object is:\n\n%s')
        raise APIException(msg % json.dumps(entry_dict, indent=4))
    return cls(fields['type'], fields['message'], fields['time'],
               fields['severity'], fields['id'])
def check_action_type(self, value):
"""Set the value for the CheckActionType, validating input"""
if value is not None:
if not isinstance(value, ActionType):
raise AttributeError("Invalid check action %s" % value)
self._check_action_type = value | Set the value for the CheckActionType, validating input | Below is the the instruction that describes the task:
### Input:
Set the value for the CheckActionType, validating input
### Response:
def check_action_type(self, value):
    """Set the value for the CheckActionType, validating input.

    :param value: an ActionType instance; a None value is accepted but,
        per the indentation of the assignment, leaves the stored value
        untouched -- NOTE(review): confirm that skipping the store on
        None is intentional.
    :raises AttributeError: if value is neither None nor an ActionType.
    """
    if value is not None:
        if not isinstance(value, ActionType):
            raise AttributeError("Invalid check action %s" % value)
        # Only a validated, non-None value replaces the stored action type.
        self._check_action_type = value
def rl_quotes(x):
"""
Replace quotes by typographic quotes
"""
patterns = (
# открывающие кавычки ставятся обычно вплотную к слову слева
# а закрывающие -- вплотную справа
# открывающие русские кавычки-ёлочки
(re.compile(r'((?:^|\s))(")((?u))', re.UNICODE), u'\\1\xab\\3'),
# закрывающие русские кавычки-ёлочки
(re.compile(r'(\S)(")((?u))', re.UNICODE), u'\\1\xbb\\3'),
# открывающие кавычки-лапки, вместо одинарных кавычек
(re.compile(r'((?:^|\s))(\')((?u))', re.UNICODE), u'\\1\u201c\\3'),
# закрывающие кавычки-лапки
(re.compile(r'(\S)(\')((?u))', re.UNICODE), u'\\1\u201d\\3'),
)
return _sub_patterns(patterns, x) | Replace quotes by typographic quotes | Below is the the instruction that describes the task:
### Input:
Replace quotes by typographic quotes
### Response:
def rl_quotes(x):
    """
    Replace quotes by typographic quotes
    """
    patterns = (
        # Opening quotes sit flush against the word that follows them,
        # closing quotes flush against the word that precedes them.
        # Opening Russian guillemets (after start-of-string or whitespace)
        (re.compile(r'((?:^|\s))(")((?u))', re.UNICODE), u'\\1\xab\\3'),
        # Closing Russian guillemets (after a non-space character)
        (re.compile(r'(\S)(")((?u))', re.UNICODE), u'\\1\xbb\\3'),
        # Opening curly double quote, substituted for a single quote
        (re.compile(r'((?:^|\s))(\')((?u))', re.UNICODE), u'\\1\u201c\\3'),
        # Closing curly double quote
        (re.compile(r'(\S)(\')((?u))', re.UNICODE), u'\\1\u201d\\3'),
    )
    return _sub_patterns(patterns, x)
def ggnn_fast_dense(node_states,
adjacency_matrix,
num_edge_types,
total_value_depth,
name=None):
"""ggnn version of the MPNN from Gilmer et al.
Let B be the number of batches.
Let D be the size of the node hidden states.
Let K be the size of the attention keys/queries.
Let V be the size of the output of the ggnn.
Let T be the number of transforms / edge types.
Args:
node_states: The value Tensor of shape [B, T, N, D].
adjacency_matrix: A Tensor of shape [B, N, N, T]. An entry at
indices b, i, j, k is the indicator of the edge from node j to node i in
batch b. A standard adjacency matrix will only have values of one, while a
mutigraph may have larger integer values.
num_edge_types: An integer specifying number of edge types.
total_value_depth: An integer (V)
name: A string.
Returns:
A Tensor of shape [B, N, V] storing the result of computing attention
weights using the queries and keys and combining the values according to
those weights.
Raises:
ValueError: if num_transforms doesn't equal num_edge_types and not using
weighted sum.
"""
# between the same nodes (with only one edge of each type. adjacency_matrix
# will need to be converted to shape [B, T, N, N].
with tf.variable_scope(
name,
default_name="ggnn_fast_dense",
values=[node_states, adjacency_matrix, num_edge_types]):
nodes_shape = common_layers.shape_list(node_states)
v = _compute_edge_transforms(node_states,
total_value_depth,
num_edge_types,
name="v_mpnn")
v = tf.reshape(v, [nodes_shape[0], nodes_shape[1], num_edge_types,
total_value_depth
]) # Shape [B, N, T, V].
v = tf.transpose(v, [0, 2, 1, 3]) # Shape [B, T, N, V].
# Rearranging the dimensions to match the shape of all_edge_logits.
edge_vectors = tf.transpose(adjacency_matrix, [0, 3, 1, 2])
output = compute_values(edge_vectors, v)
return output | ggnn version of the MPNN from Gilmer et al.
Let B be the number of batches.
Let D be the size of the node hidden states.
Let K be the size of the attention keys/queries.
Let V be the size of the output of the ggnn.
Let T be the number of transforms / edge types.
Args:
node_states: The value Tensor of shape [B, T, N, D].
adjacency_matrix: A Tensor of shape [B, N, N, T]. An entry at
indices b, i, j, k is the indicator of the edge from node j to node i in
batch b. A standard adjacency matrix will only have values of one, while a
mutigraph may have larger integer values.
num_edge_types: An integer specifying number of edge types.
total_value_depth: An integer (V)
name: A string.
Returns:
A Tensor of shape [B, N, V] storing the result of computing attention
weights using the queries and keys and combining the values according to
those weights.
Raises:
ValueError: if num_transforms doesn't equal num_edge_types and not using
weighted sum. | Below is the the instruction that describes the task:
### Input:
ggnn version of the MPNN from Gilmer et al.
Let B be the number of batches.
Let D be the size of the node hidden states.
Let K be the size of the attention keys/queries.
Let V be the size of the output of the ggnn.
Let T be the number of transforms / edge types.
Args:
node_states: The value Tensor of shape [B, T, N, D].
adjacency_matrix: A Tensor of shape [B, N, N, T]. An entry at
indices b, i, j, k is the indicator of the edge from node j to node i in
batch b. A standard adjacency matrix will only have values of one, while a
mutigraph may have larger integer values.
num_edge_types: An integer specifying number of edge types.
total_value_depth: An integer (V)
name: A string.
Returns:
A Tensor of shape [B, N, V] storing the result of computing attention
weights using the queries and keys and combining the values according to
those weights.
Raises:
ValueError: if num_transforms doesn't equal num_edge_types and not using
weighted sum.
### Response:
def ggnn_fast_dense(node_states,
                    adjacency_matrix,
                    num_edge_types,
                    total_value_depth,
                    name=None):
  """ggnn version of the MPNN from Gilmer et al.

  Let B be the number of batches.
  Let D be the size of the node hidden states.
  Let V be the size of the output of the ggnn.
  Let T be the number of transforms / edge types.

  Args:
    node_states: The value Tensor. NOTE(review): the original doc claims
      shape [B, T, N, D], but the reshape below uses only the first two
      dims as [B, N], which suggests the expected shape is [B, N, D] --
      confirm with callers.
    adjacency_matrix: A Tensor of shape [B, N, N, T]. An entry at
      indices b, i, j, k is the indicator of the edge from node j to node i in
      batch b. A standard adjacency matrix will only have values of one, while
      a multigraph may have larger integer values.
    num_edge_types: An integer specifying number of edge types.
    total_value_depth: An integer (V)
    name: A string.
  Returns:
    A Tensor of shape [B, N, V] with the per-node outputs obtained by
    combining the edge-type-transformed values over the graph structure.
  """
  # adjacency_matrix arrives as [B, N, N, T]; it is transposed below to
  # [B, T, N, N] so each edge type gets its own dense adjacency slice.
  with tf.variable_scope(
      name,
      default_name="ggnn_fast_dense",
      values=[node_states, adjacency_matrix, num_edge_types]):
    nodes_shape = common_layers.shape_list(node_states)
    # One linear transform per edge type, applied to every node's state.
    v = _compute_edge_transforms(node_states,
                                 total_value_depth,
                                 num_edge_types,
                                 name="v_mpnn")
    v = tf.reshape(v, [nodes_shape[0], nodes_shape[1], num_edge_types,
                       total_value_depth
                      ])  # Shape [B, N, T, V].
    v = tf.transpose(v, [0, 2, 1, 3])  # Shape [B, T, N, V].
    # Rearranging the dimensions to match the shape of all_edge_logits.
    edge_vectors = tf.transpose(adjacency_matrix, [0, 3, 1, 2])
    output = compute_values(edge_vectors, v)
    return output
def textFileStream(self, directory):
"""
Create an input stream that monitors a Hadoop-compatible file system
for new files and reads them as text files. Files must be wrriten to the
monitored directory by "moving" them from another location within the same
file system. File names starting with . are ignored.
The text files must be encoded as UTF-8.
"""
return DStream(self._jssc.textFileStream(directory), self, UTF8Deserializer()) | Create an input stream that monitors a Hadoop-compatible file system
for new files and reads them as text files. Files must be wrriten to the
monitored directory by "moving" them from another location within the same
file system. File names starting with . are ignored.
The text files must be encoded as UTF-8. | Below is the the instruction that describes the task:
### Input:
Create an input stream that monitors a Hadoop-compatible file system
for new files and reads them as text files. Files must be wrriten to the
monitored directory by "moving" them from another location within the same
file system. File names starting with . are ignored.
The text files must be encoded as UTF-8.
### Response:
def textFileStream(self, directory):
    """
    Create an input stream that monitors a Hadoop-compatible file system
    for new files and reads them as text files. Files must be written to the
    monitored directory by "moving" them from another location within the same
    file system. File names starting with . are ignored.
    The text files must be encoded as UTF-8.

    :param directory: path of the directory to monitor for new files
    :return: a DStream of the lines of each newly arrived text file
    """
    return DStream(self._jssc.textFileStream(directory), self, UTF8Deserializer())
def attach(self, canvas):
"""Attach this tranform to a canvas
Parameters
----------
canvas : instance of Canvas
The canvas.
"""
self._canvas = canvas
canvas.events.resize.connect(self.on_resize)
canvas.events.mouse_wheel.connect(self.on_mouse_wheel)
canvas.events.mouse_move.connect(self.on_mouse_move) | Attach this tranform to a canvas
Parameters
----------
canvas : instance of Canvas
The canvas. | Below is the the instruction that describes the task:
### Input:
Attach this tranform to a canvas
Parameters
----------
canvas : instance of Canvas
The canvas.
### Response:
def attach(self, canvas):
    """Attach this transform to a canvas

    Parameters
    ----------
    canvas : instance of Canvas
        The canvas.
    """
    self._canvas = canvas
    # Subscribe to the canvas events that drive this transform.
    canvas.events.resize.connect(self.on_resize)
    canvas.events.mouse_wheel.connect(self.on_mouse_wheel)
    canvas.events.mouse_move.connect(self.on_mouse_move)
def set_terminal_size(fd, size):
"""Set the (width, height) size tuple for the given pty fd."""
sizebuf = array.array('h', reversed(size))
fcntl.ioctl(fd, termios.TIOCSWINSZ, sizebuf) | Set the (width, height) size tuple for the given pty fd. | Below is the the instruction that describes the task:
### Input:
Set the (width, height) size tuple for the given pty fd.
### Response:
def set_terminal_size(fd, size):
    """Set the (width, height) size tuple for the given pty fd."""
    width, height = size
    # The TIOCSWINSZ winsize struct starts with (rows, cols), hence the
    # reversal of the (width, height) argument order.
    winsize = array.array('h', [height, width])
    fcntl.ioctl(fd, termios.TIOCSWINSZ, winsize)
def apex(self, axis):
'''
Find the most extreme vertex in the direction of the axis provided.
axis: A vector, which is an 3x1 np.array.
'''
from blmath.geometry.apex import apex
return apex(self.v, axis) | Find the most extreme vertex in the direction of the axis provided.
axis: A vector, which is an 3x1 np.array. | Below is the the instruction that describes the task:
### Input:
Find the most extreme vertex in the direction of the axis provided.
axis: A vector, which is an 3x1 np.array.
### Response:
def apex(self, axis):
    '''
    Return the vertex of this mesh lying farthest in the direction of the
    axis provided.

    axis: A vector, which is an 3x1 np.array.
    '''
    from blmath.geometry.apex import apex as find_apex
    return find_apex(self.v, axis)
def get_source_and_pgp_key(source_and_key):
"""Look for a pgp key ID or ascii-armor key in the given input.
:param source_and_key: Sting, "source_spec|keyid" where '|keyid' is
optional.
:returns (source_spec, key_id OR None) as a tuple. Returns None for key_id
if there was no '|' in the source_and_key string.
"""
try:
source, key = source_and_key.split('|', 2)
return source, key or None
except ValueError:
return source_and_key, None | Look for a pgp key ID or ascii-armor key in the given input.
:param source_and_key: Sting, "source_spec|keyid" where '|keyid' is
optional.
:returns (source_spec, key_id OR None) as a tuple. Returns None for key_id
if there was no '|' in the source_and_key string. | Below is the the instruction that describes the task:
### Input:
Look for a pgp key ID or ascii-armor key in the given input.
:param source_and_key: Sting, "source_spec|keyid" where '|keyid' is
optional.
:returns (source_spec, key_id OR None) as a tuple. Returns None for key_id
if there was no '|' in the source_and_key string.
### Response:
def get_source_and_pgp_key(source_and_key):
    """Look for a pgp key ID or ascii-armor key in the given input.

    :param source_and_key: String, "source_spec|keyid" where '|keyid' is
        optional.
    :returns (source_spec, key_id OR None) as a tuple. Returns None for key_id
        if there was no '|' in the source_and_key string.
    """
    # partition() splits at the first '|' only. The previous
    # split('|', 2) made any input containing two or more '|' characters
    # raise ValueError on unpacking and fall back to (whole, None),
    # contradicting the documented contract.
    source, _, key = source_and_key.partition('|')
    return source, key or None
def _fetch_datatype(self, transport, bucket, key, r=None, pr=None,
basic_quorum=None, notfound_ok=None,
timeout=None, include_context=None):
"""
_fetch_datatype(bucket, key, r=None, pr=None, basic_quorum=None,
notfound_ok=None, timeout=None, include_context=None)
Fetches the value of a Riak Datatype as raw data. This is used
internally to update already reified Datatype objects. Use the
public version to fetch a reified type.
.. note:: This request is automatically retried :attr:`retries`
times if it fails due to network error.
:param bucket: the bucket of the datatype, which must belong to a
:class:`~riak.BucketType`
:type bucket: RiakBucket
:param key: the key of the datatype
:type key: string, None
:param r: the read quorum
:type r: integer, string, None
:param pr: the primary read quorum
:type pr: integer, string, None
:param basic_quorum: whether to use the "basic quorum" policy
for not-founds
:type basic_quorum: bool
:param notfound_ok: whether to treat not-found responses as successful
:type notfound_ok: bool
:param timeout: a timeout value in milliseconds
:type timeout: int
:param include_context: whether to return the opaque context
as well as the value, which is useful for removal operations
on sets and maps
:type include_context: bool
:rtype: tuple of type, value and context
"""
_validate_timeout(timeout)
return transport.fetch_datatype(bucket, key, r=r, pr=pr,
basic_quorum=basic_quorum,
notfound_ok=notfound_ok,
timeout=timeout,
include_context=include_context) | _fetch_datatype(bucket, key, r=None, pr=None, basic_quorum=None,
notfound_ok=None, timeout=None, include_context=None)
Fetches the value of a Riak Datatype as raw data. This is used
internally to update already reified Datatype objects. Use the
public version to fetch a reified type.
.. note:: This request is automatically retried :attr:`retries`
times if it fails due to network error.
:param bucket: the bucket of the datatype, which must belong to a
:class:`~riak.BucketType`
:type bucket: RiakBucket
:param key: the key of the datatype
:type key: string, None
:param r: the read quorum
:type r: integer, string, None
:param pr: the primary read quorum
:type pr: integer, string, None
:param basic_quorum: whether to use the "basic quorum" policy
for not-founds
:type basic_quorum: bool
:param notfound_ok: whether to treat not-found responses as successful
:type notfound_ok: bool
:param timeout: a timeout value in milliseconds
:type timeout: int
:param include_context: whether to return the opaque context
as well as the value, which is useful for removal operations
on sets and maps
:type include_context: bool
:rtype: tuple of type, value and context | Below is the the instruction that describes the task:
### Input:
_fetch_datatype(bucket, key, r=None, pr=None, basic_quorum=None,
notfound_ok=None, timeout=None, include_context=None)
Fetches the value of a Riak Datatype as raw data. This is used
internally to update already reified Datatype objects. Use the
public version to fetch a reified type.
.. note:: This request is automatically retried :attr:`retries`
times if it fails due to network error.
:param bucket: the bucket of the datatype, which must belong to a
:class:`~riak.BucketType`
:type bucket: RiakBucket
:param key: the key of the datatype
:type key: string, None
:param r: the read quorum
:type r: integer, string, None
:param pr: the primary read quorum
:type pr: integer, string, None
:param basic_quorum: whether to use the "basic quorum" policy
for not-founds
:type basic_quorum: bool
:param notfound_ok: whether to treat not-found responses as successful
:type notfound_ok: bool
:param timeout: a timeout value in milliseconds
:type timeout: int
:param include_context: whether to return the opaque context
as well as the value, which is useful for removal operations
on sets and maps
:type include_context: bool
:rtype: tuple of type, value and context
### Response:
def _fetch_datatype(self, transport, bucket, key, r=None, pr=None,
                    basic_quorum=None, notfound_ok=None,
                    timeout=None, include_context=None):
    """
    _fetch_datatype(bucket, key, r=None, pr=None, basic_quorum=None,
                    notfound_ok=None, timeout=None, include_context=None)

    Fetches the value of a Riak Datatype as raw data. This is used
    internally to update already reified Datatype objects. Use the
    public version to fetch a reified type.

    .. note:: This request is automatically retried :attr:`retries`
       times if it fails due to network error.

    :param bucket: the bucket of the datatype, which must belong to a
      :class:`~riak.BucketType`
    :type bucket: RiakBucket
    :param key: the key of the datatype
    :type key: string, None
    :param r: the read quorum
    :type r: integer, string, None
    :param pr: the primary read quorum
    :type pr: integer, string, None
    :param basic_quorum: whether to use the "basic quorum" policy
       for not-founds
    :type basic_quorum: bool
    :param notfound_ok: whether to treat not-found responses as successful
    :type notfound_ok: bool
    :param timeout: a timeout value in milliseconds
    :type timeout: int
    :param include_context: whether to return the opaque context
      as well as the value, which is useful for removal operations
      on sets and maps
    :type include_context: bool
    :rtype: tuple of type, value and context
    """
    # Reject a malformed timeout before issuing the network request.
    _validate_timeout(timeout)
    # Thin delegation: all quorum/timeout options pass straight through
    # to the transport layer.
    return transport.fetch_datatype(bucket, key, r=r, pr=pr,
                                    basic_quorum=basic_quorum,
                                    notfound_ok=notfound_ok,
                                    timeout=timeout,
                                    include_context=include_context)
def get_version_records(self):
"""
Yield RASH version information stored in DB. Latest first.
:rtype: [VersionRecord]
"""
keys = ['id', 'rash_version', 'schema_version', 'updated']
sql = """
SELECT id, rash_version, schema_version, updated
FROM rash_info
ORDER BY id DESC
"""
with self.connection() as connection:
for row in connection.execute(sql):
yield VersionRecord(**dict(zip(keys, row))) | Yield RASH version information stored in DB. Latest first.
:rtype: [VersionRecord] | Below is the the instruction that describes the task:
### Input:
Yield RASH version information stored in DB. Latest first.
:rtype: [VersionRecord]
### Response:
def get_version_records(self):
    """
    Yield RASH version information stored in DB. Latest first.

    :rtype: [VersionRecord]
    """
    sql = """
    SELECT id, rash_version, schema_version, updated
    FROM rash_info
    ORDER BY id DESC
    """
    columns = ('id', 'rash_version', 'schema_version', 'updated')
    with self.connection() as connection:
        for row in connection.execute(sql):
            # Map each column value onto the matching VersionRecord field.
            yield VersionRecord(**{name: value
                                   for name, value in zip(columns, row)})
def emph(txt, rval=None):
"""Print, emphasized based on rval"""
if rval is None: # rval is not specified, use 'neutral'
info(txt)
elif rval == 0: # rval is 0, by convention, this is 'good'
good(txt)
else: # any other value, considered 'bad'
err(txt) | Print, emphasized based on rval | Below is the the instruction that describes the task:
### Input:
Print, emphasized based on rval
### Response:
def emph(txt, rval=None):
    """Print txt, choosing emphasis based on rval."""
    if rval is None:
        # No return value supplied: neutral emphasis.
        info(txt)
        return
    if rval == 0:
        # By convention a zero return value means success.
        good(txt)
    else:
        # Any non-zero value is treated as a failure.
        err(txt)
def _create_wcs (fitsheader):
"""For compatibility between astropy and pywcs."""
wcsmodule = _load_wcs_module ()
is_pywcs = hasattr (wcsmodule, 'UnitConverter')
wcs = wcsmodule.WCS (fitsheader)
wcs.wcs.set ()
wcs.wcs.fix () # I'm interested in MJD computation via datfix()
if hasattr (wcs, 'wcs_pix2sky'):
wcs.wcs_pix2world = wcs.wcs_pix2sky
wcs.wcs_world2pix = wcs.wcs_sky2pix
return wcs | For compatibility between astropy and pywcs. | Below is the the instruction that describes the task:
### Input:
For compatibility between astropy and pywcs.
### Response:
def _create_wcs (fitsheader):
    """For compatibility between astropy and pywcs.

    Builds a WCS object from *fitsheader* using whichever WCS module is
    available, and aliases the old pywcs method names to the astropy-style
    ones so callers can always use wcs_pix2world / wcs_world2pix.
    """
    wcsmodule = _load_wcs_module ()
    # NOTE: the previous `is_pywcs = hasattr(wcsmodule, 'UnitConverter')`
    # local was never used; the hasattr check below already distinguishes
    # the pywcs API.
    wcs = wcsmodule.WCS (fitsheader)
    wcs.wcs.set ()
    wcs.wcs.fix () # I'm interested in MJD computation via datfix()
    if hasattr (wcs, 'wcs_pix2sky'):
        wcs.wcs_pix2world = wcs.wcs_pix2sky
        wcs.wcs_world2pix = wcs.wcs_sky2pix
    return wcs
def _retrieve_html_page(self):
"""
Download the requested player's stats page.
Download the requested page and strip all of the comment tags before
returning a pyquery object which will be used to parse the data.
Returns
-------
PyQuery object
The requested page is returned as a queriable PyQuery object with
the comment tags removed.
"""
url = PLAYER_URL % self._player_id
try:
url_data = pq(url)
except HTTPError:
return None
return pq(utils._remove_html_comment_tags(url_data)) | Download the requested player's stats page.
Download the requested page and strip all of the comment tags before
returning a pyquery object which will be used to parse the data.
Returns
-------
PyQuery object
The requested page is returned as a queriable PyQuery object with
the comment tags removed. | Below is the the instruction that describes the task:
### Input:
Download the requested player's stats page.
Download the requested page and strip all of the comment tags before
returning a pyquery object which will be used to parse the data.
Returns
-------
PyQuery object
The requested page is returned as a queriable PyQuery object with
the comment tags removed.
### Response:
def _retrieve_html_page(self):
    """
    Download the requested player's stats page.

    Download the requested page and strip all of the comment tags before
    returning a pyquery object which will be used to parse the data.

    Returns
    -------
    PyQuery object
        The requested page is returned as a queriable PyQuery object with
        the comment tags removed.
    """
    player_url = PLAYER_URL % self._player_id
    try:
        raw_page = pq(player_url)
    except HTTPError:
        # An unreachable page is treated as "no data" rather than an error.
        return None
    return pq(utils._remove_html_comment_tags(raw_page))
def _dict_to_fields(d, jsonify=True):
"""
Convert dict to tuple, for faster sqlite3 import
"""
x = []
for k in constants._keys:
v = d[k]
if jsonify and (k in ('attributes', 'extra')):
x.append(_jsonify(v))
else:
x.append(v)
return tuple(x) | Convert dict to tuple, for faster sqlite3 import | Below is the the instruction that describes the task:
### Input:
Convert dict to tuple, for faster sqlite3 import
### Response:
def _dict_to_fields(d, jsonify=True):
"""
Convert dict to tuple, for faster sqlite3 import
"""
x = []
for k in constants._keys:
v = d[k]
if jsonify and (k in ('attributes', 'extra')):
x.append(_jsonify(v))
else:
x.append(v)
return tuple(x) |
def _build_primitive_cell(self):
"""
primitive_matrix:
Relative axes of primitive cell to the input unit cell.
Relative axes to the supercell is calculated by:
supercell_matrix^-1 * primitive_matrix
Therefore primitive cell lattice is finally calculated by:
(supercell_lattice * (supercell_matrix)^-1 * primitive_matrix)^T
"""
self._primitive = self._get_primitive_cell(
self._supercell, self._supercell_matrix, self._primitive_matrix) | primitive_matrix:
Relative axes of primitive cell to the input unit cell.
Relative axes to the supercell is calculated by:
supercell_matrix^-1 * primitive_matrix
Therefore primitive cell lattice is finally calculated by:
(supercell_lattice * (supercell_matrix)^-1 * primitive_matrix)^T | Below is the the instruction that describes the task:
### Input:
primitive_matrix:
Relative axes of primitive cell to the input unit cell.
Relative axes to the supercell is calculated by:
supercell_matrix^-1 * primitive_matrix
Therefore primitive cell lattice is finally calculated by:
(supercell_lattice * (supercell_matrix)^-1 * primitive_matrix)^T
### Response:
def _build_primitive_cell(self):
"""
primitive_matrix:
Relative axes of primitive cell to the input unit cell.
Relative axes to the supercell is calculated by:
supercell_matrix^-1 * primitive_matrix
Therefore primitive cell lattice is finally calculated by:
(supercell_lattice * (supercell_matrix)^-1 * primitive_matrix)^T
"""
self._primitive = self._get_primitive_cell(
self._supercell, self._supercell_matrix, self._primitive_matrix) |
def write_packages(self, diagram):
"""write a package diagram"""
# sorted to get predictable (hence testable) results
for i, obj in enumerate(sorted(diagram.modules(), key=lambda x: x.title)):
self.printer.emit_node(i, label=self.get_title(obj), shape="box")
obj.fig_id = i
# package dependencies
for rel in diagram.get_relationships("depends"):
self.printer.emit_edge(
rel.from_object.fig_id, rel.to_object.fig_id, **self.pkg_edges
) | write a package diagram | Below is the the instruction that describes the task:
### Input:
write a package diagram
### Response:
def write_packages(self, diagram):
"""write a package diagram"""
# sorted to get predictable (hence testable) results
for i, obj in enumerate(sorted(diagram.modules(), key=lambda x: x.title)):
self.printer.emit_node(i, label=self.get_title(obj), shape="box")
obj.fig_id = i
# package dependencies
for rel in diagram.get_relationships("depends"):
self.printer.emit_edge(
rel.from_object.fig_id, rel.to_object.fig_id, **self.pkg_edges
) |
def create_model(text_in,
labels,
timesteps,
per_example_weights,
phase=pt.Phase.train):
"""Creates a model for running baby names."""
with pt.defaults_scope(phase=phase, l2loss=0.00001):
# The embedding lookup must be placed on a cpu.
with tf.device('/cpu:0'):
embedded = text_in.embedding_lookup(CHARS, [EMBEDDING_SIZE])
# We need to cleave the sequence because sequence lstm expect each
# timestep to be in its own Tensor.
lstm = (embedded.cleave_sequence(timesteps).sequence_lstm(CHARS))
# The classifier is much more efficient if it runs across the entire
# batch at once, so we want to squash (i.e. uncleave).
#
# Hidden nodes is set to 32 because it seems to work well.
return (lstm.squash_sequence().fully_connected(32,
activation_fn=tf.nn.relu)
.dropout(0.7)
.softmax_classifier(SEXES,
labels,
per_example_weights=per_example_weights)) | Creates a model for running baby names. | Below is the the instruction that describes the task:
### Input:
Creates a model for running baby names.
### Response:
def create_model(text_in,
labels,
timesteps,
per_example_weights,
phase=pt.Phase.train):
"""Creates a model for running baby names."""
with pt.defaults_scope(phase=phase, l2loss=0.00001):
# The embedding lookup must be placed on a cpu.
with tf.device('/cpu:0'):
embedded = text_in.embedding_lookup(CHARS, [EMBEDDING_SIZE])
# We need to cleave the sequence because sequence lstm expect each
# timestep to be in its own Tensor.
lstm = (embedded.cleave_sequence(timesteps).sequence_lstm(CHARS))
# The classifier is much more efficient if it runs across the entire
# batch at once, so we want to squash (i.e. uncleave).
#
# Hidden nodes is set to 32 because it seems to work well.
return (lstm.squash_sequence().fully_connected(32,
activation_fn=tf.nn.relu)
.dropout(0.7)
.softmax_classifier(SEXES,
labels,
per_example_weights=per_example_weights)) |
def write_error(*args, **kwargs):
"""Redirectable wrapper for print sys.stderr statements."""
if _logger:
kwargs.pop("end", None)
kwargs.pop("file", None)
_logger.error(*args, **kwargs)
else:
print(*args, file=sys.stderr, **kwargs) | Redirectable wrapper for print sys.stderr statements. | Below is the the instruction that describes the task:
### Input:
Redirectable wrapper for print sys.stderr statements.
### Response:
def write_error(*args, **kwargs):
"""Redirectable wrapper for print sys.stderr statements."""
if _logger:
kwargs.pop("end", None)
kwargs.pop("file", None)
_logger.error(*args, **kwargs)
else:
print(*args, file=sys.stderr, **kwargs) |
def _process_file(self):
'''Process rebase file into dict with name and cut site information.'''
print 'Processing file'
with open(self._rebase_file, 'r') as f:
raw = f.readlines()
names = [line.strip()[3:] for line in raw if line.startswith('<1>')]
seqs = [line.strip()[3:] for line in raw if line.startswith('<5>')]
if len(names) != len(seqs):
raise Exception('Found different number of enzyme names and '
'sequences.')
self._enzyme_dict = {}
for name, seq in zip(names, seqs):
if '?' in seq:
# Is unknown sequence, don't keep it
pass
elif seq.startswith('(') and seq.endswith(')'):
# Has four+ cut sites, don't keep it
pass
elif '^' in seq:
# Has reasonable internal cut sites, keep it
top_cut = seq.index('^')
bottom_cut = len(seq) - top_cut - 1
site = seq.replace('^', '')
self._enzyme_dict[name] = (site, (top_cut, bottom_cut))
elif seq.endswith(')'):
# Has reasonable external cut sites, keep it
# (4-cutter also starts with '(')
# separate site and cut locations
site, cuts = seq.split('(')
cuts = cuts.replace(')', '')
top_cut, bottom_cut = [int(x) + len(site) for x in
cuts.split('/')]
self._enzyme_dict[name] = (site, (top_cut, bottom_cut))
shutil.rmtree(self._tmpdir) | Process rebase file into dict with name and cut site information. | Below is the the instruction that describes the task:
### Input:
Process rebase file into dict with name and cut site information.
### Response:
def _process_file(self):
'''Process rebase file into dict with name and cut site information.'''
print 'Processing file'
with open(self._rebase_file, 'r') as f:
raw = f.readlines()
names = [line.strip()[3:] for line in raw if line.startswith('<1>')]
seqs = [line.strip()[3:] for line in raw if line.startswith('<5>')]
if len(names) != len(seqs):
raise Exception('Found different number of enzyme names and '
'sequences.')
self._enzyme_dict = {}
for name, seq in zip(names, seqs):
if '?' in seq:
# Is unknown sequence, don't keep it
pass
elif seq.startswith('(') and seq.endswith(')'):
# Has four+ cut sites, don't keep it
pass
elif '^' in seq:
# Has reasonable internal cut sites, keep it
top_cut = seq.index('^')
bottom_cut = len(seq) - top_cut - 1
site = seq.replace('^', '')
self._enzyme_dict[name] = (site, (top_cut, bottom_cut))
elif seq.endswith(')'):
# Has reasonable external cut sites, keep it
# (4-cutter also starts with '(')
# separate site and cut locations
site, cuts = seq.split('(')
cuts = cuts.replace(')', '')
top_cut, bottom_cut = [int(x) + len(site) for x in
cuts.split('/')]
self._enzyme_dict[name] = (site, (top_cut, bottom_cut))
shutil.rmtree(self._tmpdir) |
def login(request, template="accounts/account_login.html",
form_class=LoginForm, extra_context=None):
"""
Login form.
"""
form = form_class(request.POST or None)
if request.method == "POST" and form.is_valid():
authenticated_user = form.save()
info(request, _("Successfully logged in"))
auth_login(request, authenticated_user)
return login_redirect(request)
context = {"form": form, "title": _("Log in")}
context.update(extra_context or {})
return TemplateResponse(request, template, context) | Login form. | Below is the the instruction that describes the task:
### Input:
Login form.
### Response:
def login(request, template="accounts/account_login.html",
form_class=LoginForm, extra_context=None):
"""
Login form.
"""
form = form_class(request.POST or None)
if request.method == "POST" and form.is_valid():
authenticated_user = form.save()
info(request, _("Successfully logged in"))
auth_login(request, authenticated_user)
return login_redirect(request)
context = {"form": form, "title": _("Log in")}
context.update(extra_context or {})
return TemplateResponse(request, template, context) |
def load(path, variable='Datamat'):
"""
Load datamat at path.
Parameters:
path : string
Absolute path of the file to load from.
"""
f = h5py.File(path,'r')
try:
dm = fromhdf5(f[variable])
finally:
f.close()
return dm | Load datamat at path.
Parameters:
path : string
Absolute path of the file to load from. | Below is the the instruction that describes the task:
### Input:
Load datamat at path.
Parameters:
path : string
Absolute path of the file to load from.
### Response:
def load(path, variable='Datamat'):
"""
Load datamat at path.
Parameters:
path : string
Absolute path of the file to load from.
"""
f = h5py.File(path,'r')
try:
dm = fromhdf5(f[variable])
finally:
f.close()
return dm |
def update(self, **kwargs):
"""Update a specific alarm definition."""
url_str = self.base_url + '/%s' % kwargs['alarm_id']
del kwargs['alarm_id']
resp = self.client.create(url=url_str,
method='PUT',
json=kwargs)
return resp | Update a specific alarm definition. | Below is the the instruction that describes the task:
### Input:
Update a specific alarm definition.
### Response:
def update(self, **kwargs):
"""Update a specific alarm definition."""
url_str = self.base_url + '/%s' % kwargs['alarm_id']
del kwargs['alarm_id']
resp = self.client.create(url=url_str,
method='PUT',
json=kwargs)
return resp |
def lower_unary_transformations(ir_blocks):
"""Raise exception if any unary transformation block encountered."""
def visitor_fn(expression):
"""Raise error if current expression is a UnaryTransformation."""
if not isinstance(expression, expressions.UnaryTransformation):
return expression
raise NotImplementedError(
u'UnaryTransformation expression "{}" encountered with IR blocks {} is unsupported by '
u'the SQL backend.'.format(expression, ir_blocks)
)
new_ir_blocks = [
block.visit_and_update_expressions(visitor_fn)
for block in ir_blocks
]
return new_ir_blocks | Raise exception if any unary transformation block encountered. | Below is the the instruction that describes the task:
### Input:
Raise exception if any unary transformation block encountered.
### Response:
def lower_unary_transformations(ir_blocks):
"""Raise exception if any unary transformation block encountered."""
def visitor_fn(expression):
"""Raise error if current expression is a UnaryTransformation."""
if not isinstance(expression, expressions.UnaryTransformation):
return expression
raise NotImplementedError(
u'UnaryTransformation expression "{}" encountered with IR blocks {} is unsupported by '
u'the SQL backend.'.format(expression, ir_blocks)
)
new_ir_blocks = [
block.visit_and_update_expressions(visitor_fn)
for block in ir_blocks
]
return new_ir_blocks |
def is_empty_shape(sh: ShExJ.Shape) -> bool:
""" Determine whether sh has any value """
return sh.closed is None and sh.expression is None and sh.extra is None and \
sh.semActs is None | Determine whether sh has any value | Below is the the instruction that describes the task:
### Input:
Determine whether sh has any value
### Response:
def is_empty_shape(sh: ShExJ.Shape) -> bool:
""" Determine whether sh has any value """
return sh.closed is None and sh.expression is None and sh.extra is None and \
sh.semActs is None |
def _get_ami_dict(json_url):
"""Get ami from a web url.
Args:
region (str): AWS Region to find AMI ID.
Returns:
dict: Contents in dictionary format.
"""
LOG.info("Getting AMI from %s", json_url)
response = requests.get(json_url)
assert response.ok, "Error getting ami info from {}".format(json_url)
ami_dict = response.json()
LOG.debug('AMI json contents: %s', ami_dict)
return ami_dict | Get ami from a web url.
Args:
region (str): AWS Region to find AMI ID.
Returns:
dict: Contents in dictionary format. | Below is the the instruction that describes the task:
### Input:
Get ami from a web url.
Args:
region (str): AWS Region to find AMI ID.
Returns:
dict: Contents in dictionary format.
### Response:
def _get_ami_dict(json_url):
"""Get ami from a web url.
Args:
region (str): AWS Region to find AMI ID.
Returns:
dict: Contents in dictionary format.
"""
LOG.info("Getting AMI from %s", json_url)
response = requests.get(json_url)
assert response.ok, "Error getting ami info from {}".format(json_url)
ami_dict = response.json()
LOG.debug('AMI json contents: %s', ami_dict)
return ami_dict |
def _load_image_set_index(self, shuffle):
"""
get total number of images, init indices
Parameters
----------
shuffle : bool
whether to shuffle the initial indices
"""
self.num_images = 0
for db in self.imdbs:
self.num_images += db.num_images
indices = list(range(self.num_images))
if shuffle:
random.shuffle(indices)
return indices | get total number of images, init indices
Parameters
----------
shuffle : bool
whether to shuffle the initial indices | Below is the the instruction that describes the task:
### Input:
get total number of images, init indices
Parameters
----------
shuffle : bool
whether to shuffle the initial indices
### Response:
def _load_image_set_index(self, shuffle):
"""
get total number of images, init indices
Parameters
----------
shuffle : bool
whether to shuffle the initial indices
"""
self.num_images = 0
for db in self.imdbs:
self.num_images += db.num_images
indices = list(range(self.num_images))
if shuffle:
random.shuffle(indices)
return indices |
def create_items(portal_type=None, uid=None, endpoint=None, **kw):
""" create items
1. If the uid is given, get the object and create the content in there
(assumed that it is folderish)
2. If the uid is 0, the target folder is assumed the portal.
3. If there is no uid given, the payload is checked for either a key
- `parent_uid` specifies the *uid* of the target folder
- `parent_path` specifies the *physical path* of the target folder
"""
# disable CSRF
req.disable_csrf_protection()
# destination where to create the content
container = uid and get_object_by_uid(uid) or None
# extract the data from the request
records = req.get_request_data()
results = []
for record in records:
# get the portal_type
if portal_type is None:
# try to fetch the portal type out of the request data
portal_type = record.pop("portal_type", None)
# check if it is allowed to create the portal_type
if not is_creation_allowed(portal_type):
fail(401, "Creation of '{}' is not allowed".format(portal_type))
if container is None:
# find the container for content creation
container = find_target_container(portal_type, record)
# Check if we have a container and a portal_type
if not all([container, portal_type]):
fail(400, "Please provide a container path/uid and portal_type")
# create the object and pass in the record data
obj = create_object(container, portal_type, **record)
results.append(obj)
if not results:
fail(400, "No Objects could be created")
return make_items_for(results, endpoint=endpoint) | create items
1. If the uid is given, get the object and create the content in there
(assumed that it is folderish)
2. If the uid is 0, the target folder is assumed the portal.
3. If there is no uid given, the payload is checked for either a key
- `parent_uid` specifies the *uid* of the target folder
- `parent_path` specifies the *physical path* of the target folder | Below is the the instruction that describes the task:
### Input:
create items
1. If the uid is given, get the object and create the content in there
(assumed that it is folderish)
2. If the uid is 0, the target folder is assumed the portal.
3. If there is no uid given, the payload is checked for either a key
- `parent_uid` specifies the *uid* of the target folder
- `parent_path` specifies the *physical path* of the target folder
### Response:
def create_items(portal_type=None, uid=None, endpoint=None, **kw):
""" create items
1. If the uid is given, get the object and create the content in there
(assumed that it is folderish)
2. If the uid is 0, the target folder is assumed the portal.
3. If there is no uid given, the payload is checked for either a key
- `parent_uid` specifies the *uid* of the target folder
- `parent_path` specifies the *physical path* of the target folder
"""
# disable CSRF
req.disable_csrf_protection()
# destination where to create the content
container = uid and get_object_by_uid(uid) or None
# extract the data from the request
records = req.get_request_data()
results = []
for record in records:
# get the portal_type
if portal_type is None:
# try to fetch the portal type out of the request data
portal_type = record.pop("portal_type", None)
# check if it is allowed to create the portal_type
if not is_creation_allowed(portal_type):
fail(401, "Creation of '{}' is not allowed".format(portal_type))
if container is None:
# find the container for content creation
container = find_target_container(portal_type, record)
# Check if we have a container and a portal_type
if not all([container, portal_type]):
fail(400, "Please provide a container path/uid and portal_type")
# create the object and pass in the record data
obj = create_object(container, portal_type, **record)
results.append(obj)
if not results:
fail(400, "No Objects could be created")
return make_items_for(results, endpoint=endpoint) |
def get_last_updated(self, node_id=None):
"""
Returns the time a particular node has been last refreshed.
:param string node_id: optional, the connection id of the node to retrieve
:rtype: int
:returns: Returns a unix timestamp if it exists, otherwise None
"""
if not node_id:
node_id = self.conn.id
dt = self.conn.client.hget(self.nodelist_key, node_id)
return int(dt) if dt else None | Returns the time a particular node has been last refreshed.
:param string node_id: optional, the connection id of the node to retrieve
:rtype: int
:returns: Returns a unix timestamp if it exists, otherwise None | Below is the the instruction that describes the task:
### Input:
Returns the time a particular node has been last refreshed.
:param string node_id: optional, the connection id of the node to retrieve
:rtype: int
:returns: Returns a unix timestamp if it exists, otherwise None
### Response:
def get_last_updated(self, node_id=None):
"""
Returns the time a particular node has been last refreshed.
:param string node_id: optional, the connection id of the node to retrieve
:rtype: int
:returns: Returns a unix timestamp if it exists, otherwise None
"""
if not node_id:
node_id = self.conn.id
dt = self.conn.client.hget(self.nodelist_key, node_id)
return int(dt) if dt else None |
def json_response_schema( expected_object_schema ):
"""
Make a schema for a "standard" server response.
Standard server responses have 'status': True
and possibly 'indexing': True set.
"""
schema = {
'type': 'object',
'properties': {
'status': {
'type': 'boolean',
},
'indexing': {
'type': 'boolean',
},
'lastblock': {
'anyOf': [
{
'type': 'integer',
'minimum': 0,
},
{
'type': 'null',
},
],
},
},
'required': [
'status',
'indexing',
'lastblock'
],
}
# fold in the given object schema
schema['properties'].update( expected_object_schema['properties'] )
schema['required'] = list(set( schema['required'] + expected_object_schema['required'] ))
return schema | Make a schema for a "standard" server response.
Standard server responses have 'status': True
and possibly 'indexing': True set. | Below is the the instruction that describes the task:
### Input:
Make a schema for a "standard" server response.
Standard server responses have 'status': True
and possibly 'indexing': True set.
### Response:
def json_response_schema( expected_object_schema ):
"""
Make a schema for a "standard" server response.
Standard server responses have 'status': True
and possibly 'indexing': True set.
"""
schema = {
'type': 'object',
'properties': {
'status': {
'type': 'boolean',
},
'indexing': {
'type': 'boolean',
},
'lastblock': {
'anyOf': [
{
'type': 'integer',
'minimum': 0,
},
{
'type': 'null',
},
],
},
},
'required': [
'status',
'indexing',
'lastblock'
],
}
# fold in the given object schema
schema['properties'].update( expected_object_schema['properties'] )
schema['required'] = list(set( schema['required'] + expected_object_schema['required'] ))
return schema |
def reset(self, route=None):
''' Reset all routes (force plugins to be re-applied) and clear all
caches. If an ID or route object is given, only that specific route
is affected. '''
if route is None: routes = self.routes
elif isinstance(route, Route): routes = [route]
else: routes = [self.routes[route]]
for route in routes: route.reset()
if DEBUG:
for route in routes: route.prepare()
self.trigger_hook('app_reset') | Reset all routes (force plugins to be re-applied) and clear all
caches. If an ID or route object is given, only that specific route
is affected. | Below is the the instruction that describes the task:
### Input:
Reset all routes (force plugins to be re-applied) and clear all
caches. If an ID or route object is given, only that specific route
is affected.
### Response:
def reset(self, route=None):
''' Reset all routes (force plugins to be re-applied) and clear all
caches. If an ID or route object is given, only that specific route
is affected. '''
if route is None: routes = self.routes
elif isinstance(route, Route): routes = [route]
else: routes = [self.routes[route]]
for route in routes: route.reset()
if DEBUG:
for route in routes: route.prepare()
self.trigger_hook('app_reset') |
def multi_load_data_custom(Channel, TraceTitle, RunNos, directoryPath='.', calcPSD=True, NPerSegmentPSD=1000000):
"""
Lets you load multiple datasets named with the LeCroy's custom naming scheme at once.
Parameters
----------
Channel : int
The channel you want to load
TraceTitle : string
The custom trace title of the files.
RunNos : sequence
Sequence of run numbers you want to load
RepeatNos : sequence
Sequence of repeat numbers you want to load
directoryPath : string, optional
The path to the directory housing the data
The default is the current directory
Returns
-------
Data : list
A list containing the DataObjects that were loaded.
"""
# files = glob('{}/*'.format(directoryPath))
# files_CorrectChannel = []
# for file_ in files:
# if 'C{}'.format(Channel) in file_:
# files_CorrectChannel.append(file_)
# files_CorrectRunNo = []
# for RunNo in RunNos:
# files_match = _fnmatch.filter(
# files_CorrectChannel, '*C{}'.format(Channel)+TraceTitle+str(RunNo).zfill(5)+'.*')
# for file_ in files_match:
# files_CorrectRunNo.append(file_)
matching_files = search_data_custom(Channel, TraceTitle, RunNos, directoryPath)
cpu_count = _cpu_count()
workerPool = _Pool(cpu_count)
# for filepath in files_CorrectRepeatNo:
# print(filepath)
# data.append(load_data(filepath))
load_data_partial = _partial(load_data, calcPSD=calcPSD, NPerSegmentPSD=NPerSegmentPSD)
data = workerPool.map(load_data_partial, matching_files)
workerPool.close()
workerPool.terminate()
workerPool.join()
return data | Lets you load multiple datasets named with the LeCroy's custom naming scheme at once.
Parameters
----------
Channel : int
The channel you want to load
TraceTitle : string
The custom trace title of the files.
RunNos : sequence
Sequence of run numbers you want to load
RepeatNos : sequence
Sequence of repeat numbers you want to load
directoryPath : string, optional
The path to the directory housing the data
The default is the current directory
Returns
-------
Data : list
A list containing the DataObjects that were loaded. | Below is the the instruction that describes the task:
### Input:
Lets you load multiple datasets named with the LeCroy's custom naming scheme at once.
Parameters
----------
Channel : int
The channel you want to load
TraceTitle : string
The custom trace title of the files.
RunNos : sequence
Sequence of run numbers you want to load
RepeatNos : sequence
Sequence of repeat numbers you want to load
directoryPath : string, optional
The path to the directory housing the data
The default is the current directory
Returns
-------
Data : list
A list containing the DataObjects that were loaded.
### Response:
def multi_load_data_custom(Channel, TraceTitle, RunNos, directoryPath='.', calcPSD=True, NPerSegmentPSD=1000000):
"""
Lets you load multiple datasets named with the LeCroy's custom naming scheme at once.
Parameters
----------
Channel : int
The channel you want to load
TraceTitle : string
The custom trace title of the files.
RunNos : sequence
Sequence of run numbers you want to load
RepeatNos : sequence
Sequence of repeat numbers you want to load
directoryPath : string, optional
The path to the directory housing the data
The default is the current directory
Returns
-------
Data : list
A list containing the DataObjects that were loaded.
"""
# files = glob('{}/*'.format(directoryPath))
# files_CorrectChannel = []
# for file_ in files:
# if 'C{}'.format(Channel) in file_:
# files_CorrectChannel.append(file_)
# files_CorrectRunNo = []
# for RunNo in RunNos:
# files_match = _fnmatch.filter(
# files_CorrectChannel, '*C{}'.format(Channel)+TraceTitle+str(RunNo).zfill(5)+'.*')
# for file_ in files_match:
# files_CorrectRunNo.append(file_)
matching_files = search_data_custom(Channel, TraceTitle, RunNos, directoryPath)
cpu_count = _cpu_count()
workerPool = _Pool(cpu_count)
# for filepath in files_CorrectRepeatNo:
# print(filepath)
# data.append(load_data(filepath))
load_data_partial = _partial(load_data, calcPSD=calcPSD, NPerSegmentPSD=NPerSegmentPSD)
data = workerPool.map(load_data_partial, matching_files)
workerPool.close()
workerPool.terminate()
workerPool.join()
return data |
def paintEvent(self, event):
"""QWidget.paintEvent() implementation
"""
painter = QPainter(self)
painter.fillRect(event.rect(), self.palette().color(QPalette.Window))
painter.setPen(Qt.black)
block = self._qpart.firstVisibleBlock()
blockNumber = block.blockNumber()
top = int(self._qpart.blockBoundingGeometry(block).translated(self._qpart.contentOffset()).top())
bottom = top + int(self._qpart.blockBoundingRect(block).height())
singleBlockHeight = self._qpart.cursorRect().height()
boundingRect = self._qpart.blockBoundingRect(block)
availableWidth = self.__width - self._RIGHT_MARGIN - self._LEFT_MARGIN
availableHeight = self._qpart.fontMetrics().height()
while block.isValid() and top <= event.rect().bottom():
if block.isVisible() and bottom >= event.rect().top():
number = str(blockNumber + 1)
painter.drawText(self._LEFT_MARGIN, top,
availableWidth, availableHeight,
Qt.AlignRight, number)
if boundingRect.height() >= singleBlockHeight * 2: # wrapped block
painter.fillRect(1, top + singleBlockHeight,
self.__width - 2, boundingRect.height() - singleBlockHeight - 2,
Qt.darkGreen)
block = block.next()
boundingRect = self._qpart.blockBoundingRect(block)
top = bottom
bottom = top + int(boundingRect.height())
blockNumber += 1 | QWidget.paintEvent() implementation | Below is the the instruction that describes the task:
### Input:
QWidget.paintEvent() implementation
### Response:
def paintEvent(self, event):
"""QWidget.paintEvent() implementation
"""
painter = QPainter(self)
painter.fillRect(event.rect(), self.palette().color(QPalette.Window))
painter.setPen(Qt.black)
block = self._qpart.firstVisibleBlock()
blockNumber = block.blockNumber()
top = int(self._qpart.blockBoundingGeometry(block).translated(self._qpart.contentOffset()).top())
bottom = top + int(self._qpart.blockBoundingRect(block).height())
singleBlockHeight = self._qpart.cursorRect().height()
boundingRect = self._qpart.blockBoundingRect(block)
availableWidth = self.__width - self._RIGHT_MARGIN - self._LEFT_MARGIN
availableHeight = self._qpart.fontMetrics().height()
while block.isValid() and top <= event.rect().bottom():
if block.isVisible() and bottom >= event.rect().top():
number = str(blockNumber + 1)
painter.drawText(self._LEFT_MARGIN, top,
availableWidth, availableHeight,
Qt.AlignRight, number)
if boundingRect.height() >= singleBlockHeight * 2: # wrapped block
painter.fillRect(1, top + singleBlockHeight,
self.__width - 2, boundingRect.height() - singleBlockHeight - 2,
Qt.darkGreen)
block = block.next()
boundingRect = self._qpart.blockBoundingRect(block)
top = bottom
bottom = top + int(boundingRect.height())
blockNumber += 1 |
def labels(self, hs_dims=None, prune=False):
"""Get labels for the cube slice, and perform pruning by slice."""
if self.ca_as_0th:
labels = self._cube.labels(include_transforms_for_dims=hs_dims)[1:]
else:
labels = self._cube.labels(include_transforms_for_dims=hs_dims)[-2:]
if not prune:
return labels
def prune_dimension_labels(labels, prune_indices):
"""Get pruned labels for single dimension, besed on prune inds."""
labels = [label for label, prune in zip(labels, prune_indices) if not prune]
return labels
labels = [
prune_dimension_labels(dim_labels, dim_prune_inds)
for dim_labels, dim_prune_inds in zip(labels, self._prune_indices(hs_dims))
]
return labels | Get labels for the cube slice, and perform pruning by slice. | Below is the the instruction that describes the task:
### Input:
Get labels for the cube slice, and perform pruning by slice.
### Response:
def labels(self, hs_dims=None, prune=False):
"""Get labels for the cube slice, and perform pruning by slice."""
if self.ca_as_0th:
labels = self._cube.labels(include_transforms_for_dims=hs_dims)[1:]
else:
labels = self._cube.labels(include_transforms_for_dims=hs_dims)[-2:]
if not prune:
return labels
def prune_dimension_labels(labels, prune_indices):
"""Get pruned labels for single dimension, besed on prune inds."""
labels = [label for label, prune in zip(labels, prune_indices) if not prune]
return labels
labels = [
prune_dimension_labels(dim_labels, dim_prune_inds)
for dim_labels, dim_prune_inds in zip(labels, self._prune_indices(hs_dims))
]
return labels |
def is_current(self):
"""``True`` if current request has same endpoint with the item.
The property should be used in a bound request context, or the
:class:`RuntimeError` may be raised.
"""
if not self.is_internal:
return False # always false for external url
has_same_endpoint = (request.endpoint == self.endpoint)
has_same_args = (request.view_args == self.args)
return has_same_endpoint and has_same_args | ``True`` if current request has same endpoint with the item.
The property should be used in a bound request context, or the
:class:`RuntimeError` may be raised. | Below is the the instruction that describes the task:
### Input:
``True`` if current request has same endpoint with the item.
The property should be used in a bound request context, or the
:class:`RuntimeError` may be raised.
### Response:
def is_current(self):
"""``True`` if current request has same endpoint with the item.
The property should be used in a bound request context, or the
:class:`RuntimeError` may be raised.
"""
if not self.is_internal:
return False # always false for external url
has_same_endpoint = (request.endpoint == self.endpoint)
has_same_args = (request.view_args == self.args)
return has_same_endpoint and has_same_args |
def create_tenant(self, tenant_id, retentions=None):
"""
Create a tenant. Currently nothing can be set (to be fixed after the master
version of Hawkular-Metrics has fixed implementation.
:param retentions: A set of retention settings, see Hawkular-Metrics documentation for more info
"""
item = { 'id': tenant_id }
if retentions is not None:
item['retentions'] = retentions
self._post(self._get_tenants_url(), json.dumps(item, indent=2)) | Create a tenant. Currently nothing can be set (to be fixed after the master
version of Hawkular-Metrics has fixed implementation.
:param retentions: A set of retention settings, see Hawkular-Metrics documentation for more info | Below is the the instruction that describes the task:
### Input:
Create a tenant. Currently nothing can be set (to be fixed after the master
version of Hawkular-Metrics has fixed implementation.
:param retentions: A set of retention settings, see Hawkular-Metrics documentation for more info
### Response:
def create_tenant(self, tenant_id, retentions=None):
"""
Create a tenant. Currently nothing can be set (to be fixed after the master
version of Hawkular-Metrics has fixed implementation.
:param retentions: A set of retention settings, see Hawkular-Metrics documentation for more info
"""
item = { 'id': tenant_id }
if retentions is not None:
item['retentions'] = retentions
self._post(self._get_tenants_url(), json.dumps(item, indent=2)) |
def _split_line(s, parts):
"""
Parameters
----------
s: string
Fixed-length string to split
parts: list of (name, length) pairs
Used to break up string, name '_' will be filtered from output.
Returns
-------
Dict of name:contents of string at given location.
"""
out = {}
start = 0
for name, length in parts:
out[name] = s[start:start + length].strip()
start += length
del out['_']
return out | Parameters
----------
s: string
Fixed-length string to split
parts: list of (name, length) pairs
Used to break up string, name '_' will be filtered from output.
Returns
-------
    Dict of name:contents of string at given location. | Below is the instruction that describes the task:
### Input:
Parameters
----------
s: string
Fixed-length string to split
parts: list of (name, length) pairs
Used to break up string, name '_' will be filtered from output.
Returns
-------
Dict of name:contents of string at given location.
### Response:
def _split_line(s, parts):
"""
Parameters
----------
s: string
Fixed-length string to split
parts: list of (name, length) pairs
Used to break up string, name '_' will be filtered from output.
Returns
-------
Dict of name:contents of string at given location.
"""
out = {}
start = 0
for name, length in parts:
out[name] = s[start:start + length].strip()
start += length
del out['_']
return out |
def build_undercloud_on_libvirt(self, image_path,
rhsm=None, repositories=[]):
"""Build the Undercloud by using instack-virt-setup script."""
self.run('sysctl net.ipv4.ip_forward=1')
self.fetch_image(path=image_path, dest='/home/stack/guest_image.qcow2',
user='stack')
# NOTE(Gonéri): this is a hack for our OpenStack, the MTU of its outgoing route
# is 1400 and libvirt do not provide a mechanism to adjust the guests MTU.
self.run("LIBGUESTFS_BACKEND=direct virt-customize -a /home/stack/guest_image.qcow2 --run-command 'echo MTU=\"1400\" >> /etc/sysconfig/network-scripts/ifcfg-eth0'")
env = Environment()
env.loader = FileSystemLoader(pkg_data_filename('template'))
template = env.get_template('virt-setup-env.j2')
self.run('mkdir -p /home/stack/DIB', user='stack')
self.run('cp -v /etc/yum.repos.d/*.repo /home/stack/DIB', user='stack')
# NOTE(Gonéri): Hack to be sure DIB won't complain because of missing gpg files
# self.run('sed -i "s,gpgcheck=1,gpgcheck=0," /home/stack/DIB/*.repo', user='stack')
dib_yum_repo_conf = self.run('find /home/stack/DIB -type f', user='stack')[0].split()
virt_setup_template = {
'dib_yum_repo_conf': dib_yum_repo_conf,
'node': {
'count': 2,
'mem': 6144,
'cpu': 2
},
'undercloud_node_mem': 8192,
'guest_image_name': '/home/stack/guest_image.qcow2'
}
if rhsm is not None:
virt_setup_template['rhsm'] = {
'login': rhsm.get('login'),
'password': rhsm.get('password', os.environ.get('RHN_PW')),
'pool_id': rhsm.get('pool_id', ''),
'repositories': [i['name'] for i in repositories if i['type'] == 'rhsm_channel']
}
virt_setup_env = template.render(virt_setup_template)
self.create_file('virt-setup-env', virt_setup_env, user='stack')
self.run('virsh destroy instack', ignore_error=True)
self.run('virsh undefine instack --remove-all-storage', ignore_error=True)
self.run('source virt-setup-env; instack-virt-setup', user='stack')
undercloud_ip = self.run(
'/sbin/ip n | grep $(tripleo get-vm-mac instack) | awk \'{print $1;}\'',
user='stack')[0]
assert undercloud_ip, 'undercloud should have an IP'
undercloud = Undercloud(hostname=undercloud_ip,
via_ip=self.hostname,
user='root',
key_filename=self._key_filename)
        return undercloud | Build the Undercloud by using instack-virt-setup script. | Below is the instruction that describes the task:
### Input:
Build the Undercloud by using instack-virt-setup script.
### Response:
def build_undercloud_on_libvirt(self, image_path,
rhsm=None, repositories=[]):
"""Build the Undercloud by using instack-virt-setup script."""
self.run('sysctl net.ipv4.ip_forward=1')
self.fetch_image(path=image_path, dest='/home/stack/guest_image.qcow2',
user='stack')
# NOTE(Gonéri): this is a hack for our OpenStack, the MTU of its outgoing route
# is 1400 and libvirt do not provide a mechanism to adjust the guests MTU.
self.run("LIBGUESTFS_BACKEND=direct virt-customize -a /home/stack/guest_image.qcow2 --run-command 'echo MTU=\"1400\" >> /etc/sysconfig/network-scripts/ifcfg-eth0'")
env = Environment()
env.loader = FileSystemLoader(pkg_data_filename('template'))
template = env.get_template('virt-setup-env.j2')
self.run('mkdir -p /home/stack/DIB', user='stack')
self.run('cp -v /etc/yum.repos.d/*.repo /home/stack/DIB', user='stack')
# NOTE(Gonéri): Hack to be sure DIB won't complain because of missing gpg files
# self.run('sed -i "s,gpgcheck=1,gpgcheck=0," /home/stack/DIB/*.repo', user='stack')
dib_yum_repo_conf = self.run('find /home/stack/DIB -type f', user='stack')[0].split()
virt_setup_template = {
'dib_yum_repo_conf': dib_yum_repo_conf,
'node': {
'count': 2,
'mem': 6144,
'cpu': 2
},
'undercloud_node_mem': 8192,
'guest_image_name': '/home/stack/guest_image.qcow2'
}
if rhsm is not None:
virt_setup_template['rhsm'] = {
'login': rhsm.get('login'),
'password': rhsm.get('password', os.environ.get('RHN_PW')),
'pool_id': rhsm.get('pool_id', ''),
'repositories': [i['name'] for i in repositories if i['type'] == 'rhsm_channel']
}
virt_setup_env = template.render(virt_setup_template)
self.create_file('virt-setup-env', virt_setup_env, user='stack')
self.run('virsh destroy instack', ignore_error=True)
self.run('virsh undefine instack --remove-all-storage', ignore_error=True)
self.run('source virt-setup-env; instack-virt-setup', user='stack')
undercloud_ip = self.run(
'/sbin/ip n | grep $(tripleo get-vm-mac instack) | awk \'{print $1;}\'',
user='stack')[0]
assert undercloud_ip, 'undercloud should have an IP'
undercloud = Undercloud(hostname=undercloud_ip,
via_ip=self.hostname,
user='root',
key_filename=self._key_filename)
return undercloud |
def Rackett_mixture(T, xs, MWs, Tcs, Pcs, Zrs):
r'''Calculate mixture liquid density using the Rackett-derived mixing rule
as shown in [2]_.
.. math::
V_m = \sum_i\frac{x_i T_{ci}}{MW_i P_{ci}} Z_{R,m}^{(1 + (1 - T_r)^{2/7})} R \sum_i x_i MW_i
Parameters
----------
T : float
Temperature of liquid [K]
xs: list
Mole fractions of each component, []
MWs : list
Molecular weights of each component [g/mol]
Tcs : list
Critical temperatures of each component [K]
Pcs : list
Critical pressures of each component [Pa]
Zrs : list
Rackett parameters of each component []
Returns
-------
Vm : float
Mixture liquid volume [m^3/mol]
Notes
-----
Model for pure compounds in [1]_ forms the basis for this model, shown in
[2]_. Molecular weights are used as weighing by such has been found to
provide higher accuracy in [2]_. The model can also be used without
molecular weights, but results are somewhat different.
As with the Rackett model, critical compressibilities may be used if
Rackett parameters have not been regressed.
Critical mixture temperature, and compressibility are all obtained with
simple mixing rules.
Examples
--------
Calculation in [2]_ for methanol and water mixture. Result matches example.
>>> Rackett_mixture(T=298., xs=[0.4576, 0.5424], MWs=[32.04, 18.01], Tcs=[512.58, 647.29], Pcs=[8.096E6, 2.209E7], Zrs=[0.2332, 0.2374])
2.625288603174508e-05
References
----------
.. [1] Rackett, Harold G. "Equation of State for Saturated Liquids."
Journal of Chemical & Engineering Data 15, no. 4 (1970): 514-517.
doi:10.1021/je60047a012
.. [2] Danner, Ronald P, and Design Institute for Physical Property Data.
Manual for Predicting Chemical Process Design Data. New York, N.Y, 1982.
'''
if not none_and_length_check([xs, MWs, Tcs, Pcs, Zrs]):
raise Exception('Function inputs are incorrect format')
Tc = mixing_simple(xs, Tcs)
Zr = mixing_simple(xs, Zrs)
MW = mixing_simple(xs, MWs)
Tr = T/Tc
bigsum = sum(xs[i]*Tcs[i]/Pcs[i]/MWs[i] for i in range(len(xs)))
return (R*bigsum*Zr**(1. + (1. - Tr)**(2/7.)))*MW | r'''Calculate mixture liquid density using the Rackett-derived mixing rule
as shown in [2]_.
.. math::
V_m = \sum_i\frac{x_i T_{ci}}{MW_i P_{ci}} Z_{R,m}^{(1 + (1 - T_r)^{2/7})} R \sum_i x_i MW_i
Parameters
----------
T : float
Temperature of liquid [K]
xs: list
Mole fractions of each component, []
MWs : list
Molecular weights of each component [g/mol]
Tcs : list
Critical temperatures of each component [K]
Pcs : list
Critical pressures of each component [Pa]
Zrs : list
Rackett parameters of each component []
Returns
-------
Vm : float
Mixture liquid volume [m^3/mol]
Notes
-----
Model for pure compounds in [1]_ forms the basis for this model, shown in
[2]_. Molecular weights are used as weighing by such has been found to
provide higher accuracy in [2]_. The model can also be used without
molecular weights, but results are somewhat different.
As with the Rackett model, critical compressibilities may be used if
Rackett parameters have not been regressed.
Critical mixture temperature, and compressibility are all obtained with
simple mixing rules.
Examples
--------
Calculation in [2]_ for methanol and water mixture. Result matches example.
>>> Rackett_mixture(T=298., xs=[0.4576, 0.5424], MWs=[32.04, 18.01], Tcs=[512.58, 647.29], Pcs=[8.096E6, 2.209E7], Zrs=[0.2332, 0.2374])
2.625288603174508e-05
References
----------
.. [1] Rackett, Harold G. "Equation of State for Saturated Liquids."
Journal of Chemical & Engineering Data 15, no. 4 (1970): 514-517.
doi:10.1021/je60047a012
.. [2] Danner, Ronald P, and Design Institute for Physical Property Data.
       Manual for Predicting Chemical Process Design Data. New York, N.Y, 1982. | Below is the instruction that describes the task:
### Input:
r'''Calculate mixture liquid density using the Rackett-derived mixing rule
as shown in [2]_.
.. math::
V_m = \sum_i\frac{x_i T_{ci}}{MW_i P_{ci}} Z_{R,m}^{(1 + (1 - T_r)^{2/7})} R \sum_i x_i MW_i
Parameters
----------
T : float
Temperature of liquid [K]
xs: list
Mole fractions of each component, []
MWs : list
Molecular weights of each component [g/mol]
Tcs : list
Critical temperatures of each component [K]
Pcs : list
Critical pressures of each component [Pa]
Zrs : list
Rackett parameters of each component []
Returns
-------
Vm : float
Mixture liquid volume [m^3/mol]
Notes
-----
Model for pure compounds in [1]_ forms the basis for this model, shown in
[2]_. Molecular weights are used as weighing by such has been found to
provide higher accuracy in [2]_. The model can also be used without
molecular weights, but results are somewhat different.
As with the Rackett model, critical compressibilities may be used if
Rackett parameters have not been regressed.
Critical mixture temperature, and compressibility are all obtained with
simple mixing rules.
Examples
--------
Calculation in [2]_ for methanol and water mixture. Result matches example.
>>> Rackett_mixture(T=298., xs=[0.4576, 0.5424], MWs=[32.04, 18.01], Tcs=[512.58, 647.29], Pcs=[8.096E6, 2.209E7], Zrs=[0.2332, 0.2374])
2.625288603174508e-05
References
----------
.. [1] Rackett, Harold G. "Equation of State for Saturated Liquids."
Journal of Chemical & Engineering Data 15, no. 4 (1970): 514-517.
doi:10.1021/je60047a012
.. [2] Danner, Ronald P, and Design Institute for Physical Property Data.
Manual for Predicting Chemical Process Design Data. New York, N.Y, 1982.
### Response:
def Rackett_mixture(T, xs, MWs, Tcs, Pcs, Zrs):
r'''Calculate mixture liquid density using the Rackett-derived mixing rule
as shown in [2]_.
.. math::
V_m = \sum_i\frac{x_i T_{ci}}{MW_i P_{ci}} Z_{R,m}^{(1 + (1 - T_r)^{2/7})} R \sum_i x_i MW_i
Parameters
----------
T : float
Temperature of liquid [K]
xs: list
Mole fractions of each component, []
MWs : list
Molecular weights of each component [g/mol]
Tcs : list
Critical temperatures of each component [K]
Pcs : list
Critical pressures of each component [Pa]
Zrs : list
Rackett parameters of each component []
Returns
-------
Vm : float
Mixture liquid volume [m^3/mol]
Notes
-----
Model for pure compounds in [1]_ forms the basis for this model, shown in
[2]_. Molecular weights are used as weighing by such has been found to
provide higher accuracy in [2]_. The model can also be used without
molecular weights, but results are somewhat different.
As with the Rackett model, critical compressibilities may be used if
Rackett parameters have not been regressed.
Critical mixture temperature, and compressibility are all obtained with
simple mixing rules.
Examples
--------
Calculation in [2]_ for methanol and water mixture. Result matches example.
>>> Rackett_mixture(T=298., xs=[0.4576, 0.5424], MWs=[32.04, 18.01], Tcs=[512.58, 647.29], Pcs=[8.096E6, 2.209E7], Zrs=[0.2332, 0.2374])
2.625288603174508e-05
References
----------
.. [1] Rackett, Harold G. "Equation of State for Saturated Liquids."
Journal of Chemical & Engineering Data 15, no. 4 (1970): 514-517.
doi:10.1021/je60047a012
.. [2] Danner, Ronald P, and Design Institute for Physical Property Data.
Manual for Predicting Chemical Process Design Data. New York, N.Y, 1982.
'''
if not none_and_length_check([xs, MWs, Tcs, Pcs, Zrs]):
raise Exception('Function inputs are incorrect format')
Tc = mixing_simple(xs, Tcs)
Zr = mixing_simple(xs, Zrs)
MW = mixing_simple(xs, MWs)
Tr = T/Tc
bigsum = sum(xs[i]*Tcs[i]/Pcs[i]/MWs[i] for i in range(len(xs)))
return (R*bigsum*Zr**(1. + (1. - Tr)**(2/7.)))*MW |
def generate_query_key(self, serializer):
"""Get the key that can be passed to Django's filter method.
        To account for serializer field name rewrites, this method
translates serializer field names to model field names
by inspecting `serializer`.
For example, a query like `filter{users.events}` would be
returned as `users__events`.
Arguments:
serializer: A DRF serializer
Returns:
A filter key.
"""
rewritten = []
last = len(self.field) - 1
s = serializer
field = None
for i, field_name in enumerate(self.field):
# Note: .fields can be empty for related serializers that aren't
# sideloaded. Fields that are deferred also won't be present.
# If field name isn't in serializer.fields, get full list from
# get_all_fields() method. This is somewhat expensive, so only do
# this if we have to.
fields = s.fields
if field_name not in fields:
fields = getattr(s, 'get_all_fields', lambda: {})()
if field_name == 'pk':
rewritten.append('pk')
continue
if field_name not in fields:
raise ValidationError(
"Invalid filter field: %s" % field_name
)
field = fields[field_name]
# For remote fields, strip off '_set' for filtering. This is a
# weird Django inconsistency.
model_field_name = field.source or field_name
model_field = get_model_field(s.get_model(), model_field_name)
if isinstance(model_field, RelatedObject):
model_field_name = model_field.field.related_query_name()
# If get_all_fields() was used above, field could be unbound,
# and field.source would be None
rewritten.append(model_field_name)
if i == last:
break
# Recurse into nested field
s = getattr(field, 'serializer', None)
if isinstance(s, serializers.ListSerializer):
s = s.child
if not s:
raise ValidationError(
"Invalid nested filter field: %s" % field_name
)
if self.operator:
rewritten.append(self.operator)
return ('__'.join(rewritten), field) | Get the key that can be passed to Django's filter method.
    To account for serializer field name rewrites, this method
translates serializer field names to model field names
by inspecting `serializer`.
For example, a query like `filter{users.events}` would be
returned as `users__events`.
Arguments:
serializer: A DRF serializer
Returns:
        A filter key. | Below is the instruction that describes the task:
### Input:
Get the key that can be passed to Django's filter method.
    To account for serializer field name rewrites, this method
translates serializer field names to model field names
by inspecting `serializer`.
For example, a query like `filter{users.events}` would be
returned as `users__events`.
Arguments:
serializer: A DRF serializer
Returns:
A filter key.
### Response:
def generate_query_key(self, serializer):
"""Get the key that can be passed to Django's filter method.
        To account for serializer field name rewrites, this method
translates serializer field names to model field names
by inspecting `serializer`.
For example, a query like `filter{users.events}` would be
returned as `users__events`.
Arguments:
serializer: A DRF serializer
Returns:
A filter key.
"""
rewritten = []
last = len(self.field) - 1
s = serializer
field = None
for i, field_name in enumerate(self.field):
# Note: .fields can be empty for related serializers that aren't
# sideloaded. Fields that are deferred also won't be present.
# If field name isn't in serializer.fields, get full list from
# get_all_fields() method. This is somewhat expensive, so only do
# this if we have to.
fields = s.fields
if field_name not in fields:
fields = getattr(s, 'get_all_fields', lambda: {})()
if field_name == 'pk':
rewritten.append('pk')
continue
if field_name not in fields:
raise ValidationError(
"Invalid filter field: %s" % field_name
)
field = fields[field_name]
# For remote fields, strip off '_set' for filtering. This is a
# weird Django inconsistency.
model_field_name = field.source or field_name
model_field = get_model_field(s.get_model(), model_field_name)
if isinstance(model_field, RelatedObject):
model_field_name = model_field.field.related_query_name()
# If get_all_fields() was used above, field could be unbound,
# and field.source would be None
rewritten.append(model_field_name)
if i == last:
break
# Recurse into nested field
s = getattr(field, 'serializer', None)
if isinstance(s, serializers.ListSerializer):
s = s.child
if not s:
raise ValidationError(
"Invalid nested filter field: %s" % field_name
)
if self.operator:
rewritten.append(self.operator)
return ('__'.join(rewritten), field) |
def validate(self):
"""Check self.data. Raise InvalidConfig on error
:return: None
"""
if (self.data.get('content-type') or self.data.get('body')) and \
self.data.get('method', '').lower() not in CONTENT_TYPE_METHODS:
raise InvalidConfig(
extra_body='The body/content-type option only can be used with the {} methods. The device is {}. '
'Check the configuration file.'.format(', '.join(CONTENT_TYPE_METHODS), self.name)
)
self.data['content-type'] = CONTENT_TYPE_ALIASES.get(self.data.get('content-type'),
self.data.get('content-type'))
form_type = CONTENT_TYPE_ALIASES['form']
if self.data.get('body') and (self.data.get('content-type') or form_type) == form_type:
try:
self.data['body'] = json.loads(self.data['body'])
except JSONDecodeError:
raise InvalidConfig(
extra_body='Invalid JSON body on {} device.'.format(self.name)
) | Check self.data. Raise InvalidConfig on error
    :return: None | Below is the instruction that describes the task:
### Input:
Check self.data. Raise InvalidConfig on error
:return: None
### Response:
def validate(self):
"""Check self.data. Raise InvalidConfig on error
:return: None
"""
if (self.data.get('content-type') or self.data.get('body')) and \
self.data.get('method', '').lower() not in CONTENT_TYPE_METHODS:
raise InvalidConfig(
extra_body='The body/content-type option only can be used with the {} methods. The device is {}. '
'Check the configuration file.'.format(', '.join(CONTENT_TYPE_METHODS), self.name)
)
self.data['content-type'] = CONTENT_TYPE_ALIASES.get(self.data.get('content-type'),
self.data.get('content-type'))
form_type = CONTENT_TYPE_ALIASES['form']
if self.data.get('body') and (self.data.get('content-type') or form_type) == form_type:
try:
self.data['body'] = json.loads(self.data['body'])
except JSONDecodeError:
raise InvalidConfig(
extra_body='Invalid JSON body on {} device.'.format(self.name)
) |
def get_cel_to_gal_angle(skydir):
"""Calculate the rotation angle in radians between the longitude
axes of a local projection in celestial and galactic coordinates.
Parameters
----------
skydir : `~astropy.coordinates.SkyCoord`
Direction of projection center.
Returns
-------
angle : float
Rotation angle in radians.
"""
wcs0 = create_wcs(skydir, coordsys='CEL')
wcs1 = create_wcs(skydir, coordsys='GAL')
x, y = SkyCoord.to_pixel(SkyCoord.from_pixel(1.0, 0.0, wcs0), wcs1)
return np.arctan2(y, x) | Calculate the rotation angle in radians between the longitude
axes of a local projection in celestial and galactic coordinates.
Parameters
----------
skydir : `~astropy.coordinates.SkyCoord`
Direction of projection center.
Returns
-------
angle : float
        Rotation angle in radians. | Below is the instruction that describes the task:
### Input:
Calculate the rotation angle in radians between the longitude
axes of a local projection in celestial and galactic coordinates.
Parameters
----------
skydir : `~astropy.coordinates.SkyCoord`
Direction of projection center.
Returns
-------
angle : float
Rotation angle in radians.
### Response:
def get_cel_to_gal_angle(skydir):
"""Calculate the rotation angle in radians between the longitude
axes of a local projection in celestial and galactic coordinates.
Parameters
----------
skydir : `~astropy.coordinates.SkyCoord`
Direction of projection center.
Returns
-------
angle : float
Rotation angle in radians.
"""
wcs0 = create_wcs(skydir, coordsys='CEL')
wcs1 = create_wcs(skydir, coordsys='GAL')
x, y = SkyCoord.to_pixel(SkyCoord.from_pixel(1.0, 0.0, wcs0), wcs1)
return np.arctan2(y, x) |
def pre_filter(self):
""" Return rTorrent condition to speed up data transfer.
"""
inner = self._inner.pre_filter()
if inner:
if inner.startswith('"not=$') and inner.endswith('"') and '\\' not in inner:
return inner[6:-1] # double negation, return inner command
elif inner.startswith('"'):
inner = '"$' + inner[1:]
else:
inner = '$' + inner
return 'not=' + inner
else:
            return '' | Return rTorrent condition to speed up data transfer. | Below is the instruction that describes the task:
### Input:
Return rTorrent condition to speed up data transfer.
### Response:
def pre_filter(self):
""" Return rTorrent condition to speed up data transfer.
"""
inner = self._inner.pre_filter()
if inner:
if inner.startswith('"not=$') and inner.endswith('"') and '\\' not in inner:
return inner[6:-1] # double negation, return inner command
elif inner.startswith('"'):
inner = '"$' + inner[1:]
else:
inner = '$' + inner
return 'not=' + inner
else:
return '' |
def anticlockwise_sort(pps):
"""
Sort a list of 2D points in anticlockwise order
:param pps: List of points to be sorted
:return: Sorted list of points
"""
newpps = list()
angles = np.zeros(len(pps), np.float)
for ipp, pp in enumerate(pps):
angles[ipp] = np.arctan2(pp[1], pp[0])
iisorted = np.argsort(angles)
for ii in range(len(pps)):
newpps.append(pps[iisorted[ii]])
return newpps | Sort a list of 2D points in anticlockwise order
:param pps: List of points to be sorted
    :return: Sorted list of points | Below is the instruction that describes the task:
### Input:
Sort a list of 2D points in anticlockwise order
:param pps: List of points to be sorted
:return: Sorted list of points
### Response:
def anticlockwise_sort(pps):
"""
Sort a list of 2D points in anticlockwise order
:param pps: List of points to be sorted
:return: Sorted list of points
"""
newpps = list()
angles = np.zeros(len(pps), np.float)
for ipp, pp in enumerate(pps):
angles[ipp] = np.arctan2(pp[1], pp[0])
iisorted = np.argsort(angles)
for ii in range(len(pps)):
newpps.append(pps[iisorted[ii]])
return newpps |
def is_namedtuple(type_: Type[Any]) -> bool:
'''
Generated with typing.NamedTuple
'''
    return _issubclass(type_, tuple) and hasattr(type_, '_field_types') and hasattr(type_, '_fields') | Generated with typing.NamedTuple | Below is the instruction that describes the task:
### Input:
Generated with typing.NamedTuple
### Response:
def is_namedtuple(type_: Type[Any]) -> bool:
'''
Generated with typing.NamedTuple
'''
return _issubclass(type_, tuple) and hasattr(type_, '_field_types') and hasattr(type_, '_fields') |
def _tokenize(self, text):
"""Tokenize the text into a list of sentences with a list of words.
:param text: raw text
:return: tokenized text
:rtype : list
"""
sentences = []
tokens = []
for word in self._clean_accents(text).split(' '):
tokens.append(word)
if '.' in word:
sentences.append(tokens)
tokens = []
return sentences | Tokenize the text into a list of sentences with a list of words.
:param text: raw text
:return: tokenized text
    :rtype : list | Below is the instruction that describes the task:
### Input:
Tokenize the text into a list of sentences with a list of words.
:param text: raw text
:return: tokenized text
:rtype : list
### Response:
def _tokenize(self, text):
"""Tokenize the text into a list of sentences with a list of words.
:param text: raw text
:return: tokenized text
:rtype : list
"""
sentences = []
tokens = []
for word in self._clean_accents(text).split(' '):
tokens.append(word)
if '.' in word:
sentences.append(tokens)
tokens = []
return sentences |
def gauss_fltr_astropy(dem, size=None, sigma=None, origmask=False, fill_interior=False):
"""Astropy gaussian filter properly handles convolution with NaN
http://stackoverflow.com/questions/23832852/by-which-measures-should-i-set-the-size-of-my-gaussian-filter-in-matlab
width1 = 3; sigma1 = (width1-1) / 6;
Specify width for smallest feature of interest and determine sigma appropriately
sigma is width of 1 std in pixels (not multiplier)
scipy and astropy both use cutoff of 4*sigma on either side of kernel - 99.994%
3*sigma on either side of kernel - 99.7%
If sigma is specified, filter width will be a multiple of 8 times sigma
Alternatively, specify filter size, then compute sigma: sigma = (size - 1) / 8.
If size is < the required width for 6-8 sigma, need to use different mode to create kernel
mode 'oversample' and 'center' are essentially identical for sigma 1, but very different for sigma 0.3
The sigma/size calculations below should work for non-integer sigma
"""
#import astropy.nddata
import astropy.convolution
dem = malib.checkma(dem)
#Generate 2D gaussian kernel for input sigma and size
#Default size is 8*sigma in x and y directions
#kernel = astropy.nddata.make_kernel([size, size], sigma, 'gaussian')
#Size must be odd
if size is not None:
size = int(np.floor(size/2)*2 + 1)
size = max(size, 3)
#Truncate the filter at this many standard deviations. Default is 4.0
truncate = 3.0
if size is not None and sigma is None:
sigma = (size - 1) / (2*truncate)
elif size is None and sigma is not None:
#Round up to nearest odd int
size = int(np.ceil((sigma * (2*truncate) + 1)/2)*2 - 1)
elif size is None and sigma is None:
#Use default parameters
sigma = 1
size = int(np.ceil((sigma * (2*truncate) + 1)/2)*2 - 1)
size = max(size, 3)
kernel = astropy.convolution.Gaussian2DKernel(sigma, x_size=size, y_size=size, mode='oversample')
print("Applying gaussian smoothing filter with size %i and sigma %0.3f (sum %0.3f)" % \
(size, sigma, kernel.array.sum()))
#This will fill holes
#np.nan is float
#dem_filt_gauss = astropy.nddata.convolve(dem.astype(float).filled(np.nan), kernel, boundary='fill', fill_value=np.nan)
#dem_filt_gauss = astropy.convolution.convolve(dem.astype(float).filled(np.nan), kernel, boundary='fill', fill_value=np.nan)
#Added normalization to ensure filtered values are not brightened/darkened if kernelsum != 1
dem_filt_gauss = astropy.convolution.convolve(dem.astype(float).filled(np.nan), kernel, boundary='fill', fill_value=np.nan, normalize_kernel=True)
#This will preserve original ndv pixels, applying original mask after filtering
if origmask:
print("Applying original mask")
#Allow filling of interior holes, but use original outer edge
if fill_interior:
mask = malib.maskfill(dem)
else:
mask = dem.mask
dem_filt_gauss = np.ma.array(dem_filt_gauss, mask=mask, fill_value=dem.fill_value)
out = np.ma.fix_invalid(dem_filt_gauss, copy=False, fill_value=dem.fill_value)
out.set_fill_value(dem.fill_value.astype(dem.dtype))
return out.astype(dem.dtype) | Astropy gaussian filter properly handles convolution with NaN
http://stackoverflow.com/questions/23832852/by-which-measures-should-i-set-the-size-of-my-gaussian-filter-in-matlab
width1 = 3; sigma1 = (width1-1) / 6;
Specify width for smallest feature of interest and determine sigma appropriately
sigma is width of 1 std in pixels (not multiplier)
scipy and astropy both use cutoff of 4*sigma on either side of kernel - 99.994%
3*sigma on either side of kernel - 99.7%
If sigma is specified, filter width will be a multiple of 8 times sigma
Alternatively, specify filter size, then compute sigma: sigma = (size - 1) / 8.
If size is < the required width for 6-8 sigma, need to use different mode to create kernel
mode 'oversample' and 'center' are essentially identical for sigma 1, but very different for sigma 0.3
    The sigma/size calculations below should work for non-integer sigma | Below is the instruction that describes the task:
### Input:
Astropy gaussian filter properly handles convolution with NaN
http://stackoverflow.com/questions/23832852/by-which-measures-should-i-set-the-size-of-my-gaussian-filter-in-matlab
width1 = 3; sigma1 = (width1-1) / 6;
Specify width for smallest feature of interest and determine sigma appropriately
sigma is width of 1 std in pixels (not multiplier)
scipy and astropy both use cutoff of 4*sigma on either side of kernel - 99.994%
3*sigma on either side of kernel - 99.7%
If sigma is specified, filter width will be a multiple of 8 times sigma
Alternatively, specify filter size, then compute sigma: sigma = (size - 1) / 8.
If size is < the required width for 6-8 sigma, need to use different mode to create kernel
mode 'oversample' and 'center' are essentially identical for sigma 1, but very different for sigma 0.3
The sigma/size calculations below should work for non-integer sigma
### Response:
def gauss_fltr_astropy(dem, size=None, sigma=None, origmask=False, fill_interior=False):
"""Astropy gaussian filter properly handles convolution with NaN
http://stackoverflow.com/questions/23832852/by-which-measures-should-i-set-the-size-of-my-gaussian-filter-in-matlab
width1 = 3; sigma1 = (width1-1) / 6;
Specify width for smallest feature of interest and determine sigma appropriately
sigma is width of 1 std in pixels (not multiplier)
scipy and astropy both use cutoff of 4*sigma on either side of kernel - 99.994%
3*sigma on either side of kernel - 99.7%
If sigma is specified, filter width will be a multiple of 8 times sigma
Alternatively, specify filter size, then compute sigma: sigma = (size - 1) / 8.
If size is < the required width for 6-8 sigma, need to use different mode to create kernel
mode 'oversample' and 'center' are essentially identical for sigma 1, but very different for sigma 0.3
The sigma/size calculations below should work for non-integer sigma
"""
#import astropy.nddata
import astropy.convolution
dem = malib.checkma(dem)
#Generate 2D gaussian kernel for input sigma and size
#Default size is 8*sigma in x and y directions
#kernel = astropy.nddata.make_kernel([size, size], sigma, 'gaussian')
#Size must be odd
if size is not None:
size = int(np.floor(size/2)*2 + 1)
size = max(size, 3)
#Truncate the filter at this many standard deviations. Default is 4.0
truncate = 3.0
if size is not None and sigma is None:
sigma = (size - 1) / (2*truncate)
elif size is None and sigma is not None:
#Round up to nearest odd int
size = int(np.ceil((sigma * (2*truncate) + 1)/2)*2 - 1)
elif size is None and sigma is None:
#Use default parameters
sigma = 1
size = int(np.ceil((sigma * (2*truncate) + 1)/2)*2 - 1)
size = max(size, 3)
kernel = astropy.convolution.Gaussian2DKernel(sigma, x_size=size, y_size=size, mode='oversample')
print("Applying gaussian smoothing filter with size %i and sigma %0.3f (sum %0.3f)" % \
(size, sigma, kernel.array.sum()))
#This will fill holes
#np.nan is float
#dem_filt_gauss = astropy.nddata.convolve(dem.astype(float).filled(np.nan), kernel, boundary='fill', fill_value=np.nan)
#dem_filt_gauss = astropy.convolution.convolve(dem.astype(float).filled(np.nan), kernel, boundary='fill', fill_value=np.nan)
#Added normalization to ensure filtered values are not brightened/darkened if kernelsum != 1
dem_filt_gauss = astropy.convolution.convolve(dem.astype(float).filled(np.nan), kernel, boundary='fill', fill_value=np.nan, normalize_kernel=True)
#This will preserve original ndv pixels, applying original mask after filtering
if origmask:
print("Applying original mask")
#Allow filling of interior holes, but use original outer edge
if fill_interior:
mask = malib.maskfill(dem)
else:
mask = dem.mask
dem_filt_gauss = np.ma.array(dem_filt_gauss, mask=mask, fill_value=dem.fill_value)
out = np.ma.fix_invalid(dem_filt_gauss, copy=False, fill_value=dem.fill_value)
out.set_fill_value(dem.fill_value.astype(dem.dtype))
return out.astype(dem.dtype) |
def length(self, t0=0, t1=1, error=LENGTH_ERROR, min_depth=LENGTH_MIN_DEPTH):
"""Calculate the length of the path up to a certain position"""
if t0 == 0 and t1 == 1:
if self._length_info['bpoints'] == self.bpoints() \
and self._length_info['error'] >= error \
and self._length_info['min_depth'] >= min_depth:
return self._length_info['length']
# using scipy.integrate.quad is quick
if _quad_available:
s = quad(lambda tau: abs(self.derivative(tau)), t0, t1,
epsabs=error, limit=1000)[0]
else:
s = segment_length(self, t0, t1, self.point(t0), self.point(t1),
error, min_depth, 0)
if t0 == 0 and t1 == 1:
self._length_info['length'] = s
self._length_info['bpoints'] = self.bpoints()
self._length_info['error'] = error
self._length_info['min_depth'] = min_depth
return self._length_info['length']
else:
return s | Calculate the length of the path up to a certain position | Below is the the instruction that describes the task:
### Input:
Calculate the length of the path up to a certain position
### Response:
def length(self, t0=0, t1=1, error=LENGTH_ERROR, min_depth=LENGTH_MIN_DEPTH):
"""Calculate the length of the path up to a certain position"""
if t0 == 0 and t1 == 1:
if self._length_info['bpoints'] == self.bpoints() \
and self._length_info['error'] >= error \
and self._length_info['min_depth'] >= min_depth:
return self._length_info['length']
# using scipy.integrate.quad is quick
if _quad_available:
s = quad(lambda tau: abs(self.derivative(tau)), t0, t1,
epsabs=error, limit=1000)[0]
else:
s = segment_length(self, t0, t1, self.point(t0), self.point(t1),
error, min_depth, 0)
if t0 == 0 and t1 == 1:
self._length_info['length'] = s
self._length_info['bpoints'] = self.bpoints()
self._length_info['error'] = error
self._length_info['min_depth'] = min_depth
return self._length_info['length']
else:
return s |
def olympic_sprints(data_set='rogers_girolami_data'):
"""All olympics sprint winning times for multiple output prediction."""
X = np.zeros((0, 2))
Y = np.zeros((0, 1))
cats = {}
for i, dataset in enumerate([olympic_100m_men,
olympic_100m_women,
olympic_200m_men,
olympic_200m_women,
olympic_400m_men,
olympic_400m_women]):
data = dataset()
year = data['X']
time = data['Y']
X = np.vstack((X, np.hstack((year, np.ones_like(year)*i))))
Y = np.vstack((Y, time))
cats[dataset.__name__] = i
data['X'] = X
data['Y'] = Y
data['info'] = "Olympics sprint event winning for men and women to 2008. Data is from Rogers and Girolami's First Course in Machine Learning."
return data_details_return({
'X': X,
'Y': Y,
'covariates' : [decimalyear('year', '%Y'), discrete(cats, 'event')],
'response' : ['time'],
'info': "Olympics sprint event winning for men and women to 2008. Data is from Rogers and Girolami's First Course in Machine Learning.",
'output_info': {
0:'100m Men',
1:'100m Women',
2:'200m Men',
3:'200m Women',
4:'400m Men',
5:'400m Women'}
}, data_set) | All olympics sprint winning times for multiple output prediction. | Below is the the instruction that describes the task:
### Input:
All olympics sprint winning times for multiple output prediction.
### Response:
def olympic_sprints(data_set='rogers_girolami_data'):
"""All olympics sprint winning times for multiple output prediction."""
X = np.zeros((0, 2))
Y = np.zeros((0, 1))
cats = {}
for i, dataset in enumerate([olympic_100m_men,
olympic_100m_women,
olympic_200m_men,
olympic_200m_women,
olympic_400m_men,
olympic_400m_women]):
data = dataset()
year = data['X']
time = data['Y']
X = np.vstack((X, np.hstack((year, np.ones_like(year)*i))))
Y = np.vstack((Y, time))
cats[dataset.__name__] = i
data['X'] = X
data['Y'] = Y
data['info'] = "Olympics sprint event winning for men and women to 2008. Data is from Rogers and Girolami's First Course in Machine Learning."
return data_details_return({
'X': X,
'Y': Y,
'covariates' : [decimalyear('year', '%Y'), discrete(cats, 'event')],
'response' : ['time'],
'info': "Olympics sprint event winning for men and women to 2008. Data is from Rogers and Girolami's First Course in Machine Learning.",
'output_info': {
0:'100m Men',
1:'100m Women',
2:'200m Men',
3:'200m Women',
4:'400m Men',
5:'400m Women'}
}, data_set) |
def from_start_and_end(cls, start, end, aa=None, major_pitch=225.8,
major_radius=5.07, major_handedness='l',
minor_helix_type='alpha', orientation=1,
phi_c_alpha=0.0, minor_repeat=None):
"""Creates a `HelicalHelix` between a `start` and `end` point."""
start = numpy.array(start)
end = numpy.array(end)
if aa is None:
minor_rise_per_residue = _helix_parameters[minor_helix_type][1]
aa = int((numpy.linalg.norm(end - start) /
minor_rise_per_residue) + 1)
instance = cls(
aa=aa, major_pitch=major_pitch, major_radius=major_radius,
major_handedness=major_handedness,
minor_helix_type=minor_helix_type, orientation=orientation,
phi_c_alpha=phi_c_alpha, minor_repeat=minor_repeat)
instance.move_to(start=start, end=end)
return instance | Creates a `HelicalHelix` between a `start` and `end` point. | Below is the the instruction that describes the task:
### Input:
Creates a `HelicalHelix` between a `start` and `end` point.
### Response:
def from_start_and_end(cls, start, end, aa=None, major_pitch=225.8,
major_radius=5.07, major_handedness='l',
minor_helix_type='alpha', orientation=1,
phi_c_alpha=0.0, minor_repeat=None):
"""Creates a `HelicalHelix` between a `start` and `end` point."""
start = numpy.array(start)
end = numpy.array(end)
if aa is None:
minor_rise_per_residue = _helix_parameters[minor_helix_type][1]
aa = int((numpy.linalg.norm(end - start) /
minor_rise_per_residue) + 1)
instance = cls(
aa=aa, major_pitch=major_pitch, major_radius=major_radius,
major_handedness=major_handedness,
minor_helix_type=minor_helix_type, orientation=orientation,
phi_c_alpha=phi_c_alpha, minor_repeat=minor_repeat)
instance.move_to(start=start, end=end)
return instance |
def parse(self, extent, desc_tag):
# type: (int, UDFTag) -> None
'''
Parse the passed in data into a UDF Terminating Descriptor.
Parameters:
extent - The extent that this descriptor currently lives at.
desc_tag - A UDFTag object that represents the Descriptor Tag.
Returns:
Nothing.
'''
if self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF Terminating Descriptor already initialized')
self.desc_tag = desc_tag
self.orig_extent_loc = extent
self._initialized = True | Parse the passed in data into a UDF Terminating Descriptor.
Parameters:
extent - The extent that this descriptor currently lives at.
desc_tag - A UDFTag object that represents the Descriptor Tag.
Returns:
Nothing. | Below is the the instruction that describes the task:
### Input:
Parse the passed in data into a UDF Terminating Descriptor.
Parameters:
extent - The extent that this descriptor currently lives at.
desc_tag - A UDFTag object that represents the Descriptor Tag.
Returns:
Nothing.
### Response:
def parse(self, extent, desc_tag):
# type: (int, UDFTag) -> None
'''
Parse the passed in data into a UDF Terminating Descriptor.
Parameters:
extent - The extent that this descriptor currently lives at.
desc_tag - A UDFTag object that represents the Descriptor Tag.
Returns:
Nothing.
'''
if self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF Terminating Descriptor already initialized')
self.desc_tag = desc_tag
self.orig_extent_loc = extent
self._initialized = True |
def _gates_from_cli(opts, gate_opt):
"""Parses the given `gate_opt` into something understandable by
`strain.gate_data`.
"""
gates = {}
if getattr(opts, gate_opt) is None:
return gates
for gate in getattr(opts, gate_opt):
try:
ifo, central_time, half_dur, taper_dur = gate.split(':')
central_time = float(central_time)
half_dur = float(half_dur)
taper_dur = float(taper_dur)
except ValueError:
raise ValueError("--gate {} not formatted correctly; ".format(
gate) + "see help")
try:
gates[ifo].append((central_time, half_dur, taper_dur))
except KeyError:
gates[ifo] = [(central_time, half_dur, taper_dur)]
return gates | Parses the given `gate_opt` into something understandable by
`strain.gate_data`. | Below is the the instruction that describes the task:
### Input:
Parses the given `gate_opt` into something understandable by
`strain.gate_data`.
### Response:
def _gates_from_cli(opts, gate_opt):
"""Parses the given `gate_opt` into something understandable by
`strain.gate_data`.
"""
gates = {}
if getattr(opts, gate_opt) is None:
return gates
for gate in getattr(opts, gate_opt):
try:
ifo, central_time, half_dur, taper_dur = gate.split(':')
central_time = float(central_time)
half_dur = float(half_dur)
taper_dur = float(taper_dur)
except ValueError:
raise ValueError("--gate {} not formatted correctly; ".format(
gate) + "see help")
try:
gates[ifo].append((central_time, half_dur, taper_dur))
except KeyError:
gates[ifo] = [(central_time, half_dur, taper_dur)]
return gates |
def _unique_id(self, prefix):
"""
Generate a unique (within the graph) identifer
internal to graph generation.
"""
_id = self._id_gen
self._id_gen += 1
return prefix + str(_id) | Generate a unique (within the graph) identifer
internal to graph generation. | Below is the the instruction that describes the task:
### Input:
Generate a unique (within the graph) identifer
internal to graph generation.
### Response:
def _unique_id(self, prefix):
"""
Generate a unique (within the graph) identifer
internal to graph generation.
"""
_id = self._id_gen
self._id_gen += 1
return prefix + str(_id) |
def _insert_html_configs(c, *, project_name, short_project_name):
"""Insert HTML theme configurations.
"""
# Use the lsst-sphinx-bootstrap-theme
c['templates_path'] = [
'_templates',
lsst_sphinx_bootstrap_theme.get_html_templates_path()]
c['html_theme'] = 'lsst_sphinx_bootstrap_theme'
c['html_theme_path'] = [lsst_sphinx_bootstrap_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
c['html_theme_options'] = {'logotext': short_project_name}
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
c['html_title'] = project_name
# A shorter title for the navigation bar. Default is the same as
# html_title.
c['html_short_title'] = short_project_name
# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
c['html_logo'] = None
# The name of an image file (within the static path) to use as favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or
# 32x32 pixels large.
c['html_favicon'] = None
# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
if os.path.isdir('_static'):
c['html_static_path'] = ['_static']
else:
# If a project does not have a _static/ directory, don't list it
# so that there isn't a warning.
c['html_static_path'] = []
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
c['html_last_updated_fmt'] = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
c['html_use_smartypants'] = True
# If false, no module index is generated.
c['html_domain_indices'] = False
# If false, no index is generated.
c['html_use_index'] = False
# If true, the index is split into individual pages for each letter.
c['html_split_index'] = False
# If true, links to the reST sources are added to the pages.
c['html_show_sourcelink'] = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is
# True.
c['html_show_sphinx'] = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is
# True.
c['html_show_copyright'] = True
# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option must
# be the base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
c['html_file_suffix'] = '.html'
# Language to be used for generating the HTML full-text search index.
c['html_search_language'] = 'en'
# A dictionary with options for the search language support, empty by
# default. Now only 'ja' uses this config value
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory)
# that implements a search results scorer. If empty, the default will be
# used.
# html_search_scorer = 'scorer.js'
return c | Insert HTML theme configurations. | Below is the the instruction that describes the task:
### Input:
Insert HTML theme configurations.
### Response:
def _insert_html_configs(c, *, project_name, short_project_name):
"""Insert HTML theme configurations.
"""
# Use the lsst-sphinx-bootstrap-theme
c['templates_path'] = [
'_templates',
lsst_sphinx_bootstrap_theme.get_html_templates_path()]
c['html_theme'] = 'lsst_sphinx_bootstrap_theme'
c['html_theme_path'] = [lsst_sphinx_bootstrap_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
c['html_theme_options'] = {'logotext': short_project_name}
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
c['html_title'] = project_name
# A shorter title for the navigation bar. Default is the same as
# html_title.
c['html_short_title'] = short_project_name
# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
c['html_logo'] = None
# The name of an image file (within the static path) to use as favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or
# 32x32 pixels large.
c['html_favicon'] = None
# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
if os.path.isdir('_static'):
c['html_static_path'] = ['_static']
else:
# If a project does not have a _static/ directory, don't list it
# so that there isn't a warning.
c['html_static_path'] = []
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
c['html_last_updated_fmt'] = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
c['html_use_smartypants'] = True
# If false, no module index is generated.
c['html_domain_indices'] = False
# If false, no index is generated.
c['html_use_index'] = False
# If true, the index is split into individual pages for each letter.
c['html_split_index'] = False
# If true, links to the reST sources are added to the pages.
c['html_show_sourcelink'] = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is
# True.
c['html_show_sphinx'] = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is
# True.
c['html_show_copyright'] = True
# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option must
# be the base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
c['html_file_suffix'] = '.html'
# Language to be used for generating the HTML full-text search index.
c['html_search_language'] = 'en'
# A dictionary with options for the search language support, empty by
# default. Now only 'ja' uses this config value
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory)
# that implements a search results scorer. If empty, the default will be
# used.
# html_search_scorer = 'scorer.js'
return c |
def vm_registered(vm_name, datacenter, placement, vm_file, power_on=False):
'''
Registers a virtual machine if the machine files are available on
the main datastore.
'''
result = {'name': vm_name,
'result': None,
'changes': {},
'comment': ''}
vmx_path = '{0}{1}'.format(vm_file.folderPath, vm_file.file[0].path)
log.trace('Registering virtual machine with vmx file: %s', vmx_path)
service_instance = __salt__['vsphere.get_service_instance_via_proxy']()
try:
__salt__['vsphere.register_vm'](vm_name, datacenter,
placement, vmx_path,
service_instance=service_instance)
except salt.exceptions.VMwareMultipleObjectsError as exc:
log.error('Error: %s', exc)
if service_instance:
__salt__['vsphere.disconnect'](service_instance)
result.update({'result': False,
'comment': six.text_type(exc)})
return result
except salt.exceptions.VMwareVmRegisterError as exc:
log.error('Error: %s', exc)
if service_instance:
__salt__['vsphere.disconnect'](service_instance)
result.update({'result': False,
'comment': six.text_type(exc)})
return result
if power_on:
try:
__salt__['vsphere.power_on_vm'](vm_name, datacenter,
service_instance=service_instance)
except salt.exceptions.VMwarePowerOnError as exc:
log.error('Error: %s', exc)
if service_instance:
__salt__['vsphere.disconnect'](service_instance)
result.update({
'result': False,
'comment': six.text_type(exc)})
return result
__salt__['vsphere.disconnect'](service_instance)
result.update({'result': True,
'changes': {'name': vm_name, 'power_on': power_on},
'comment': 'Virtual machine '
'{0} registered successfully'.format(vm_name)})
return result | Registers a virtual machine if the machine files are available on
the main datastore. | Below is the the instruction that describes the task:
### Input:
Registers a virtual machine if the machine files are available on
the main datastore.
### Response:
def vm_registered(vm_name, datacenter, placement, vm_file, power_on=False):
'''
Registers a virtual machine if the machine files are available on
the main datastore.
'''
result = {'name': vm_name,
'result': None,
'changes': {},
'comment': ''}
vmx_path = '{0}{1}'.format(vm_file.folderPath, vm_file.file[0].path)
log.trace('Registering virtual machine with vmx file: %s', vmx_path)
service_instance = __salt__['vsphere.get_service_instance_via_proxy']()
try:
__salt__['vsphere.register_vm'](vm_name, datacenter,
placement, vmx_path,
service_instance=service_instance)
except salt.exceptions.VMwareMultipleObjectsError as exc:
log.error('Error: %s', exc)
if service_instance:
__salt__['vsphere.disconnect'](service_instance)
result.update({'result': False,
'comment': six.text_type(exc)})
return result
except salt.exceptions.VMwareVmRegisterError as exc:
log.error('Error: %s', exc)
if service_instance:
__salt__['vsphere.disconnect'](service_instance)
result.update({'result': False,
'comment': six.text_type(exc)})
return result
if power_on:
try:
__salt__['vsphere.power_on_vm'](vm_name, datacenter,
service_instance=service_instance)
except salt.exceptions.VMwarePowerOnError as exc:
log.error('Error: %s', exc)
if service_instance:
__salt__['vsphere.disconnect'](service_instance)
result.update({
'result': False,
'comment': six.text_type(exc)})
return result
__salt__['vsphere.disconnect'](service_instance)
result.update({'result': True,
'changes': {'name': vm_name, 'power_on': power_on},
'comment': 'Virtual machine '
'{0} registered successfully'.format(vm_name)})
return result |
def _generate_pack_target_class(dev):
"""! @brief Generates a new target class from a CmsisPackDevice.
@param dev A CmsisPackDevice object.
@return A new subclass of either CoreSightTarget or one of the family classes.
"""
try:
# Look up the target family superclass.
superklass = PackTargets._find_family_class(dev)
# Replace spaces and dashes with underscores on the new target subclass name.
subclassName = dev.part_number.replace(' ', '_').replace('-', '_')
# Create a new subclass for this target.
targetClass = type(subclassName, (superklass,), {
"_pack_device": dev,
"__init__": _PackTargetMethods._pack_target__init__,
"create_init_sequence": _PackTargetMethods._pack_target_create_init_sequence,
"set_default_reset_type": _PackTargetMethods._pack_target_set_default_reset_type,
})
return targetClass
except (MalformedCmsisPackError, FileNotFoundError_) as err:
LOG.warning(err)
return None | ! @brief Generates a new target class from a CmsisPackDevice.
@param dev A CmsisPackDevice object.
@return A new subclass of either CoreSightTarget or one of the family classes. | Below is the the instruction that describes the task:
### Input:
! @brief Generates a new target class from a CmsisPackDevice.
@param dev A CmsisPackDevice object.
@return A new subclass of either CoreSightTarget or one of the family classes.
### Response:
def _generate_pack_target_class(dev):
"""! @brief Generates a new target class from a CmsisPackDevice.
@param dev A CmsisPackDevice object.
@return A new subclass of either CoreSightTarget or one of the family classes.
"""
try:
# Look up the target family superclass.
superklass = PackTargets._find_family_class(dev)
# Replace spaces and dashes with underscores on the new target subclass name.
subclassName = dev.part_number.replace(' ', '_').replace('-', '_')
# Create a new subclass for this target.
targetClass = type(subclassName, (superklass,), {
"_pack_device": dev,
"__init__": _PackTargetMethods._pack_target__init__,
"create_init_sequence": _PackTargetMethods._pack_target_create_init_sequence,
"set_default_reset_type": _PackTargetMethods._pack_target_set_default_reset_type,
})
return targetClass
except (MalformedCmsisPackError, FileNotFoundError_) as err:
LOG.warning(err)
return None |
def set_level(self, level):
"""
Set the log level of the g8os
Note: this level is for messages that ends up on screen or on log file
:param level: the level to be set can be one of ("CRITICAL", "ERROR", "WARNING", "NOTICE", "INFO", "DEBUG")
"""
args = {
'level': level,
}
self._level_chk.check(args)
return self._client.json('logger.set_level', args) | Set the log level of the g8os
Note: this level is for messages that ends up on screen or on log file
:param level: the level to be set can be one of ("CRITICAL", "ERROR", "WARNING", "NOTICE", "INFO", "DEBUG") | Below is the the instruction that describes the task:
### Input:
Set the log level of the g8os
Note: this level is for messages that ends up on screen or on log file
:param level: the level to be set can be one of ("CRITICAL", "ERROR", "WARNING", "NOTICE", "INFO", "DEBUG")
### Response:
def set_level(self, level):
"""
Set the log level of the g8os
Note: this level is for messages that ends up on screen or on log file
:param level: the level to be set can be one of ("CRITICAL", "ERROR", "WARNING", "NOTICE", "INFO", "DEBUG")
"""
args = {
'level': level,
}
self._level_chk.check(args)
return self._client.json('logger.set_level', args) |
def serialtoflat(self, bytes, width=None):
"""Convert serial format (byte stream) pixel data to flat row
flat pixel.
"""
if self.bitdepth == 8:
return bytes
if self.bitdepth == 16:
bytes = tostring(bytes)
return array('H',
struct.unpack('!%dH' % (len(bytes)//2), bytes))
assert self.bitdepth < 8
if width is None:
width = self.width
# Samples per byte
spb = 8//self.bitdepth
out = array('B')
mask = 2**self.bitdepth - 1
shifts = map(self.bitdepth.__mul__, reversed(range(spb)))
l = width
for o in bytes:
out.extend([(mask&(o>>s)) for s in shifts][:l])
l -= spb
if l <= 0:
l = width
return out | Convert serial format (byte stream) pixel data to flat row
flat pixel. | Below is the the instruction that describes the task:
### Input:
Convert serial format (byte stream) pixel data to flat row
flat pixel.
### Response:
def serialtoflat(self, bytes, width=None):
"""Convert serial format (byte stream) pixel data to flat row
flat pixel.
"""
if self.bitdepth == 8:
return bytes
if self.bitdepth == 16:
bytes = tostring(bytes)
return array('H',
struct.unpack('!%dH' % (len(bytes)//2), bytes))
assert self.bitdepth < 8
if width is None:
width = self.width
# Samples per byte
spb = 8//self.bitdepth
out = array('B')
mask = 2**self.bitdepth - 1
shifts = map(self.bitdepth.__mul__, reversed(range(spb)))
l = width
for o in bytes:
out.extend([(mask&(o>>s)) for s in shifts][:l])
l -= spb
if l <= 0:
l = width
return out |
def handle_database_error(cls, session, exception):
"""Rollback changes made and handle any type of error raised by the DBMS."""
session.rollback()
if isinstance(exception, IntegrityError):
cls.handle_integrity_error(exception)
elif isinstance(exception, FlushError):
cls.handle_flush_error(exception)
else:
raise exception | Rollback changes made and handle any type of error raised by the DBMS. | Below is the the instruction that describes the task:
### Input:
Rollback changes made and handle any type of error raised by the DBMS.
### Response:
def handle_database_error(cls, session, exception):
"""Rollback changes made and handle any type of error raised by the DBMS."""
session.rollback()
if isinstance(exception, IntegrityError):
cls.handle_integrity_error(exception)
elif isinstance(exception, FlushError):
cls.handle_flush_error(exception)
else:
raise exception |
def get_plan_from_dual(alpha, beta, C, regul):
"""
Retrieve optimal transportation plan from optimal dual potentials.
Parameters
----------
alpha: array, shape = len(a)
beta: array, shape = len(b)
Optimal dual potentials.
C: array, shape = len(a) x len(b)
Ground cost matrix.
regul: Regularization object
Should implement a delta_Omega(X) method.
Returns
-------
T: array, shape = len(a) x len(b)
Optimal transportation plan.
"""
X = alpha[:, np.newaxis] + beta - C
return regul.delta_Omega(X)[1] | Retrieve optimal transportation plan from optimal dual potentials.
Parameters
----------
alpha: array, shape = len(a)
beta: array, shape = len(b)
Optimal dual potentials.
C: array, shape = len(a) x len(b)
Ground cost matrix.
regul: Regularization object
Should implement a delta_Omega(X) method.
Returns
-------
T: array, shape = len(a) x len(b)
Optimal transportation plan. | Below is the the instruction that describes the task:
### Input:
Retrieve optimal transportation plan from optimal dual potentials.
Parameters
----------
alpha: array, shape = len(a)
beta: array, shape = len(b)
Optimal dual potentials.
C: array, shape = len(a) x len(b)
Ground cost matrix.
regul: Regularization object
Should implement a delta_Omega(X) method.
Returns
-------
T: array, shape = len(a) x len(b)
Optimal transportation plan.
### Response:
def get_plan_from_dual(alpha, beta, C, regul):
"""
Retrieve optimal transportation plan from optimal dual potentials.
Parameters
----------
alpha: array, shape = len(a)
beta: array, shape = len(b)
Optimal dual potentials.
C: array, shape = len(a) x len(b)
Ground cost matrix.
regul: Regularization object
Should implement a delta_Omega(X) method.
Returns
-------
T: array, shape = len(a) x len(b)
Optimal transportation plan.
"""
X = alpha[:, np.newaxis] + beta - C
return regul.delta_Omega(X)[1] |
async def release(self, task_id, *, delay=None):
"""
Release task (return to queue) with delay if specified
:param task_id: Task id
:param delay: Time in seconds before task will become ready again
:return: Task instance
"""
opts = {}
if delay is not None:
opts['delay'] = delay
args = (task_id, opts)
res = await self.conn.call(self.__funcs['release'], args)
return self._create_task(res.body) | Release task (return to queue) with delay if specified
:param task_id: Task id
:param delay: Time in seconds before task will become ready again
:return: Task instance | Below is the the instruction that describes the task:
### Input:
Release task (return to queue) with delay if specified
:param task_id: Task id
:param delay: Time in seconds before task will become ready again
:return: Task instance
### Response:
async def release(self, task_id, *, delay=None):
"""
Release task (return to queue) with delay if specified
:param task_id: Task id
:param delay: Time in seconds before task will become ready again
:return: Task instance
"""
opts = {}
if delay is not None:
opts['delay'] = delay
args = (task_id, opts)
res = await self.conn.call(self.__funcs['release'], args)
return self._create_task(res.body) |
def pre_save_config(sender, instance, *args, **kwargs):
"""
Checks if enable was toggled on group config and
deletes groups if necessary.
"""
logger.debug("Received pre_save from {}".format(instance))
if not instance.pk:
# new model being created
return
try:
old_instance = AutogroupsConfig.objects.get(pk=instance.pk)
# Check if enable was toggled, delete groups?
if old_instance.alliance_groups is True and instance.alliance_groups is False:
instance.delete_alliance_managed_groups()
if old_instance.corp_groups is True and instance.corp_groups is False:
instance.delete_corp_managed_groups()
except AutogroupsConfig.DoesNotExist:
pass | Checks if enable was toggled on group config and
deletes groups if necessary. | Below is the the instruction that describes the task:
### Input:
Checks if enable was toggled on group config and
deletes groups if necessary.
### Response:
def pre_save_config(sender, instance, *args, **kwargs):
"""
Checks if enable was toggled on group config and
deletes groups if necessary.
"""
logger.debug("Received pre_save from {}".format(instance))
if not instance.pk:
# new model being created
return
try:
old_instance = AutogroupsConfig.objects.get(pk=instance.pk)
# Check if enable was toggled, delete groups?
if old_instance.alliance_groups is True and instance.alliance_groups is False:
instance.delete_alliance_managed_groups()
if old_instance.corp_groups is True and instance.corp_groups is False:
instance.delete_corp_managed_groups()
except AutogroupsConfig.DoesNotExist:
pass |
def write_f90(self):
"""Writes the F90 module file to the specified directory.
"""
from os import path
self._check_dir()
#Find the list of executables that we actually need to write wrappers for.
self._find_executables()
lines = []
lines.append("!!<summary>Auto-generated Fortran module for interaction with ctypes\n"
"!!through python. Generated for module {}.</summary>".format(self.module.name))
lines.append("MODULE {}_c".format(self.module.name))
#Some of the variables and parameters will have special kinds that need to be imported.
#Check each of the executables to find additional dependencies.
lines.append(" use {}".format(self.module.name))
lines.append(" use ISO_C_BINDING")
for modname in self.needs:
lines.append(" use {}".format(modname))
lines.append(" implicit none")
lines.append("CONTAINS")
#We want everything in these wrapper modules to be public, so we just exclude the 'private'.
for execkey in self.uses:
self._write_executable_f90(execkey, lines)
lines.append("END MODULE {}_c".format(self.module.name))
fullpath = path.join(self.f90path, "{}_c.f90".format(self.module.name))
with open(fullpath, 'w') as f:
f.write('\n'.join(lines)) | Writes the F90 module file to the specified directory. | Below is the the instruction that describes the task:
### Input:
Writes the F90 module file to the specified directory.
### Response:
def write_f90(self):
"""Writes the F90 module file to the specified directory.
"""
from os import path
self._check_dir()
#Find the list of executables that we actually need to write wrappers for.
self._find_executables()
lines = []
lines.append("!!<summary>Auto-generated Fortran module for interaction with ctypes\n"
"!!through python. Generated for module {}.</summary>".format(self.module.name))
lines.append("MODULE {}_c".format(self.module.name))
#Some of the variables and parameters will have special kinds that need to be imported.
#Check each of the executables to find additional dependencies.
lines.append(" use {}".format(self.module.name))
lines.append(" use ISO_C_BINDING")
for modname in self.needs:
lines.append(" use {}".format(modname))
lines.append(" implicit none")
lines.append("CONTAINS")
#We want everything in these wrapper modules to be public, so we just exclude the 'private'.
for execkey in self.uses:
self._write_executable_f90(execkey, lines)
lines.append("END MODULE {}_c".format(self.module.name))
fullpath = path.join(self.f90path, "{}_c.f90".format(self.module.name))
with open(fullpath, 'w') as f:
f.write('\n'.join(lines)) |
def _ReadUnionDataTypeDefinition(
self, definitions_registry, definition_values, definition_name,
is_member=False):
"""Reads an union data type definition.
Args:
definitions_registry (DataTypeDefinitionsRegistry): data type definitions
registry.
definition_values (dict[str, object]): definition values.
definition_name (str): name of the definition.
is_member (Optional[bool]): True if the data type definition is a member
data type definition.
Returns:
UnionDefinition: union data type definition.
Raises:
DefinitionReaderError: if the definitions values are missing or if
the format is incorrect.
"""
return self._ReadDataTypeDefinitionWithMembers(
definitions_registry, definition_values, data_types.UnionDefinition,
definition_name, supports_conditions=False) | Reads an union data type definition.
Args:
definitions_registry (DataTypeDefinitionsRegistry): data type definitions
registry.
definition_values (dict[str, object]): definition values.
definition_name (str): name of the definition.
is_member (Optional[bool]): True if the data type definition is a member
data type definition.
Returns:
UnionDefinition: union data type definition.
Raises:
DefinitionReaderError: if the definitions values are missing or if
the format is incorrect. | Below is the the instruction that describes the task:
### Input:
Reads an union data type definition.
Args:
definitions_registry (DataTypeDefinitionsRegistry): data type definitions
registry.
definition_values (dict[str, object]): definition values.
definition_name (str): name of the definition.
is_member (Optional[bool]): True if the data type definition is a member
data type definition.
Returns:
UnionDefinition: union data type definition.
Raises:
DefinitionReaderError: if the definitions values are missing or if
the format is incorrect.
### Response:
def _ReadUnionDataTypeDefinition(
self, definitions_registry, definition_values, definition_name,
is_member=False):
"""Reads an union data type definition.
Args:
definitions_registry (DataTypeDefinitionsRegistry): data type definitions
registry.
definition_values (dict[str, object]): definition values.
definition_name (str): name of the definition.
is_member (Optional[bool]): True if the data type definition is a member
data type definition.
Returns:
UnionDefinition: union data type definition.
Raises:
DefinitionReaderError: if the definitions values are missing or if
the format is incorrect.
"""
return self._ReadDataTypeDefinitionWithMembers(
definitions_registry, definition_values, data_types.UnionDefinition,
definition_name, supports_conditions=False) |
def getMaskIndices(mask):
"""get lower and upper index of mask"""
return [
list(mask).index(True), len(mask) - 1 - list(mask)[::-1].index(True)
] | get lower and upper index of mask | Below is the the instruction that describes the task:
### Input:
get lower and upper index of mask
### Response:
def getMaskIndices(mask):
"""get lower and upper index of mask"""
return [
list(mask).index(True), len(mask) - 1 - list(mask)[::-1].index(True)
] |
def honor_stop_request(self, site):
"""Raises brozzler.CrawlStopped if stop has been requested."""
site.refresh()
if (site.stop_requested
and site.stop_requested <= doublethink.utcnow()):
self.logger.info("stop requested for site %s", site.id)
raise brozzler.CrawlStopped
if site.job_id:
job = brozzler.Job.load(self.rr, site.job_id)
if (job and job.stop_requested
and job.stop_requested <= doublethink.utcnow()):
self.logger.info("stop requested for job %s", site.job_id)
raise brozzler.CrawlStopped | Raises brozzler.CrawlStopped if stop has been requested. | Below is the the instruction that describes the task:
### Input:
Raises brozzler.CrawlStopped if stop has been requested.
### Response:
def honor_stop_request(self, site):
"""Raises brozzler.CrawlStopped if stop has been requested."""
site.refresh()
if (site.stop_requested
and site.stop_requested <= doublethink.utcnow()):
self.logger.info("stop requested for site %s", site.id)
raise brozzler.CrawlStopped
if site.job_id:
job = brozzler.Job.load(self.rr, site.job_id)
if (job and job.stop_requested
and job.stop_requested <= doublethink.utcnow()):
self.logger.info("stop requested for job %s", site.job_id)
raise brozzler.CrawlStopped |
def partition(args):
"""
%prog partition happy.txt synteny.graph
Select edges from another graph and merge it with the certain edges built
from the HAPPY mapping data.
"""
allowed_format = ("png", "ps")
p = OptionParser(partition.__doc__)
p.add_option("--prefix", help="Add prefix to the name [default: %default]")
p.add_option("--namestart", default=0, type="int",
help="Use a shorter name, starting index [default: %default]")
p.add_option("--format", default="png", choices=allowed_format,
help="Generate image of format [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
happyfile, graphfile = args
bg = BiGraph()
bg.read(graphfile, color="red")
prefix = opts.prefix
fp = open(happyfile)
for i, row in enumerate(fp):
nns = happy_nodes(row, prefix=prefix)
nodes = set(nns)
edges = happy_edges(row, prefix=prefix)
small_graph = BiGraph()
for (a, b, oa, ob), is_uncertain in edges:
color = "gray" if is_uncertain else "black"
small_graph.add_edge(a, b, oa, ob, color=color)
for (u, v), e in bg.edges.items():
# Grab edge if both vertices are on the same line
if u in nodes and v in nodes:
uv = (str(u), str(v))
if uv in small_graph.edges:
e = small_graph.edges[uv]
e.color = "blue" # supported by both evidences
else:
small_graph.add_edge(e)
print(small_graph, file=sys.stderr)
pngfile = "A{0:02d}.{1}".format(i + 1, opts.format)
telomeres = (nns[0], nns[-1])
small_graph.draw(pngfile, namestart=opts.namestart,
nodehighlight=telomeres, dpi=72)
legend = ["Edge colors:"]
legend.append("[BLUE] Experimental + Synteny")
legend.append("[BLACK] Experimental certain")
legend.append("[GRAY] Experimental uncertain")
legend.append("[RED] Synteny only")
legend.append("Rectangle nodes are telomeres.")
print("\n".join(legend), file=sys.stderr) | %prog partition happy.txt synteny.graph
Select edges from another graph and merge it with the certain edges built
from the HAPPY mapping data. | Below is the the instruction that describes the task:
### Input:
%prog partition happy.txt synteny.graph
Select edges from another graph and merge it with the certain edges built
from the HAPPY mapping data.
### Response:
def partition(args):
"""
%prog partition happy.txt synteny.graph
Select edges from another graph and merge it with the certain edges built
from the HAPPY mapping data.
"""
allowed_format = ("png", "ps")
p = OptionParser(partition.__doc__)
p.add_option("--prefix", help="Add prefix to the name [default: %default]")
p.add_option("--namestart", default=0, type="int",
help="Use a shorter name, starting index [default: %default]")
p.add_option("--format", default="png", choices=allowed_format,
help="Generate image of format [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
happyfile, graphfile = args
bg = BiGraph()
bg.read(graphfile, color="red")
prefix = opts.prefix
fp = open(happyfile)
for i, row in enumerate(fp):
nns = happy_nodes(row, prefix=prefix)
nodes = set(nns)
edges = happy_edges(row, prefix=prefix)
small_graph = BiGraph()
for (a, b, oa, ob), is_uncertain in edges:
color = "gray" if is_uncertain else "black"
small_graph.add_edge(a, b, oa, ob, color=color)
for (u, v), e in bg.edges.items():
# Grab edge if both vertices are on the same line
if u in nodes and v in nodes:
uv = (str(u), str(v))
if uv in small_graph.edges:
e = small_graph.edges[uv]
e.color = "blue" # supported by both evidences
else:
small_graph.add_edge(e)
print(small_graph, file=sys.stderr)
pngfile = "A{0:02d}.{1}".format(i + 1, opts.format)
telomeres = (nns[0], nns[-1])
small_graph.draw(pngfile, namestart=opts.namestart,
nodehighlight=telomeres, dpi=72)
legend = ["Edge colors:"]
legend.append("[BLUE] Experimental + Synteny")
legend.append("[BLACK] Experimental certain")
legend.append("[GRAY] Experimental uncertain")
legend.append("[RED] Synteny only")
legend.append("Rectangle nodes are telomeres.")
print("\n".join(legend), file=sys.stderr) |
def run(self):
""" The main program skeleton.
"""
log_total = True
try:
try:
# Preparation steps
self.get_options()
# Template method with the tool's main loop
self.mainloop()
except error.LoggableError, exc:
if self.options.debug:
raise
# Log errors caused by invalid user input
try:
msg = str(exc)
except UnicodeError:
msg = unicode(exc, "UTF-8")
self.LOG.error(msg)
sys.exit(error.EX_SOFTWARE)
except KeyboardInterrupt, exc:
if self.options.debug:
raise
sys.stderr.write("\n\nAborted by CTRL-C!\n")
sys.stderr.flush()
sys.exit(error.EX_TEMPFAIL)
except IOError, exc:
# [Errno 32] Broken pipe?
if exc.errno == errno.EPIPE:
sys.stderr.write("\n%s, exiting!\n" % exc)
sys.stderr.flush()
# Monkey patch to prevent an exception during logging shutdown
try:
handlers = logging._handlerList
except AttributeError:
pass
else:
for handler in handlers:
try:
handler.flush = lambda *_: None
except AttributeError:
pass # skip special handlers
log_total = False
sys.exit(error.EX_IOERR)
else:
raise
finally:
# Shut down
if log_total and self.options: ## No time logging on --version and such
running_time = time.time() - self.startup
self.LOG.log(self.STD_LOG_LEVEL, "Total time: %.3f seconds." % running_time)
logging.shutdown()
# Special exit code?
if self.return_code:
sys.exit(self.return_code) | The main program skeleton. | Below is the the instruction that describes the task:
### Input:
The main program skeleton.
### Response:
def run(self):
""" The main program skeleton.
"""
log_total = True
try:
try:
# Preparation steps
self.get_options()
# Template method with the tool's main loop
self.mainloop()
except error.LoggableError, exc:
if self.options.debug:
raise
# Log errors caused by invalid user input
try:
msg = str(exc)
except UnicodeError:
msg = unicode(exc, "UTF-8")
self.LOG.error(msg)
sys.exit(error.EX_SOFTWARE)
except KeyboardInterrupt, exc:
if self.options.debug:
raise
sys.stderr.write("\n\nAborted by CTRL-C!\n")
sys.stderr.flush()
sys.exit(error.EX_TEMPFAIL)
except IOError, exc:
# [Errno 32] Broken pipe?
if exc.errno == errno.EPIPE:
sys.stderr.write("\n%s, exiting!\n" % exc)
sys.stderr.flush()
# Monkey patch to prevent an exception during logging shutdown
try:
handlers = logging._handlerList
except AttributeError:
pass
else:
for handler in handlers:
try:
handler.flush = lambda *_: None
except AttributeError:
pass # skip special handlers
log_total = False
sys.exit(error.EX_IOERR)
else:
raise
finally:
# Shut down
if log_total and self.options: ## No time logging on --version and such
running_time = time.time() - self.startup
self.LOG.log(self.STD_LOG_LEVEL, "Total time: %.3f seconds." % running_time)
logging.shutdown()
# Special exit code?
if self.return_code:
sys.exit(self.return_code) |
def merge_sort(arr):
""" Merge Sort
Complexity: O(n log(n))
"""
# Our recursive base case
if len(arr) <= 1:
return arr
mid = len(arr) // 2
# Perform merge_sort recursively on both halves
left, right = merge_sort(arr[:mid]), merge_sort(arr[mid:])
# Merge each side together
return merge(left, right, arr.copy()) | Merge Sort
Complexity: O(n log(n)) | Below is the the instruction that describes the task:
### Input:
Merge Sort
Complexity: O(n log(n))
### Response:
def merge_sort(arr):
""" Merge Sort
Complexity: O(n log(n))
"""
# Our recursive base case
if len(arr) <= 1:
return arr
mid = len(arr) // 2
# Perform merge_sort recursively on both halves
left, right = merge_sort(arr[:mid]), merge_sort(arr[mid:])
# Merge each side together
return merge(left, right, arr.copy()) |
def running(self):
"""
Returns true if job still in running state
:return:
"""
r = self._client._redis
flag = '{}:flag'.format(self._queue)
if bool(r.exists(flag)):
return r.ttl(flag) is None
return False | Returns true if job still in running state
:return: | Below is the the instruction that describes the task:
### Input:
Returns true if job still in running state
:return:
### Response:
def running(self):
"""
Returns true if job still in running state
:return:
"""
r = self._client._redis
flag = '{}:flag'.format(self._queue)
if bool(r.exists(flag)):
return r.ttl(flag) is None
return False |
def get_agents(self):
"""Return list of INDRA Agents corresponding to TERMs in the EKB.
This is meant to be used when entities e.g. "phosphorylated ERK",
rather than events need to be extracted from processed natural
language. These entities with their respective states are represented
as INDRA Agents.
Returns
-------
agents : list[indra.statements.Agent]
List of INDRA Agents extracted from EKB.
"""
agents_dict = self.get_term_agents()
agents = [a for a in agents_dict.values() if a is not None]
return agents | Return list of INDRA Agents corresponding to TERMs in the EKB.
This is meant to be used when entities e.g. "phosphorylated ERK",
rather than events need to be extracted from processed natural
language. These entities with their respective states are represented
as INDRA Agents.
Returns
-------
agents : list[indra.statements.Agent]
List of INDRA Agents extracted from EKB. | Below is the the instruction that describes the task:
### Input:
Return list of INDRA Agents corresponding to TERMs in the EKB.
This is meant to be used when entities e.g. "phosphorylated ERK",
rather than events need to be extracted from processed natural
language. These entities with their respective states are represented
as INDRA Agents.
Returns
-------
agents : list[indra.statements.Agent]
List of INDRA Agents extracted from EKB.
### Response:
def get_agents(self):
"""Return list of INDRA Agents corresponding to TERMs in the EKB.
This is meant to be used when entities e.g. "phosphorylated ERK",
rather than events need to be extracted from processed natural
language. These entities with their respective states are represented
as INDRA Agents.
Returns
-------
agents : list[indra.statements.Agent]
List of INDRA Agents extracted from EKB.
"""
agents_dict = self.get_term_agents()
agents = [a for a in agents_dict.values() if a is not None]
return agents |
def _write_cols(self, sid, dts, cols, invalid_data_behavior):
"""
Internal method for `write_cols` and `write`.
Parameters
----------
sid : int
The asset identifier for the data being written.
dts : datetime64 array
The dts corresponding to values in cols.
cols : dict of str -> np.array
dict of market data with the following characteristics.
keys are ('open', 'high', 'low', 'close', 'volume')
open : float64
high : float64
low : float64
close : float64
volume : float64|int64
"""
table = self._ensure_ctable(sid)
tds = self._session_labels
input_first_day = self._calendar.minute_to_session_label(
pd.Timestamp(dts[0]), direction='previous')
last_date = self.last_date_in_output_for_sid(sid)
day_before_input = input_first_day - tds.freq
self.pad(sid, day_before_input)
table = self._ensure_ctable(sid)
# Get the number of minutes already recorded in this sid's ctable
num_rec_mins = table.size
all_minutes = self._minute_index
# Get the latest minute we wish to write to the ctable
last_minute_to_write = pd.Timestamp(dts[-1], tz='UTC')
# In the event that we've already written some minutely data to the
# ctable, guard against overwriting that data.
if num_rec_mins > 0:
last_recorded_minute = all_minutes[num_rec_mins - 1]
if last_minute_to_write <= last_recorded_minute:
raise BcolzMinuteOverlappingData(dedent("""
Data with last_date={0} already includes input start={1} for
sid={2}""".strip()).format(last_date, input_first_day, sid))
latest_min_count = all_minutes.get_loc(last_minute_to_write)
# Get all the minutes we wish to write (all market minutes after the
# latest currently written, up to and including last_minute_to_write)
all_minutes_in_window = all_minutes[num_rec_mins:latest_min_count + 1]
minutes_count = all_minutes_in_window.size
open_col = np.zeros(minutes_count, dtype=np.uint32)
high_col = np.zeros(minutes_count, dtype=np.uint32)
low_col = np.zeros(minutes_count, dtype=np.uint32)
close_col = np.zeros(minutes_count, dtype=np.uint32)
vol_col = np.zeros(minutes_count, dtype=np.uint32)
dt_ixs = np.searchsorted(all_minutes_in_window.values,
dts.astype('datetime64[ns]'))
ohlc_ratio = self.ohlc_ratio_for_sid(sid)
(
open_col[dt_ixs],
high_col[dt_ixs],
low_col[dt_ixs],
close_col[dt_ixs],
vol_col[dt_ixs],
) = convert_cols(cols, ohlc_ratio, sid, invalid_data_behavior)
table.append([
open_col,
high_col,
low_col,
close_col,
vol_col
])
table.flush() | Internal method for `write_cols` and `write`.
Parameters
----------
sid : int
The asset identifier for the data being written.
dts : datetime64 array
The dts corresponding to values in cols.
cols : dict of str -> np.array
dict of market data with the following characteristics.
keys are ('open', 'high', 'low', 'close', 'volume')
open : float64
high : float64
low : float64
close : float64
volume : float64|int64 | Below is the the instruction that describes the task:
### Input:
Internal method for `write_cols` and `write`.
Parameters
----------
sid : int
The asset identifier for the data being written.
dts : datetime64 array
The dts corresponding to values in cols.
cols : dict of str -> np.array
dict of market data with the following characteristics.
keys are ('open', 'high', 'low', 'close', 'volume')
open : float64
high : float64
low : float64
close : float64
volume : float64|int64
### Response:
def _write_cols(self, sid, dts, cols, invalid_data_behavior):
"""
Internal method for `write_cols` and `write`.
Parameters
----------
sid : int
The asset identifier for the data being written.
dts : datetime64 array
The dts corresponding to values in cols.
cols : dict of str -> np.array
dict of market data with the following characteristics.
keys are ('open', 'high', 'low', 'close', 'volume')
open : float64
high : float64
low : float64
close : float64
volume : float64|int64
"""
table = self._ensure_ctable(sid)
tds = self._session_labels
input_first_day = self._calendar.minute_to_session_label(
pd.Timestamp(dts[0]), direction='previous')
last_date = self.last_date_in_output_for_sid(sid)
day_before_input = input_first_day - tds.freq
self.pad(sid, day_before_input)
table = self._ensure_ctable(sid)
# Get the number of minutes already recorded in this sid's ctable
num_rec_mins = table.size
all_minutes = self._minute_index
# Get the latest minute we wish to write to the ctable
last_minute_to_write = pd.Timestamp(dts[-1], tz='UTC')
# In the event that we've already written some minutely data to the
# ctable, guard against overwriting that data.
if num_rec_mins > 0:
last_recorded_minute = all_minutes[num_rec_mins - 1]
if last_minute_to_write <= last_recorded_minute:
raise BcolzMinuteOverlappingData(dedent("""
Data with last_date={0} already includes input start={1} for
sid={2}""".strip()).format(last_date, input_first_day, sid))
latest_min_count = all_minutes.get_loc(last_minute_to_write)
# Get all the minutes we wish to write (all market minutes after the
# latest currently written, up to and including last_minute_to_write)
all_minutes_in_window = all_minutes[num_rec_mins:latest_min_count + 1]
minutes_count = all_minutes_in_window.size
open_col = np.zeros(minutes_count, dtype=np.uint32)
high_col = np.zeros(minutes_count, dtype=np.uint32)
low_col = np.zeros(minutes_count, dtype=np.uint32)
close_col = np.zeros(minutes_count, dtype=np.uint32)
vol_col = np.zeros(minutes_count, dtype=np.uint32)
dt_ixs = np.searchsorted(all_minutes_in_window.values,
dts.astype('datetime64[ns]'))
ohlc_ratio = self.ohlc_ratio_for_sid(sid)
(
open_col[dt_ixs],
high_col[dt_ixs],
low_col[dt_ixs],
close_col[dt_ixs],
vol_col[dt_ixs],
) = convert_cols(cols, ohlc_ratio, sid, invalid_data_behavior)
table.append([
open_col,
high_col,
low_col,
close_col,
vol_col
])
table.flush() |
def explained_variance(ypred,y):
"""
Computes fraction of variance that ypred explains about y.
Returns 1 - Var[y-ypred] / Var[y]
interpretation:
ev=0 => might as well have predicted zero
ev=1 => perfect prediction
ev<0 => worse than just predicting zero
"""
assert y.ndim == 1 and ypred.ndim == 1
vary = np.var(y)
return np.nan if vary==0 else 1 - np.var(y-ypred)/vary | Computes fraction of variance that ypred explains about y.
Returns 1 - Var[y-ypred] / Var[y]
interpretation:
ev=0 => might as well have predicted zero
ev=1 => perfect prediction
ev<0 => worse than just predicting zero | Below is the the instruction that describes the task:
### Input:
Computes fraction of variance that ypred explains about y.
Returns 1 - Var[y-ypred] / Var[y]
interpretation:
ev=0 => might as well have predicted zero
ev=1 => perfect prediction
ev<0 => worse than just predicting zero
### Response:
def explained_variance(ypred,y):
"""
Computes fraction of variance that ypred explains about y.
Returns 1 - Var[y-ypred] / Var[y]
interpretation:
ev=0 => might as well have predicted zero
ev=1 => perfect prediction
ev<0 => worse than just predicting zero
"""
assert y.ndim == 1 and ypred.ndim == 1
vary = np.var(y)
return np.nan if vary==0 else 1 - np.var(y-ypred)/vary |
def merge_into(self, other):
"""Merge two simple selectors together. This is expected to be the
selector being injected into `other` -- that is, `other` is the
selector for a block using ``@extend``, and `self` is a selector being
extended.
Element tokens must come first, and pseudo-element tokens must come
last, and there can only be one of each. The final selector thus looks
something like::
[element] [misc self tokens] [misc other tokens] [pseudo-element]
This method does not check for duplicate tokens; those are assumed to
have been removed earlier, during the search for a hinge.
"""
# TODO it shouldn't be possible to merge two elements or two pseudo
# elements, /but/ it shouldn't just be a fatal error here -- it
# shouldn't even be considered a candidate for extending!
# TODO this is slightly inconsistent with ruby, which treats a trailing
# set of self tokens like ':before.foo' as a single unit to be stuck at
# the end. but that's completely bogus anyway.
element = []
middle = []
pseudo = []
for token in self.tokens + other.tokens:
if token in CSS2_PSEUDO_ELEMENTS or token.startswith('::'):
pseudo.append(token)
elif token[0] in BODY_TOKEN_SIGILS:
middle.append(token)
else:
element.append(token)
new_tokens = element + middle + pseudo
if self.combinator == ' ' or self.combinator == other.combinator:
combinator = other.combinator
elif other.combinator == ' ':
combinator = self.combinator
else:
raise ValueError(
"Don't know how to merge conflicting combinators: "
"{0!r} and {1!r}"
.format(self, other))
return type(self)(combinator, new_tokens) | Merge two simple selectors together. This is expected to be the
selector being injected into `other` -- that is, `other` is the
selector for a block using ``@extend``, and `self` is a selector being
extended.
Element tokens must come first, and pseudo-element tokens must come
last, and there can only be one of each. The final selector thus looks
something like::
[element] [misc self tokens] [misc other tokens] [pseudo-element]
This method does not check for duplicate tokens; those are assumed to
have been removed earlier, during the search for a hinge. | Below is the the instruction that describes the task:
### Input:
Merge two simple selectors together. This is expected to be the
selector being injected into `other` -- that is, `other` is the
selector for a block using ``@extend``, and `self` is a selector being
extended.
Element tokens must come first, and pseudo-element tokens must come
last, and there can only be one of each. The final selector thus looks
something like::
[element] [misc self tokens] [misc other tokens] [pseudo-element]
This method does not check for duplicate tokens; those are assumed to
have been removed earlier, during the search for a hinge.
### Response:
def merge_into(self, other):
"""Merge two simple selectors together. This is expected to be the
selector being injected into `other` -- that is, `other` is the
selector for a block using ``@extend``, and `self` is a selector being
extended.
Element tokens must come first, and pseudo-element tokens must come
last, and there can only be one of each. The final selector thus looks
something like::
[element] [misc self tokens] [misc other tokens] [pseudo-element]
This method does not check for duplicate tokens; those are assumed to
have been removed earlier, during the search for a hinge.
"""
# TODO it shouldn't be possible to merge two elements or two pseudo
# elements, /but/ it shouldn't just be a fatal error here -- it
# shouldn't even be considered a candidate for extending!
# TODO this is slightly inconsistent with ruby, which treats a trailing
# set of self tokens like ':before.foo' as a single unit to be stuck at
# the end. but that's completely bogus anyway.
element = []
middle = []
pseudo = []
for token in self.tokens + other.tokens:
if token in CSS2_PSEUDO_ELEMENTS or token.startswith('::'):
pseudo.append(token)
elif token[0] in BODY_TOKEN_SIGILS:
middle.append(token)
else:
element.append(token)
new_tokens = element + middle + pseudo
if self.combinator == ' ' or self.combinator == other.combinator:
combinator = other.combinator
elif other.combinator == ' ':
combinator = self.combinator
else:
raise ValueError(
"Don't know how to merge conflicting combinators: "
"{0!r} and {1!r}"
.format(self, other))
return type(self)(combinator, new_tokens) |
async def get_capability_report(self):
"""
This method requests and returns a Firmata capability query report
:returns: A capability report in the form of a list
"""
if self.query_reply_data.get(
PrivateConstants.CAPABILITY_RESPONSE) is None:
await self._send_sysex(PrivateConstants.CAPABILITY_QUERY)
while self.query_reply_data.get(
PrivateConstants.CAPABILITY_RESPONSE) is None:
await asyncio.sleep(self.sleep_tune)
return self.query_reply_data.get(PrivateConstants.CAPABILITY_RESPONSE) | This method requests and returns a Firmata capability query report
:returns: A capability report in the form of a list | Below is the the instruction that describes the task:
### Input:
This method requests and returns a Firmata capability query report
:returns: A capability report in the form of a list
### Response:
async def get_capability_report(self):
"""
This method requests and returns a Firmata capability query report
:returns: A capability report in the form of a list
"""
if self.query_reply_data.get(
PrivateConstants.CAPABILITY_RESPONSE) is None:
await self._send_sysex(PrivateConstants.CAPABILITY_QUERY)
while self.query_reply_data.get(
PrivateConstants.CAPABILITY_RESPONSE) is None:
await asyncio.sleep(self.sleep_tune)
return self.query_reply_data.get(PrivateConstants.CAPABILITY_RESPONSE) |
def set_tlsext_servername_callback(self, callback):
"""
Specify a callback function to be called when clients specify a server
name.
:param callback: The callback function. It will be invoked with one
argument, the Connection instance.
.. versionadded:: 0.13
"""
@wraps(callback)
def wrapper(ssl, alert, arg):
callback(Connection._reverse_mapping[ssl])
return 0
self._tlsext_servername_callback = _ffi.callback(
"int (*)(SSL *, int *, void *)", wrapper)
_lib.SSL_CTX_set_tlsext_servername_callback(
self._context, self._tlsext_servername_callback) | Specify a callback function to be called when clients specify a server
name.
:param callback: The callback function. It will be invoked with one
argument, the Connection instance.
.. versionadded:: 0.13 | Below is the the instruction that describes the task:
### Input:
Specify a callback function to be called when clients specify a server
name.
:param callback: The callback function. It will be invoked with one
argument, the Connection instance.
.. versionadded:: 0.13
### Response:
def set_tlsext_servername_callback(self, callback):
"""
Specify a callback function to be called when clients specify a server
name.
:param callback: The callback function. It will be invoked with one
argument, the Connection instance.
.. versionadded:: 0.13
"""
@wraps(callback)
def wrapper(ssl, alert, arg):
callback(Connection._reverse_mapping[ssl])
return 0
self._tlsext_servername_callback = _ffi.callback(
"int (*)(SSL *, int *, void *)", wrapper)
_lib.SSL_CTX_set_tlsext_servername_callback(
self._context, self._tlsext_servername_callback) |
def read_string(self):
"""Reads and returns a length-delimited string."""
length = self._stream.read_var_uint32()
return self._stream.read_string(length) | Reads and returns a length-delimited string. | Below is the the instruction that describes the task:
### Input:
Reads and returns a length-delimited string.
### Response:
def read_string(self):
"""Reads and returns a length-delimited string."""
length = self._stream.read_var_uint32()
return self._stream.read_string(length) |
def update(self, _attributes=None, **attributes):
"""
Update the parent model on the relationship.
:param attributes: The update attributes
:type attributes: dict
:rtype: mixed
"""
if _attributes is not None:
attributes.update(_attributes)
instance = self.get_results()
return instance.fill(attributes).save() | Update the parent model on the relationship.
:param attributes: The update attributes
:type attributes: dict
:rtype: mixed | Below is the the instruction that describes the task:
### Input:
Update the parent model on the relationship.
:param attributes: The update attributes
:type attributes: dict
:rtype: mixed
### Response:
def update(self, _attributes=None, **attributes):
"""
Update the parent model on the relationship.
:param attributes: The update attributes
:type attributes: dict
:rtype: mixed
"""
if _attributes is not None:
attributes.update(_attributes)
instance = self.get_results()
return instance.fill(attributes).save() |
def lag(expr, offset, default=None, sort=None, ascending=True):
"""
Get value in the row ``offset`` rows prior to the current row.
:param offset: the offset value
:param default: default value for the function, when there are no rows satisfying the offset
:param expr: expression for calculation
:param sort: name of the sort column
:param ascending: whether to sort in ascending order
:return: calculated column
"""
return _shift_op(expr, Lag, offset, default=default,
sort=sort, ascending=ascending) | Get value in the row ``offset`` rows prior to the current row.
:param offset: the offset value
:param default: default value for the function, when there are no rows satisfying the offset
:param expr: expression for calculation
:param sort: name of the sort column
:param ascending: whether to sort in ascending order
:return: calculated column | Below is the instruction that describes the task:
### Input:
Get value in the row ``offset`` rows prior to the current row.
:param offset: the offset value
:param default: default value for the function, when there are no rows satisfying the offset
:param expr: expression for calculation
:param sort: name of the sort column
:param ascending: whether to sort in ascending order
:return: calculated column
### Response:
def lag(expr, offset, default=None, sort=None, ascending=True):
"""
Get value in the row ``offset`` rows prior to the current row.
:param offset: the offset value
:param default: default value for the function, when there are no rows satisfying the offset
:param expr: expression for calculation
:param sort: name of the sort column
:param ascending: whether to sort in ascending order
:return: calculated column
"""
return _shift_op(expr, Lag, offset, default=default,
sort=sort, ascending=ascending) |
def show_pushable(collector, **kwargs):
"""Show what images we have"""
collector.configuration['harpoon'].only_pushable = True
show(collector, **kwargs) | Show what images we have | Below is the instruction that describes the task:
### Input:
Show what images we have
### Response:
def show_pushable(collector, **kwargs):
"""Show what images we have"""
collector.configuration['harpoon'].only_pushable = True
show(collector, **kwargs) |
def get_data_urls(deployment_name,
endpoint_type='juttle',
token_manager=None,
app_url=defaults.APP_URL):
"""
get all of the data urls for a specified endpoint_type, currently supported types
are:
* http-import: for importing data points
* juttle: for running juttle programs
"""
deployment_details = deployments.get_deployment_details(deployment_name,
token_manager=token_manager,
app_url=app_url)
# use a random juttle endpoint
data_urls = []
for endpoint in deployment_details['endpoints']:
if endpoint_type in endpoint['type']:
data_urls.append(endpoint['uri'])
if len(data_urls) == 0:
raise JutException('No data engine currently configured for '
'deployment "%s"' % deployment_name)
return data_urls | get all of the data urls for a specified endpoint_type, currently supported types
are:
* http-import: for importing data points
* juttle: for running juttle programs | Below is the instruction that describes the task:
### Input:
get all of the data urls for a specified endpoint_type, currently supported types
are:
* http-import: for importing data points
* juttle: for running juttle programs
### Response:
def get_data_urls(deployment_name,
endpoint_type='juttle',
token_manager=None,
app_url=defaults.APP_URL):
"""
get all of the data urls for a specified endpoint_type, currently supported types
are:
* http-import: for importing data points
* juttle: for running juttle programs
"""
deployment_details = deployments.get_deployment_details(deployment_name,
token_manager=token_manager,
app_url=app_url)
# use a random juttle endpoint
data_urls = []
for endpoint in deployment_details['endpoints']:
if endpoint_type in endpoint['type']:
data_urls.append(endpoint['uri'])
if len(data_urls) == 0:
raise JutException('No data engine currently configured for '
'deployment "%s"' % deployment_name)
return data_urls |
def indices(self, data):
'''Generate patch start indices
Parameters
----------
data : dict of np.ndarray
As produced by pumpp.transform
Yields
------
start : int >= 0
The start index of a sample patch
'''
duration = self.data_duration(data)
for start in range(0, duration - self.duration, self.stride):
yield start | Generate patch start indices
Parameters
----------
data : dict of np.ndarray
As produced by pumpp.transform
Yields
------
start : int >= 0
The start index of a sample patch | Below is the instruction that describes the task:
### Input:
Generate patch start indices
Parameters
----------
data : dict of np.ndarray
As produced by pumpp.transform
Yields
------
start : int >= 0
The start index of a sample patch
### Response:
def indices(self, data):
'''Generate patch start indices
Parameters
----------
data : dict of np.ndarray
As produced by pumpp.transform
Yields
------
start : int >= 0
The start index of a sample patch
'''
duration = self.data_duration(data)
for start in range(0, duration - self.duration, self.stride):
yield start |
def video_augmentation(features, hue=False, saturate=False, contrast=False):
"""Augments video with optional hue, saturation and constrast.
Args:
features: dict, with keys "inputs", "targets".
features["inputs"], 4-D Tensor, shape=(THWC)
features["targets"], 4-D Tensor, shape=(THWC)
hue: bool, apply hue_transform.
saturate: bool, apply saturation transform.
contrast: bool, apply constrast transform.
Returns:
augment_features: dict with transformed "inputs" and "targets".
"""
inputs, targets = features["inputs"], features["targets"]
in_steps = common_layers.shape_list(inputs)[0]
# makes sure that the same augmentation is applied to both input and targets.
# if input is 4-D, then tf.image applies the same transform across the batch.
video = tf.concat((inputs, targets), axis=0)
if hue:
video = tf.image.random_hue(video, max_delta=0.2)
if saturate:
video = tf.image.random_saturation(video, lower=0.5, upper=1.5)
if contrast:
video = tf.image.random_contrast(video, lower=0.5, upper=1.5)
features["inputs"], features["targets"] = video[:in_steps], video[in_steps:]
return features | Augments video with optional hue, saturation and constrast.
Args:
features: dict, with keys "inputs", "targets".
features["inputs"], 4-D Tensor, shape=(THWC)
features["targets"], 4-D Tensor, shape=(THWC)
hue: bool, apply hue_transform.
saturate: bool, apply saturation transform.
contrast: bool, apply constrast transform.
Returns:
augment_features: dict with transformed "inputs" and "targets". | Below is the instruction that describes the task:
### Input:
Augments video with optional hue, saturation and constrast.
Args:
features: dict, with keys "inputs", "targets".
features["inputs"], 4-D Tensor, shape=(THWC)
features["targets"], 4-D Tensor, shape=(THWC)
hue: bool, apply hue_transform.
saturate: bool, apply saturation transform.
contrast: bool, apply constrast transform.
Returns:
augment_features: dict with transformed "inputs" and "targets".
### Response:
def video_augmentation(features, hue=False, saturate=False, contrast=False):
"""Augments video with optional hue, saturation and constrast.
Args:
features: dict, with keys "inputs", "targets".
features["inputs"], 4-D Tensor, shape=(THWC)
features["targets"], 4-D Tensor, shape=(THWC)
hue: bool, apply hue_transform.
saturate: bool, apply saturation transform.
contrast: bool, apply constrast transform.
Returns:
augment_features: dict with transformed "inputs" and "targets".
"""
inputs, targets = features["inputs"], features["targets"]
in_steps = common_layers.shape_list(inputs)[0]
# makes sure that the same augmentation is applied to both input and targets.
# if input is 4-D, then tf.image applies the same transform across the batch.
video = tf.concat((inputs, targets), axis=0)
if hue:
video = tf.image.random_hue(video, max_delta=0.2)
if saturate:
video = tf.image.random_saturation(video, lower=0.5, upper=1.5)
if contrast:
video = tf.image.random_contrast(video, lower=0.5, upper=1.5)
features["inputs"], features["targets"] = video[:in_steps], video[in_steps:]
return features |
def free_temp(self, v):
"""Release the GeneratedTempVar v so it can be reused."""
self.used_temps.remove(v)
self.free_temps.add(v) | Release the GeneratedTempVar v so it can be reused. | Below is the instruction that describes the task:
### Input:
Release the GeneratedTempVar v so it can be reused.
### Response:
def free_temp(self, v):
"""Release the GeneratedTempVar v so it can be reused."""
self.used_temps.remove(v)
self.free_temps.add(v) |
def arg(self, arg_name, *args, **kwargs):
"""Decorator function configures any arg by given ``arg_name`` with
supplied ``args`` and ``kwargs`` passing them transparently to
:py:func:``argparse.ArgumentParser.add_argument`` function
:param arg_name: arg name to configure
:param type: str
"""
def wrapper(func):
if not getattr(func, 'argopts', None):
func.argopts = {}
func.argopts[arg_name] = (args, kwargs)
return func
return wrapper | Decorator function configures any arg by given ``arg_name`` with
supplied ``args`` and ``kwargs`` passing them transparently to
:py:func:``argparse.ArgumentParser.add_argument`` function
:param arg_name: arg name to configure
:param type: str | Below is the instruction that describes the task:
### Input:
Decorator function configures any arg by given ``arg_name`` with
supplied ``args`` and ``kwargs`` passing them transparently to
:py:func:``argparse.ArgumentParser.add_argument`` function
:param arg_name: arg name to configure
:param type: str
### Response:
def arg(self, arg_name, *args, **kwargs):
"""Decorator function configures any arg by given ``arg_name`` with
supplied ``args`` and ``kwargs`` passing them transparently to
:py:func:``argparse.ArgumentParser.add_argument`` function
:param arg_name: arg name to configure
:param type: str
"""
def wrapper(func):
if not getattr(func, 'argopts', None):
func.argopts = {}
func.argopts[arg_name] = (args, kwargs)
return func
return wrapper |
def set_source_ip_for_interface(source_ip_address, desired_source_ip_address, device_num=0):
"""Configures the source IP address for a Linux interface
:param source_ip_address: (str) Source IP address to change
:param desired_source_ip_address: (str) IP address to configure as the source in outgoing packets
:param device_num: (int) Integer interface device number to configure
:return: None
:raises: TypeError, ValueError, OSError
"""
log = logging.getLogger(mod_logger + '.set_source_ip_for_interface')
if not isinstance(source_ip_address, basestring):
msg = 'arg source_ip_address must be a string'
log.error(msg)
raise TypeError(msg)
if not isinstance(desired_source_ip_address, basestring):
msg = 'arg desired_source_ip_address must be a string'
log.error(msg)
raise TypeError(msg)
if not validate_ip_address(ip_address=source_ip_address):
msg = 'The arg source_ip_address was found to be an invalid IP address. Please pass a valid IP address'
log.error(msg)
raise ValueError(msg)
if not validate_ip_address(ip_address=desired_source_ip_address):
msg = 'The arg desired_source_ip_address was found to be an invalid IP address. Please pass a valid IP address'
log.error(msg)
raise ValueError(msg)
# Determine the device name based on the device_num
log.debug('Attempting to determine the device name based on the device_num arg...')
try:
int(device_num)
except ValueError:
if isinstance(device_num, basestring):
device_name = device_num
log.info('Provided device_num is not an int, assuming it is the full device name: {d}'.format(
d=device_name))
else:
raise TypeError('device_num arg must be a string or int')
else:
device_name = 'eth{n}'.format(n=str(device_num))
log.info('Provided device_num is an int, assuming device name is: {d}'.format(d=device_name))
# Build the command
# iptables -t nat -I POSTROUTING -o eth0 -s ${RA_ORIGINAL_IP} -j SNAT --to-source
command = ['iptables', '-t', 'nat', '-I', 'POSTROUTING', '-o', device_name, '-s',
source_ip_address, '-j', 'SNAT', '--to-source', desired_source_ip_address]
log.info('Running command: {c}'.format(c=command))
try:
result = run_command(command, timeout_sec=20)
except CommandError:
_, ex, trace = sys.exc_info()
msg = 'There was a problem running iptables command: {c}\n{e}'.format(c=' '.join(command), e=str(ex))
log.error(msg)
raise OSError, msg, trace
if int(result['code']) != 0:
msg = 'The iptables command produced an error with exit code: {c}, and output:\n{o}'.format(
c=result['code'], o=result['output'])
log.error(msg)
raise OSError(msg)
log.info('Successfully configured the source IP for {d} to be: {i}'.format(
d=device_name, i=desired_source_ip_address)) | Configures the source IP address for a Linux interface
:param source_ip_address: (str) Source IP address to change
:param desired_source_ip_address: (str) IP address to configure as the source in outgoing packets
:param device_num: (int) Integer interface device number to configure
:return: None
:raises: TypeError, ValueError, OSError | Below is the instruction that describes the task:
### Input:
Configures the source IP address for a Linux interface
:param source_ip_address: (str) Source IP address to change
:param desired_source_ip_address: (str) IP address to configure as the source in outgoing packets
:param device_num: (int) Integer interface device number to configure
:return: None
:raises: TypeError, ValueError, OSError
### Response:
def set_source_ip_for_interface(source_ip_address, desired_source_ip_address, device_num=0):
"""Configures the source IP address for a Linux interface
:param source_ip_address: (str) Source IP address to change
:param desired_source_ip_address: (str) IP address to configure as the source in outgoing packets
:param device_num: (int) Integer interface device number to configure
:return: None
:raises: TypeError, ValueError, OSError
"""
log = logging.getLogger(mod_logger + '.set_source_ip_for_interface')
if not isinstance(source_ip_address, basestring):
msg = 'arg source_ip_address must be a string'
log.error(msg)
raise TypeError(msg)
if not isinstance(desired_source_ip_address, basestring):
msg = 'arg desired_source_ip_address must be a string'
log.error(msg)
raise TypeError(msg)
if not validate_ip_address(ip_address=source_ip_address):
msg = 'The arg source_ip_address was found to be an invalid IP address. Please pass a valid IP address'
log.error(msg)
raise ValueError(msg)
if not validate_ip_address(ip_address=desired_source_ip_address):
msg = 'The arg desired_source_ip_address was found to be an invalid IP address. Please pass a valid IP address'
log.error(msg)
raise ValueError(msg)
# Determine the device name based on the device_num
log.debug('Attempting to determine the device name based on the device_num arg...')
try:
int(device_num)
except ValueError:
if isinstance(device_num, basestring):
device_name = device_num
log.info('Provided device_num is not an int, assuming it is the full device name: {d}'.format(
d=device_name))
else:
raise TypeError('device_num arg must be a string or int')
else:
device_name = 'eth{n}'.format(n=str(device_num))
log.info('Provided device_num is an int, assuming device name is: {d}'.format(d=device_name))
# Build the command
# iptables -t nat -I POSTROUTING -o eth0 -s ${RA_ORIGINAL_IP} -j SNAT --to-source
command = ['iptables', '-t', 'nat', '-I', 'POSTROUTING', '-o', device_name, '-s',
source_ip_address, '-j', 'SNAT', '--to-source', desired_source_ip_address]
log.info('Running command: {c}'.format(c=command))
try:
result = run_command(command, timeout_sec=20)
except CommandError:
_, ex, trace = sys.exc_info()
msg = 'There was a problem running iptables command: {c}\n{e}'.format(c=' '.join(command), e=str(ex))
log.error(msg)
raise OSError, msg, trace
if int(result['code']) != 0:
msg = 'The iptables command produced an error with exit code: {c}, and output:\n{o}'.format(
c=result['code'], o=result['output'])
log.error(msg)
raise OSError(msg)
log.info('Successfully configured the source IP for {d} to be: {i}'.format(
d=device_name, i=desired_source_ip_address)) |
def split_sequence(seq, n):
"""Generates tokens of length n from a sequence.
The last token may be of smaller length."""
tokens = []
while seq:
tokens.append(seq[:n])
seq = seq[n:]
return tokens | Generates tokens of length n from a sequence.
The last token may be of smaller length. | Below is the instruction that describes the task:
### Input:
Generates tokens of length n from a sequence.
The last token may be of smaller length.
### Response:
def split_sequence(seq, n):
"""Generates tokens of length n from a sequence.
The last token may be of smaller length."""
tokens = []
while seq:
tokens.append(seq[:n])
seq = seq[n:]
return tokens |
def dict_cursor(func):
"""
Decorator that provides a dictionary cursor to the calling function
Adds the cursor as the second argument to the calling functions
Requires that the function being decorated is an instance of a class or object
that yields a cursor from a get_cursor(cursor_type=CursorType.DICT) coroutine or provides such an object
as the first argument in its signature
Yields:
A client-side dictionary cursor
"""
@wraps(func)
def wrapper(cls, *args, **kwargs):
with (yield from cls.get_cursor(_CursorType.DICT)) as c:
return (yield from func(cls, c, *args, **kwargs))
return wrapper | Decorator that provides a dictionary cursor to the calling function
Adds the cursor as the second argument to the calling functions
Requires that the function being decorated is an instance of a class or object
that yields a cursor from a get_cursor(cursor_type=CursorType.DICT) coroutine or provides such an object
as the first argument in its signature
Yields:
A client-side dictionary cursor | Below is the instruction that describes the task:
### Input:
Decorator that provides a dictionary cursor to the calling function
Adds the cursor as the second argument to the calling functions
Requires that the function being decorated is an instance of a class or object
that yields a cursor from a get_cursor(cursor_type=CursorType.DICT) coroutine or provides such an object
as the first argument in its signature
Yields:
A client-side dictionary cursor
### Response:
def dict_cursor(func):
"""
Decorator that provides a dictionary cursor to the calling function
Adds the cursor as the second argument to the calling functions
Requires that the function being decorated is an instance of a class or object
that yields a cursor from a get_cursor(cursor_type=CursorType.DICT) coroutine or provides such an object
as the first argument in its signature
Yields:
A client-side dictionary cursor
"""
@wraps(func)
def wrapper(cls, *args, **kwargs):
with (yield from cls.get_cursor(_CursorType.DICT)) as c:
return (yield from func(cls, c, *args, **kwargs))
return wrapper |
def _taper2coeffs(self, itaper):
"""
Return the spherical harmonic coefficients of the unrotated taper i
as an array, where i = 0 is the best concentrated.
"""
taperm = self.orders[itaper]
coeffs = _np.zeros((2, self.lwin + 1, self.lwin + 1))
if taperm < 0:
coeffs[1, :, abs(taperm)] = self.tapers[:, itaper]
else:
coeffs[0, :, abs(taperm)] = self.tapers[:, itaper]
return coeffs | Return the spherical harmonic coefficients of the unrotated taper i
as an array, where i = 0 is the best concentrated. | Below is the instruction that describes the task:
### Input:
Return the spherical harmonic coefficients of the unrotated taper i
as an array, where i = 0 is the best concentrated.
### Response:
def _taper2coeffs(self, itaper):
"""
Return the spherical harmonic coefficients of the unrotated taper i
as an array, where i = 0 is the best concentrated.
"""
taperm = self.orders[itaper]
coeffs = _np.zeros((2, self.lwin + 1, self.lwin + 1))
if taperm < 0:
coeffs[1, :, abs(taperm)] = self.tapers[:, itaper]
else:
coeffs[0, :, abs(taperm)] = self.tapers[:, itaper]
return coeffs |
def islice_extended(iterable, *args):
"""An extension of :func:`itertools.islice` that supports negative values
for *stop*, *start*, and *step*.
>>> iterable = iter('abcdefgh')
>>> list(islice_extended(iterable, -4, -1))
['e', 'f', 'g']
Slices with negative values require some caching of *iterable*, but this
function takes care to minimize the amount of memory required.
For example, you can use a negative step with an infinite iterator:
>>> from itertools import count
>>> list(islice_extended(count(), 110, 99, -2))
[110, 108, 106, 104, 102, 100]
"""
s = slice(*args)
start = s.start
stop = s.stop
if s.step == 0:
raise ValueError('step argument must be a non-zero integer or None.')
step = s.step or 1
it = iter(iterable)
if step > 0:
start = 0 if (start is None) else start
if (start < 0):
# Consume all but the last -start items
cache = deque(enumerate(it, 1), maxlen=-start)
len_iter = cache[-1][0] if cache else 0
# Adjust start to be positive
i = max(len_iter + start, 0)
# Adjust stop to be positive
if stop is None:
j = len_iter
elif stop >= 0:
j = min(stop, len_iter)
else:
j = max(len_iter + stop, 0)
# Slice the cache
n = j - i
if n <= 0:
return
for index, item in islice(cache, 0, n, step):
yield item
elif (stop is not None) and (stop < 0):
# Advance to the start position
next(islice(it, start, start), None)
# When stop is negative, we have to carry -stop items while
# iterating
cache = deque(islice(it, -stop), maxlen=-stop)
for index, item in enumerate(it):
cached_item = cache.popleft()
if index % step == 0:
yield cached_item
cache.append(item)
else:
# When both start and stop are positive we have the normal case
yield from islice(it, start, stop, step)
else:
start = -1 if (start is None) else start
if (stop is not None) and (stop < 0):
# Consume all but the last items
n = -stop - 1
cache = deque(enumerate(it, 1), maxlen=n)
len_iter = cache[-1][0] if cache else 0
# If start and stop are both negative they are comparable and
# we can just slice. Otherwise we can adjust start to be negative
# and then slice.
if start < 0:
i, j = start, stop
else:
i, j = min(start - len_iter, -1), None
for index, item in list(cache)[i:j:step]:
yield item
else:
# Advance to the stop position
if stop is not None:
m = stop + 1
next(islice(it, m, m), None)
# stop is positive, so if start is negative they are not comparable
# and we need the rest of the items.
if start < 0:
i = start
n = None
# stop is None and start is positive, so we just need items up to
# the start index.
elif stop is None:
i = None
n = start + 1
# Both stop and start are positive, so they are comparable.
else:
i = None
n = start - stop
if n <= 0:
return
cache = list(islice(it, n))
yield from cache[i::step] | An extension of :func:`itertools.islice` that supports negative values
for *stop*, *start*, and *step*.
>>> iterable = iter('abcdefgh')
>>> list(islice_extended(iterable, -4, -1))
['e', 'f', 'g']
Slices with negative values require some caching of *iterable*, but this
function takes care to minimize the amount of memory required.
For example, you can use a negative step with an infinite iterator:
>>> from itertools import count
>>> list(islice_extended(count(), 110, 99, -2))
[110, 108, 106, 104, 102, 100] | Below is the instruction that describes the task:
### Input:
An extension of :func:`itertools.islice` that supports negative values
for *stop*, *start*, and *step*.
>>> iterable = iter('abcdefgh')
>>> list(islice_extended(iterable, -4, -1))
['e', 'f', 'g']
Slices with negative values require some caching of *iterable*, but this
function takes care to minimize the amount of memory required.
For example, you can use a negative step with an infinite iterator:
>>> from itertools import count
>>> list(islice_extended(count(), 110, 99, -2))
[110, 108, 106, 104, 102, 100]
### Response:
def islice_extended(iterable, *args):
"""An extension of :func:`itertools.islice` that supports negative values
for *stop*, *start*, and *step*.
>>> iterable = iter('abcdefgh')
>>> list(islice_extended(iterable, -4, -1))
['e', 'f', 'g']
Slices with negative values require some caching of *iterable*, but this
function takes care to minimize the amount of memory required.
For example, you can use a negative step with an infinite iterator:
>>> from itertools import count
>>> list(islice_extended(count(), 110, 99, -2))
[110, 108, 106, 104, 102, 100]
"""
s = slice(*args)
start = s.start
stop = s.stop
if s.step == 0:
raise ValueError('step argument must be a non-zero integer or None.')
step = s.step or 1
it = iter(iterable)
if step > 0:
start = 0 if (start is None) else start
if (start < 0):
# Consume all but the last -start items
cache = deque(enumerate(it, 1), maxlen=-start)
len_iter = cache[-1][0] if cache else 0
# Adjust start to be positive
i = max(len_iter + start, 0)
# Adjust stop to be positive
if stop is None:
j = len_iter
elif stop >= 0:
j = min(stop, len_iter)
else:
j = max(len_iter + stop, 0)
# Slice the cache
n = j - i
if n <= 0:
return
for index, item in islice(cache, 0, n, step):
yield item
elif (stop is not None) and (stop < 0):
# Advance to the start position
next(islice(it, start, start), None)
# When stop is negative, we have to carry -stop items while
# iterating
cache = deque(islice(it, -stop), maxlen=-stop)
for index, item in enumerate(it):
cached_item = cache.popleft()
if index % step == 0:
yield cached_item
cache.append(item)
else:
# When both start and stop are positive we have the normal case
yield from islice(it, start, stop, step)
else:
start = -1 if (start is None) else start
if (stop is not None) and (stop < 0):
# Consume all but the last items
n = -stop - 1
cache = deque(enumerate(it, 1), maxlen=n)
len_iter = cache[-1][0] if cache else 0
# If start and stop are both negative they are comparable and
# we can just slice. Otherwise we can adjust start to be negative
# and then slice.
if start < 0:
i, j = start, stop
else:
i, j = min(start - len_iter, -1), None
for index, item in list(cache)[i:j:step]:
yield item
else:
# Advance to the stop position
if stop is not None:
m = stop + 1
next(islice(it, m, m), None)
# stop is positive, so if start is negative they are not comparable
# and we need the rest of the items.
if start < 0:
i = start
n = None
# stop is None and start is positive, so we just need items up to
# the start index.
elif stop is None:
i = None
n = start + 1
# Both stop and start are positive, so they are comparable.
else:
i = None
n = start - stop
if n <= 0:
return
cache = list(islice(it, n))
yield from cache[i::step] |
def Compare(fromMo, toMo, diff):
""" Internal method to support CompareManagedObject functionality. """
from UcsBase import UcsUtils
if (fromMo.classId != toMo.classId):
return CompareStatus.TypesDifferent
for prop in UcsUtils.GetUcsPropertyMetaAttributeList(str(fromMo.classId)):
propMeta = UcsUtils.IsPropertyInMetaIgnoreCase(fromMo.classId, prop)
if propMeta != None:
if ((propMeta.access == UcsPropertyMeta.Internal) or (propMeta.access == UcsPropertyMeta.ReadOnly) or (
prop in toMo._excludePropList)):
continue
if ((toMo.__dict__.has_key(prop)) and (fromMo.getattr(prop) != toMo.getattr(prop))):
diff.append(prop)
if (len(diff) > 0):
return CompareStatus.PropsDifferent
return CompareStatus.Equal | Internal method to support CompareManagedObject functionality. | Below is the instruction that describes the task:
### Input:
Internal method to support CompareManagedObject functionality.
### Response:
def Compare(fromMo, toMo, diff):
""" Internal method to support CompareManagedObject functionality. """
from UcsBase import UcsUtils
if (fromMo.classId != toMo.classId):
return CompareStatus.TypesDifferent
for prop in UcsUtils.GetUcsPropertyMetaAttributeList(str(fromMo.classId)):
propMeta = UcsUtils.IsPropertyInMetaIgnoreCase(fromMo.classId, prop)
if propMeta != None:
if ((propMeta.access == UcsPropertyMeta.Internal) or (propMeta.access == UcsPropertyMeta.ReadOnly) or (
prop in toMo._excludePropList)):
continue
if ((toMo.__dict__.has_key(prop)) and (fromMo.getattr(prop) != toMo.getattr(prop))):
diff.append(prop)
if (len(diff) > 0):
return CompareStatus.PropsDifferent
return CompareStatus.Equal |
def detailed_string(self):
"""
Create a long string with all transcript effects for each mutation,
grouped by gene (if a mutation affects multiple genes).
"""
lines = []
# TODO: annoying to always write `groupby_result.items()`,
# consider makings a GroupBy class which iterates over pairs
# and also common helper methods like `map_values`.
for variant, variant_effects in self.groupby_variant().items():
lines.append("\n%s" % variant)
gene_effects_groups = variant_effects.groupby_gene_id()
for (gene_id, gene_effects) in gene_effects_groups.items():
if gene_id:
gene_name = variant.ensembl.gene_name_of_gene_id(gene_id)
lines.append(" Gene: %s (%s)" % (gene_name, gene_id))
# place transcript effects with more significant impact
# on top (e.g. FrameShift should go before NoncodingTranscript)
for effect in sorted(
gene_effects,
key=effect_priority,
reverse=True):
lines.append(" -- %s" % effect)
# if we only printed one effect for this gene then
# it's redundant to print it again as the highest priority effect
if len(variant_effects) > 1:
best = variant_effects.top_priority_effect()
lines.append(" Highest Priority Effect: %s" % best)
return "\n".join(lines) | Create a long string with all transcript effects for each mutation,
grouped by gene (if a mutation affects multiple genes). | Below is the the instruction that describes the task:
### Input:
Create a long string with all transcript effects for each mutation,
grouped by gene (if a mutation affects multiple genes).
### Response:
def detailed_string(self):
"""
Create a long string with all transcript effects for each mutation,
grouped by gene (if a mutation affects multiple genes).
"""
lines = []
# TODO: annoying to always write `groupby_result.items()`,
# consider makings a GroupBy class which iterates over pairs
# and also common helper methods like `map_values`.
for variant, variant_effects in self.groupby_variant().items():
lines.append("\n%s" % variant)
gene_effects_groups = variant_effects.groupby_gene_id()
for (gene_id, gene_effects) in gene_effects_groups.items():
if gene_id:
gene_name = variant.ensembl.gene_name_of_gene_id(gene_id)
lines.append(" Gene: %s (%s)" % (gene_name, gene_id))
# place transcript effects with more significant impact
# on top (e.g. FrameShift should go before NoncodingTranscript)
for effect in sorted(
gene_effects,
key=effect_priority,
reverse=True):
lines.append(" -- %s" % effect)
# if we only printed one effect for this gene then
# it's redundant to print it again as the highest priority effect
if len(variant_effects) > 1:
best = variant_effects.top_priority_effect()
lines.append(" Highest Priority Effect: %s" % best)
return "\n".join(lines) |
def startProducing(self, consumer):
""" Start producing.
Args:
consumer: Consumer
"""
self._consumer = consumer
self._current_deferred = defer.Deferred()
self._sent = 0
self._paused = False
if not hasattr(self, "_chunk_headers"):
self._build_chunk_headers()
if self._data:
block = ""
for field in self._data:
block += self._chunk_headers[field]
block += self._data[field]
block += "\r\n"
self._send_to_consumer(block)
if self._files:
self._files_iterator = self._files.iterkeys()
self._files_sent = 0
self._files_length = len(self._files)
self._current_file_path = None
self._current_file_handle = None
self._current_file_length = None
self._current_file_sent = 0
result = self._produce()
if result:
return result
else:
return defer.succeed(None)
return self._current_deferred | Start producing.
Args:
consumer: Consumer | Below is the instruction that describes the task:
### Input:
Start producing.
Args:
consumer: Consumer
### Response:
def startProducing(self, consumer):
""" Start producing.
Args:
consumer: Consumer
"""
self._consumer = consumer
self._current_deferred = defer.Deferred()
self._sent = 0
self._paused = False
if not hasattr(self, "_chunk_headers"):
self._build_chunk_headers()
if self._data:
block = ""
for field in self._data:
block += self._chunk_headers[field]
block += self._data[field]
block += "\r\n"
self._send_to_consumer(block)
if self._files:
self._files_iterator = self._files.iterkeys()
self._files_sent = 0
self._files_length = len(self._files)
self._current_file_path = None
self._current_file_handle = None
self._current_file_length = None
self._current_file_sent = 0
result = self._produce()
if result:
return result
else:
return defer.succeed(None)
return self._current_deferred |
def scheduledTimes(self, runnable):
"""
Return an iterable of the times at which the given item is scheduled to
run.
"""
events = self.store.query(
TimedEvent, TimedEvent.runnable == runnable)
return (event.time for event in events if not event.running) | Return an iterable of the times at which the given item is scheduled to
run. | Below is the instruction that describes the task:
### Input:
Return an iterable of the times at which the given item is scheduled to
run.
### Response:
def scheduledTimes(self, runnable):
"""
Return an iterable of the times at which the given item is scheduled to
run.
"""
events = self.store.query(
TimedEvent, TimedEvent.runnable == runnable)
return (event.time for event in events if not event.running) |
def image(self, fname, group=None, scaled=None, dtype=None, idxexp=None,
zoom=None, gray=None):
"""Get named image.
Parameters
----------
fname : string
Filename of image
group : string or None, optional (default None)
Name of image group
scaled : bool or None, optional (default None)
Flag indicating whether images should be on the range [0,...,255]
with np.uint8 dtype (False), or on the range [0,...,1] with
np.float32 dtype (True). If the value is None, scaling behaviour
is determined by the `scaling` parameter passed to the object
initializer, otherwise that selection is overridden.
dtype : data-type or None, optional (default None)
Desired data type of images. If `scaled` is True and `dtype` is an
integer type, the output data type is np.float32. If the value is
None, the data type is determined by the `dtype` parameter passed to
the object initializer, otherwise that selection is overridden.
idxexp : index expression or None, optional (default None)
An index expression selecting, for example, a cropped region of
the requested image. This selection is applied *before* any
`zoom` rescaling so the expression does not need to be modified when
the zoom factor is changed.
zoom : float or None, optional (default None)
Optional rescaling factor to apply to the images. If the value is
None, support rescaling behaviour is determined by the `zoom`
parameter passed to the object initializer, otherwise that selection
is overridden.
gray : bool or None, optional (default None)
Flag indicating whether RGB images should be converted to grayscale.
If the value is None, behaviour is determined by the `gray`
parameter passed to the object initializer.
Returns
-------
img : ndarray
Image array
Raises
------
IOError
If the image is not accessible
"""
if scaled is None:
scaled = self.scaled
if dtype is None:
if self.dtype is None:
dtype = np.uint8
else:
dtype = self.dtype
if scaled and np.issubdtype(dtype, np.integer):
dtype = np.float32
if zoom is None:
zoom = self.zoom
if gray is None:
gray = self.gray
if group is None:
pth = os.path.join(self.bpth, fname)
else:
pth = os.path.join(self.bpth, group, fname)
try:
img = np.asarray(imageio.imread(pth), dtype=dtype)
except IOError:
raise IOError('Could not access image %s in group %s' %
(fname, group))
if scaled:
img /= 255.0
if idxexp is not None:
img = img[idxexp]
if zoom is not None:
if img.ndim == 2:
img = sni.zoom(img, zoom)
else:
img = sni.zoom(img, (zoom,)*2 + (1,)*(img.ndim-2))
if gray:
img = rgb2gray(img)
return img | Get named image.
Parameters
----------
fname : string
Filename of image
group : string or None, optional (default None)
Name of image group
scaled : bool or None, optional (default None)
Flag indicating whether images should be on the range [0,...,255]
with np.uint8 dtype (False), or on the range [0,...,1] with
np.float32 dtype (True). If the value is None, scaling behaviour
is determined by the `scaling` parameter passed to the object
initializer, otherwise that selection is overridden.
dtype : data-type or None, optional (default None)
Desired data type of images. If `scaled` is True and `dtype` is an
integer type, the output data type is np.float32. If the value is
None, the data type is determined by the `dtype` parameter passed to
the object initializer, otherwise that selection is overridden.
idxexp : index expression or None, optional (default None)
An index expression selecting, for example, a cropped region of
the requested image. This selection is applied *before* any
`zoom` rescaling so the expression does not need to be modified when
the zoom factor is changed.
zoom : float or None, optional (default None)
Optional rescaling factor to apply to the images. If the value is
None, support rescaling behaviour is determined by the `zoom`
parameter passed to the object initializer, otherwise that selection
is overridden.
gray : bool or None, optional (default None)
Flag indicating whether RGB images should be converted to grayscale.
If the value is None, behaviour is determined by the `gray`
parameter passed to the object initializer.
Returns
-------
img : ndarray
Image array
Raises
------
IOError
If the image is not accessible | Below is the instruction that describes the task:
### Input:
Get named image.
Parameters
----------
fname : string
Filename of image
group : string or None, optional (default None)
Name of image group
scaled : bool or None, optional (default None)
Flag indicating whether images should be on the range [0,...,255]
with np.uint8 dtype (False), or on the range [0,...,1] with
np.float32 dtype (True). If the value is None, scaling behaviour
is determined by the `scaling` parameter passed to the object
initializer, otherwise that selection is overridden.
dtype : data-type or None, optional (default None)
Desired data type of images. If `scaled` is True and `dtype` is an
integer type, the output data type is np.float32. If the value is
None, the data type is determined by the `dtype` parameter passed to
the object initializer, otherwise that selection is overridden.
idxexp : index expression or None, optional (default None)
An index expression selecting, for example, a cropped region of
the requested image. This selection is applied *before* any
`zoom` rescaling so the expression does not need to be modified when
the zoom factor is changed.
zoom : float or None, optional (default None)
Optional rescaling factor to apply to the images. If the value is
None, support rescaling behaviour is determined by the `zoom`
parameter passed to the object initializer, otherwise that selection
is overridden.
gray : bool or None, optional (default None)
Flag indicating whether RGB images should be converted to grayscale.
If the value is None, behaviour is determined by the `gray`
parameter passed to the object initializer.
Returns
-------
img : ndarray
Image array
Raises
------
IOError
If the image is not accessible
### Response:
def image(self, fname, group=None, scaled=None, dtype=None, idxexp=None,
zoom=None, gray=None):
"""Get named image.
Parameters
----------
fname : string
Filename of image
group : string or None, optional (default None)
Name of image group
scaled : bool or None, optional (default None)
Flag indicating whether images should be on the range [0,...,255]
with np.uint8 dtype (False), or on the range [0,...,1] with
np.float32 dtype (True). If the value is None, scaling behaviour
is determined by the `scaling` parameter passed to the object
initializer, otherwise that selection is overridden.
dtype : data-type or None, optional (default None)
Desired data type of images. If `scaled` is True and `dtype` is an
integer type, the output data type is np.float32. If the value is
None, the data type is determined by the `dtype` parameter passed to
the object initializer, otherwise that selection is overridden.
idxexp : index expression or None, optional (default None)
An index expression selecting, for example, a cropped region of
the requested image. This selection is applied *before* any
`zoom` rescaling so the expression does not need to be modified when
the zoom factor is changed.
zoom : float or None, optional (default None)
Optional rescaling factor to apply to the images. If the value is
None, support rescaling behaviour is determined by the `zoom`
parameter passed to the object initializer, otherwise that selection
is overridden.
gray : bool or None, optional (default None)
Flag indicating whether RGB images should be converted to grayscale.
If the value is None, behaviour is determined by the `gray`
parameter passed to the object initializer.
Returns
-------
img : ndarray
Image array
Raises
------
IOError
If the image is not accessible
"""
if scaled is None:
scaled = self.scaled
if dtype is None:
if self.dtype is None:
dtype = np.uint8
else:
dtype = self.dtype
if scaled and np.issubdtype(dtype, np.integer):
dtype = np.float32
if zoom is None:
zoom = self.zoom
if gray is None:
gray = self.gray
if group is None:
pth = os.path.join(self.bpth, fname)
else:
pth = os.path.join(self.bpth, group, fname)
try:
img = np.asarray(imageio.imread(pth), dtype=dtype)
except IOError:
raise IOError('Could not access image %s in group %s' %
(fname, group))
if scaled:
img /= 255.0
if idxexp is not None:
img = img[idxexp]
if zoom is not None:
if img.ndim == 2:
img = sni.zoom(img, zoom)
else:
img = sni.zoom(img, (zoom,)*2 + (1,)*(img.ndim-2))
if gray:
img = rgb2gray(img)
return img |
def as_ul(self, show_leaf=True, current_linkable=False, class_current="active_link"):
"""
It returns breadcrumb as ul
"""
return self.__do_menu("as_ul", show_leaf, current_linkable, class_current) | It returns breadcrumb as ul | Below is the the instruction that describes the task:
### Input:
It returns breadcrumb as ul
### Response:
def as_ul(self, show_leaf=True, current_linkable=False, class_current="active_link"):
"""
It returns breadcrumb as ul
"""
return self.__do_menu("as_ul", show_leaf, current_linkable, class_current) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.