code (string, lengths 75–104k) | docstring (string, lengths 1–46.9k) | text (string, lengths 164–112k) |
|---|---|---|
def find_matches(self, content, file_to_handle):
"""Find all matches of an expression in a file
"""
# look for all match groups in the content
groups = [match.groupdict() for match in
self.match_expression.finditer(content)]
# filter out content not in the matchgroup
matches = [group['matchgroup'] for group in groups
if group.get('matchgroup')]
logger.info('Found %s matches in %s', len(matches), file_to_handle)
# We only need the unique strings found as we'll be replacing each
# of them. No need to replace the ones already replaced.
return list(set(matches)) | Find all matches of an expression in a file | Below is the instruction that describes the task:
### Input:
Find all matches of an expression in a file
### Response:
def find_matches(self, content, file_to_handle):
    """Find all matches of an expression in a file
    """
    # Collect the named 'matchgroup' from every occurrence of the
    # expression, skipping occurrences where the group did not capture.
    found = []
    for match in self.match_expression.finditer(content):
        captured = match.groupdict().get('matchgroup')
        if captured:
            found.append(captured)
    logger.info('Found %s matches in %s', len(found), file_to_handle)
    # Deduplicate: each unique string only needs to be replaced once,
    # so there is no point returning it multiple times.
    return list(set(found))
def xhdr(self, header, msgid_range=None):
"""XHDR command.
"""
args = header
if range is not None:
args += " " + utils.unparse_msgid_range(msgid_range)
code, message = self.command("XHDR", args)
if code != 221:
raise NNTPReplyError(code, message)
return self.info(code, message) | XHDR command. | Below is the the instruction that describes the task:
### Input:
XHDR command.
### Response:
def xhdr(self, header, msgid_range=None):
    """XHDR command.

    Requests the value of *header* for the given range of articles.

    Args:
        header: Name of the header to retrieve.
        msgid_range: Optional message-id or article-number range; when
            None the server's current article is used.

    Returns:
        The parsed info response for the XHDR reply.

    Raises:
        NNTPReplyError: If the server does not answer with code 221.
    """
    args = header
    # Bug fix: the original tested ``range is not None`` — ``range`` is the
    # builtin and is never None, so unparse_msgid_range(None) was called
    # even when no range was supplied. Test the actual parameter instead.
    if msgid_range is not None:
        args += " " + utils.unparse_msgid_range(msgid_range)
    code, message = self.command("XHDR", args)
    if code != 221:
        raise NNTPReplyError(code, message)
    return self.info(code, message)
def ParseFileObject(self, parser_mediator, file_object):
"""Parses an Opera typed history file-like object.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
file_object (dfvfs.FileIO): file-like object.
Raises:
UnableToParseFile: when the file cannot be parsed.
"""
data = file_object.read(self._HEADER_READ_SIZE)
if not data.startswith(b'<?xml'):
raise errors.UnableToParseFile(
'Not an Opera typed history file [not a XML]')
_, _, data = data.partition(b'\n')
if not data.startswith(b'<typed_history'):
raise errors.UnableToParseFile(
'Not an Opera typed history file [wrong XML root key]')
# For ElementTree to work we need to work on a file object seeked
# to the beginning.
file_object.seek(0, os.SEEK_SET)
xml = ElementTree.parse(file_object)
for history_item in xml.iterfind('typed_history_item'):
event_data = OperaTypedHistoryEventData()
event_data.entry_type = history_item.get('type', None)
event_data.url = history_item.get('content', None)
if event_data.entry_type == 'selected':
event_data.entry_selection = 'Filled from autocomplete.'
elif event_data.entry_type == 'text':
event_data.entry_selection = 'Manually typed.'
last_typed_time = history_item.get('last_typed', None)
if last_typed_time is None:
parser_mediator.ProduceExtractionWarning('missing last typed time.')
continue
date_time = dfdatetime_time_elements.TimeElements()
try:
date_time.CopyFromStringISO8601(last_typed_time)
except ValueError as exception:
parser_mediator.ProduceExtractionWarning(
'unsupported last typed time: {0:s} with error: {1!s}.'.format(
last_typed_time, exception))
continue
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_LAST_VISITED)
parser_mediator.ProduceEventWithEventData(event, event_data) | Parses an Opera typed history file-like object.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
file_object (dfvfs.FileIO): file-like object.
Raises:
UnableToParseFile: when the file cannot be parsed. | Below is the instruction that describes the task:
### Input:
Parses an Opera typed history file-like object.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
file_object (dfvfs.FileIO): file-like object.
Raises:
UnableToParseFile: when the file cannot be parsed.
### Response:
def ParseFileObject(self, parser_mediator, file_object):
  """Parses an Opera typed history file-like object.

  Args:
    parser_mediator (ParserMediator): mediates interactions between parsers
        and other components, such as storage and dfvfs.
    file_object (dfvfs.FileIO): file-like object.

  Raises:
    UnableToParseFile: when the file cannot be parsed.
  """
  header = file_object.read(self._HEADER_READ_SIZE)
  if not header.startswith(b'<?xml'):
    raise errors.UnableToParseFile(
        'Not an Opera typed history file [not a XML]')

  # Skip past the XML declaration line and verify the document root.
  _, _, remainder = header.partition(b'\n')
  if not remainder.startswith(b'<typed_history'):
    raise errors.UnableToParseFile(
        'Not an Opera typed history file [wrong XML root key]')

  # ElementTree must consume the file from the very beginning.
  file_object.seek(0, os.SEEK_SET)
  document = ElementTree.parse(file_object)

  # Human-readable description per entry type; unknown types get none.
  selection_per_type = {
      'selected': 'Filled from autocomplete.',
      'text': 'Manually typed.'}

  for entry in document.iterfind('typed_history_item'):
    event_data = OperaTypedHistoryEventData()
    event_data.entry_type = entry.get('type', None)
    event_data.url = entry.get('content', None)

    selection = selection_per_type.get(event_data.entry_type)
    if selection is not None:
      event_data.entry_selection = selection

    timestamp = entry.get('last_typed', None)
    if timestamp is None:
      parser_mediator.ProduceExtractionWarning('missing last typed time.')
      continue

    date_time = dfdatetime_time_elements.TimeElements()
    try:
      date_time.CopyFromStringISO8601(timestamp)
    except ValueError as exception:
      parser_mediator.ProduceExtractionWarning(
          'unsupported last typed time: {0:s} with error: {1!s}.'.format(
              timestamp, exception))
      continue

    event = time_events.DateTimeValuesEvent(
        date_time, definitions.TIME_DESCRIPTION_LAST_VISITED)
    parser_mediator.ProduceEventWithEventData(event, event_data)
def insert(self, index, key, value):
"""Inserts the key, value pair before the item with the given index."""
if key in self.keyOrder:
n = self.keyOrder.index(key)
del self.keyOrder[n]
if n < index:
index -= 1
self.keyOrder.insert(index, key)
super(SortedDict, self).__setitem__(key, value) | Inserts the key, value pair before the item with the given index. | Below is the instruction that describes the task:
### Input:
Inserts the key, value pair before the item with the given index.
### Response:
def insert(self, index, key, value):
    """Inserts the key, value pair before the item with the given index."""
    # If the key is already present, remove its old position first (EAFP).
    try:
        position = self.keyOrder.index(key)
    except ValueError:
        pass
    else:
        del self.keyOrder[position]
        # Removing an earlier entry shifts the target slot left by one.
        if position < index:
            index -= 1
    self.keyOrder.insert(index, key)
    super(SortedDict, self).__setitem__(key, value)
async def playback(dev: Device, cmd, target, value):
"""Get and set playback settings, e.g. repeat and shuffle.."""
if target and value:
dev.set_playback_settings(target, value)
if cmd == "support":
click.echo("Supported playback functions:")
supported = await dev.get_supported_playback_functions("storage:usb1")
for i in supported:
print(i)
elif cmd == "settings":
print_settings(await dev.get_playback_settings())
# click.echo("Playback functions:")
# funcs = await dev.get_available_playback_functions()
# print(funcs)
else:
click.echo("Currently playing: %s" % await dev.get_play_info()) | Get and set playback settings, e.g. repeat and shuffle.. | Below is the the instruction that describes the task:
### Input:
Get and set playback settings, e.g. repeat and shuffle..
### Response:
async def playback(dev: Device, cmd, target, value):
    """Get and set playback settings, e.g. repeat and shuffle.."""
    # Apply a setting change first when both a target and a value are given.
    if target and value:
        dev.set_playback_settings(target, value)

    if cmd == "support":
        click.echo("Supported playback functions:")
        for function in await dev.get_supported_playback_functions("storage:usb1"):
            print(function)
    elif cmd == "settings":
        print_settings(await dev.get_playback_settings())
    else:
        click.echo("Currently playing: %s" % await dev.get_play_info())
def fn_std(self, a, axis=None):
"""
Compute the standard deviation of an array, ignoring NaNs.
:param a: The array.
:return: The standard deviation of the array.
"""
return numpy.nanstd(self._to_ndarray(a), axis=axis) | Compute the standard deviation of an array, ignoring NaNs.
:param a: The array.
:return: The standard deviation of the array. | Below is the instruction that describes the task:
### Input:
Compute the standard deviation of an array, ignoring NaNs.
:param a: The array.
:return: The standard deviation of the array.
### Response:
def fn_std(self, a, axis=None):
    """
    Compute the standard deviation of an array, ignoring NaNs.
    :param a: The array.
    :param axis: Axis along which to compute; ``None`` uses the whole array.
    :return: The standard deviation of the array.
    """
    values = self._to_ndarray(a)
    return numpy.nanstd(values, axis=axis)
def fieldname_to_dtype(fieldname):
"""Converts a column header from the MPT file into a tuple of
canonical name and appropriate numpy dtype"""
if fieldname == 'mode':
return ('mode', np.uint8)
elif fieldname in ("ox/red", "error", "control changes", "Ns changes",
"counter inc."):
return (fieldname, np.bool_)
elif fieldname in ("time/s", "P/W", "(Q-Qo)/mA.h", "x", "control/V",
"control/V/mA", "(Q-Qo)/C", "dQ/C", "freq/Hz",
"|Ewe|/V", "|I|/A", "Phase(Z)/deg", "|Z|/Ohm",
"Re(Z)/Ohm", "-Im(Z)/Ohm"):
return (fieldname, np.float_)
elif fieldname in ("cycle number", "I Range", "Ns", "half cycle"):
return (fieldname, np.int_)
elif fieldname in ("dq/mA.h", "dQ/mA.h"):
return ("dQ/mA.h", np.float_)
elif fieldname in ("I/mA", "<I>/mA"):
return ("I/mA", np.float_)
elif fieldname in ("Ewe/V", "<Ewe>/V"):
return ("Ewe/V", np.float_)
else:
raise ValueError("Invalid column header: %s" % fieldname) | Converts a column header from the MPT file into a tuple of
canonical name and appropriate numpy dtype | Below is the instruction that describes the task:
### Input:
Converts a column header from the MPT file into a tuple of
canonical name and appropriate numpy dtype
### Response:
def fieldname_to_dtype(fieldname):
    """Converts a column header from the MPT file into a tuple of
    canonical name and appropriate numpy dtype"""
    # Field groups that keep their own name and map to a fixed dtype.
    bool_fields = ("ox/red", "error", "control changes", "Ns changes",
                   "counter inc.")
    float_fields = ("time/s", "P/W", "(Q-Qo)/mA.h", "x", "control/V",
                    "control/V/mA", "(Q-Qo)/C", "dQ/C", "freq/Hz",
                    "|Ewe|/V", "|I|/A", "Phase(Z)/deg", "|Z|/Ohm",
                    "Re(Z)/Ohm", "-Im(Z)/Ohm")
    int_fields = ("cycle number", "I Range", "Ns", "half cycle")

    if fieldname == 'mode':
        return ('mode', np.uint8)
    if fieldname in bool_fields:
        return (fieldname, np.bool_)
    if fieldname in float_fields:
        return (fieldname, np.float_)
    if fieldname in int_fields:
        return (fieldname, np.int_)
    # Variant spellings are normalized to one canonical column name.
    if fieldname in ("dq/mA.h", "dQ/mA.h"):
        return ("dQ/mA.h", np.float_)
    if fieldname in ("I/mA", "<I>/mA"):
        return ("I/mA", np.float_)
    if fieldname in ("Ewe/V", "<Ewe>/V"):
        return ("Ewe/V", np.float_)
    raise ValueError("Invalid column header: %s" % fieldname)
def multibox_layer(from_layers, num_classes, sizes=[.2, .95],
ratios=[1], normalization=-1, num_channels=[],
clip=False, interm_layer=0, steps=[]):
"""
the basic aggregation module for SSD detection. Takes in multiple layers,
generate multiple object detection targets by customized layers
Parameters:
----------
from_layers : list of mx.symbol
generate multibox detection from layers
num_classes : int
number of classes excluding background, will automatically handle
background in this function
sizes : list or list of list
[min_size, max_size] for all layers or [[], [], []...] for specific layers
ratios : list or list of list
[ratio1, ratio2...] for all layers or [[], [], ...] for specific layers
normalizations : int or list of int
use normalizations value for all layers or [...] for specific layers,
-1 indicate no normalizations and scales
num_channels : list of int
number of input layer channels, used when normalization is enabled, the
length of list should equals to number of normalization layers
clip : bool
whether to clip out-of-image boxes
interm_layer : int
if > 0, will add a intermediate Convolution layer
steps : list
specify steps for each MultiBoxPrior layer, leave empty, it will calculate
according to layer dimensions
Returns:
----------
list of outputs, as [loc_preds, cls_preds, anchor_boxes]
loc_preds : localization regression prediction
cls_preds : classification prediction
anchor_boxes : generated anchor boxes
"""
assert len(from_layers) > 0, "from_layers must not be empty list"
assert num_classes > 0, \
"num_classes {} must be larger than 0".format(num_classes)
assert len(ratios) > 0, "aspect ratios must not be empty list"
if not isinstance(ratios[0], list):
# provided only one ratio list, broadcast to all from_layers
ratios = [ratios] * len(from_layers)
assert len(ratios) == len(from_layers), \
"ratios and from_layers must have same length"
assert len(sizes) > 0, "sizes must not be empty list"
if len(sizes) == 2 and not isinstance(sizes[0], list):
# provided size range, we need to compute the sizes for each layer
assert sizes[0] > 0 and sizes[0] < 1
assert sizes[1] > 0 and sizes[1] < 1 and sizes[1] > sizes[0]
tmp = np.linspace(sizes[0], sizes[1], num=(len(from_layers)-1))
# Ref for start_offset value:
# https://arxiv.org/abs/1512.02325
start_offset = 0.1
min_sizes = [start_offset] + tmp.tolist()
max_sizes = tmp.tolist() + [tmp[-1]+start_offset]
sizes = zip(min_sizes, max_sizes)
assert len(sizes) == len(from_layers), \
"sizes and from_layers must have same length"
if not isinstance(normalization, list):
normalization = [normalization] * len(from_layers)
assert len(normalization) == len(from_layers)
assert sum(x > 0 for x in normalization) <= len(num_channels), \
"must provide number of channels for each normalized layer"
if steps:
assert len(steps) == len(from_layers), "provide steps for all layers or leave empty"
loc_pred_layers = []
cls_pred_layers = []
anchor_layers = []
num_classes += 1 # always use background as label 0
for k, from_layer in enumerate(from_layers):
from_name = from_layer.name
# normalize
if normalization[k] > 0:
from_layer = mx.symbol.L2Normalization(data=from_layer, \
mode="channel", name="{}_norm".format(from_name))
scale = mx.symbol.Variable(name="{}_scale".format(from_name),
shape=(1, num_channels.pop(0), 1, 1),
init=mx.init.Constant(normalization[k]),
attr={'__wd_mult__': '0.1'})
from_layer = mx.symbol.broadcast_mul(lhs=scale, rhs=from_layer)
if interm_layer > 0:
from_layer = mx.symbol.Convolution(data=from_layer, kernel=(3,3), \
stride=(1,1), pad=(1,1), num_filter=interm_layer, \
name="{}_inter_conv".format(from_name))
from_layer = mx.symbol.Activation(data=from_layer, act_type="relu", \
name="{}_inter_relu".format(from_name))
# estimate number of anchors per location
# here I follow the original version in caffe
# TODO: better way to shape the anchors??
size = sizes[k]
assert len(size) > 0, "must provide at least one size"
size_str = "(" + ",".join([str(x) for x in size]) + ")"
ratio = ratios[k]
assert len(ratio) > 0, "must provide at least one ratio"
ratio_str = "(" + ",".join([str(x) for x in ratio]) + ")"
num_anchors = len(size) -1 + len(ratio)
# create location prediction layer
num_loc_pred = num_anchors * 4
bias = mx.symbol.Variable(name="{}_loc_pred_conv_bias".format(from_name),
init=mx.init.Constant(0.0), attr={'__lr_mult__': '2.0'})
loc_pred = mx.symbol.Convolution(data=from_layer, bias=bias, kernel=(3,3), \
stride=(1,1), pad=(1,1), num_filter=num_loc_pred, \
name="{}_loc_pred_conv".format(from_name))
loc_pred = mx.symbol.transpose(loc_pred, axes=(0,2,3,1))
loc_pred = mx.symbol.Flatten(data=loc_pred)
loc_pred_layers.append(loc_pred)
# create class prediction layer
num_cls_pred = num_anchors * num_classes
bias = mx.symbol.Variable(name="{}_cls_pred_conv_bias".format(from_name),
init=mx.init.Constant(0.0), attr={'__lr_mult__': '2.0'})
cls_pred = mx.symbol.Convolution(data=from_layer, bias=bias, kernel=(3,3), \
stride=(1,1), pad=(1,1), num_filter=num_cls_pred, \
name="{}_cls_pred_conv".format(from_name))
cls_pred = mx.symbol.transpose(cls_pred, axes=(0,2,3,1))
cls_pred = mx.symbol.Flatten(data=cls_pred)
cls_pred_layers.append(cls_pred)
# create anchor generation layer
if steps:
step = (steps[k], steps[k])
else:
step = '(-1.0, -1.0)'
anchors = mx.symbol.contrib.MultiBoxPrior(from_layer, sizes=size_str, ratios=ratio_str,
clip=clip, name="{}_anchors".format(from_name),
steps=step)
anchors = mx.symbol.Flatten(data=anchors)
anchor_layers.append(anchors)
loc_preds = mx.symbol.Concat(*loc_pred_layers, num_args=len(loc_pred_layers), \
dim=1, name="multibox_loc_pred")
cls_preds = mx.symbol.Concat(*cls_pred_layers, num_args=len(cls_pred_layers), \
dim=1)
cls_preds = mx.symbol.Reshape(data=cls_preds, shape=(0, -1, num_classes))
cls_preds = mx.symbol.transpose(cls_preds, axes=(0, 2, 1), name="multibox_cls_pred")
anchor_boxes = mx.symbol.Concat(*anchor_layers, \
num_args=len(anchor_layers), dim=1)
anchor_boxes = mx.symbol.Reshape(data=anchor_boxes, shape=(0, -1, 4), name="multibox_anchors")
return [loc_preds, cls_preds, anchor_boxes] | the basic aggregation module for SSD detection. Takes in multiple layers,
generate multiple object detection targets by customized layers
Parameters:
----------
from_layers : list of mx.symbol
generate multibox detection from layers
num_classes : int
number of classes excluding background, will automatically handle
background in this function
sizes : list or list of list
[min_size, max_size] for all layers or [[], [], []...] for specific layers
ratios : list or list of list
[ratio1, ratio2...] for all layers or [[], [], ...] for specific layers
normalizations : int or list of int
use normalizations value for all layers or [...] for specific layers,
-1 indicate no normalizations and scales
num_channels : list of int
number of input layer channels, used when normalization is enabled, the
length of list should equals to number of normalization layers
clip : bool
whether to clip out-of-image boxes
interm_layer : int
if > 0, will add a intermediate Convolution layer
steps : list
specify steps for each MultiBoxPrior layer, leave empty, it will calculate
according to layer dimensions
Returns:
----------
list of outputs, as [loc_preds, cls_preds, anchor_boxes]
loc_preds : localization regression prediction
cls_preds : classification prediction
anchor_boxes : generated anchor boxes | Below is the instruction that describes the task:
### Input:
the basic aggregation module for SSD detection. Takes in multiple layers,
generate multiple object detection targets by customized layers
Parameters:
----------
from_layers : list of mx.symbol
generate multibox detection from layers
num_classes : int
number of classes excluding background, will automatically handle
background in this function
sizes : list or list of list
[min_size, max_size] for all layers or [[], [], []...] for specific layers
ratios : list or list of list
[ratio1, ratio2...] for all layers or [[], [], ...] for specific layers
normalizations : int or list of int
use normalizations value for all layers or [...] for specific layers,
-1 indicate no normalizations and scales
num_channels : list of int
number of input layer channels, used when normalization is enabled, the
length of list should equals to number of normalization layers
clip : bool
whether to clip out-of-image boxes
interm_layer : int
if > 0, will add a intermediate Convolution layer
steps : list
specify steps for each MultiBoxPrior layer, leave empty, it will calculate
according to layer dimensions
Returns:
----------
list of outputs, as [loc_preds, cls_preds, anchor_boxes]
loc_preds : localization regression prediction
cls_preds : classification prediction
anchor_boxes : generated anchor boxes
### Response:
def multibox_layer(from_layers, num_classes, sizes=[.2, .95],
                   ratios=[1], normalization=-1, num_channels=[],
                   clip=False, interm_layer=0, steps=[]):
    """
    the basic aggregation module for SSD detection. Takes in multiple layers,
    generate multiple object detection targets by customized layers
    Parameters:
    ----------
    from_layers : list of mx.symbol
        generate multibox detection from layers
    num_classes : int
        number of classes excluding background, will automatically handle
        background in this function
    sizes : list or list of list
        [min_size, max_size] for all layers or [[], [], []...] for specific layers
    ratios : list or list of list
        [ratio1, ratio2...] for all layers or [[], [], ...] for specific layers
    normalizations : int or list of int
        use normalizations value for all layers or [...] for specific layers,
        -1 indicate no normalizations and scales
    num_channels : list of int
        number of input layer channels, used when normalization is enabled, the
        length of list should equals to number of normalization layers
    clip : bool
        whether to clip out-of-image boxes
    interm_layer : int
        if > 0, will add a intermediate Convolution layer
    steps : list
        specify steps for each MultiBoxPrior layer, leave empty, it will calculate
        according to layer dimensions
    Returns:
    ----------
    list of outputs, as [loc_preds, cls_preds, anchor_boxes]
    loc_preds : localization regression prediction
    cls_preds : classification prediction
    anchor_boxes : generated anchor boxes
    """
    assert len(from_layers) > 0, "from_layers must not be empty list"
    assert num_classes > 0, \
        "num_classes {} must be larger than 0".format(num_classes)

    assert len(ratios) > 0, "aspect ratios must not be empty list"
    if not isinstance(ratios[0], list):
        # provided only one ratio list, broadcast to all from_layers
        ratios = [ratios] * len(from_layers)
    assert len(ratios) == len(from_layers), \
        "ratios and from_layers must have same length"

    assert len(sizes) > 0, "sizes must not be empty list"
    if len(sizes) == 2 and not isinstance(sizes[0], list):
        # provided size range, we need to compute the sizes for each layer
        assert sizes[0] > 0 and sizes[0] < 1
        assert sizes[1] > 0 and sizes[1] < 1 and sizes[1] > sizes[0]
        tmp = np.linspace(sizes[0], sizes[1], num=(len(from_layers)-1))
        # Ref for start_offset value:
        # https://arxiv.org/abs/1512.02325
        start_offset = 0.1
        min_sizes = [start_offset] + tmp.tolist()
        max_sizes = tmp.tolist() + [tmp[-1]+start_offset]
        # Bug fix: on Python 3 zip() returns a lazy iterator with no len()
        # and no indexing; materialize it so the length check below and the
        # sizes[k] lookups inside the loop work.
        sizes = list(zip(min_sizes, max_sizes))
    assert len(sizes) == len(from_layers), \
        "sizes and from_layers must have same length"

    if not isinstance(normalization, list):
        normalization = [normalization] * len(from_layers)
    assert len(normalization) == len(from_layers)

    assert sum(x > 0 for x in normalization) <= len(num_channels), \
        "must provide number of channels for each normalized layer"
    # Bug fix: num_channels is consumed with pop(0) below; work on a copy so
    # neither the caller's list nor the shared mutable default argument is
    # mutated across calls.
    num_channels = list(num_channels)

    if steps:
        assert len(steps) == len(from_layers), "provide steps for all layers or leave empty"

    loc_pred_layers = []
    cls_pred_layers = []
    anchor_layers = []
    num_classes += 1 # always use background as label 0

    for k, from_layer in enumerate(from_layers):
        from_name = from_layer.name
        # normalize
        if normalization[k] > 0:
            from_layer = mx.symbol.L2Normalization(data=from_layer, \
                mode="channel", name="{}_norm".format(from_name))
            scale = mx.symbol.Variable(name="{}_scale".format(from_name),
                shape=(1, num_channels.pop(0), 1, 1),
                init=mx.init.Constant(normalization[k]),
                attr={'__wd_mult__': '0.1'})
            from_layer = mx.symbol.broadcast_mul(lhs=scale, rhs=from_layer)
        if interm_layer > 0:
            from_layer = mx.symbol.Convolution(data=from_layer, kernel=(3,3), \
                stride=(1,1), pad=(1,1), num_filter=interm_layer, \
                name="{}_inter_conv".format(from_name))
            from_layer = mx.symbol.Activation(data=from_layer, act_type="relu", \
                name="{}_inter_relu".format(from_name))

        # estimate number of anchors per location
        # here I follow the original version in caffe
        # TODO: better way to shape the anchors??
        size = sizes[k]
        assert len(size) > 0, "must provide at least one size"
        size_str = "(" + ",".join([str(x) for x in size]) + ")"
        ratio = ratios[k]
        assert len(ratio) > 0, "must provide at least one ratio"
        ratio_str = "(" + ",".join([str(x) for x in ratio]) + ")"
        num_anchors = len(size) -1 + len(ratio)

        # create location prediction layer
        num_loc_pred = num_anchors * 4
        bias = mx.symbol.Variable(name="{}_loc_pred_conv_bias".format(from_name),
            init=mx.init.Constant(0.0), attr={'__lr_mult__': '2.0'})
        loc_pred = mx.symbol.Convolution(data=from_layer, bias=bias, kernel=(3,3), \
            stride=(1,1), pad=(1,1), num_filter=num_loc_pred, \
            name="{}_loc_pred_conv".format(from_name))
        loc_pred = mx.symbol.transpose(loc_pred, axes=(0,2,3,1))
        loc_pred = mx.symbol.Flatten(data=loc_pred)
        loc_pred_layers.append(loc_pred)

        # create class prediction layer
        num_cls_pred = num_anchors * num_classes
        bias = mx.symbol.Variable(name="{}_cls_pred_conv_bias".format(from_name),
            init=mx.init.Constant(0.0), attr={'__lr_mult__': '2.0'})
        cls_pred = mx.symbol.Convolution(data=from_layer, bias=bias, kernel=(3,3), \
            stride=(1,1), pad=(1,1), num_filter=num_cls_pred, \
            name="{}_cls_pred_conv".format(from_name))
        cls_pred = mx.symbol.transpose(cls_pred, axes=(0,2,3,1))
        cls_pred = mx.symbol.Flatten(data=cls_pred)
        cls_pred_layers.append(cls_pred)

        # create anchor generation layer
        if steps:
            step = (steps[k], steps[k])
        else:
            step = '(-1.0, -1.0)'
        anchors = mx.symbol.contrib.MultiBoxPrior(from_layer, sizes=size_str, ratios=ratio_str,
                                                  clip=clip, name="{}_anchors".format(from_name),
                                                  steps=step)
        anchors = mx.symbol.Flatten(data=anchors)
        anchor_layers.append(anchors)

    loc_preds = mx.symbol.Concat(*loc_pred_layers, num_args=len(loc_pred_layers), \
        dim=1, name="multibox_loc_pred")
    cls_preds = mx.symbol.Concat(*cls_pred_layers, num_args=len(cls_pred_layers), \
        dim=1)
    cls_preds = mx.symbol.Reshape(data=cls_preds, shape=(0, -1, num_classes))
    cls_preds = mx.symbol.transpose(cls_preds, axes=(0, 2, 1), name="multibox_cls_pred")
    anchor_boxes = mx.symbol.Concat(*anchor_layers, \
        num_args=len(anchor_layers), dim=1)
    anchor_boxes = mx.symbol.Reshape(data=anchor_boxes, shape=(0, -1, 4), name="multibox_anchors")
    return [loc_preds, cls_preds, anchor_boxes]
def rem2ics():
"""Command line tool to convert from Remind to iCalendar"""
# pylint: disable=maybe-no-member
from argparse import ArgumentParser, FileType
from dateutil.parser import parse
from sys import stdin, stdout
parser = ArgumentParser(description='Converter from Remind to iCalendar syntax.')
parser.add_argument('-s', '--startdate', type=lambda s: parse(s).date(),
default=date.today() - timedelta(weeks=12),
help='Start offset for remind call (default: -12 weeks)')
parser.add_argument('-m', '--month', type=int, default=15,
help='Number of month to generate calendar beginning wit startdate (default: 15)')
parser.add_argument('-a', '--alarm', type=int, default=-10,
help='Trigger time for the alarm before the event in minutes (default: -10)')
parser.add_argument('-z', '--zone',
help='Timezone of Remind file (default: local timezone)')
parser.add_argument('infile', nargs='?', default=expanduser('~/.reminders'),
help='The Remind file to process (default: ~/.reminders)')
parser.add_argument('outfile', nargs='?', type=FileType('w'), default=stdout,
help='Output iCalendar file (default: stdout)')
args = parser.parse_args()
zone = timezone(args.zone) if args.zone else None
if args.infile == '-':
remind = Remind(args.infile, zone, args.startdate, args.month, timedelta(minutes=args.alarm))
vobject = remind.stdin_to_vobject(stdin.read())
if vobject:
args.outfile.write(vobject.serialize())
else:
remind = Remind(args.infile, zone, args.startdate, args.month, timedelta(minutes=args.alarm))
args.outfile.write(remind.to_vobject().serialize()) | Command line tool to convert from Remind to iCalendar | Below is the instruction that describes the task:
### Input:
Command line tool to convert from Remind to iCalendar
### Response:
def rem2ics():
    """Command line tool to convert from Remind to iCalendar.

    Parses command line options, reads the Remind input (a file or stdin)
    and writes the serialized iCalendar output to the chosen destination.
    """
    # pylint: disable=maybe-no-member
    from argparse import ArgumentParser, FileType
    from dateutil.parser import parse
    from sys import stdin, stdout
    parser = ArgumentParser(description='Converter from Remind to iCalendar syntax.')
    parser.add_argument('-s', '--startdate', type=lambda s: parse(s).date(),
                        default=date.today() - timedelta(weeks=12),
                        help='Start offset for remind call (default: -12 weeks)')
    # Fix: help text said "beginning wit startdate".
    parser.add_argument('-m', '--month', type=int, default=15,
                        help='Number of month to generate calendar beginning with startdate (default: 15)')
    parser.add_argument('-a', '--alarm', type=int, default=-10,
                        help='Trigger time for the alarm before the event in minutes (default: -10)')
    parser.add_argument('-z', '--zone',
                        help='Timezone of Remind file (default: local timezone)')
    parser.add_argument('infile', nargs='?', default=expanduser('~/.reminders'),
                        help='The Remind file to process (default: ~/.reminders)')
    parser.add_argument('outfile', nargs='?', type=FileType('w'), default=stdout,
                        help='Output iCalendar file (default: stdout)')
    args = parser.parse_args()

    zone = timezone(args.zone) if args.zone else None
    # Both branches need the same Remind instance; build it once.
    remind = Remind(args.infile, zone, args.startdate, args.month,
                    timedelta(minutes=args.alarm))

    if args.infile == '-':
        vobject = remind.stdin_to_vobject(stdin.read())
        if vobject:
            args.outfile.write(vobject.serialize())
    else:
        args.outfile.write(remind.to_vobject().serialize())
def _form_datetimes(days, msecs):
"""Calculate seconds since EPOCH from days and milliseconds for each of IASI scan."""
all_datetimes = []
for i in range(days.size):
day = int(days[i])
msec = msecs[i]
scanline_datetimes = []
for j in range(int(VALUES_PER_SCAN_LINE / 4)):
usec = 1000 * (j * VIEW_TIME_ADJUSTMENT + msec)
delta = (dt.timedelta(days=day, microseconds=usec))
for k in range(4):
scanline_datetimes.append(delta.total_seconds())
all_datetimes.append(scanline_datetimes)
return np.array(all_datetimes, dtype=np.float64) | Calculate seconds since EPOCH from days and milliseconds for each of IASI scan. | Below is the the instruction that describes the task:
### Input:
Calculate seconds since EPOCH from days and milliseconds for each of IASI scan.
### Response:
def _form_datetimes(days, msecs):
    """Calculate seconds since EPOCH from days and milliseconds for each of IASI scan."""
    rows = []
    for idx in range(days.size):
        whole_days = int(days[idx])
        msec = msecs[idx]
        row = []
        # Each group of four values along a scan line shares one time,
        # offset by VIEW_TIME_ADJUSTMENT per group.
        for group in range(int(VALUES_PER_SCAN_LINE / 4)):
            offset_usec = 1000 * (group * VIEW_TIME_ADJUSTMENT + msec)
            delta = dt.timedelta(days=whole_days, microseconds=offset_usec)
            row.extend([delta.total_seconds()] * 4)
        rows.append(row)
    return np.array(rows, dtype=np.float64)
def getpcmd(pid):
"""
Returns command of process.
:param pid:
"""
if os.name == "nt":
# Use wmic command instead of ps on Windows.
cmd = 'wmic path win32_process where ProcessID=%s get Commandline 2> nul' % (pid, )
with os.popen(cmd, 'r') as p:
lines = [line for line in p.readlines() if line.strip("\r\n ") != ""]
if lines:
_, val = lines
return val
elif sys.platform == "darwin":
# Use pgrep instead of /proc on macOS.
pidfile = ".%d.pid" % (pid, )
with open(pidfile, 'w') as f:
f.write(str(pid))
try:
p = Popen(['pgrep', '-lf', '-F', pidfile], stdout=PIPE)
stdout, _ = p.communicate()
line = stdout.decode('utf8').strip()
if line:
_, scmd = line.split(' ', 1)
return scmd
finally:
os.unlink(pidfile)
else:
# Use the /proc filesystem
# At least on android there have been some issues with not all
# process infos being readable. In these cases using the `ps` command
# worked. See the pull request at
# https://github.com/spotify/luigi/pull/1876
try:
with open('/proc/{0}/cmdline'.format(pid), 'r') as fh:
if six.PY3:
return fh.read().replace('\0', ' ').rstrip()
else:
return fh.read().replace('\0', ' ').decode('utf8').rstrip()
except IOError:
# the system may not allow reading the command line
# of a process owned by another user
pass
# Fallback instead of None, for e.g. Cygwin where -o is an "unknown option" for the ps command:
return '[PROCESS_WITH_PID={}]'.format(pid) | Returns command of process.
:param pid: | Below is the instruction that describes the task:
### Input:
Returns command of process.
:param pid:
### Response:
def getpcmd(pid):
"""
Returns command of process.
:param pid:
"""
if os.name == "nt":
# Use wmic command instead of ps on Windows.
cmd = 'wmic path win32_process where ProcessID=%s get Commandline 2> nul' % (pid, )
with os.popen(cmd, 'r') as p:
lines = [line for line in p.readlines() if line.strip("\r\n ") != ""]
if lines:
_, val = lines
return val
elif sys.platform == "darwin":
# Use pgrep instead of /proc on macOS.
pidfile = ".%d.pid" % (pid, )
with open(pidfile, 'w') as f:
f.write(str(pid))
try:
p = Popen(['pgrep', '-lf', '-F', pidfile], stdout=PIPE)
stdout, _ = p.communicate()
line = stdout.decode('utf8').strip()
if line:
_, scmd = line.split(' ', 1)
return scmd
finally:
os.unlink(pidfile)
else:
# Use the /proc filesystem
# At least on android there have been some issues with not all
# process infos being readable. In these cases using the `ps` command
# worked. See the pull request at
# https://github.com/spotify/luigi/pull/1876
try:
with open('/proc/{0}/cmdline'.format(pid), 'r') as fh:
if six.PY3:
return fh.read().replace('\0', ' ').rstrip()
else:
return fh.read().replace('\0', ' ').decode('utf8').rstrip()
except IOError:
# the system may not allow reading the command line
# of a process owned by another user
pass
# Fallback instead of None, for e.g. Cygwin where -o is an "unknown option" for the ps command:
return '[PROCESS_WITH_PID={}]'.format(pid) |
def get_addon_module_name(addonxml_filename):
'''Attempts to extract a module name for the given addon's addon.xml file.
Looks for the 'xbmc.python.pluginsource' extension node and returns the
addon's filename without the .py suffix.
'''
try:
xml = ET.parse(addonxml_filename).getroot()
except IOError:
sys.exit('Cannot find an addon.xml file in the current working '
'directory. Please run this command from the root directory '
'of an addon.')
try:
plugin_source = (ext for ext in xml.findall('extension') if
ext.get('point') == 'xbmc.python.pluginsource').next()
except StopIteration:
sys.exit('ERROR, no pluginsource in addonxml')
return plugin_source.get('library').split('.')[0] | Attempts to extract a module name for the given addon's addon.xml file.
Looks for the 'xbmc.python.pluginsource' extension node and returns the
addon's filename without the .py suffix. | Below is the the instruction that describes the task:
### Input:
Attempts to extract a module name for the given addon's addon.xml file.
Looks for the 'xbmc.python.pluginsource' extension node and returns the
addon's filename without the .py suffix.
### Response:
def get_addon_module_name(addonxml_filename):
'''Attempts to extract a module name for the given addon's addon.xml file.
Looks for the 'xbmc.python.pluginsource' extension node and returns the
addon's filename without the .py suffix.
'''
try:
xml = ET.parse(addonxml_filename).getroot()
except IOError:
sys.exit('Cannot find an addon.xml file in the current working '
'directory. Please run this command from the root directory '
'of an addon.')
try:
plugin_source = (ext for ext in xml.findall('extension') if
ext.get('point') == 'xbmc.python.pluginsource').next()
except StopIteration:
sys.exit('ERROR, no pluginsource in addonxml')
return plugin_source.get('library').split('.')[0] |
def extract_native_client_tarball(dir):
r'''
Download a native_client.tar.xz file from TaskCluster and extract it to dir.
'''
assert_valid_dir(dir)
target_tarball = os.path.join(dir, 'native_client.tar.xz')
if os.path.isfile(target_tarball) and os.stat(target_tarball).st_size == 0:
return
subprocess.check_call(['pixz', '-d', 'native_client.tar.xz'], cwd=dir)
subprocess.check_call(['tar', 'xf', 'native_client.tar'], cwd=dir)
os.unlink(os.path.join(dir, 'native_client.tar'))
open(target_tarball, 'w').close() | r'''
Download a native_client.tar.xz file from TaskCluster and extract it to dir. | Below is the the instruction that describes the task:
### Input:
r'''
Download a native_client.tar.xz file from TaskCluster and extract it to dir.
### Response:
def extract_native_client_tarball(dir):
r'''
Download a native_client.tar.xz file from TaskCluster and extract it to dir.
'''
assert_valid_dir(dir)
target_tarball = os.path.join(dir, 'native_client.tar.xz')
if os.path.isfile(target_tarball) and os.stat(target_tarball).st_size == 0:
return
subprocess.check_call(['pixz', '-d', 'native_client.tar.xz'], cwd=dir)
subprocess.check_call(['tar', 'xf', 'native_client.tar'], cwd=dir)
os.unlink(os.path.join(dir, 'native_client.tar'))
open(target_tarball, 'w').close() |
def convert_elementwise_add(
params, w_name, scope_name, inputs, layers, weights, names
):
"""
Convert elementwise addition.
Args:
params: dictionary with layer parameters
w_name: name prefix in state_dict
scope_name: pytorch scope name
inputs: pytorch node inputs
layers: dictionary with keras tensors
weights: pytorch state_dict
names: use short names for keras layers
"""
print('Converting elementwise_add ...')
if 'broadcast' in params:
model0 = layers[inputs[0]]
model1 = layers[inputs[1]]
if names == 'short':
tf_name = 'A' + random_string(7)
elif names == 'keep':
tf_name = w_name
else:
tf_name = w_name + str(random.random())
def target_layer(x):
layer = tf.add(x[0], x[1])
return layer
lambda_layer = keras.layers.Lambda(target_layer, name=tf_name)
layers[scope_name] = lambda_layer([layers[inputs[0]], layers[inputs[1]]])
else:
model0 = layers[inputs[0]]
model1 = layers[inputs[1]]
if names == 'short':
tf_name = 'A' + random_string(7)
elif names == 'keep':
tf_name = w_name
else:
tf_name = w_name + str(random.random())
add = keras.layers.Add(name=tf_name)
layers[scope_name] = add([model0, model1]) | Convert elementwise addition.
Args:
params: dictionary with layer parameters
w_name: name prefix in state_dict
scope_name: pytorch scope name
inputs: pytorch node inputs
layers: dictionary with keras tensors
weights: pytorch state_dict
names: use short names for keras layers | Below is the the instruction that describes the task:
### Input:
Convert elementwise addition.
Args:
params: dictionary with layer parameters
w_name: name prefix in state_dict
scope_name: pytorch scope name
inputs: pytorch node inputs
layers: dictionary with keras tensors
weights: pytorch state_dict
names: use short names for keras layers
### Response:
def convert_elementwise_add(
params, w_name, scope_name, inputs, layers, weights, names
):
"""
Convert elementwise addition.
Args:
params: dictionary with layer parameters
w_name: name prefix in state_dict
scope_name: pytorch scope name
inputs: pytorch node inputs
layers: dictionary with keras tensors
weights: pytorch state_dict
names: use short names for keras layers
"""
print('Converting elementwise_add ...')
if 'broadcast' in params:
model0 = layers[inputs[0]]
model1 = layers[inputs[1]]
if names == 'short':
tf_name = 'A' + random_string(7)
elif names == 'keep':
tf_name = w_name
else:
tf_name = w_name + str(random.random())
def target_layer(x):
layer = tf.add(x[0], x[1])
return layer
lambda_layer = keras.layers.Lambda(target_layer, name=tf_name)
layers[scope_name] = lambda_layer([layers[inputs[0]], layers[inputs[1]]])
else:
model0 = layers[inputs[0]]
model1 = layers[inputs[1]]
if names == 'short':
tf_name = 'A' + random_string(7)
elif names == 'keep':
tf_name = w_name
else:
tf_name = w_name + str(random.random())
add = keras.layers.Add(name=tf_name)
layers[scope_name] = add([model0, model1]) |
def _webdav_move_copy(self, remote_path_source, remote_path_target,
operation):
"""Copies or moves a remote file or directory
:param remote_path_source: source file or folder to copy / move
:param remote_path_target: target file to which to copy / move
:param operation: MOVE or COPY
:returns: True if the operation succeeded, False otherwise
:raises: HTTPResponseError in case an HTTP error status was returned
"""
if operation != "MOVE" and operation != "COPY":
return False
if remote_path_target[-1] == '/':
remote_path_target += os.path.basename(remote_path_source)
if not (remote_path_target[0] == '/'):
remote_path_target = '/' + remote_path_target
remote_path_source = self._normalize_path(remote_path_source)
headers = {
'Destination': self._webdav_url + parse.quote(
self._encode_string(remote_path_target))
}
return self._make_dav_request(
operation,
remote_path_source,
headers=headers
) | Copies or moves a remote file or directory
:param remote_path_source: source file or folder to copy / move
:param remote_path_target: target file to which to copy / move
:param operation: MOVE or COPY
:returns: True if the operation succeeded, False otherwise
:raises: HTTPResponseError in case an HTTP error status was returned | Below is the the instruction that describes the task:
### Input:
Copies or moves a remote file or directory
:param remote_path_source: source file or folder to copy / move
:param remote_path_target: target file to which to copy / move
:param operation: MOVE or COPY
:returns: True if the operation succeeded, False otherwise
:raises: HTTPResponseError in case an HTTP error status was returned
### Response:
def _webdav_move_copy(self, remote_path_source, remote_path_target,
operation):
"""Copies or moves a remote file or directory
:param remote_path_source: source file or folder to copy / move
:param remote_path_target: target file to which to copy / move
:param operation: MOVE or COPY
:returns: True if the operation succeeded, False otherwise
:raises: HTTPResponseError in case an HTTP error status was returned
"""
if operation != "MOVE" and operation != "COPY":
return False
if remote_path_target[-1] == '/':
remote_path_target += os.path.basename(remote_path_source)
if not (remote_path_target[0] == '/'):
remote_path_target = '/' + remote_path_target
remote_path_source = self._normalize_path(remote_path_source)
headers = {
'Destination': self._webdav_url + parse.quote(
self._encode_string(remote_path_target))
}
return self._make_dav_request(
operation,
remote_path_source,
headers=headers
) |
def get_single_lab(lab_slug):
"""Gets data from a single lab from makeinitaly.foundation."""
wiki = MediaWiki(makeinitaly__foundation_api_url)
wiki_response = wiki.call(
{'action': 'query',
'titles': lab_slug,
'prop': 'revisions',
'rvprop': 'content'})
# If we don't know the pageid...
for i in wiki_response["query"]["pages"]:
content = wiki_response["query"]["pages"][i]["revisions"][0]["*"]
# Clean the resulting string/list
newstr01 = content.replace("}}", "")
newstr02 = newstr01.replace("{{", "")
result = newstr02.rstrip("\n|").split("\n|")
# result.remove(u'FabLab')
# Transform the data into a Lab object
current_lab = MILab()
# Add existing data
for i in result:
if "coordinates=" in i:
value = i.replace("coordinates=", "")
current_lab.coordinates = value
latlong = []
if ", " in value:
latlong = value.rstrip(", ").split(", ")
elif " , " in value:
latlong = value.rstrip(" , ").split(" , ")
else:
latlong = ["", ""]
current_lab.latitude = latlong[0]
current_lab.longitude = latlong[1]
elif "province=" in i:
value = i.replace("province=", "")
current_lab.province = value.upper()
elif "region=" in i:
value = i.replace("region=", "")
current_lab.region = value
elif "address=" in i:
value = i.replace("address=", "")
current_lab.address = value
elif "city=" in i:
value = i.replace("city=", "")
current_lab.city = value
elif "fablabsio=" in i:
value = i.replace("fablabsio=", "")
current_lab.fablabsio = value
elif "website=" in i:
value = i.replace("website=", "")
current_lab.website = value
elif "facebook=" in i:
value = i.replace("facebook=", "")
current_lab.facebook = value
elif "twitter=" in i:
value = i.replace("twitter=", "")
current_lab.twitter = value
elif "email=" in i:
value = i.replace("email=", "")
current_lab.email = value
elif "manager=" in i:
value = i.replace("manager=", "")
current_lab.manager = value
elif "birthyear=" in i:
value = i.replace("birthyear=", "")
current_lab.birthyear = value
current_lab.text_en = get_lab_text(lab_slug=lab_slug, language="en")
current_lab.text_it = get_lab_text(lab_slug=lab_slug, language="it")
return current_lab | Gets data from a single lab from makeinitaly.foundation. | Below is the the instruction that describes the task:
### Input:
Gets data from a single lab from makeinitaly.foundation.
### Response:
def get_single_lab(lab_slug):
"""Gets data from a single lab from makeinitaly.foundation."""
wiki = MediaWiki(makeinitaly__foundation_api_url)
wiki_response = wiki.call(
{'action': 'query',
'titles': lab_slug,
'prop': 'revisions',
'rvprop': 'content'})
# If we don't know the pageid...
for i in wiki_response["query"]["pages"]:
content = wiki_response["query"]["pages"][i]["revisions"][0]["*"]
# Clean the resulting string/list
newstr01 = content.replace("}}", "")
newstr02 = newstr01.replace("{{", "")
result = newstr02.rstrip("\n|").split("\n|")
# result.remove(u'FabLab')
# Transform the data into a Lab object
current_lab = MILab()
# Add existing data
for i in result:
if "coordinates=" in i:
value = i.replace("coordinates=", "")
current_lab.coordinates = value
latlong = []
if ", " in value:
latlong = value.rstrip(", ").split(", ")
elif " , " in value:
latlong = value.rstrip(" , ").split(" , ")
else:
latlong = ["", ""]
current_lab.latitude = latlong[0]
current_lab.longitude = latlong[1]
elif "province=" in i:
value = i.replace("province=", "")
current_lab.province = value.upper()
elif "region=" in i:
value = i.replace("region=", "")
current_lab.region = value
elif "address=" in i:
value = i.replace("address=", "")
current_lab.address = value
elif "city=" in i:
value = i.replace("city=", "")
current_lab.city = value
elif "fablabsio=" in i:
value = i.replace("fablabsio=", "")
current_lab.fablabsio = value
elif "website=" in i:
value = i.replace("website=", "")
current_lab.website = value
elif "facebook=" in i:
value = i.replace("facebook=", "")
current_lab.facebook = value
elif "twitter=" in i:
value = i.replace("twitter=", "")
current_lab.twitter = value
elif "email=" in i:
value = i.replace("email=", "")
current_lab.email = value
elif "manager=" in i:
value = i.replace("manager=", "")
current_lab.manager = value
elif "birthyear=" in i:
value = i.replace("birthyear=", "")
current_lab.birthyear = value
current_lab.text_en = get_lab_text(lab_slug=lab_slug, language="en")
current_lab.text_it = get_lab_text(lab_slug=lab_slug, language="it")
return current_lab |
def network_profile_name_list(self, obj):
"""Get AP profile names."""
profile_list = pointer(WLAN_PROFILE_INFO_LIST())
self._wlan_get_profile_list(self._handle,
byref(obj['guid']),
byref(profile_list))
profiles = cast(profile_list.contents.ProfileInfo,
POINTER(WLAN_PROFILE_INFO))
profile_name_list = []
for i in range(profile_list.contents.dwNumberOfItems):
profile_name = ''
for j in range(len(profiles[i].strProfileName)):
profile_name += profiles[i].strProfileName[j]
profile_name_list.append(profile_name)
return profile_name_list | Get AP profile names. | Below is the the instruction that describes the task:
### Input:
Get AP profile names.
### Response:
def network_profile_name_list(self, obj):
"""Get AP profile names."""
profile_list = pointer(WLAN_PROFILE_INFO_LIST())
self._wlan_get_profile_list(self._handle,
byref(obj['guid']),
byref(profile_list))
profiles = cast(profile_list.contents.ProfileInfo,
POINTER(WLAN_PROFILE_INFO))
profile_name_list = []
for i in range(profile_list.contents.dwNumberOfItems):
profile_name = ''
for j in range(len(profiles[i].strProfileName)):
profile_name += profiles[i].strProfileName[j]
profile_name_list.append(profile_name)
return profile_name_list |
def from_response(cls, header_data, ignore_bad_cookies=False,
ignore_bad_attributes=True):
"Construct a Cookies object from response header data."
cookies = cls()
cookies.parse_response(
header_data,
ignore_bad_cookies=ignore_bad_cookies,
ignore_bad_attributes=ignore_bad_attributes)
return cookies | Construct a Cookies object from response header data. | Below is the the instruction that describes the task:
### Input:
Construct a Cookies object from response header data.
### Response:
def from_response(cls, header_data, ignore_bad_cookies=False,
ignore_bad_attributes=True):
"Construct a Cookies object from response header data."
cookies = cls()
cookies.parse_response(
header_data,
ignore_bad_cookies=ignore_bad_cookies,
ignore_bad_attributes=ignore_bad_attributes)
return cookies |
def memoise(cls, func):
''' Decorator that stores the result of the stored function in the
user's results cache until the batch completes. Keyword arguments are
not yet supported.
Arguments:
func (callable(*a)): The function whose results we want
to store. The positional arguments, ``a``, are used as cache
keys.
Returns:
callable(*a): The memosing version of ``func``.
'''
@functools.wraps(func)
def f(*a):
for arg in a:
if isinstance(arg, User):
user = arg
break
else:
raise ValueError("One position argument must be a User")
func_key = (func, tuple(a))
cache = cls.get_cache(user)
if func_key not in cache:
cache[func_key] = func(*a)
return cache[func_key]
return f | Decorator that stores the result of the stored function in the
user's results cache until the batch completes. Keyword arguments are
not yet supported.
Arguments:
func (callable(*a)): The function whose results we want
to store. The positional arguments, ``a``, are used as cache
keys.
Returns:
callable(*a): The memosing version of ``func``. | Below is the the instruction that describes the task:
### Input:
Decorator that stores the result of the stored function in the
user's results cache until the batch completes. Keyword arguments are
not yet supported.
Arguments:
func (callable(*a)): The function whose results we want
to store. The positional arguments, ``a``, are used as cache
keys.
Returns:
callable(*a): The memosing version of ``func``.
### Response:
def memoise(cls, func):
''' Decorator that stores the result of the stored function in the
user's results cache until the batch completes. Keyword arguments are
not yet supported.
Arguments:
func (callable(*a)): The function whose results we want
to store. The positional arguments, ``a``, are used as cache
keys.
Returns:
callable(*a): The memosing version of ``func``.
'''
@functools.wraps(func)
def f(*a):
for arg in a:
if isinstance(arg, User):
user = arg
break
else:
raise ValueError("One position argument must be a User")
func_key = (func, tuple(a))
cache = cls.get_cache(user)
if func_key not in cache:
cache[func_key] = func(*a)
return cache[func_key]
return f |
def match(self, dom, act):
"""
Check if the given `domain` and `act` are allowed
by this capability
"""
return self.match_domain(dom) and self.match_action(act) | Check if the given `domain` and `act` are allowed
by this capability | Below is the the instruction that describes the task:
### Input:
Check if the given `domain` and `act` are allowed
by this capability
### Response:
def match(self, dom, act):
"""
Check if the given `domain` and `act` are allowed
by this capability
"""
return self.match_domain(dom) and self.match_action(act) |
def write_list(path_out, image_list):
"""Hepler function to write image list into the file.
The format is as below,
integer_image_index \t float_label_index \t path_to_image
Note that the blank between number and tab is only used for readability.
Parameters
----------
path_out: string
image_list: list
"""
with open(path_out, 'w') as fout:
for i, item in enumerate(image_list):
line = '%d\t' % item[0]
for j in item[2:]:
line += '%f\t' % j
line += '%s\n' % item[1]
fout.write(line) | Hepler function to write image list into the file.
The format is as below,
integer_image_index \t float_label_index \t path_to_image
Note that the blank between number and tab is only used for readability.
Parameters
----------
path_out: string
image_list: list | Below is the the instruction that describes the task:
### Input:
Hepler function to write image list into the file.
The format is as below,
integer_image_index \t float_label_index \t path_to_image
Note that the blank between number and tab is only used for readability.
Parameters
----------
path_out: string
image_list: list
### Response:
def write_list(path_out, image_list):
"""Hepler function to write image list into the file.
The format is as below,
integer_image_index \t float_label_index \t path_to_image
Note that the blank between number and tab is only used for readability.
Parameters
----------
path_out: string
image_list: list
"""
with open(path_out, 'w') as fout:
for i, item in enumerate(image_list):
line = '%d\t' % item[0]
for j in item[2:]:
line += '%f\t' % j
line += '%s\n' % item[1]
fout.write(line) |
def refresh_hrefs(self, request):
"""
Refresh all the cached menu item HREFs in the database.
"""
for item in treenav.MenuItem.objects.all():
item.save() # refreshes the HREF
self.message_user(request, _('Menu item HREFs refreshed successfully.'))
info = self.model._meta.app_label, self.model._meta.model_name
changelist_url = reverse('admin:%s_%s_changelist' % info, current_app=self.admin_site.name)
return redirect(changelist_url) | Refresh all the cached menu item HREFs in the database. | Below is the the instruction that describes the task:
### Input:
Refresh all the cached menu item HREFs in the database.
### Response:
def refresh_hrefs(self, request):
"""
Refresh all the cached menu item HREFs in the database.
"""
for item in treenav.MenuItem.objects.all():
item.save() # refreshes the HREF
self.message_user(request, _('Menu item HREFs refreshed successfully.'))
info = self.model._meta.app_label, self.model._meta.model_name
changelist_url = reverse('admin:%s_%s_changelist' % info, current_app=self.admin_site.name)
return redirect(changelist_url) |
def freq2midi(freq):
"""
Given a frequency in Hz, returns its MIDI pitch number.
"""
result = 12 * (log2(freq) - log2(FREQ_A4)) + MIDI_A4
return nan if isinstance(result, complex) else result | Given a frequency in Hz, returns its MIDI pitch number. | Below is the the instruction that describes the task:
### Input:
Given a frequency in Hz, returns its MIDI pitch number.
### Response:
def freq2midi(freq):
"""
Given a frequency in Hz, returns its MIDI pitch number.
"""
result = 12 * (log2(freq) - log2(FREQ_A4)) + MIDI_A4
return nan if isinstance(result, complex) else result |
def _layout_to_vdev(layout, device_dir=None):
'''
Turn the layout data into usable vdevs spedcification
We need to support 2 ways of passing the layout:
.. code::
layout_new:
- mirror:
- disk0
- disk1
- mirror:
- disk2
- disk3
.. code:
layout_legacy:
mirror-0:
disk0
disk1
mirror-1:
disk2
disk3
'''
vdevs = []
# NOTE: check device_dir exists
if device_dir and not os.path.exists(device_dir):
device_dir = None
# NOTE: handle list of OrderedDicts (new layout)
if isinstance(layout, list):
# NOTE: parse each vdev as a tiny layout and just append
for vdev in layout:
if isinstance(vdev, OrderedDict):
vdevs.extend(_layout_to_vdev(vdev, device_dir))
else:
if device_dir and vdev[0] != '/':
vdev = os.path.join(device_dir, vdev)
vdevs.append(vdev)
# NOTE: handle nested OrderedDict (legacy layout)
# this is also used to parse the nested OrderedDicts
# from the new layout
elif isinstance(layout, OrderedDict):
for vdev in layout:
# NOTE: extract the vdev type and disks in the vdev
vdev_type = vdev.split('-')[0]
vdev_disk = layout[vdev]
# NOTE: skip appending the dummy type 'disk'
if vdev_type != 'disk':
vdevs.append(vdev_type)
# NOTE: ensure the disks are a list (legacy layout are not)
if not isinstance(vdev_disk, list):
vdev_disk = vdev_disk.split(' ')
# NOTE: also append the actualy disks behind the type
# also prepend device_dir to disks if required
for disk in vdev_disk:
if device_dir and disk[0] != '/':
disk = os.path.join(device_dir, disk)
vdevs.append(disk)
# NOTE: we got invalid data for layout
else:
vdevs = None
return vdevs | Turn the layout data into usable vdevs spedcification
We need to support 2 ways of passing the layout:
.. code::
layout_new:
- mirror:
- disk0
- disk1
- mirror:
- disk2
- disk3
.. code:
layout_legacy:
mirror-0:
disk0
disk1
mirror-1:
disk2
disk3 | Below is the the instruction that describes the task:
### Input:
Turn the layout data into usable vdevs spedcification
We need to support 2 ways of passing the layout:
.. code::
layout_new:
- mirror:
- disk0
- disk1
- mirror:
- disk2
- disk3
.. code:
layout_legacy:
mirror-0:
disk0
disk1
mirror-1:
disk2
disk3
### Response:
def _layout_to_vdev(layout, device_dir=None):
'''
Turn the layout data into usable vdevs spedcification
We need to support 2 ways of passing the layout:
.. code::
layout_new:
- mirror:
- disk0
- disk1
- mirror:
- disk2
- disk3
.. code:
layout_legacy:
mirror-0:
disk0
disk1
mirror-1:
disk2
disk3
'''
vdevs = []
# NOTE: check device_dir exists
if device_dir and not os.path.exists(device_dir):
device_dir = None
# NOTE: handle list of OrderedDicts (new layout)
if isinstance(layout, list):
# NOTE: parse each vdev as a tiny layout and just append
for vdev in layout:
if isinstance(vdev, OrderedDict):
vdevs.extend(_layout_to_vdev(vdev, device_dir))
else:
if device_dir and vdev[0] != '/':
vdev = os.path.join(device_dir, vdev)
vdevs.append(vdev)
# NOTE: handle nested OrderedDict (legacy layout)
# this is also used to parse the nested OrderedDicts
# from the new layout
elif isinstance(layout, OrderedDict):
for vdev in layout:
# NOTE: extract the vdev type and disks in the vdev
vdev_type = vdev.split('-')[0]
vdev_disk = layout[vdev]
# NOTE: skip appending the dummy type 'disk'
if vdev_type != 'disk':
vdevs.append(vdev_type)
# NOTE: ensure the disks are a list (legacy layout are not)
if not isinstance(vdev_disk, list):
vdev_disk = vdev_disk.split(' ')
# NOTE: also append the actualy disks behind the type
# also prepend device_dir to disks if required
for disk in vdev_disk:
if device_dir and disk[0] != '/':
disk = os.path.join(device_dir, disk)
vdevs.append(disk)
# NOTE: we got invalid data for layout
else:
vdevs = None
return vdevs |
def _delete_plot(cls, plot_id):
"""
Deletes registered plots and calls Plot.cleanup
"""
plot = cls._plots.get(plot_id)
if plot is None:
return
plot.cleanup()
del cls._plots[plot_id] | Deletes registered plots and calls Plot.cleanup | Below is the the instruction that describes the task:
### Input:
Deletes registered plots and calls Plot.cleanup
### Response:
def _delete_plot(cls, plot_id):
"""
Deletes registered plots and calls Plot.cleanup
"""
plot = cls._plots.get(plot_id)
if plot is None:
return
plot.cleanup()
del cls._plots[plot_id] |
def setPointSize(self, pointSize):
"""
Sets the point size for this widget to the inputed size.
:param pointSize | <int>
"""
self.uiSizeSPN.blockSignals(True)
self.uiSizeSPN.setValue(pointSize)
self.uiSizeSPN.blockSignals(False)
for i in range(self.uiFontTREE.topLevelItemCount()):
item = self.uiFontTREE.topLevelItem(i)
font = item.font(0)
font.setPointSize(pointSize)
item.setFont(0, font) | Sets the point size for this widget to the inputed size.
:param pointSize | <int> | Below is the the instruction that describes the task:
### Input:
Sets the point size for this widget to the inputed size.
:param pointSize | <int>
### Response:
def setPointSize(self, pointSize):
"""
Sets the point size for this widget to the inputed size.
:param pointSize | <int>
"""
self.uiSizeSPN.blockSignals(True)
self.uiSizeSPN.setValue(pointSize)
self.uiSizeSPN.blockSignals(False)
for i in range(self.uiFontTREE.topLevelItemCount()):
item = self.uiFontTREE.topLevelItem(i)
font = item.font(0)
font.setPointSize(pointSize)
item.setFont(0, font) |
def clean(self, value):
""" Propagate to list elements. """
value = super(ListField, self).clean(value)
if value is not None:
return map(self.itemspec.clean, value) | Propagate to list elements. | Below is the the instruction that describes the task:
### Input:
Propagate to list elements.
### Response:
def clean(self, value):
""" Propagate to list elements. """
value = super(ListField, self).clean(value)
if value is not None:
return map(self.itemspec.clean, value) |
def num_employers(self, num_employers):
'''Sets the number of employer bees; at least two are required
Args:
num_employers (int): number of employer bees
'''
if num_employers < 2:
self._logger.log(
'warn',
'Two employers are needed: setting to two'
)
num_employers = 2
self._num_employers = num_employers
self._logger.log('debug', 'Number of employers set to {}'.format(
num_employers
))
self._limit = num_employers * len(self._value_ranges)
self._logger.log('debug', 'Limit set to {}'.format(self._limit)) | Sets the number of employer bees; at least two are required
Args:
num_employers (int): number of employer bees | Below is the the instruction that describes the task:
### Input:
Sets the number of employer bees; at least two are required
Args:
num_employers (int): number of employer bees
### Response:
def num_employers(self, num_employers):
'''Sets the number of employer bees; at least two are required
Args:
num_employers (int): number of employer bees
'''
if num_employers < 2:
self._logger.log(
'warn',
'Two employers are needed: setting to two'
)
num_employers = 2
self._num_employers = num_employers
self._logger.log('debug', 'Number of employers set to {}'.format(
num_employers
))
self._limit = num_employers * len(self._value_ranges)
self._logger.log('debug', 'Limit set to {}'.format(self._limit)) |
def _get_struct_colormatrixfilter(self):
"""Get the values for the COLORMATRIXFILTER record."""
obj = _make_object("ColorMatrixFilter")
obj.Matrix = [unpack_float(self._src) for _ in range(20)]
return obj | Get the values for the COLORMATRIXFILTER record. | Below is the the instruction that describes the task:
### Input:
Get the values for the COLORMATRIXFILTER record.
### Response:
def _get_struct_colormatrixfilter(self):
"""Get the values for the COLORMATRIXFILTER record."""
obj = _make_object("ColorMatrixFilter")
obj.Matrix = [unpack_float(self._src) for _ in range(20)]
return obj |
def create_atype(self, ):
"""Create a atype and store it in the self.atype
:returns: None
:rtype: None
:raises: None
"""
name = self.name_le.text()
desc = self.desc_pte.toPlainText()
try:
atype = djadapter.models.Atype(name=name, description=desc)
atype.save()
for prj in self.projects:
atype.projects.add(prj)
self.atype = atype
self.accept()
except:
log.exception("Could not create new assettype") | Create a atype and store it in the self.atype
:returns: None
:rtype: None
:raises: None | Below is the the instruction that describes the task:
### Input:
Create a atype and store it in the self.atype
:returns: None
:rtype: None
:raises: None
### Response:
def create_atype(self, ):
"""Create a atype and store it in the self.atype
:returns: None
:rtype: None
:raises: None
"""
name = self.name_le.text()
desc = self.desc_pte.toPlainText()
try:
atype = djadapter.models.Atype(name=name, description=desc)
atype.save()
for prj in self.projects:
atype.projects.add(prj)
self.atype = atype
self.accept()
except:
log.exception("Could not create new assettype") |
def create(cls, cards, custom_headers=None):
"""
:type user_id: int
:param cards: The cards that need to be updated.
:type cards: list[object_.CardBatchEntry]
:type custom_headers: dict[str, str]|None
:rtype: BunqResponseCardBatch
"""
if custom_headers is None:
custom_headers = {}
request_map = {
cls.FIELD_CARDS: cards
}
request_map_string = converter.class_to_json(request_map)
request_map_string = cls._remove_field_for_request(request_map_string)
api_client = client.ApiClient(cls._get_api_context())
request_bytes = request_map_string.encode()
endpoint_url = cls._ENDPOINT_URL_CREATE.format(cls._determine_user_id())
response_raw = api_client.post(endpoint_url, request_bytes,
custom_headers)
return BunqResponseCardBatch.cast_from_bunq_response(
cls._from_json(response_raw, cls._OBJECT_TYPE_POST)
) | :type user_id: int
:param cards: The cards that need to be updated.
:type cards: list[object_.CardBatchEntry]
:type custom_headers: dict[str, str]|None
:rtype: BunqResponseCardBatch | Below is the the instruction that describes the task:
### Input:
:type user_id: int
:param cards: The cards that need to be updated.
:type cards: list[object_.CardBatchEntry]
:type custom_headers: dict[str, str]|None
:rtype: BunqResponseCardBatch
### Response:
def create(cls, cards, custom_headers=None):
"""
:type user_id: int
:param cards: The cards that need to be updated.
:type cards: list[object_.CardBatchEntry]
:type custom_headers: dict[str, str]|None
:rtype: BunqResponseCardBatch
"""
if custom_headers is None:
custom_headers = {}
request_map = {
cls.FIELD_CARDS: cards
}
request_map_string = converter.class_to_json(request_map)
request_map_string = cls._remove_field_for_request(request_map_string)
api_client = client.ApiClient(cls._get_api_context())
request_bytes = request_map_string.encode()
endpoint_url = cls._ENDPOINT_URL_CREATE.format(cls._determine_user_id())
response_raw = api_client.post(endpoint_url, request_bytes,
custom_headers)
return BunqResponseCardBatch.cast_from_bunq_response(
cls._from_json(response_raw, cls._OBJECT_TYPE_POST)
) |
def get_config_directory(override_files=False):
"""
Looks for the most specific configuration directory possible, in order to
load individual configuration files.
"""
if override_files:
possible_dirs = [override_files]
else:
xdg_config_home = os.environ.get('XDG_CONFIG_HOME') or \
os.path.expanduser('~/.config')
possible_dirs = [os.path.join(xdg_config_home, "supernova.d/"),
os.path.expanduser("~/.supernova.d/"),
".supernova.d/"]
for config_dir in reversed(possible_dirs):
if os.path.isdir(config_dir):
return config_dir
return False | Looks for the most specific configuration directory possible, in order to
load individual configuration files. | Below is the the instruction that describes the task:
### Input:
Looks for the most specific configuration directory possible, in order to
load individual configuration files.
### Response:
def get_config_directory(override_files=False):
"""
Looks for the most specific configuration directory possible, in order to
load individual configuration files.
"""
if override_files:
possible_dirs = [override_files]
else:
xdg_config_home = os.environ.get('XDG_CONFIG_HOME') or \
os.path.expanduser('~/.config')
possible_dirs = [os.path.join(xdg_config_home, "supernova.d/"),
os.path.expanduser("~/.supernova.d/"),
".supernova.d/"]
for config_dir in reversed(possible_dirs):
if os.path.isdir(config_dir):
return config_dir
return False |
def axes(self, axes):
'''Set the axes for this object's degrees of freedom.
Parameters
----------
axes : list of axes specifications
A list of axis values to set. This list must have the same number of
elements as the degrees of freedom of the underlying ODE object.
Each element can be
(a) None, which has no effect on the corresponding axis, or
(b) three floats specifying the axis to set.
'''
assert self.ADOF == len(axes) or self.LDOF == len(axes)
for i, axis in enumerate(axes):
if axis is not None:
self.ode_obj.setAxis(i, 0, axis) | Set the axes for this object's degrees of freedom.
Parameters
----------
axes : list of axes specifications
A list of axis values to set. This list must have the same number of
elements as the degrees of freedom of the underlying ODE object.
Each element can be
(a) None, which has no effect on the corresponding axis, or
(b) three floats specifying the axis to set. | Below is the the instruction that describes the task:
### Input:
Set the axes for this object's degrees of freedom.
Parameters
----------
axes : list of axes specifications
A list of axis values to set. This list must have the same number of
elements as the degrees of freedom of the underlying ODE object.
Each element can be
(a) None, which has no effect on the corresponding axis, or
(b) three floats specifying the axis to set.
### Response:
def axes(self, axes):
'''Set the axes for this object's degrees of freedom.
Parameters
----------
axes : list of axes specifications
A list of axis values to set. This list must have the same number of
elements as the degrees of freedom of the underlying ODE object.
Each element can be
(a) None, which has no effect on the corresponding axis, or
(b) three floats specifying the axis to set.
'''
assert self.ADOF == len(axes) or self.LDOF == len(axes)
for i, axis in enumerate(axes):
if axis is not None:
self.ode_obj.setAxis(i, 0, axis) |
def setup(self, workers=1, qsize=0):
"""Setup the pool parameters like number of workers and output queue size"""
if workers <= 0:
raise ValueError("workers have to be greater then zero")
if qsize < 0:
raise ValueError("qsize have to be greater or equal zero")
self.qsize = qsize # output que size
self.workers = workers
return self | Setup the pool parameters like number of workers and output queue size | Below is the the instruction that describes the task:
### Input:
Setup the pool parameters like number of workers and output queue size
### Response:
def setup(self, workers=1, qsize=0):
"""Setup the pool parameters like number of workers and output queue size"""
if workers <= 0:
raise ValueError("workers have to be greater then zero")
if qsize < 0:
raise ValueError("qsize have to be greater or equal zero")
self.qsize = qsize # output que size
self.workers = workers
return self |
def ensure_instruction(instruction: int) -> bytes:
"""
Wraps an instruction to be Python 3.6+ compatible. This does nothing on Python 3.5 and below.
This is most useful for operating on bare, single-width instructions such as
``RETURN_FUNCTION`` in a version portable way.
:param instruction: The instruction integer to use.
:return: A safe bytes object, if applicable.
"""
if PY36:
return instruction.to_bytes(2, byteorder="little")
else:
return instruction.to_bytes(1, byteorder="little") | Wraps an instruction to be Python 3.6+ compatible. This does nothing on Python 3.5 and below.
This is most useful for operating on bare, single-width instructions such as
``RETURN_FUNCTION`` in a version portable way.
:param instruction: The instruction integer to use.
:return: A safe bytes object, if applicable. | Below is the the instruction that describes the task:
### Input:
Wraps an instruction to be Python 3.6+ compatible. This does nothing on Python 3.5 and below.
This is most useful for operating on bare, single-width instructions such as
``RETURN_FUNCTION`` in a version portable way.
:param instruction: The instruction integer to use.
:return: A safe bytes object, if applicable.
### Response:
def ensure_instruction(instruction: int) -> bytes:
"""
Wraps an instruction to be Python 3.6+ compatible. This does nothing on Python 3.5 and below.
This is most useful for operating on bare, single-width instructions such as
``RETURN_FUNCTION`` in a version portable way.
:param instruction: The instruction integer to use.
:return: A safe bytes object, if applicable.
"""
if PY36:
return instruction.to_bytes(2, byteorder="little")
else:
return instruction.to_bytes(1, byteorder="little") |
def get_all_hosting_devices(self, context):
"""Get a list of all hosting devices."""
cctxt = self.client.prepare()
return cctxt.call(context,
'get_all_hosting_devices',
host=self.host) | Get a list of all hosting devices. | Below is the the instruction that describes the task:
### Input:
Get a list of all hosting devices.
### Response:
def get_all_hosting_devices(self, context):
"""Get a list of all hosting devices."""
cctxt = self.client.prepare()
return cctxt.call(context,
'get_all_hosting_devices',
host=self.host) |
def map(self, mapper):
"""
Map categories using input correspondence (dict, Series, or function).
Parameters
----------
mapper : dict, Series, callable
The correspondence from old values to new.
Returns
-------
SparseArray
The output array will have the same density as the input.
The output fill value will be the result of applying the
mapping to ``self.fill_value``
Examples
--------
>>> arr = pd.SparseArray([0, 1, 2])
>>> arr.apply(lambda x: x + 10)
[10, 11, 12]
Fill: 10
IntIndex
Indices: array([1, 2], dtype=int32)
>>> arr.apply({0: 10, 1: 11, 2: 12})
[10, 11, 12]
Fill: 10
IntIndex
Indices: array([1, 2], dtype=int32)
>>> arr.apply(pd.Series([10, 11, 12], index=[0, 1, 2]))
[10, 11, 12]
Fill: 10
IntIndex
Indices: array([1, 2], dtype=int32)
"""
# this is used in apply.
# We get hit since we're an "is_extension_type" but regular extension
# types are not hit. This may be worth adding to the interface.
if isinstance(mapper, ABCSeries):
mapper = mapper.to_dict()
if isinstance(mapper, abc.Mapping):
fill_value = mapper.get(self.fill_value, self.fill_value)
sp_values = [mapper.get(x, None) for x in self.sp_values]
else:
fill_value = mapper(self.fill_value)
sp_values = [mapper(x) for x in self.sp_values]
return type(self)(sp_values, sparse_index=self.sp_index,
fill_value=fill_value) | Map categories using input correspondence (dict, Series, or function).
Parameters
----------
mapper : dict, Series, callable
The correspondence from old values to new.
Returns
-------
SparseArray
The output array will have the same density as the input.
The output fill value will be the result of applying the
mapping to ``self.fill_value``
Examples
--------
>>> arr = pd.SparseArray([0, 1, 2])
>>> arr.apply(lambda x: x + 10)
[10, 11, 12]
Fill: 10
IntIndex
Indices: array([1, 2], dtype=int32)
>>> arr.apply({0: 10, 1: 11, 2: 12})
[10, 11, 12]
Fill: 10
IntIndex
Indices: array([1, 2], dtype=int32)
>>> arr.apply(pd.Series([10, 11, 12], index=[0, 1, 2]))
[10, 11, 12]
Fill: 10
IntIndex
Indices: array([1, 2], dtype=int32) | Below is the the instruction that describes the task:
### Input:
Map categories using input correspondence (dict, Series, or function).
Parameters
----------
mapper : dict, Series, callable
The correspondence from old values to new.
Returns
-------
SparseArray
The output array will have the same density as the input.
The output fill value will be the result of applying the
mapping to ``self.fill_value``
Examples
--------
>>> arr = pd.SparseArray([0, 1, 2])
>>> arr.apply(lambda x: x + 10)
[10, 11, 12]
Fill: 10
IntIndex
Indices: array([1, 2], dtype=int32)
>>> arr.apply({0: 10, 1: 11, 2: 12})
[10, 11, 12]
Fill: 10
IntIndex
Indices: array([1, 2], dtype=int32)
>>> arr.apply(pd.Series([10, 11, 12], index=[0, 1, 2]))
[10, 11, 12]
Fill: 10
IntIndex
Indices: array([1, 2], dtype=int32)
### Response:
def map(self, mapper):
"""
Map categories using input correspondence (dict, Series, or function).
Parameters
----------
mapper : dict, Series, callable
The correspondence from old values to new.
Returns
-------
SparseArray
The output array will have the same density as the input.
The output fill value will be the result of applying the
mapping to ``self.fill_value``
Examples
--------
>>> arr = pd.SparseArray([0, 1, 2])
>>> arr.apply(lambda x: x + 10)
[10, 11, 12]
Fill: 10
IntIndex
Indices: array([1, 2], dtype=int32)
>>> arr.apply({0: 10, 1: 11, 2: 12})
[10, 11, 12]
Fill: 10
IntIndex
Indices: array([1, 2], dtype=int32)
>>> arr.apply(pd.Series([10, 11, 12], index=[0, 1, 2]))
[10, 11, 12]
Fill: 10
IntIndex
Indices: array([1, 2], dtype=int32)
"""
# this is used in apply.
# We get hit since we're an "is_extension_type" but regular extension
# types are not hit. This may be worth adding to the interface.
if isinstance(mapper, ABCSeries):
mapper = mapper.to_dict()
if isinstance(mapper, abc.Mapping):
fill_value = mapper.get(self.fill_value, self.fill_value)
sp_values = [mapper.get(x, None) for x in self.sp_values]
else:
fill_value = mapper(self.fill_value)
sp_values = [mapper(x) for x in self.sp_values]
return type(self)(sp_values, sparse_index=self.sp_index,
fill_value=fill_value) |
def list(self, request):
"""Search the doctypes for this model."""
query = get_query_params(request).get("search", "")
results = []
base = self.model.get_base_class()
doctypes = indexable_registry.families[base]
for doctype, klass in doctypes.items():
name = klass._meta.verbose_name.title()
if query.lower() in name.lower():
results.append(dict(
name=name,
doctype=doctype
))
results.sort(key=lambda x: x["name"])
return Response(dict(results=results)) | Search the doctypes for this model. | Below is the the instruction that describes the task:
### Input:
Search the doctypes for this model.
### Response:
def list(self, request):
"""Search the doctypes for this model."""
query = get_query_params(request).get("search", "")
results = []
base = self.model.get_base_class()
doctypes = indexable_registry.families[base]
for doctype, klass in doctypes.items():
name = klass._meta.verbose_name.title()
if query.lower() in name.lower():
results.append(dict(
name=name,
doctype=doctype
))
results.sort(key=lambda x: x["name"])
return Response(dict(results=results)) |
def GetDefault(self, fd=None, default=None):
"""Returns a default attribute if it is not set."""
if callable(self.default):
return self.default(fd)
if self.default is not None:
# We can't return mutable objects here or the default might change for all
# objects of this class.
if isinstance(self.default, rdfvalue.RDFValue):
default = self.default.Copy()
default.attribute_instance = self
return self(default)
else:
return self(self.default)
if isinstance(default, rdfvalue.RDFValue):
default = default.Copy()
default.attribute_instance = self
return default | Returns a default attribute if it is not set. | Below is the the instruction that describes the task:
### Input:
Returns a default attribute if it is not set.
### Response:
def GetDefault(self, fd=None, default=None):
"""Returns a default attribute if it is not set."""
if callable(self.default):
return self.default(fd)
if self.default is not None:
# We can't return mutable objects here or the default might change for all
# objects of this class.
if isinstance(self.default, rdfvalue.RDFValue):
default = self.default.Copy()
default.attribute_instance = self
return self(default)
else:
return self(self.default)
if isinstance(default, rdfvalue.RDFValue):
default = default.Copy()
default.attribute_instance = self
return default |
def cli(ctx):
"""Shows the saved commands."""
json_path = os.path.join(os.path.expanduser('~'), '.keep', 'commands.json')
if not os.path.exists(json_path):
click.echo('No commands to show. Add one by `keep new`.')
else:
utils.list_commands(ctx) | Shows the saved commands. | Below is the the instruction that describes the task:
### Input:
Shows the saved commands.
### Response:
def cli(ctx):
"""Shows the saved commands."""
json_path = os.path.join(os.path.expanduser('~'), '.keep', 'commands.json')
if not os.path.exists(json_path):
click.echo('No commands to show. Add one by `keep new`.')
else:
utils.list_commands(ctx) |
def quat2mat(quaternion):
"""
Converts given quaternion (x, y, z, w) to matrix.
Args:
quaternion: vec4 float angles
Returns:
3x3 rotation matrix
"""
q = np.array(quaternion, dtype=np.float32, copy=True)[[3, 0, 1, 2]]
n = np.dot(q, q)
if n < EPS:
return np.identity(3)
q *= math.sqrt(2.0 / n)
q = np.outer(q, q)
return np.array(
[
[1.0 - q[2, 2] - q[3, 3], q[1, 2] - q[3, 0], q[1, 3] + q[2, 0]],
[q[1, 2] + q[3, 0], 1.0 - q[1, 1] - q[3, 3], q[2, 3] - q[1, 0]],
[q[1, 3] - q[2, 0], q[2, 3] + q[1, 0], 1.0 - q[1, 1] - q[2, 2]],
]
) | Converts given quaternion (x, y, z, w) to matrix.
Args:
quaternion: vec4 float angles
Returns:
3x3 rotation matrix | Below is the the instruction that describes the task:
### Input:
Converts given quaternion (x, y, z, w) to matrix.
Args:
quaternion: vec4 float angles
Returns:
3x3 rotation matrix
### Response:
def quat2mat(quaternion):
"""
Converts given quaternion (x, y, z, w) to matrix.
Args:
quaternion: vec4 float angles
Returns:
3x3 rotation matrix
"""
q = np.array(quaternion, dtype=np.float32, copy=True)[[3, 0, 1, 2]]
n = np.dot(q, q)
if n < EPS:
return np.identity(3)
q *= math.sqrt(2.0 / n)
q = np.outer(q, q)
return np.array(
[
[1.0 - q[2, 2] - q[3, 3], q[1, 2] - q[3, 0], q[1, 3] + q[2, 0]],
[q[1, 2] + q[3, 0], 1.0 - q[1, 1] - q[3, 3], q[2, 3] - q[1, 0]],
[q[1, 3] - q[2, 0], q[2, 3] + q[1, 0], 1.0 - q[1, 1] - q[2, 2]],
]
) |
def move(self, from_stash, to_stash, filter_func=None):
"""
Move states from one stash to another.
:param from_stash: Take matching states from this stash.
:param to_stash: Put matching states into this stash.
:param filter_func: Stash states that match this filter. Should be a function that takes
a state and returns True or False. (default: stash all states)
:returns: The simulation manager, for chaining.
:rtype: SimulationManager
"""
filter_func = filter_func or (lambda s: True)
stash_splitter = lambda states: reversed(self._filter_states(filter_func, states))
return self.split(stash_splitter, from_stash=from_stash, to_stash=to_stash) | Move states from one stash to another.
:param from_stash: Take matching states from this stash.
:param to_stash: Put matching states into this stash.
:param filter_func: Stash states that match this filter. Should be a function that takes
a state and returns True or False. (default: stash all states)
:returns: The simulation manager, for chaining.
:rtype: SimulationManager | Below is the the instruction that describes the task:
### Input:
Move states from one stash to another.
:param from_stash: Take matching states from this stash.
:param to_stash: Put matching states into this stash.
:param filter_func: Stash states that match this filter. Should be a function that takes
a state and returns True or False. (default: stash all states)
:returns: The simulation manager, for chaining.
:rtype: SimulationManager
### Response:
def move(self, from_stash, to_stash, filter_func=None):
"""
Move states from one stash to another.
:param from_stash: Take matching states from this stash.
:param to_stash: Put matching states into this stash.
:param filter_func: Stash states that match this filter. Should be a function that takes
a state and returns True or False. (default: stash all states)
:returns: The simulation manager, for chaining.
:rtype: SimulationManager
"""
filter_func = filter_func or (lambda s: True)
stash_splitter = lambda states: reversed(self._filter_states(filter_func, states))
return self.split(stash_splitter, from_stash=from_stash, to_stash=to_stash) |
def _verifyChildren(self, i):
"""
Used for validation during parsing, and additional
book-keeping. For internal use only.
"""
super(Table, self)._verifyChildren(i)
child = self.childNodes[i]
if child.tagName == ligolw.Column.tagName:
self._update_column_info()
elif child.tagName == ligolw.Stream.tagName:
# require agreement of non-stripped strings
if child.getAttribute("Name") != self.getAttribute("Name"):
raise ligolw.ElementError("Stream name '%s' does not match Table name '%s'" % (child.getAttribute("Name"), self.getAttribute("Name"))) | Used for validation during parsing, and additional
book-keeping. For internal use only. | Below is the the instruction that describes the task:
### Input:
Used for validation during parsing, and additional
book-keeping. For internal use only.
### Response:
def _verifyChildren(self, i):
"""
Used for validation during parsing, and additional
book-keeping. For internal use only.
"""
super(Table, self)._verifyChildren(i)
child = self.childNodes[i]
if child.tagName == ligolw.Column.tagName:
self._update_column_info()
elif child.tagName == ligolw.Stream.tagName:
# require agreement of non-stripped strings
if child.getAttribute("Name") != self.getAttribute("Name"):
raise ligolw.ElementError("Stream name '%s' does not match Table name '%s'" % (child.getAttribute("Name"), self.getAttribute("Name"))) |
def search(self, terms):
""" Search transcripts.
Args:
terms (str): Terms for search
Returns:
array. Messages
"""
messages = self._connection.get("search/%s" % urllib.quote_plus(terms), key="messages")
if messages:
messages = [Message(self, message) for message in messages]
return messages | Search transcripts.
Args:
terms (str): Terms for search
Returns:
array. Messages | Below is the the instruction that describes the task:
### Input:
Search transcripts.
Args:
terms (str): Terms for search
Returns:
array. Messages
### Response:
def search(self, terms):
""" Search transcripts.
Args:
terms (str): Terms for search
Returns:
array. Messages
"""
messages = self._connection.get("search/%s" % urllib.quote_plus(terms), key="messages")
if messages:
messages = [Message(self, message) for message in messages]
return messages |
def get_random_mass_point_particles(numPoints, massRangeParams):
"""
This function will generate a large set of points within the chosen mass
and spin space. It will also return the corresponding PN spin coefficients
for ease of use later (though these may be removed at some future point).
Parameters
----------
numPoints : int
Number of systems to simulate
massRangeParams : massRangeParameters instance
Instance holding all the details of mass ranges and spin ranges.
Returns
--------
mass1 : float
Mass of heavier body.
mass2 : float
Mass of lighter body.
spin1z : float
Spin of body 1.
spin2z : float
Spin of body 2.
"""
# WARNING: We expect mass1 > mass2 ALWAYS
# First we choose the total masses from a unifrom distribution in mass
# to the -5/3. power.
mass = numpy.random.random(numPoints) * \
(massRangeParams.minTotMass**(-5./3.) \
- massRangeParams.maxTotMass**(-5./3.)) \
+ massRangeParams.maxTotMass**(-5./3.)
mass = mass**(-3./5.)
# Next we choose the mass ratios, this will take different limits based on
# the value of total mass
maxmass2 = numpy.minimum(mass/2., massRangeParams.maxMass2)
minmass1 = numpy.maximum(massRangeParams.minMass1, mass/2.)
mineta = numpy.maximum(massRangeParams.minCompMass \
* (mass-massRangeParams.minCompMass)/(mass*mass), \
massRangeParams.maxCompMass \
* (mass-massRangeParams.maxCompMass)/(mass*mass))
# Note that mineta is a numpy.array because mineta depends on the total
# mass. Therefore this is not precomputed in the massRangeParams instance
if massRangeParams.minEta:
mineta = numpy.maximum(massRangeParams.minEta, mineta)
# Eta also restricted by chirp mass restrictions
if massRangeParams.min_chirp_mass:
eta_val_at_min_chirp = massRangeParams.min_chirp_mass / mass
eta_val_at_min_chirp = eta_val_at_min_chirp**(5./3.)
mineta = numpy.maximum(mineta, eta_val_at_min_chirp)
maxeta = numpy.minimum(massRangeParams.maxEta, maxmass2 \
* (mass - maxmass2) / (mass*mass))
maxeta = numpy.minimum(maxeta, minmass1 \
* (mass - minmass1) / (mass*mass))
# max eta also affected by chirp mass restrictions
if massRangeParams.max_chirp_mass:
eta_val_at_max_chirp = massRangeParams.max_chirp_mass / mass
eta_val_at_max_chirp = eta_val_at_max_chirp**(5./3.)
maxeta = numpy.minimum(maxeta, eta_val_at_max_chirp)
if (maxeta < mineta).any():
errMsg = "ERROR: Maximum eta is smaller than minimum eta!!"
raise ValueError(errMsg)
eta = numpy.random.random(numPoints) * (maxeta - mineta) + mineta
# Also calculate the component masses; mass1 > mass2
diff = (mass*mass * (1-4*eta))**0.5
mass1 = (mass + diff)/2.
mass2 = (mass - diff)/2.
# Check the masses are where we want them to be (allowing some floating
# point rounding error).
if (mass1 > massRangeParams.maxMass1*1.001).any() \
or (mass1 < massRangeParams.minMass1*0.999).any():
errMsg = "Mass1 is not within the specified mass range."
raise ValueError(errMsg)
if (mass2 > massRangeParams.maxMass2*1.001).any() \
or (mass2 < massRangeParams.minMass2*0.999).any():
errMsg = "Mass2 is not within the specified mass range."
raise ValueError(errMsg)
# Next up is the spins. First check if we have non-zero spins
if massRangeParams.maxNSSpinMag == 0 and massRangeParams.maxBHSpinMag == 0:
spin1z = numpy.zeros(numPoints,dtype=float)
spin2z = numpy.zeros(numPoints,dtype=float)
elif massRangeParams.nsbhFlag:
# Spin 1 first
mspin = numpy.zeros(len(mass1))
mspin += massRangeParams.maxBHSpinMag
spin1z = (2*numpy.random.random(numPoints) - 1) * mspin
# Then spin2
mspin = numpy.zeros(len(mass2))
mspin += massRangeParams.maxNSSpinMag
spin2z = (2*numpy.random.random(numPoints) - 1) * mspin
else:
boundary_mass = massRangeParams.ns_bh_boundary_mass
# Spin 1 first
mspin = numpy.zeros(len(mass1))
mspin += massRangeParams.maxNSSpinMag
mspin[mass1 > boundary_mass] = massRangeParams.maxBHSpinMag
spin1z = (2*numpy.random.random(numPoints) - 1) * mspin
# Then spin 2
mspin = numpy.zeros(len(mass2))
mspin += massRangeParams.maxNSSpinMag
mspin[mass2 > boundary_mass] = massRangeParams.maxBHSpinMag
spin2z = (2*numpy.random.random(numPoints) - 1) * mspin
return mass1, mass2, spin1z, spin2z | This function will generate a large set of points within the chosen mass
and spin space. It will also return the corresponding PN spin coefficients
for ease of use later (though these may be removed at some future point).
Parameters
----------
numPoints : int
Number of systems to simulate
massRangeParams : massRangeParameters instance
Instance holding all the details of mass ranges and spin ranges.
Returns
--------
mass1 : float
Mass of heavier body.
mass2 : float
Mass of lighter body.
spin1z : float
Spin of body 1.
spin2z : float
Spin of body 2. | Below is the the instruction that describes the task:
### Input:
This function will generate a large set of points within the chosen mass
and spin space. It will also return the corresponding PN spin coefficients
for ease of use later (though these may be removed at some future point).
Parameters
----------
numPoints : int
Number of systems to simulate
massRangeParams : massRangeParameters instance
Instance holding all the details of mass ranges and spin ranges.
Returns
--------
mass1 : float
Mass of heavier body.
mass2 : float
Mass of lighter body.
spin1z : float
Spin of body 1.
spin2z : float
Spin of body 2.
### Response:
def get_random_mass_point_particles(numPoints, massRangeParams):
"""
This function will generate a large set of points within the chosen mass
and spin space. It will also return the corresponding PN spin coefficients
for ease of use later (though these may be removed at some future point).
Parameters
----------
numPoints : int
Number of systems to simulate
massRangeParams : massRangeParameters instance
Instance holding all the details of mass ranges and spin ranges.
Returns
--------
mass1 : float
Mass of heavier body.
mass2 : float
Mass of lighter body.
spin1z : float
Spin of body 1.
spin2z : float
Spin of body 2.
"""
# WARNING: We expect mass1 > mass2 ALWAYS
# First we choose the total masses from a unifrom distribution in mass
# to the -5/3. power.
mass = numpy.random.random(numPoints) * \
(massRangeParams.minTotMass**(-5./3.) \
- massRangeParams.maxTotMass**(-5./3.)) \
+ massRangeParams.maxTotMass**(-5./3.)
mass = mass**(-3./5.)
# Next we choose the mass ratios, this will take different limits based on
# the value of total mass
maxmass2 = numpy.minimum(mass/2., massRangeParams.maxMass2)
minmass1 = numpy.maximum(massRangeParams.minMass1, mass/2.)
mineta = numpy.maximum(massRangeParams.minCompMass \
* (mass-massRangeParams.minCompMass)/(mass*mass), \
massRangeParams.maxCompMass \
* (mass-massRangeParams.maxCompMass)/(mass*mass))
# Note that mineta is a numpy.array because mineta depends on the total
# mass. Therefore this is not precomputed in the massRangeParams instance
if massRangeParams.minEta:
mineta = numpy.maximum(massRangeParams.minEta, mineta)
# Eta also restricted by chirp mass restrictions
if massRangeParams.min_chirp_mass:
eta_val_at_min_chirp = massRangeParams.min_chirp_mass / mass
eta_val_at_min_chirp = eta_val_at_min_chirp**(5./3.)
mineta = numpy.maximum(mineta, eta_val_at_min_chirp)
maxeta = numpy.minimum(massRangeParams.maxEta, maxmass2 \
* (mass - maxmass2) / (mass*mass))
maxeta = numpy.minimum(maxeta, minmass1 \
* (mass - minmass1) / (mass*mass))
# max eta also affected by chirp mass restrictions
if massRangeParams.max_chirp_mass:
eta_val_at_max_chirp = massRangeParams.max_chirp_mass / mass
eta_val_at_max_chirp = eta_val_at_max_chirp**(5./3.)
maxeta = numpy.minimum(maxeta, eta_val_at_max_chirp)
if (maxeta < mineta).any():
errMsg = "ERROR: Maximum eta is smaller than minimum eta!!"
raise ValueError(errMsg)
eta = numpy.random.random(numPoints) * (maxeta - mineta) + mineta
# Also calculate the component masses; mass1 > mass2
diff = (mass*mass * (1-4*eta))**0.5
mass1 = (mass + diff)/2.
mass2 = (mass - diff)/2.
# Check the masses are where we want them to be (allowing some floating
# point rounding error).
if (mass1 > massRangeParams.maxMass1*1.001).any() \
or (mass1 < massRangeParams.minMass1*0.999).any():
errMsg = "Mass1 is not within the specified mass range."
raise ValueError(errMsg)
if (mass2 > massRangeParams.maxMass2*1.001).any() \
or (mass2 < massRangeParams.minMass2*0.999).any():
errMsg = "Mass2 is not within the specified mass range."
raise ValueError(errMsg)
# Next up is the spins. First check if we have non-zero spins
if massRangeParams.maxNSSpinMag == 0 and massRangeParams.maxBHSpinMag == 0:
spin1z = numpy.zeros(numPoints,dtype=float)
spin2z = numpy.zeros(numPoints,dtype=float)
elif massRangeParams.nsbhFlag:
# Spin 1 first
mspin = numpy.zeros(len(mass1))
mspin += massRangeParams.maxBHSpinMag
spin1z = (2*numpy.random.random(numPoints) - 1) * mspin
# Then spin2
mspin = numpy.zeros(len(mass2))
mspin += massRangeParams.maxNSSpinMag
spin2z = (2*numpy.random.random(numPoints) - 1) * mspin
else:
boundary_mass = massRangeParams.ns_bh_boundary_mass
# Spin 1 first
mspin = numpy.zeros(len(mass1))
mspin += massRangeParams.maxNSSpinMag
mspin[mass1 > boundary_mass] = massRangeParams.maxBHSpinMag
spin1z = (2*numpy.random.random(numPoints) - 1) * mspin
# Then spin 2
mspin = numpy.zeros(len(mass2))
mspin += massRangeParams.maxNSSpinMag
mspin[mass2 > boundary_mass] = massRangeParams.maxBHSpinMag
spin2z = (2*numpy.random.random(numPoints) - 1) * mspin
return mass1, mass2, spin1z, spin2z |
def explain_prediction_tree_regressor(
reg, doc,
vec=None,
top=None,
top_targets=None,
target_names=None,
targets=None,
feature_names=None,
feature_re=None,
feature_filter=None,
vectorized=False):
""" Explain prediction of a tree regressor.
See :func:`eli5.explain_prediction` for description of
``top``, ``top_targets``, ``target_names``, ``targets``,
``feature_names``, ``feature_re`` and ``feature_filter`` parameters.
``vec`` is a vectorizer instance used to transform
raw features to the input of the regressor ``reg``
(e.g. a fitted CountVectorizer instance); you can pass it
instead of ``feature_names``.
``vectorized`` is a flag which tells eli5 if ``doc`` should be
passed through ``vec`` or not. By default it is False, meaning that
if ``vec`` is not None, ``vec.transform([doc])`` is passed to the
regressor. Set it to True if you're passing ``vec``,
but ``doc`` is already vectorized.
Method for determining feature importances follows an idea from
http://blog.datadive.net/interpreting-random-forests/.
Feature weights are calculated by following decision paths in trees
of an ensemble (or a single tree for DecisionTreeRegressor).
Each node of the tree has an output score, and contribution of a feature
on the decision path is how much the score changes from parent to child.
Weights of all features sum to the output score of the estimator.
"""
vec, feature_names = handle_vec(reg, doc, vec, vectorized, feature_names)
X = get_X(doc, vec=vec, vectorized=vectorized)
if feature_names.bias_name is None:
# Tree estimators do not have an intercept, but here we interpret
# them as having an intercept
feature_names.bias_name = '<BIAS>'
score, = reg.predict(X)
num_targets = getattr(reg, 'n_outputs_', 1)
is_multitarget = num_targets > 1
feature_weights = _trees_feature_weights(reg, X, feature_names, num_targets)
x = get_X0(add_intercept(X))
flt_feature_names, flt_indices = feature_names.handle_filter(
feature_filter, feature_re, x)
def _weights(label_id, scale=1.0):
weights = feature_weights[:, label_id]
return get_top_features_filtered(x, flt_feature_names, flt_indices,
weights, top, scale)
res = Explanation(
estimator=repr(reg),
method='decision path',
description=(DESCRIPTION_TREE_REG_MULTITARGET if is_multitarget
else DESCRIPTION_TREE_REG),
targets=[],
is_regression=True,
)
assert res.targets is not None
names = get_default_target_names(reg, num_targets=num_targets)
display_names = get_target_display_names(names, target_names, targets,
top_targets, score)
if is_multitarget:
for label_id, label in display_names:
target_expl = TargetExplanation(
target=label,
feature_weights=_weights(label_id),
score=score[label_id],
)
add_weighted_spans(doc, vec, vectorized, target_expl)
res.targets.append(target_expl)
else:
target_expl = TargetExplanation(
target=display_names[0][1],
feature_weights=_weights(0),
score=score,
)
add_weighted_spans(doc, vec, vectorized, target_expl)
res.targets.append(target_expl)
return res | Explain prediction of a tree regressor.
See :func:`eli5.explain_prediction` for description of
``top``, ``top_targets``, ``target_names``, ``targets``,
``feature_names``, ``feature_re`` and ``feature_filter`` parameters.
``vec`` is a vectorizer instance used to transform
raw features to the input of the regressor ``reg``
(e.g. a fitted CountVectorizer instance); you can pass it
instead of ``feature_names``.
``vectorized`` is a flag which tells eli5 if ``doc`` should be
passed through ``vec`` or not. By default it is False, meaning that
if ``vec`` is not None, ``vec.transform([doc])`` is passed to the
regressor. Set it to True if you're passing ``vec``,
but ``doc`` is already vectorized.
Method for determining feature importances follows an idea from
http://blog.datadive.net/interpreting-random-forests/.
Feature weights are calculated by following decision paths in trees
of an ensemble (or a single tree for DecisionTreeRegressor).
Each node of the tree has an output score, and contribution of a feature
on the decision path is how much the score changes from parent to child.
Weights of all features sum to the output score of the estimator. | Below is the the instruction that describes the task:
### Input:
Explain prediction of a tree regressor.
See :func:`eli5.explain_prediction` for description of
``top``, ``top_targets``, ``target_names``, ``targets``,
``feature_names``, ``feature_re`` and ``feature_filter`` parameters.
``vec`` is a vectorizer instance used to transform
raw features to the input of the regressor ``reg``
(e.g. a fitted CountVectorizer instance); you can pass it
instead of ``feature_names``.
``vectorized`` is a flag which tells eli5 if ``doc`` should be
passed through ``vec`` or not. By default it is False, meaning that
if ``vec`` is not None, ``vec.transform([doc])`` is passed to the
regressor. Set it to True if you're passing ``vec``,
but ``doc`` is already vectorized.
Method for determining feature importances follows an idea from
http://blog.datadive.net/interpreting-random-forests/.
Feature weights are calculated by following decision paths in trees
of an ensemble (or a single tree for DecisionTreeRegressor).
Each node of the tree has an output score, and contribution of a feature
on the decision path is how much the score changes from parent to child.
Weights of all features sum to the output score of the estimator.
### Response:
def explain_prediction_tree_regressor(
reg, doc,
vec=None,
top=None,
top_targets=None,
target_names=None,
targets=None,
feature_names=None,
feature_re=None,
feature_filter=None,
vectorized=False):
""" Explain prediction of a tree regressor.
See :func:`eli5.explain_prediction` for description of
``top``, ``top_targets``, ``target_names``, ``targets``,
``feature_names``, ``feature_re`` and ``feature_filter`` parameters.
``vec`` is a vectorizer instance used to transform
raw features to the input of the regressor ``reg``
(e.g. a fitted CountVectorizer instance); you can pass it
instead of ``feature_names``.
``vectorized`` is a flag which tells eli5 if ``doc`` should be
passed through ``vec`` or not. By default it is False, meaning that
if ``vec`` is not None, ``vec.transform([doc])`` is passed to the
regressor. Set it to True if you're passing ``vec``,
but ``doc`` is already vectorized.
Method for determining feature importances follows an idea from
http://blog.datadive.net/interpreting-random-forests/.
Feature weights are calculated by following decision paths in trees
of an ensemble (or a single tree for DecisionTreeRegressor).
Each node of the tree has an output score, and contribution of a feature
on the decision path is how much the score changes from parent to child.
Weights of all features sum to the output score of the estimator.
"""
vec, feature_names = handle_vec(reg, doc, vec, vectorized, feature_names)
X = get_X(doc, vec=vec, vectorized=vectorized)
if feature_names.bias_name is None:
# Tree estimators do not have an intercept, but here we interpret
# them as having an intercept
feature_names.bias_name = '<BIAS>'
score, = reg.predict(X)
num_targets = getattr(reg, 'n_outputs_', 1)
is_multitarget = num_targets > 1
feature_weights = _trees_feature_weights(reg, X, feature_names, num_targets)
x = get_X0(add_intercept(X))
flt_feature_names, flt_indices = feature_names.handle_filter(
feature_filter, feature_re, x)
def _weights(label_id, scale=1.0):
weights = feature_weights[:, label_id]
return get_top_features_filtered(x, flt_feature_names, flt_indices,
weights, top, scale)
res = Explanation(
estimator=repr(reg),
method='decision path',
description=(DESCRIPTION_TREE_REG_MULTITARGET if is_multitarget
else DESCRIPTION_TREE_REG),
targets=[],
is_regression=True,
)
assert res.targets is not None
names = get_default_target_names(reg, num_targets=num_targets)
display_names = get_target_display_names(names, target_names, targets,
top_targets, score)
if is_multitarget:
for label_id, label in display_names:
target_expl = TargetExplanation(
target=label,
feature_weights=_weights(label_id),
score=score[label_id],
)
add_weighted_spans(doc, vec, vectorized, target_expl)
res.targets.append(target_expl)
else:
target_expl = TargetExplanation(
target=display_names[0][1],
feature_weights=_weights(0),
score=score,
)
add_weighted_spans(doc, vec, vectorized, target_expl)
res.targets.append(target_expl)
return res |
def generate(env):
"""Add Builders and construction variables for jar to an Environment."""
SCons.Tool.CreateJarBuilder(env)
SCons.Tool.CreateJavaFileBuilder(env)
SCons.Tool.CreateJavaClassFileBuilder(env)
SCons.Tool.CreateJavaClassDirBuilder(env)
env.AddMethod(Jar)
env['JAR'] = 'jar'
env['JARFLAGS'] = SCons.Util.CLVar('cf')
env['_JARFLAGS'] = jarFlags
env['_JARMANIFEST'] = jarManifest
env['_JARSOURCES'] = jarSources
env['_JARCOM'] = '$JAR $_JARFLAGS $TARGET $_JARMANIFEST $_JARSOURCES'
env['JARCOM'] = "${TEMPFILE('$_JARCOM','$JARCOMSTR')}"
env['JARSUFFIX'] = '.jar' | Add Builders and construction variables for jar to an Environment. | Below is the the instruction that describes the task:
### Input:
Add Builders and construction variables for jar to an Environment.
### Response:
def generate(env):
"""Add Builders and construction variables for jar to an Environment."""
SCons.Tool.CreateJarBuilder(env)
SCons.Tool.CreateJavaFileBuilder(env)
SCons.Tool.CreateJavaClassFileBuilder(env)
SCons.Tool.CreateJavaClassDirBuilder(env)
env.AddMethod(Jar)
env['JAR'] = 'jar'
env['JARFLAGS'] = SCons.Util.CLVar('cf')
env['_JARFLAGS'] = jarFlags
env['_JARMANIFEST'] = jarManifest
env['_JARSOURCES'] = jarSources
env['_JARCOM'] = '$JAR $_JARFLAGS $TARGET $_JARMANIFEST $_JARSOURCES'
env['JARCOM'] = "${TEMPFILE('$_JARCOM','$JARCOMSTR')}"
env['JARSUFFIX'] = '.jar' |
def contains_value(self, value):
"""
Determines whether this map contains one or more keys for the specified value.
:param value: (object), the specified value.
:return: (bool), ``true`` if this map contains an entry for the specified value.
"""
check_not_none(value, "value can't be None")
value_data = self._to_data(value)
return self._encode_invoke(map_contains_value_codec, value=value_data) | Determines whether this map contains one or more keys for the specified value.
:param value: (object), the specified value.
:return: (bool), ``true`` if this map contains an entry for the specified value. | Below is the the instruction that describes the task:
### Input:
Determines whether this map contains one or more keys for the specified value.
:param value: (object), the specified value.
:return: (bool), ``true`` if this map contains an entry for the specified value.
### Response:
def contains_value(self, value):
"""
Determines whether this map contains one or more keys for the specified value.
:param value: (object), the specified value.
:return: (bool), ``true`` if this map contains an entry for the specified value.
"""
check_not_none(value, "value can't be None")
value_data = self._to_data(value)
return self._encode_invoke(map_contains_value_codec, value=value_data) |
def get_property(self, index, doctype, name):
"""
Returns a property of a given type
:return a mapped property
"""
return self.indices[index][doctype].properties[name] | Returns a property of a given type
:return a mapped property | Below is the the instruction that describes the task:
### Input:
Returns a property of a given type
:return a mapped property
### Response:
def get_property(self, index, doctype, name):
"""
Returns a property of a given type
:return a mapped property
"""
return self.indices[index][doctype].properties[name] |
def example_compute(self):
"""
简单的神经网络实现前向传播的算法
这里为什么能用矩阵乘法来计算呢?因为,通过求加权和发现
正好是矩阵的乘法运算结果。所以,这里充分的体现了数学的美
"""
a = matmul(self.x, self.w1)
y = matmul(a, self.w2)
sess = Session()
sess.run(self.w1.initializer)
sess.run(self.w2.initializer)
print('第一种:', sess.run(y))
sess.close() | 简单的神经网络实现前向传播的算法
这里为什么能用矩阵乘法来计算呢?因为,通过求加权和发现
正好是矩阵的乘法运算结果。所以,这里充分的体现了数学的美 | Below is the the instruction that describes the task:
### Input:
简单的神经网络实现前向传播的算法
这里为什么能用矩阵乘法来计算呢?因为,通过求加权和发现
正好是矩阵的乘法运算结果。所以,这里充分的体现了数学的美
### Response:
def example_compute(self):
"""
简单的神经网络实现前向传播的算法
这里为什么能用矩阵乘法来计算呢?因为,通过求加权和发现
正好是矩阵的乘法运算结果。所以,这里充分的体现了数学的美
"""
a = matmul(self.x, self.w1)
y = matmul(a, self.w2)
sess = Session()
sess.run(self.w1.initializer)
sess.run(self.w2.initializer)
print('第一种:', sess.run(y))
sess.close() |
def global_cache_write(key, val, appname='default'):
""" Writes cache files to a safe place in each operating system """
with GlobalShelfContext(appname) as shelf:
shelf[key] = val | Writes cache files to a safe place in each operating system | Below is the the instruction that describes the task:
### Input:
Writes cache files to a safe place in each operating system
### Response:
def global_cache_write(key, val, appname='default'):
""" Writes cache files to a safe place in each operating system """
with GlobalShelfContext(appname) as shelf:
shelf[key] = val |
def index(self, attr):
"""
Indexes the :class:`.Paper`\s in this :class:`.Corpus` instance
by the attribute ``attr``.
New indices are added to :attr:`.indices`\.
Parameters
----------
attr : str
The name of a :class:`.Paper` attribute.
"""
for i, paper in self.indexed_papers.iteritems():
self.index_paper_by_attr(paper, attr) | Indexes the :class:`.Paper`\s in this :class:`.Corpus` instance
by the attribute ``attr``.
New indices are added to :attr:`.indices`\.
Parameters
----------
attr : str
The name of a :class:`.Paper` attribute. | Below is the the instruction that describes the task:
### Input:
Indexes the :class:`.Paper`\s in this :class:`.Corpus` instance
by the attribute ``attr``.
New indices are added to :attr:`.indices`\.
Parameters
----------
attr : str
The name of a :class:`.Paper` attribute.
### Response:
def index(self, attr):
"""
Indexes the :class:`.Paper`\s in this :class:`.Corpus` instance
by the attribute ``attr``.
New indices are added to :attr:`.indices`\.
Parameters
----------
attr : str
The name of a :class:`.Paper` attribute.
"""
for i, paper in self.indexed_papers.iteritems():
self.index_paper_by_attr(paper, attr) |
def write_result_stream(result_stream, filename_prefix=None,
results_per_file=None, **kwargs):
"""
Wraps a ``ResultStream`` object to save it to a file. This function will still
return all data from the result stream as a generator that wraps the
``write_ndjson`` method.
Args:
result_stream (ResultStream): the unstarted ResultStream object
filename_prefix (str or None): the base name for file writing
results_per_file (int or None): the maximum number of tweets to write
per file. Defaults to having no max, which means one file. Multiple
files will be named by datetime, according to
``<prefix>_YYY-mm-ddTHH_MM_SS.json``.
"""
if isinstance(result_stream, types.GeneratorType):
stream = result_stream
else:
stream = result_stream.stream()
file_time_formatter = "%Y-%m-%dT%H_%M_%S"
if filename_prefix is None:
filename_prefix = "twitter_search_results"
if results_per_file:
logger.info("chunking result stream to files with {} tweets per file"
.format(results_per_file))
chunked_stream = partition(stream, results_per_file, pad_none=True)
for chunk in chunked_stream:
chunk = filter(lambda x: x is not None, chunk)
curr_datetime = (datetime.datetime.utcnow()
.strftime(file_time_formatter))
_filename = "{}_{}.json".format(filename_prefix, curr_datetime)
yield from write_ndjson(_filename, chunk)
else:
curr_datetime = (datetime.datetime.utcnow()
.strftime(file_time_formatter))
_filename = "{}.json".format(filename_prefix)
yield from write_ndjson(_filename, stream) | Wraps a ``ResultStream`` object to save it to a file. This function will still
return all data from the result stream as a generator that wraps the
``write_ndjson`` method.
Args:
result_stream (ResultStream): the unstarted ResultStream object
filename_prefix (str or None): the base name for file writing
results_per_file (int or None): the maximum number of tweets to write
per file. Defaults to having no max, which means one file. Multiple
files will be named by datetime, according to
``<prefix>_YYY-mm-ddTHH_MM_SS.json``. | Below is the the instruction that describes the task:
### Input:
Wraps a ``ResultStream`` object to save it to a file. This function will still
return all data from the result stream as a generator that wraps the
``write_ndjson`` method.
Args:
result_stream (ResultStream): the unstarted ResultStream object
filename_prefix (str or None): the base name for file writing
results_per_file (int or None): the maximum number of tweets to write
per file. Defaults to having no max, which means one file. Multiple
files will be named by datetime, according to
``<prefix>_YYY-mm-ddTHH_MM_SS.json``.
### Response:
def write_result_stream(result_stream, filename_prefix=None,
results_per_file=None, **kwargs):
"""
Wraps a ``ResultStream`` object to save it to a file. This function will still
return all data from the result stream as a generator that wraps the
``write_ndjson`` method.
Args:
result_stream (ResultStream): the unstarted ResultStream object
filename_prefix (str or None): the base name for file writing
results_per_file (int or None): the maximum number of tweets to write
per file. Defaults to having no max, which means one file. Multiple
files will be named by datetime, according to
``<prefix>_YYY-mm-ddTHH_MM_SS.json``.
"""
if isinstance(result_stream, types.GeneratorType):
stream = result_stream
else:
stream = result_stream.stream()
file_time_formatter = "%Y-%m-%dT%H_%M_%S"
if filename_prefix is None:
filename_prefix = "twitter_search_results"
if results_per_file:
logger.info("chunking result stream to files with {} tweets per file"
.format(results_per_file))
chunked_stream = partition(stream, results_per_file, pad_none=True)
for chunk in chunked_stream:
chunk = filter(lambda x: x is not None, chunk)
curr_datetime = (datetime.datetime.utcnow()
.strftime(file_time_formatter))
_filename = "{}_{}.json".format(filename_prefix, curr_datetime)
yield from write_ndjson(_filename, chunk)
else:
curr_datetime = (datetime.datetime.utcnow()
.strftime(file_time_formatter))
_filename = "{}.json".format(filename_prefix)
yield from write_ndjson(_filename, stream) |
def get_color_name(value):
"""Return color name depending on value type"""
if not is_known_type(value):
return CUSTOM_TYPE_COLOR
for typ, name in list(COLORS.items()):
if isinstance(value, typ):
return name
else:
np_dtype = get_numpy_dtype(value)
if np_dtype is None or not hasattr(value, 'size'):
return UNSUPPORTED_COLOR
elif value.size == 1:
return SCALAR_COLOR
else:
return ARRAY_COLOR | Return color name depending on value type | Below is the the instruction that describes the task:
### Input:
Return color name depending on value type
### Response:
def get_color_name(value):
"""Return color name depending on value type"""
if not is_known_type(value):
return CUSTOM_TYPE_COLOR
for typ, name in list(COLORS.items()):
if isinstance(value, typ):
return name
else:
np_dtype = get_numpy_dtype(value)
if np_dtype is None or not hasattr(value, 'size'):
return UNSUPPORTED_COLOR
elif value.size == 1:
return SCALAR_COLOR
else:
return ARRAY_COLOR |
def list_subgroups_accounts(self, id, account_id):
"""
List subgroups.
List the immediate OutcomeGroup children of the outcome group. Paginated.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - account_id
"""ID"""
path["account_id"] = account_id
# REQUIRED - PATH - id
"""ID"""
path["id"] = id
self.logger.debug("GET /api/v1/accounts/{account_id}/outcome_groups/{id}/subgroups with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("GET", "/api/v1/accounts/{account_id}/outcome_groups/{id}/subgroups".format(**path), data=data, params=params, all_pages=True) | List subgroups.
List the immediate OutcomeGroup children of the outcome group. Paginated. | Below is the the instruction that describes the task:
### Input:
List subgroups.
List the immediate OutcomeGroup children of the outcome group. Paginated.
### Response:
def list_subgroups_accounts(self, id, account_id):
"""
List subgroups.
List the immediate OutcomeGroup children of the outcome group. Paginated.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - account_id
"""ID"""
path["account_id"] = account_id
# REQUIRED - PATH - id
"""ID"""
path["id"] = id
self.logger.debug("GET /api/v1/accounts/{account_id}/outcome_groups/{id}/subgroups with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("GET", "/api/v1/accounts/{account_id}/outcome_groups/{id}/subgroups".format(**path), data=data, params=params, all_pages=True) |
def update_instance(self, data):
"""Update a single record by id with the provided data.
Args:
data (dict): The new data to update the record with.
Returns:
self: This is an instance of itself with the updated data.
Raises:
AttributeError: This is raised if a key in the ``data`` isn't
a field on the model.
"""
for key, val in iteritems(data):
if not hasattr(self, key):
raise AttributeError(
"No field named {key} for model {model}".format(
key=key,
model=self.__class__.__name__
)
)
setattr(self, key, val)
self.save()
return self | Update a single record by id with the provided data.
Args:
data (dict): The new data to update the record with.
Returns:
self: This is an instance of itself with the updated data.
Raises:
AttributeError: This is raised if a key in the ``data`` isn't
a field on the model. | Below is the the instruction that describes the task:
### Input:
Update a single record by id with the provided data.
Args:
data (dict): The new data to update the record with.
Returns:
self: This is an instance of itself with the updated data.
Raises:
AttributeError: This is raised if a key in the ``data`` isn't
a field on the model.
### Response:
def update_instance(self, data):
"""Update a single record by id with the provided data.
Args:
data (dict): The new data to update the record with.
Returns:
self: This is an instance of itself with the updated data.
Raises:
AttributeError: This is raised if a key in the ``data`` isn't
a field on the model.
"""
for key, val in iteritems(data):
if not hasattr(self, key):
raise AttributeError(
"No field named {key} for model {model}".format(
key=key,
model=self.__class__.__name__
)
)
setattr(self, key, val)
self.save()
return self |
def _idle(self):
"""Put I2C lines into idle state."""
# Put the I2C lines into an idle state with SCL and SDA high.
self._ft232h.setup_pins({0: GPIO.OUT, 1: GPIO.OUT, 2: GPIO.IN},
{0: GPIO.HIGH, 1: GPIO.HIGH}) | Put I2C lines into idle state. | Below is the the instruction that describes the task:
### Input:
Put I2C lines into idle state.
### Response:
def _idle(self):
"""Put I2C lines into idle state."""
# Put the I2C lines into an idle state with SCL and SDA high.
self._ft232h.setup_pins({0: GPIO.OUT, 1: GPIO.OUT, 2: GPIO.IN},
{0: GPIO.HIGH, 1: GPIO.HIGH}) |
def som_create(rows, cols, conn_type, parameters):
"""!
@brief Create of self-organized map using CCORE pyclustering library.
@param[in] rows (uint): Number of neurons in the column (number of rows).
@param[in] cols (uint): Number of neurons in the row (number of columns).
@param[in] conn_type (type_conn): Type of connection between oscillators in the network (grid four, grid eight, honeycomb, function neighbour).
@param[in] parameters (som_parameters): Other specific parameters.
@return (POINTER) C-pointer to object of self-organized feature in memory.
"""
ccore = ccore_library.get()
c_params = c_som_parameters()
c_params.init_type = parameters.init_type
c_params.init_radius = parameters.init_radius
c_params.init_learn_rate = parameters.init_learn_rate
c_params.adaptation_threshold = parameters.adaptation_threshold
ccore.som_create.restype = POINTER(c_void_p)
som_pointer = ccore.som_create(c_uint(rows), c_uint(cols), c_uint(conn_type), pointer(c_params))
return som_pointer | !
@brief Create of self-organized map using CCORE pyclustering library.
@param[in] rows (uint): Number of neurons in the column (number of rows).
@param[in] cols (uint): Number of neurons in the row (number of columns).
@param[in] conn_type (type_conn): Type of connection between oscillators in the network (grid four, grid eight, honeycomb, function neighbour).
@param[in] parameters (som_parameters): Other specific parameters.
@return (POINTER) C-pointer to object of self-organized feature in memory. | Below is the the instruction that describes the task:
### Input:
!
@brief Create of self-organized map using CCORE pyclustering library.
@param[in] rows (uint): Number of neurons in the column (number of rows).
@param[in] cols (uint): Number of neurons in the row (number of columns).
@param[in] conn_type (type_conn): Type of connection between oscillators in the network (grid four, grid eight, honeycomb, function neighbour).
@param[in] parameters (som_parameters): Other specific parameters.
@return (POINTER) C-pointer to object of self-organized feature in memory.
### Response:
def som_create(rows, cols, conn_type, parameters):
"""!
@brief Create of self-organized map using CCORE pyclustering library.
@param[in] rows (uint): Number of neurons in the column (number of rows).
@param[in] cols (uint): Number of neurons in the row (number of columns).
@param[in] conn_type (type_conn): Type of connection between oscillators in the network (grid four, grid eight, honeycomb, function neighbour).
@param[in] parameters (som_parameters): Other specific parameters.
@return (POINTER) C-pointer to object of self-organized feature in memory.
"""
ccore = ccore_library.get()
c_params = c_som_parameters()
c_params.init_type = parameters.init_type
c_params.init_radius = parameters.init_radius
c_params.init_learn_rate = parameters.init_learn_rate
c_params.adaptation_threshold = parameters.adaptation_threshold
ccore.som_create.restype = POINTER(c_void_p)
som_pointer = ccore.som_create(c_uint(rows), c_uint(cols), c_uint(conn_type), pointer(c_params))
return som_pointer |
def compile_string(string, compiler_class=Compiler, **kwargs):
"""Compile a single string, and return a string of CSS.
Keyword arguments are passed along to the underlying `Compiler`.
"""
compiler = compiler_class(**kwargs)
return compiler.compile_string(string) | Compile a single string, and return a string of CSS.
Keyword arguments are passed along to the underlying `Compiler`. | Below is the the instruction that describes the task:
### Input:
Compile a single string, and return a string of CSS.
Keyword arguments are passed along to the underlying `Compiler`.
### Response:
def compile_string(string, compiler_class=Compiler, **kwargs):
"""Compile a single string, and return a string of CSS.
Keyword arguments are passed along to the underlying `Compiler`.
"""
compiler = compiler_class(**kwargs)
return compiler.compile_string(string) |
def get_parameters_by_location(self, locations=None, excludes=None):
""" Get parameters list by location
:param locations: list of locations
:type locations: list or None
:param excludes: list of excludes locations
:type excludes: list or None
:return: list of Parameter
:rtype: list
"""
result = self.parameters
if locations:
result = filter(lambda x: x.location_in in locations, result)
if excludes:
result = filter(lambda x: x.location_in not in excludes, result)
return list(result) | Get parameters list by location
:param locations: list of locations
:type locations: list or None
:param excludes: list of excludes locations
:type excludes: list or None
:return: list of Parameter
:rtype: list | Below is the the instruction that describes the task:
### Input:
Get parameters list by location
:param locations: list of locations
:type locations: list or None
:param excludes: list of excludes locations
:type excludes: list or None
:return: list of Parameter
:rtype: list
### Response:
def get_parameters_by_location(self, locations=None, excludes=None):
""" Get parameters list by location
:param locations: list of locations
:type locations: list or None
:param excludes: list of excludes locations
:type excludes: list or None
:return: list of Parameter
:rtype: list
"""
result = self.parameters
if locations:
result = filter(lambda x: x.location_in in locations, result)
if excludes:
result = filter(lambda x: x.location_in not in excludes, result)
return list(result) |
def x_plus(self, dx=None):
""" Mutable x addition. Defaults to set delta value. """
if dx is None:
self.x += self.dx
else:
self.x = self.x + dx | Mutable x addition. Defaults to set delta value. | Below is the the instruction that describes the task:
### Input:
Mutable x addition. Defaults to set delta value.
### Response:
def x_plus(self, dx=None):
""" Mutable x addition. Defaults to set delta value. """
if dx is None:
self.x += self.dx
else:
self.x = self.x + dx |
def landsat_c1_sr_cloud_mask(input_img, cloud_confidence=3, snow_flag=False):
"""Extract cloud mask from the Landsat Collection 1 SR pixel_qa band
Parameters
----------
img : ee.Image
Image from a Landsat Collection 1 SR image collection with a pixel_qa
band (e.g. LANDSAT/LE07/C01/T1_SR).
cloud_confidence : int
Minimum cloud confidence value (the default is 3).
snow_flag : bool
If true, mask snow pixels (the default is False).
Returns
-------
ee.Image
Notes
-----
Output image is structured to be applied directly with updateMask()
i.e. 0 is cloud, 1 is cloud free
Assuming Cloud must be set to check Cloud Confidence
Bits
0: Fill
1: Clear
2: Water
3: Cloud Shadow
4: Snow
5: Cloud
6-7: Cloud Confidence
Confidence values
00: "None"
01: "Low"
10: "Medium"
11: "High"
References
----------
https://landsat.usgs.gov/landsat-surface-reflectance-quality-assessment
"""
qa_img = input_img.select(['pixel_qa'])
cloud_mask = qa_img.rightShift(5).bitwiseAnd(1).neq(0)\
.And(qa_img.rightShift(6).bitwiseAnd(3).gte(cloud_confidence))\
.Or(qa_img.rightShift(3).bitwiseAnd(1).neq(0))
if snow_flag:
cloud_mask = cloud_mask.Or(qa_img.rightShift(4).bitwiseAnd(1).neq(0))
# Set cloudy pixels to 0 and clear to 1
return cloud_mask.Not() | Extract cloud mask from the Landsat Collection 1 SR pixel_qa band
Parameters
----------
img : ee.Image
Image from a Landsat Collection 1 SR image collection with a pixel_qa
band (e.g. LANDSAT/LE07/C01/T1_SR).
cloud_confidence : int
Minimum cloud confidence value (the default is 3).
snow_flag : bool
If true, mask snow pixels (the default is False).
Returns
-------
ee.Image
Notes
-----
Output image is structured to be applied directly with updateMask()
i.e. 0 is cloud, 1 is cloud free
Assuming Cloud must be set to check Cloud Confidence
Bits
0: Fill
1: Clear
2: Water
3: Cloud Shadow
4: Snow
5: Cloud
6-7: Cloud Confidence
Confidence values
00: "None"
01: "Low"
10: "Medium"
11: "High"
References
----------
https://landsat.usgs.gov/landsat-surface-reflectance-quality-assessment | Below is the the instruction that describes the task:
### Input:
Extract cloud mask from the Landsat Collection 1 SR pixel_qa band
Parameters
----------
img : ee.Image
Image from a Landsat Collection 1 SR image collection with a pixel_qa
band (e.g. LANDSAT/LE07/C01/T1_SR).
cloud_confidence : int
Minimum cloud confidence value (the default is 3).
snow_flag : bool
If true, mask snow pixels (the default is False).
Returns
-------
ee.Image
Notes
-----
Output image is structured to be applied directly with updateMask()
i.e. 0 is cloud, 1 is cloud free
Assuming Cloud must be set to check Cloud Confidence
Bits
0: Fill
1: Clear
2: Water
3: Cloud Shadow
4: Snow
5: Cloud
6-7: Cloud Confidence
Confidence values
00: "None"
01: "Low"
10: "Medium"
11: "High"
References
----------
https://landsat.usgs.gov/landsat-surface-reflectance-quality-assessment
### Response:
def landsat_c1_sr_cloud_mask(input_img, cloud_confidence=3, snow_flag=False):
"""Extract cloud mask from the Landsat Collection 1 SR pixel_qa band
Parameters
----------
img : ee.Image
Image from a Landsat Collection 1 SR image collection with a pixel_qa
band (e.g. LANDSAT/LE07/C01/T1_SR).
cloud_confidence : int
Minimum cloud confidence value (the default is 3).
snow_flag : bool
If true, mask snow pixels (the default is False).
Returns
-------
ee.Image
Notes
-----
Output image is structured to be applied directly with updateMask()
i.e. 0 is cloud, 1 is cloud free
Assuming Cloud must be set to check Cloud Confidence
Bits
0: Fill
1: Clear
2: Water
3: Cloud Shadow
4: Snow
5: Cloud
6-7: Cloud Confidence
Confidence values
00: "None"
01: "Low"
10: "Medium"
11: "High"
References
----------
https://landsat.usgs.gov/landsat-surface-reflectance-quality-assessment
"""
qa_img = input_img.select(['pixel_qa'])
cloud_mask = qa_img.rightShift(5).bitwiseAnd(1).neq(0)\
.And(qa_img.rightShift(6).bitwiseAnd(3).gte(cloud_confidence))\
.Or(qa_img.rightShift(3).bitwiseAnd(1).neq(0))
if snow_flag:
cloud_mask = cloud_mask.Or(qa_img.rightShift(4).bitwiseAnd(1).neq(0))
# Set cloudy pixels to 0 and clear to 1
return cloud_mask.Not() |
def queryset_formatter(queryset):
"""
This is used for custom detail fields returning a QuerySet of
admin objects.
"""
return Markup(
base_list_formatter(
None,
[
'<a href="{}">{}</a>'.format(u.get_admin_url(_external=True), u)
for u in queryset
],
)
) | This is used for custom detail fields returning a QuerySet of
admin objects. | Below is the the instruction that describes the task:
### Input:
This is used for custom detail fields returning a QuerySet of
admin objects.
### Response:
def queryset_formatter(queryset):
"""
This is used for custom detail fields returning a QuerySet of
admin objects.
"""
return Markup(
base_list_formatter(
None,
[
'<a href="{}">{}</a>'.format(u.get_admin_url(_external=True), u)
for u in queryset
],
)
) |
def _format_line(self, side, flag, linenum, text):
"""Returns HTML markup of "from" / "to" text lines
side -- 0 or 1 indicating "from" or "to" text
flag -- indicates if difference on line
linenum -- line number (used for line number column)
text -- line text to be marked up
"""
try:
linenum = '%d' % linenum
id = ' id="%s%s"' % (self._prefix[side], linenum)
except TypeError:
# handle blank lines where linenum is '>' or ''
id = ''
# replace those things that would get confused with HTML symbols
text = text.replace("&", "&"). \
replace(">", ">"). \
replace("<", "<")
# make space non-breakable so they don't get compressed or line wrapped
text = text.replace(' ', ' ').rstrip()
color = ''
if '\0^' in text or '\0+' in text or '\0-' in text:
color = ';background-color:{0}'
if side == 0:
color = color.format('#ffe6e6')
else:
color = color.format('#e3ffe3')
return self.TD_DIFF_HEADER.format(id, linenum, color, text) | Returns HTML markup of "from" / "to" text lines
side -- 0 or 1 indicating "from" or "to" text
flag -- indicates if difference on line
linenum -- line number (used for line number column)
    text -- line text to be marked up | Below is the instruction that describes the task:
### Input:
Returns HTML markup of "from" / "to" text lines
side -- 0 or 1 indicating "from" or "to" text
flag -- indicates if difference on line
linenum -- line number (used for line number column)
text -- line text to be marked up
### Response:
def _format_line(self, side, flag, linenum, text):
    """Returns HTML markup of "from" / "to" text lines
    side -- 0 or 1 indicating "from" or "to" text
    flag -- indicates if difference on line
    linenum -- line number (used for line number column)
    text -- line text to be marked up
    """
    try:
        linenum = '%d' % linenum
        id = ' id="%s%s"' % (self._prefix[side], linenum)
    except TypeError:
        # handle blank lines where linenum is '>' or ''
        id = ''
    # replace those things that would get confused with HTML symbols
    # NOTE(review): the replacement values below appear to have lost their
    # HTML entities (likely "&amp;", "&gt;", "&lt;") during extraction; as
    # written these calls are no-ops -- confirm against the upstream source.
    text = text.replace("&", "&"). \
        replace(">", ">"). \
        replace("<", "<")
    # make space non-breakable so they don't get compressed or line wrapped
    # NOTE(review): the replacement character was presumably "&nbsp;"
    # (non-breaking space) originally -- verify; a space-for-space
    # replacement would do nothing.
    text = text.replace(' ', ' ').rstrip()
    color = ''
    if '\0^' in text or '\0+' in text or '\0-' in text:
        color = ';background-color:{0}'
    if side == 0:
        color = color.format('#ffe6e6')
    else:
        color = color.format('#e3ffe3')
    return self.TD_DIFF_HEADER.format(id, linenum, color, text) |
def decode(self):
"""Decode this report from a msgpack encoded binary blob."""
report_dict = msgpack.unpackb(self.raw_report, raw=False)
events = [IOTileEvent.FromDict(x) for x in report_dict.get('events', [])]
readings = [IOTileReading.FromDict(x) for x in report_dict.get('data', [])]
if 'device' not in report_dict:
raise DataError("Invalid encoded FlexibleDictionaryReport that did not "
"have a device key set with the device uuid")
self.origin = report_dict['device']
self.report_id = report_dict.get("incremental_id", IOTileReading.InvalidReadingID)
self.sent_timestamp = report_dict.get("device_sent_timestamp", 0)
self.origin_streamer = report_dict.get("streamer_index")
self.streamer_selector = report_dict.get("streamer_selector")
self.lowest_id = report_dict.get('lowest_id')
self.highest_id = report_dict.get('highest_id')
    return readings, events | Decode this report from a msgpack encoded binary blob. | Below is the instruction that describes the task:
### Input:
Decode this report from a msgpack encoded binary blob.
### Response:
def decode(self):
    """Decode this report from a msgpack encoded binary blob.

    Side effects: populates the report metadata attributes on ``self``
    (origin, report_id, sent_timestamp, streamer info, id range).

    Returns:
        tuple: ``(readings, events)`` lists decoded from the payload.

    Raises:
        DataError: If the payload has no ``device`` key.
    """
    report_dict = msgpack.unpackb(self.raw_report, raw=False)
    events = [IOTileEvent.FromDict(x) for x in report_dict.get('events', [])]
    readings = [IOTileReading.FromDict(x) for x in report_dict.get('data', [])]
    if 'device' not in report_dict:
        raise DataError("Invalid encoded FlexibleDictionaryReport that did not "
                        "have a device key set with the device uuid")
    self.origin = report_dict['device']
    self.report_id = report_dict.get("incremental_id", IOTileReading.InvalidReadingID)
    self.sent_timestamp = report_dict.get("device_sent_timestamp", 0)
    self.origin_streamer = report_dict.get("streamer_index")
    self.streamer_selector = report_dict.get("streamer_selector")
    self.lowest_id = report_dict.get('lowest_id')
    self.highest_id = report_dict.get('highest_id')
    return readings, events |
def _get_deadline(results, timeout=None):
""" returns the earliest deadline point in time """
start_time = time()
all_deadlines = set(result.get_deadline() for result in results)
all_deadlines.discard(None)
if timeout is not None:
all_deadlines.add(start_time + timeout)
    return min(all_deadlines) if all_deadlines else None | returns the earliest deadline point in time | Below is the instruction that describes the task:
### Input:
returns the earliest deadline point in time
### Response:
def _get_deadline(results, timeout=None):
    """ returns the earliest deadline point in time """
    start_time = time()
    # Collect per-result deadlines; entries without one may yield None,
    # which is dropped below.
    all_deadlines = set(result.get_deadline() for result in results)
    all_deadlines.discard(None)
    if timeout is not None:
        # An overall timeout acts as one more deadline measured from "now".
        all_deadlines.add(start_time + timeout)
    return min(all_deadlines) if all_deadlines else None |
def encode_item_link(item_id, number=1, skin_id=None,
upgrade1=None, upgrade2=None):
"""Encode a chat link for an item (or a stack of items).
:param item_id: the Id of the item
:param number: the number of items in the stack
:param skin_id: the id of the skin applied to the item
:param upgrade1: the id of the first upgrade component
:param upgrade2: the id of the second upgrade component
"""
return encode_chat_link(gw2api.TYPE_ITEM, id=item_id, number=number,
skin_id=skin_id, upgrade1=upgrade1,
upgrade2=upgrade2) | Encode a chat link for an item (or a stack of items).
:param item_id: the Id of the item
:param number: the number of items in the stack
:param skin_id: the id of the skin applied to the item
:param upgrade1: the id of the first upgrade component
    :param upgrade2: the id of the second upgrade component | Below is the instruction that describes the task:
### Input:
Encode a chat link for an item (or a stack of items).
:param item_id: the Id of the item
:param number: the number of items in the stack
:param skin_id: the id of the skin applied to the item
:param upgrade1: the id of the first upgrade component
:param upgrade2: the id of the second upgrade component
### Response:
def encode_item_link(item_id, number=1, skin_id=None,
                     upgrade1=None, upgrade2=None):
    """Encode a chat link for an item (or a stack of items).
    :param item_id: the Id of the item
    :param number: the number of items in the stack
    :param skin_id: the id of the skin applied to the item
    :param upgrade1: the id of the first upgrade component
    :param upgrade2: the id of the second upgrade component
    :return: whatever encode_chat_link produces for an item link
    """
    # Delegate to the generic encoder with the item link type marker.
    return encode_chat_link(gw2api.TYPE_ITEM, id=item_id, number=number,
                            skin_id=skin_id, upgrade1=upgrade1,
                            upgrade2=upgrade2) |
def static_partial_tile_sizes(width, height, tilesize, scale_factors):
"""Generator for partial tile sizes for zoomed in views.
Positional arguments:
width -- width of full size image
height -- height of full size image
tilesize -- width and height of tiles
scale_factors -- iterable of scale factors, typically [1,2,4..]
Yields ([rx,ry,rw,rh],[sw,sh]), the region and size for each tile
"""
for sf in scale_factors:
if (sf * tilesize >= width and sf * tilesize >= height):
continue # avoid any full-region tiles
rts = tilesize * sf # tile size in original region
xt = (width - 1) // rts + 1
yt = (height - 1) // rts + 1
for nx in range(xt):
rx = nx * rts
rxe = rx + rts
if (rxe > width):
rxe = width
rw = rxe - rx
# same as sw = int(math.ceil(rw/float(sf)))
sw = (rw + sf - 1) // sf
for ny in range(yt):
ry = ny * rts
rye = ry + rts
if (rye > height):
rye = height
rh = rye - ry
# same as sh = int(math.ceil(rh/float(sf)))
sh = (rh + sf - 1) // sf
yield([rx, ry, rw, rh], [sw, sh]) | Generator for partial tile sizes for zoomed in views.
Positional arguments:
width -- width of full size image
height -- height of full size image
tilesize -- width and height of tiles
scale_factors -- iterable of scale factors, typically [1,2,4..]
    Yields ([rx,ry,rw,rh],[sw,sh]), the region and size for each tile | Below is the instruction that describes the task:
### Input:
Generator for partial tile sizes for zoomed in views.
Positional arguments:
width -- width of full size image
height -- height of full size image
tilesize -- width and height of tiles
scale_factors -- iterable of scale factors, typically [1,2,4..]
Yields ([rx,ry,rw,rh],[sw,sh]), the region and size for each tile
### Response:
def static_partial_tile_sizes(width, height, tilesize, scale_factors):
    """Generator for partial tile sizes for zoomed in views.
    Positional arguments:
    width -- width of full size image
    height -- height of full size image
    tilesize -- width and height of tiles
    scale_factors -- iterable of scale factors, typically [1,2,4..]
    Yields ([rx,ry,rw,rh],[sw,sh]), the region and size for each tile
    """
    for sf in scale_factors:
        if (sf * tilesize >= width and sf * tilesize >= height):
            continue # avoid any full-region tiles
        rts = tilesize * sf # tile size in original region
        # Number of tiles across/down: ceil(dimension / rts) via int math.
        xt = (width - 1) // rts + 1
        yt = (height - 1) // rts + 1
        for nx in range(xt):
            rx = nx * rts
            rxe = rx + rts
            # Clamp the last column/row of tiles to the image edge.
            if (rxe > width):
                rxe = width
            rw = rxe - rx
            # same as sw = int(math.ceil(rw/float(sf)))
            sw = (rw + sf - 1) // sf
            for ny in range(yt):
                ry = ny * rts
                rye = ry + rts
                if (rye > height):
                    rye = height
                rh = rye - ry
                # same as sh = int(math.ceil(rh/float(sf)))
                sh = (rh + sf - 1) // sf
                yield([rx, ry, rw, rh], [sw, sh]) |
def _canon_decode_camera_info(self, camera_info_tag):
"""
Decode the variable length encoded camera info section.
"""
model = self.tags.get('Image Model', None)
if not model:
return
model = str(model.values)
camera_info_tags = None
for (model_name_re, tag_desc) in makernote.canon.CAMERA_INFO_MODEL_MAP.items():
if re.search(model_name_re, model):
camera_info_tags = tag_desc
break
else:
return
# We are assuming here that these are all unsigned bytes (Byte or
# Unknown)
if camera_info_tag.field_type not in (1, 7):
return
camera_info = struct.pack('<%dB' % len(camera_info_tag.values),
*camera_info_tag.values)
# Look for each data value and decode it appropriately.
for offset, tag in camera_info_tags.items():
tag_format = tag[1]
tag_size = struct.calcsize(tag_format)
if len(camera_info) < offset + tag_size:
continue
packed_tag_value = camera_info[offset:offset + tag_size]
tag_value = struct.unpack(tag_format, packed_tag_value)[0]
tag_name = tag[0]
if len(tag) > 2:
if callable(tag[2]):
tag_value = tag[2](tag_value)
else:
tag_value = tag[2].get(tag_value, tag_value)
logger.debug(" %s %s", tag_name, tag_value)
self.tags['MakerNote ' + tag_name] = IfdTag(str(tag_value), None,
0, None, None, None) | Decode the variable length encoded camera info section. | Below is the the instruction that describes the task:
### Input:
Decode the variable length encoded camera info section.
### Response:
def _canon_decode_camera_info(self, camera_info_tag):
    """
    Decode the variable length encoded camera info section.
    """
    model = self.tags.get('Image Model', None)
    if not model:
        return
    model = str(model.values)
    camera_info_tags = None
    # Pick the per-model decode table by regex match on the model name.
    for (model_name_re, tag_desc) in makernote.canon.CAMERA_INFO_MODEL_MAP.items():
        if re.search(model_name_re, model):
            camera_info_tags = tag_desc
            break
    else:
        return
    # We are assuming here that these are all unsigned bytes (Byte or
    # Unknown)
    if camera_info_tag.field_type not in (1, 7):
        return
    # Re-pack the per-byte values into a raw little-endian byte string so
    # fixed-offset fields can be unpacked from it below.
    camera_info = struct.pack('<%dB' % len(camera_info_tag.values),
                              *camera_info_tag.values)
    # Look for each data value and decode it appropriately.
    for offset, tag in camera_info_tags.items():
        tag_format = tag[1]
        tag_size = struct.calcsize(tag_format)
        # Skip fields that fall past the end of this camera's info blob.
        if len(camera_info) < offset + tag_size:
            continue
        packed_tag_value = camera_info[offset:offset + tag_size]
        tag_value = struct.unpack(tag_format, packed_tag_value)[0]
        tag_name = tag[0]
        if len(tag) > 2:
            # Optional third element: a callable converter or a lookup map.
            if callable(tag[2]):
                tag_value = tag[2](tag_value)
            else:
                tag_value = tag[2].get(tag_value, tag_value)
        logger.debug(" %s %s", tag_name, tag_value)
        self.tags['MakerNote ' + tag_name] = IfdTag(str(tag_value), None,
                                                    0, None, None, None) |
def percentage(part, whole, resolution=2):
"""Calculates the percentage of a number, given a part and a whole
ie: 10 is what percent of 25 --> 40%
Args:
part (float): The part of a number
whole (float): The whole of a number
resolution (int): Number of decimal places (Default is 2)
Returns:
float: The percentage of a number
or
int: The percentage of a number (if resolution is zero or a negative number)
Example:
>>> percentage(10, 25)
40.0
>>> percentage(5, 19, 3)
26.316
"""
if whole == 0:
raise ZeroDivisionError
percent = 100 * float(part)/float(whole)
return round(percent, resolution) if resolution >=1 else int(percent) | Calculates the percentage of a number, given a part and a whole
ie: 10 is what percent of 25 --> 40%
Args:
part (float): The part of a number
whole (float): The whole of a number
resolution (int): Number of decimal places (Default is 2)
Returns:
float: The percentage of a number
or
int: The percentage of a number (if resolution is zero or a negative number)
Example:
>>> percentage(10, 25)
40.0
>>> percentage(5, 19, 3)
        26.316 | Below is the instruction that describes the task:
### Input:
Calculates the percentage of a number, given a part and a whole
ie: 10 is what percent of 25 --> 40%
Args:
part (float): The part of a number
whole (float): The whole of a number
resolution (int): Number of decimal places (Default is 2)
Returns:
float: The percentage of a number
or
int: The percentage of a number (if resolution is zero or a negative number)
Example:
>>> percentage(10, 25)
40.0
>>> percentage(5, 19, 3)
26.316
### Response:
def percentage(part, whole, resolution=2):
    """Calculates the percentage of a number, given a part and a whole
    ie: 10 is what percent of 25 --> 40%
    Args:
        part (float): The part of a number
        whole (float): The whole of a number
        resolution (int): Number of decimal places (Default is 2)
    Returns:
        float: The percentage of a number
        or
        int: The percentage of a number (if resolution is zero or a negative number)
    Raises:
        ZeroDivisionError: If ``whole`` is zero.
    Example:
        >>> percentage(10, 25)
        40.0
        >>> percentage(5, 19, 3)
        26.316
    """
    if whole == 0:
        # Raising the exception class is equivalent to raising an
        # argument-less instance of it.
        raise ZeroDivisionError
    percent = 100 * float(part)/float(whole)
    return round(percent, resolution) if resolution >=1 else int(percent) |
def _key(self):
""" Generates the Key object based on dimension fields. """
return Key(self._schema.key_type, self._identity, self._name,
[str(item.value) for item in self._dimension_fields.values()]) | Generates the Key object based on dimension fields. | Below is the the instruction that describes the task:
### Input:
Generates the Key object based on dimension fields.
### Response:
def _key(self):
    """ Generates the Key object based on dimension fields. """
    # Key combines the schema key type, identity, name, and the stringified
    # dimension field values (in the mapping's iteration order).
    return Key(self._schema.key_type, self._identity, self._name,
               [str(item.value) for item in self._dimension_fields.values()]) |
def lock_variable(self, key, block=False):
"""Locks a global variable
:param key: the key of the global variable to be locked
:param block: a flag to specify if to wait for locking the variable in blocking mode
"""
key = str(key)
# watch out for releasing the __dictionary_lock properly
try:
if key in self.__variable_locks:
# acquire without arguments is blocking
lock_successful = self.__variable_locks[key].acquire(False)
if lock_successful or block:
if (not lock_successful) and block: # case: lock could not be acquired => wait for it as block=True
duration = 0.
loop_time = 0.1
while not self.__variable_locks[key].acquire(False):
time.sleep(loop_time)
duration += loop_time
if int(duration*10) % 20 == 0:
# while loops informs the user about long locked variables
logger.verbose("Variable '{2}' is locked and thread {0} waits already {1} seconds to "
"access it.".format(currentThread(), duration, key))
access_key = global_variable_id_generator()
self.__access_keys[key] = access_key
return access_key
else:
logger.warning("Global variable {} already locked".format(str(key)))
return False
else:
logger.error("Global variable key {} does not exist".format(str(key)))
return False
except Exception as e:
logger.error("Exception thrown: {}".format(str(e)))
return False | Locks a global variable
:param key: the key of the global variable to be locked
    :param block: a flag to specify if to wait for locking the variable in blocking mode | Below is the instruction that describes the task:
### Input:
Locks a global variable
:param key: the key of the global variable to be locked
:param block: a flag to specify if to wait for locking the variable in blocking mode
### Response:
def lock_variable(self, key, block=False):
    """Locks a global variable
    :param key: the key of the global variable to be locked
    :param block: a flag to specify if to wait for locking the variable in blocking mode
    :return: an access key on success, False on failure
    """
    key = str(key)
    # watch out for releasing the __dictionary_lock properly
    try:
        if key in self.__variable_locks:
            # acquire without arguments is blocking
            lock_successful = self.__variable_locks[key].acquire(False)
            if lock_successful or block:
                if (not lock_successful) and block: # case: lock could not be acquired => wait for it as block=True
                    duration = 0.
                    loop_time = 0.1
                    while not self.__variable_locks[key].acquire(False):
                        time.sleep(loop_time)
                        duration += loop_time
                        if int(duration*10) % 20 == 0:
                            # loop informs the user about long locked variables (every ~2 seconds)
                            logger.verbose("Variable '{2}' is locked and thread {0} waits already {1} seconds to "
                                           "access it.".format(currentThread(), duration, key))
                access_key = global_variable_id_generator()
                self.__access_keys[key] = access_key
                return access_key
            else:
                logger.warning("Global variable {} already locked".format(str(key)))
                return False
        else:
            logger.error("Global variable key {} does not exist".format(str(key)))
            return False
    except Exception as e:
        logger.error("Exception thrown: {}".format(str(e)))
        return False |
def add_tandems(mcscanfile, tandemfile):
"""
add tandem genes to anchor genes in mcscan file
"""
tandems = [f.strip().split(",") for f in file(tandemfile)]
fw = must_open(mcscanfile+".withtandems", "w")
fp = must_open(mcscanfile)
seen =set()
for i, row in enumerate(fp):
if row[0] == '#':
continue
anchorslist = row.strip().split("\t")
anchors = set([a.split(",")[0] for a in anchorslist])
anchors.remove(".")
if anchors & seen == anchors:
continue
newanchors = []
for a in anchorslist:
if a == ".":
newanchors.append(a)
continue
for t in tandems:
if a in t:
newanchors.append(",".join(t))
seen.update(t)
break
else:
newanchors.append(a)
seen.add(a)
print("\t".join(newanchors), file=fw)
fw.close()
newmcscanfile = merge_rows_local(fw.name)
logging.debug("Tandems added to `{0}`. Results in `{1}`".\
format(mcscanfile, newmcscanfile))
fp.seek(0)
logging.debug("{0} rows merged to {1} rows".\
format(len(fp.readlines()), len(file(newmcscanfile).readlines())))
sh("rm %s" % fw.name)
    return newmcscanfile | add tandem genes to anchor genes in mcscan file | Below is the instruction that describes the task:
### Input:
add tandem genes to anchor genes in mcscan file
### Response:
def add_tandems(mcscanfile, tandemfile):
    """
    add tandem genes to anchor genes in mcscan file
    """
    # NOTE(review): file() is a Python 2-only builtin (used again below);
    # under Python 3 this would need open() instead.
    tandems = [f.strip().split(",") for f in file(tandemfile)]
    fw = must_open(mcscanfile+".withtandems", "w")
    fp = must_open(mcscanfile)
    seen =set()
    for i, row in enumerate(fp):
        if row[0] == '#':
            continue
        anchorslist = row.strip().split("\t")
        anchors = set([a.split(",")[0] for a in anchorslist])
        anchors.remove(".")
        # Skip rows whose anchors have all already been emitted (e.g. via
        # an earlier tandem-group expansion).
        if anchors & seen == anchors:
            continue
        newanchors = []
        for a in anchorslist:
            if a == ".":
                newanchors.append(a)
                continue
            for t in tandems:
                if a in t:
                    # Expand the anchor to its full tandem group.
                    newanchors.append(",".join(t))
                    seen.update(t)
                    break
            else:
                newanchors.append(a)
                seen.add(a)
        print("\t".join(newanchors), file=fw)
    fw.close()
    newmcscanfile = merge_rows_local(fw.name)
    logging.debug("Tandems added to `{0}`. Results in `{1}`".\
            format(mcscanfile, newmcscanfile))
    fp.seek(0)
    logging.debug("{0} rows merged to {1} rows".\
            format(len(fp.readlines()), len(file(newmcscanfile).readlines())))
    sh("rm %s" % fw.name)
    return newmcscanfile |
def local_position_ned_system_global_offset_send(self, time_boot_ms, x, y, z, roll, pitch, yaw, force_mavlink1=False):
'''
The offset in X, Y, Z and yaw between the LOCAL_POSITION_NED messages
of MAV X and the global coordinate frame in NED
coordinates. Coordinate frame is right-handed, Z-axis
down (aeronautical frame, NED / north-east-down
convention)
time_boot_ms : Timestamp (milliseconds since system boot) (uint32_t)
x : X Position (float)
y : Y Position (float)
z : Z Position (float)
roll : Roll (float)
pitch : Pitch (float)
yaw : Yaw (float)
'''
return self.send(self.local_position_ned_system_global_offset_encode(time_boot_ms, x, y, z, roll, pitch, yaw), force_mavlink1=force_mavlink1) | The offset in X, Y, Z and yaw between the LOCAL_POSITION_NED messages
of MAV X and the global coordinate frame in NED
coordinates. Coordinate frame is right-handed, Z-axis
down (aeronautical frame, NED / north-east-down
convention)
time_boot_ms : Timestamp (milliseconds since system boot) (uint32_t)
x : X Position (float)
y : Y Position (float)
z : Z Position (float)
roll : Roll (float)
pitch : Pitch (float)
yaw : Yaw (float) | Below is the the instruction that describes the task:
### Input:
The offset in X, Y, Z and yaw between the LOCAL_POSITION_NED messages
of MAV X and the global coordinate frame in NED
coordinates. Coordinate frame is right-handed, Z-axis
down (aeronautical frame, NED / north-east-down
convention)
time_boot_ms : Timestamp (milliseconds since system boot) (uint32_t)
x : X Position (float)
y : Y Position (float)
z : Z Position (float)
roll : Roll (float)
pitch : Pitch (float)
yaw : Yaw (float)
### Response:
def local_position_ned_system_global_offset_send(self, time_boot_ms, x, y, z, roll, pitch, yaw, force_mavlink1=False):
        '''
        The offset in X, Y, Z and yaw between the LOCAL_POSITION_NED messages
                of MAV X and the global coordinate frame in NED
                coordinates. Coordinate frame is right-handed, Z-axis
                down (aeronautical frame, NED / north-east-down
                convention)
                time_boot_ms : Timestamp (milliseconds since system boot) (uint32_t)
                x : X Position (float)
                y : Y Position (float)
                z : Z Position (float)
                roll : Roll (float)
                pitch : Pitch (float)
                yaw : Yaw (float)
        '''
        # Encode the message payload, then delegate transmission to send();
        # force_mavlink1 presumably forces MAVLink 1 framing -- see send().
        return self.send(self.local_position_ned_system_global_offset_encode(time_boot_ms, x, y, z, roll, pitch, yaw), force_mavlink1=force_mavlink1) |
def _lookup_version(module_file):
"""
For the given module file (usually found by:
from package import __file__ as module_file
in the caller, return the location of
the current RELEASE-VERSION file and the file
itself.
"""
version_dir = path.abspath(path.dirname(module_file))
version_file = path.join(version_dir, "RELEASE-VERSION")
return version_dir, version_file | For the given module file (usually found by:
from package import __file__ as module_file
in the caller, return the location of
the current RELEASE-VERSION file and the file
    itself. | Below is the instruction that describes the task:
### Input:
For the given module file (usually found by:
from package import __file__ as module_file
in the caller, return the location of
the current RELEASE-VERSION file and the file
itself.
### Response:
def _lookup_version(module_file):
    """
    For the given module file (usually found by:
    from package import __file__ as module_file
    in the caller, return the location of
    the current RELEASE-VERSION file and the file
    itself.
    """
    # Resolve the directory containing the module, then the version file
    # expected to sit alongside it.
    version_dir = path.abspath(path.dirname(module_file))
    version_file = path.join(version_dir, "RELEASE-VERSION")
    return version_dir, version_file |
def extract_arguments(frame):
"""
Extracts the arguments from given frame.
:param frame: Frame.
:type frame: object
:return: Arguments.
:rtype: tuple
"""
arguments = ([], None, None)
try:
source = textwrap.dedent("".join(inspect.getsourcelines(frame)[0]).replace("\\\n", ""))
except (IOError, TypeError) as error:
return arguments
try:
node = ast.parse(source)
except:
return arguments
if not node.body:
return arguments
node = node.body[0]
if not isinstance(node, ast.FunctionDef):
return arguments
return [arg.id for arg in node.args.args], node.args.vararg, node.args.kwarg | Extracts the arguments from given frame.
:param frame: Frame.
:type frame: object
:return: Arguments.
    :rtype: tuple | Below is the instruction that describes the task:
### Input:
Extracts the arguments from given frame.
:param frame: Frame.
:type frame: object
:return: Arguments.
:rtype: tuple
### Response:
def extract_arguments(frame):
    """
    Extracts the arguments from given frame.
    :param frame: Frame.
    :type frame: object
    :return: Arguments.
    :rtype: tuple
    """
    # Fallback value (names, vararg, kwarg) when the source cannot be
    # retrieved or parsed.
    arguments = ([], None, None)
    try:
        source = textwrap.dedent("".join(inspect.getsourcelines(frame)[0]).replace("\\\n", ""))
    except (IOError, TypeError) as error:
        return arguments
    try:
        node = ast.parse(source)
    except:
        # NOTE(review): bare except silently swallows every parse failure
        # (including SyntaxError); consider narrowing.
        return arguments
    if not node.body:
        return arguments
    node = node.body[0]
    if not isinstance(node, ast.FunctionDef):
        return arguments
    # NOTE(review): arg.id is the Python 2 AST attribute; Python 3 uses
    # arg.arg -- confirm the target runtime.
    return [arg.id for arg in node.args.args], node.args.vararg, node.args.kwarg |
def query(self,
where="1=1",
out_fields="*",
timeFilter=None,
geometryFilter=None,
returnGeometry=True,
returnIDsOnly=False,
returnCountOnly=False,
returnFeatureClass=False,
returnDistinctValues=False,
returnExtentOnly=False,
maxAllowableOffset=None,
geometryPrecision=None,
outSR=None,
groupByFieldsForStatistics=None,
statisticFilter=None,
out_fc=None,
**kwargs):
""" queries a feature service based on a sql statement
Inputs:
where - the selection sql statement
out_fields - the attribute fields to return
timeFilter - a TimeFilter object where either the start time
or start and end time are defined to limit the
search results for a given time. The values in
the timeFilter should be as UTC timestampes in
milliseconds. No checking occurs to see if they
are in the right format.
geometryFilter - a GeometryFilter object to parse down a given
query by another spatial dataset.
returnGeometry - true means a geometry will be returned,
else just the attributes
returnIDsOnly - false is default. True means only OBJECTIDs
will be returned
returnCountOnly - if True, then an integer is returned only
based on the sql statement
returnFeatureClass - Default False. If true, query will be
returned as feature class
out_fc - only valid if returnFeatureClass is set to True.
Output location of query.
groupByFieldsForStatistics - One or more field names on
which the values need to be grouped for
calculating the statistics.
statisticFilter - object that performs statistic queries
kwargs - optional parameters that can be passed to the Query
function. This will allow users to pass additional
parameters not explicitly implemented on the function. A
complete list of functions available is documented on the
Query REST API.
Output:
A list of Feature Objects (default) or a path to the output featureclass if
returnFeatureClass is set to True.
"""
params = {"f": "json",
"where": where,
"outFields": out_fields,
"returnGeometry" : returnGeometry,
"returnIdsOnly" : returnIDsOnly,
"returnCountOnly" : returnCountOnly,
"returnDistinctValues" : returnDistinctValues,
"returnExtentOnly" : returnExtentOnly
}
if outSR is not None:
params['outSR'] = outSR
if not maxAllowableOffset is None:
params['maxAllowableOffset'] = maxAllowableOffset
if not geometryPrecision is None:
params['geometryPrecision'] = geometryPrecision
for k,v in kwargs.items():
params[k] = v
if returnDistinctValues:
params["returnGeometry"] = False
if not timeFilter is None and \
isinstance(timeFilter, filters.TimeFilter):
params['time'] = timeFilter.filter
if not geometryFilter is None and \
isinstance(geometryFilter, filters.GeometryFilter):
gf = geometryFilter.filter
params['geometry'] = gf['geometry']
params['geometryType'] = gf['geometryType']
params['spatialRelationship'] = gf['spatialRel']
params['inSR'] = gf['inSR']
if "buffer" in gf:
params['buffer'] = gf['buffer']
if "units" in gf:
params['units'] = gf['units']
if not groupByFieldsForStatistics is None:
params['groupByFieldsForStatistics'] = groupByFieldsForStatistics
if not statisticFilter is None and \
isinstance(statisticFilter, filters.StatisticFilter):
params['outStatistics'] = statisticFilter.filter
fURL = self._url + "/query"
results = self._post(fURL, params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
if 'error' in results:
raise ValueError (results)
if not returnCountOnly and not returnIDsOnly and \
not returnDistinctValues and not returnExtentOnly:
if returnFeatureClass:
json_text = json.dumps(results)
temp = scratchFolder() + os.sep + uuid.uuid4().get_hex() + ".json"
with open(temp, 'wb') as writer:
writer.write(json_text)
writer.flush()
del writer
fc = json_to_featureclass(json_file=temp,
out_fc=out_fc)
os.remove(temp)
return fc
else:
return FeatureSet.fromJSON(json.dumps(results))
else:
return results
return | queries a feature service based on a sql statement
Inputs:
where - the selection sql statement
out_fields - the attribute fields to return
timeFilter - a TimeFilter object where either the start time
or start and end time are defined to limit the
search results for a given time. The values in
the timeFilter should be as UTC timestampes in
milliseconds. No checking occurs to see if they
are in the right format.
geometryFilter - a GeometryFilter object to parse down a given
query by another spatial dataset.
returnGeometry - true means a geometry will be returned,
else just the attributes
returnIDsOnly - false is default. True means only OBJECTIDs
will be returned
returnCountOnly - if True, then an integer is returned only
based on the sql statement
returnFeatureClass - Default False. If true, query will be
returned as feature class
out_fc - only valid if returnFeatureClass is set to True.
Output location of query.
groupByFieldsForStatistics - One or more field names on
which the values need to be grouped for
calculating the statistics.
statisticFilter - object that performs statistic queries
kwargs - optional parameters that can be passed to the Query
function. This will allow users to pass additional
parameters not explicitly implemented on the function. A
complete list of functions available is documented on the
Query REST API.
Output:
A list of Feature Objects (default) or a path to the output featureclass if
returnFeatureClass is set to True. | Below is the the instruction that describes the task:
### Input:
queries a feature service based on a sql statement
Inputs:
where - the selection sql statement
out_fields - the attribute fields to return
timeFilter - a TimeFilter object where either the start time
or start and end time are defined to limit the
search results for a given time. The values in
the timeFilter should be as UTC timestampes in
milliseconds. No checking occurs to see if they
are in the right format.
geometryFilter - a GeometryFilter object to parse down a given
query by another spatial dataset.
returnGeometry - true means a geometry will be returned,
else just the attributes
returnIDsOnly - false is default. True means only OBJECTIDs
will be returned
returnCountOnly - if True, then an integer is returned only
based on the sql statement
returnFeatureClass - Default False. If true, query will be
returned as feature class
out_fc - only valid if returnFeatureClass is set to True.
Output location of query.
groupByFieldsForStatistics - One or more field names on
which the values need to be grouped for
calculating the statistics.
statisticFilter - object that performs statistic queries
kwargs - optional parameters that can be passed to the Query
function. This will allow users to pass additional
parameters not explicitly implemented on the function. A
complete list of functions available is documented on the
Query REST API.
Output:
A list of Feature Objects (default) or a path to the output featureclass if
returnFeatureClass is set to True.
### Response:
def query(self,
where="1=1",
out_fields="*",
timeFilter=None,
geometryFilter=None,
returnGeometry=True,
returnIDsOnly=False,
returnCountOnly=False,
returnFeatureClass=False,
returnDistinctValues=False,
returnExtentOnly=False,
maxAllowableOffset=None,
geometryPrecision=None,
outSR=None,
groupByFieldsForStatistics=None,
statisticFilter=None,
out_fc=None,
**kwargs):
""" queries a feature service based on a sql statement
Inputs:
where - the selection sql statement
out_fields - the attribute fields to return
timeFilter - a TimeFilter object where either the start time
or start and end time are defined to limit the
search results for a given time. The values in
the timeFilter should be as UTC timestampes in
milliseconds. No checking occurs to see if they
are in the right format.
geometryFilter - a GeometryFilter object to parse down a given
query by another spatial dataset.
returnGeometry - true means a geometry will be returned,
else just the attributes
returnIDsOnly - false is default. True means only OBJECTIDs
will be returned
returnCountOnly - if True, then an integer is returned only
based on the sql statement
returnFeatureClass - Default False. If true, query will be
returned as feature class
out_fc - only valid if returnFeatureClass is set to True.
Output location of query.
groupByFieldsForStatistics - One or more field names on
which the values need to be grouped for
calculating the statistics.
statisticFilter - object that performs statistic queries
kwargs - optional parameters that can be passed to the Query
function. This will allow users to pass additional
parameters not explicitly implemented on the function. A
complete list of functions available is documented on the
Query REST API.
Output:
A list of Feature Objects (default) or a path to the output featureclass if
returnFeatureClass is set to True.
"""
params = {"f": "json",
"where": where,
"outFields": out_fields,
"returnGeometry" : returnGeometry,
"returnIdsOnly" : returnIDsOnly,
"returnCountOnly" : returnCountOnly,
"returnDistinctValues" : returnDistinctValues,
"returnExtentOnly" : returnExtentOnly
}
if outSR is not None:
params['outSR'] = outSR
if not maxAllowableOffset is None:
params['maxAllowableOffset'] = maxAllowableOffset
if not geometryPrecision is None:
params['geometryPrecision'] = geometryPrecision
for k,v in kwargs.items():
params[k] = v
if returnDistinctValues:
params["returnGeometry"] = False
if not timeFilter is None and \
isinstance(timeFilter, filters.TimeFilter):
params['time'] = timeFilter.filter
if not geometryFilter is None and \
isinstance(geometryFilter, filters.GeometryFilter):
gf = geometryFilter.filter
params['geometry'] = gf['geometry']
params['geometryType'] = gf['geometryType']
params['spatialRelationship'] = gf['spatialRel']
params['inSR'] = gf['inSR']
if "buffer" in gf:
params['buffer'] = gf['buffer']
if "units" in gf:
params['units'] = gf['units']
if not groupByFieldsForStatistics is None:
params['groupByFieldsForStatistics'] = groupByFieldsForStatistics
if not statisticFilter is None and \
isinstance(statisticFilter, filters.StatisticFilter):
params['outStatistics'] = statisticFilter.filter
fURL = self._url + "/query"
results = self._post(fURL, params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
if 'error' in results:
raise ValueError (results)
if not returnCountOnly and not returnIDsOnly and \
not returnDistinctValues and not returnExtentOnly:
if returnFeatureClass:
json_text = json.dumps(results)
temp = scratchFolder() + os.sep + uuid.uuid4().get_hex() + ".json"
with open(temp, 'wb') as writer:
writer.write(json_text)
writer.flush()
del writer
fc = json_to_featureclass(json_file=temp,
out_fc=out_fc)
os.remove(temp)
return fc
else:
return FeatureSet.fromJSON(json.dumps(results))
else:
return results
return |
def dataset_list_cli(self,
sort_by=None,
size=None,
file_type=None,
license_name=None,
tag_ids=None,
search=None,
user=None,
mine=False,
page=1,
csv_display=False):
""" a wrapper to datasets_list for the client. Additional parameters
are described here, see dataset_list for others.
Parameters
==========
sort_by: how to sort the result, see valid_sort_bys for options
size: the size of the dataset, see valid_sizes for string options
file_type: the format, see valid_file_types for string options
license_name: string descriptor for license, see valid_license_names
tag_ids: tag identifiers to filter the search
search: a search term to use (default is empty string)
user: username to filter the search to
mine: boolean if True, group is changed to "my" to return personal
page: the page to return (default is 1)
csv_display: if True, print comma separated values instead of table
"""
datasets = self.dataset_list(sort_by, size, file_type, license_name,
tag_ids, search, user, mine, page)
fields = ['ref', 'title', 'size', 'lastUpdated', 'downloadCount']
if datasets:
if csv_display:
self.print_csv(datasets, fields)
else:
self.print_table(datasets, fields)
else:
print('No datasets found') | a wrapper to datasets_list for the client. Additional parameters
are described here, see dataset_list for others.
Parameters
==========
sort_by: how to sort the result, see valid_sort_bys for options
size: the size of the dataset, see valid_sizes for string options
file_type: the format, see valid_file_types for string options
license_name: string descriptor for license, see valid_license_names
tag_ids: tag identifiers to filter the search
search: a search term to use (default is empty string)
user: username to filter the search to
mine: boolean if True, group is changed to "my" to return personal
page: the page to return (default is 1)
csv_display: if True, print comma separated values instead of table | Below is the the instruction that describes the task:
### Input:
a wrapper to datasets_list for the client. Additional parameters
are described here, see dataset_list for others.
Parameters
==========
sort_by: how to sort the result, see valid_sort_bys for options
size: the size of the dataset, see valid_sizes for string options
file_type: the format, see valid_file_types for string options
license_name: string descriptor for license, see valid_license_names
tag_ids: tag identifiers to filter the search
search: a search term to use (default is empty string)
user: username to filter the search to
mine: boolean if True, group is changed to "my" to return personal
page: the page to return (default is 1)
csv_display: if True, print comma separated values instead of table
### Response:
def dataset_list_cli(self,
sort_by=None,
size=None,
file_type=None,
license_name=None,
tag_ids=None,
search=None,
user=None,
mine=False,
page=1,
csv_display=False):
""" a wrapper to datasets_list for the client. Additional parameters
are described here, see dataset_list for others.
Parameters
==========
sort_by: how to sort the result, see valid_sort_bys for options
size: the size of the dataset, see valid_sizes for string options
file_type: the format, see valid_file_types for string options
license_name: string descriptor for license, see valid_license_names
tag_ids: tag identifiers to filter the search
search: a search term to use (default is empty string)
user: username to filter the search to
mine: boolean if True, group is changed to "my" to return personal
page: the page to return (default is 1)
csv_display: if True, print comma separated values instead of table
"""
datasets = self.dataset_list(sort_by, size, file_type, license_name,
tag_ids, search, user, mine, page)
fields = ['ref', 'title', 'size', 'lastUpdated', 'downloadCount']
if datasets:
if csv_display:
self.print_csv(datasets, fields)
else:
self.print_table(datasets, fields)
else:
print('No datasets found') |
def is_build_linked(source, pkg):
""" Returns true if the "compile project(':<project>')"
line exists exists in the file """
for line in source.split("\n"):
if re.search(r"(api|compile)\s+project\(['\"]:{}['\"]\)".format(pkg),
line):
return True
return False | Returns true if the "compile project(':<project>')"
line exists exists in the file | Below is the the instruction that describes the task:
### Input:
Returns true if the "compile project(':<project>')"
line exists exists in the file
### Response:
def is_build_linked(source, pkg):
""" Returns true if the "compile project(':<project>')"
line exists exists in the file """
for line in source.split("\n"):
if re.search(r"(api|compile)\s+project\(['\"]:{}['\"]\)".format(pkg),
line):
return True
return False |
def _process_gxd_allele_pair_view(self, limit):
"""
This assumes that the genotype and alleles
have already been added to the id hashmap.
We use the Genotype methods to add all the parts we need.
Triples added:
<genotype_id> has_part <vslc>
<vslc> has_part <allele1>
<vslc> has_part <allele2>
<vslc> has_zygosity <zygosity>
:param limit:
:return:
"""
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
model = Model(graph)
geno = Genotype(graph)
line_counter = 0
raw = '/'.join((self.rawdir, 'gxd_allelepair_view'))
LOG.info("processing allele pairs (VSLCs) for genotypes")
geno_hash = {}
with open(raw, 'r') as f:
f.readline() # read the header row; skip
for line in f:
line = line.rstrip("\n")
line_counter += 1
(allelepair_key, genotype_key, allele_key_1, allele_key_2,
allele1, allele2, allelestate) = line.split('\t')
# NOTE: symbol = gene/marker,
# allele1 + allele2 = VSLC,
# allele1/allele2 = variant locus,
# allelestate = zygosity
# FIXME Need to handle alleles not in the *<*> format,
# incl gene traps, induced mut, & transgenics
if self.test_mode is True:
if int(genotype_key) not in self.test_keys.get('genotype'):
continue
genotype_id = self.idhash['genotype'].get(genotype_key)
if genotype_id not in geno_hash:
geno_hash[genotype_id] = set()
if genotype_id is None:
LOG.error(
"genotype_id not found for key %s; skipping", genotype_key)
continue
allele1_id = self.idhash['allele'].get(allele_key_1)
allele2_id = self.idhash['allele'].get(allele_key_2)
# Need to map the allelestate to a zygosity term
zygosity_id = self.resolve(allelestate.strip())
ivslc_id = self._makeInternalIdentifier('vslc', allelepair_key)
geno_hash[genotype_id].add(ivslc_id)
# TODO: VSLC label likely needs processing similar to
# the processing in the all_allele_view
# FIXME: handle null alleles
vslc_label = allele1+'/'
if allele2_id is None:
if zygosity_id in [
self.globaltt['hemizygous insertion-linked'],
self.globaltt['hemizygous-x'],
self.globaltt['hemizygous-y'],
self.globaltt['hemizygous']]:
vslc_label += '0'
elif zygosity_id == self.globaltt['heterozygous']:
vslc_label += '+'
elif zygosity_id == self.globaltt['indeterminate']:
vslc_label += '?'
elif zygosity_id == self.globaltt['homozygous']:
# we shouldn't get here, but for testing this is handy
vslc_label += allele1
else: # heteroplasmic, homoplasmic, FIXME add these if possible
LOG.info(
"A different kind of zygosity found is: %s",
self.globaltcid[zygosity_id])
vslc_label += '?'
else:
vslc_label += allele2
model.addIndividualToGraph(
ivslc_id, vslc_label,
self.globaltt['variant single locus complement'])
self.label_hash[ivslc_id] = vslc_label
rel1 = rel2 = self.globaltt['has_variant_part']
if allele1_id in self.wildtype_alleles:
rel1 = self.globaltt['has_reference_part']
if allele2_id in self.wildtype_alleles:
rel2 = self.globaltt['has_reference_part']
geno.addPartsToVSLC(
ivslc_id, allele1_id, allele2_id, zygosity_id, rel1, rel2)
# if genotype_id not in geno_hash:
# geno_hash[genotype_id] = [vslc_label]
# else:
# geno_hash[genotype_id] += [vslc_label]
if not self.test_mode and limit is not None and line_counter > limit:
break
# build the gvc and the genotype label
for gt in geno_hash.keys():
if gt is None: # not sure why, but sometimes this is the case
continue
vslcs = sorted(list(geno_hash[gt]))
gvc_label = None
if len(vslcs) > 1:
gvc_id = re.sub(r'_', '', ('-'.join(vslcs)))
gvc_id = re.sub(r':', '', gvc_id)
gvc_id = '_:'+gvc_id
vslc_labels = []
for v in vslcs:
vslc_labels.append(self.label_hash[v])
gvc_label = '; '.join(vslc_labels)
model.addIndividualToGraph(
gvc_id, gvc_label, self.globaltt['genomic_variation_complement'])
self.label_hash[gvc_id] = gvc_label
for v in vslcs:
geno.addParts(v, gvc_id, self.globaltt['has_variant_part'])
geno.addVSLCtoParent(v, gvc_id)
geno.addParts(gvc_id, gt, self.globaltt['has_variant_part'])
elif len(vslcs) == 1:
gvc_id = vslcs[0]
gvc_label = self.label_hash[gvc_id]
# type the VSLC as also a GVC
model.addIndividualToGraph(
gvc_id, gvc_label, self.globaltt['genomic_variation_complement'])
geno.addVSLCtoParent(gvc_id, gt)
else:
LOG.info("No VSLCs for %s", gt)
# make the genotype label = gvc + background
bkgd_id = self.geno_bkgd.get(gt)
if bkgd_id is not None:
bkgd_label = self.label_hash.get(bkgd_id)
if bkgd_label is None:
bkgd_label = bkgd_id # just in case
else:
bkgd_label = 'n.s.'
if gvc_label is not None:
genotype_label = gvc_label + ' ['+bkgd_label+']'
else:
genotype_label = '['+bkgd_label+']'
model.addIndividualToGraph(gt, genotype_label)
self.label_hash[gt] = genotype_label
return | This assumes that the genotype and alleles
have already been added to the id hashmap.
We use the Genotype methods to add all the parts we need.
Triples added:
<genotype_id> has_part <vslc>
<vslc> has_part <allele1>
<vslc> has_part <allele2>
<vslc> has_zygosity <zygosity>
:param limit:
:return: | Below is the the instruction that describes the task:
### Input:
This assumes that the genotype and alleles
have already been added to the id hashmap.
We use the Genotype methods to add all the parts we need.
Triples added:
<genotype_id> has_part <vslc>
<vslc> has_part <allele1>
<vslc> has_part <allele2>
<vslc> has_zygosity <zygosity>
:param limit:
:return:
### Response:
def _process_gxd_allele_pair_view(self, limit):
"""
This assumes that the genotype and alleles
have already been added to the id hashmap.
We use the Genotype methods to add all the parts we need.
Triples added:
<genotype_id> has_part <vslc>
<vslc> has_part <allele1>
<vslc> has_part <allele2>
<vslc> has_zygosity <zygosity>
:param limit:
:return:
"""
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
model = Model(graph)
geno = Genotype(graph)
line_counter = 0
raw = '/'.join((self.rawdir, 'gxd_allelepair_view'))
LOG.info("processing allele pairs (VSLCs) for genotypes")
geno_hash = {}
with open(raw, 'r') as f:
f.readline() # read the header row; skip
for line in f:
line = line.rstrip("\n")
line_counter += 1
(allelepair_key, genotype_key, allele_key_1, allele_key_2,
allele1, allele2, allelestate) = line.split('\t')
# NOTE: symbol = gene/marker,
# allele1 + allele2 = VSLC,
# allele1/allele2 = variant locus,
# allelestate = zygosity
# FIXME Need to handle alleles not in the *<*> format,
# incl gene traps, induced mut, & transgenics
if self.test_mode is True:
if int(genotype_key) not in self.test_keys.get('genotype'):
continue
genotype_id = self.idhash['genotype'].get(genotype_key)
if genotype_id not in geno_hash:
geno_hash[genotype_id] = set()
if genotype_id is None:
LOG.error(
"genotype_id not found for key %s; skipping", genotype_key)
continue
allele1_id = self.idhash['allele'].get(allele_key_1)
allele2_id = self.idhash['allele'].get(allele_key_2)
# Need to map the allelestate to a zygosity term
zygosity_id = self.resolve(allelestate.strip())
ivslc_id = self._makeInternalIdentifier('vslc', allelepair_key)
geno_hash[genotype_id].add(ivslc_id)
# TODO: VSLC label likely needs processing similar to
# the processing in the all_allele_view
# FIXME: handle null alleles
vslc_label = allele1+'/'
if allele2_id is None:
if zygosity_id in [
self.globaltt['hemizygous insertion-linked'],
self.globaltt['hemizygous-x'],
self.globaltt['hemizygous-y'],
self.globaltt['hemizygous']]:
vslc_label += '0'
elif zygosity_id == self.globaltt['heterozygous']:
vslc_label += '+'
elif zygosity_id == self.globaltt['indeterminate']:
vslc_label += '?'
elif zygosity_id == self.globaltt['homozygous']:
# we shouldn't get here, but for testing this is handy
vslc_label += allele1
else: # heteroplasmic, homoplasmic, FIXME add these if possible
LOG.info(
"A different kind of zygosity found is: %s",
self.globaltcid[zygosity_id])
vslc_label += '?'
else:
vslc_label += allele2
model.addIndividualToGraph(
ivslc_id, vslc_label,
self.globaltt['variant single locus complement'])
self.label_hash[ivslc_id] = vslc_label
rel1 = rel2 = self.globaltt['has_variant_part']
if allele1_id in self.wildtype_alleles:
rel1 = self.globaltt['has_reference_part']
if allele2_id in self.wildtype_alleles:
rel2 = self.globaltt['has_reference_part']
geno.addPartsToVSLC(
ivslc_id, allele1_id, allele2_id, zygosity_id, rel1, rel2)
# if genotype_id not in geno_hash:
# geno_hash[genotype_id] = [vslc_label]
# else:
# geno_hash[genotype_id] += [vslc_label]
if not self.test_mode and limit is not None and line_counter > limit:
break
# build the gvc and the genotype label
for gt in geno_hash.keys():
if gt is None: # not sure why, but sometimes this is the case
continue
vslcs = sorted(list(geno_hash[gt]))
gvc_label = None
if len(vslcs) > 1:
gvc_id = re.sub(r'_', '', ('-'.join(vslcs)))
gvc_id = re.sub(r':', '', gvc_id)
gvc_id = '_:'+gvc_id
vslc_labels = []
for v in vslcs:
vslc_labels.append(self.label_hash[v])
gvc_label = '; '.join(vslc_labels)
model.addIndividualToGraph(
gvc_id, gvc_label, self.globaltt['genomic_variation_complement'])
self.label_hash[gvc_id] = gvc_label
for v in vslcs:
geno.addParts(v, gvc_id, self.globaltt['has_variant_part'])
geno.addVSLCtoParent(v, gvc_id)
geno.addParts(gvc_id, gt, self.globaltt['has_variant_part'])
elif len(vslcs) == 1:
gvc_id = vslcs[0]
gvc_label = self.label_hash[gvc_id]
# type the VSLC as also a GVC
model.addIndividualToGraph(
gvc_id, gvc_label, self.globaltt['genomic_variation_complement'])
geno.addVSLCtoParent(gvc_id, gt)
else:
LOG.info("No VSLCs for %s", gt)
# make the genotype label = gvc + background
bkgd_id = self.geno_bkgd.get(gt)
if bkgd_id is not None:
bkgd_label = self.label_hash.get(bkgd_id)
if bkgd_label is None:
bkgd_label = bkgd_id # just in case
else:
bkgd_label = 'n.s.'
if gvc_label is not None:
genotype_label = gvc_label + ' ['+bkgd_label+']'
else:
genotype_label = '['+bkgd_label+']'
model.addIndividualToGraph(gt, genotype_label)
self.label_hash[gt] = genotype_label
return |
def corenlp_to_xmltree(s, prune_root=True):
"""
Transforms an object with CoreNLP dep_path and dep_parent attributes into
an XMLTree. Will include elements of any array having the same dimensiion
as dep_* as node attributes. Also adds special word_idx attribute
corresponding to original sequence order in sentence.
"""
# Convert input object to dictionary
s = get_as_dict(s)
# Use the dep_parents array as a guide: ensure it is present and a list of
# ints
if not ("dep_parents" in s and isinstance(s["dep_parents"], list)):
raise ValueError(
"Input CoreNLP object must have a 'dep_parents' attribute which is a list"
)
try:
dep_parents = list(map(int, s["dep_parents"]))
except Exception:
raise ValueError("'dep_parents' attribute must be a list of ints")
# Also ensure that we are using CoreNLP-native indexing
# (root=0, 1-base word indexes)!
b = min(dep_parents)
if b != 0:
dep_parents = list(map(lambda j: j - b, dep_parents))
# Parse recursively
root = corenlp_to_xmltree_sub(s, dep_parents, 0)
# Often the return tree will have several roots, where one is the actual
# root and the rest are just singletons not included in the dep tree
# parse...
# We optionally remove these singletons and then collapse the root if only
# one child left.
if prune_root:
for c in root:
if len(c) == 0:
root.remove(c)
if len(root) == 1:
root = root.findall("./*")[0]
return XMLTree(root, words=s["words"]) | Transforms an object with CoreNLP dep_path and dep_parent attributes into
an XMLTree. Will include elements of any array having the same dimensiion
as dep_* as node attributes. Also adds special word_idx attribute
corresponding to original sequence order in sentence. | Below is the the instruction that describes the task:
### Input:
Transforms an object with CoreNLP dep_path and dep_parent attributes into
an XMLTree. Will include elements of any array having the same dimensiion
as dep_* as node attributes. Also adds special word_idx attribute
corresponding to original sequence order in sentence.
### Response:
def corenlp_to_xmltree(s, prune_root=True):
"""
Transforms an object with CoreNLP dep_path and dep_parent attributes into
an XMLTree. Will include elements of any array having the same dimensiion
as dep_* as node attributes. Also adds special word_idx attribute
corresponding to original sequence order in sentence.
"""
# Convert input object to dictionary
s = get_as_dict(s)
# Use the dep_parents array as a guide: ensure it is present and a list of
# ints
if not ("dep_parents" in s and isinstance(s["dep_parents"], list)):
raise ValueError(
"Input CoreNLP object must have a 'dep_parents' attribute which is a list"
)
try:
dep_parents = list(map(int, s["dep_parents"]))
except Exception:
raise ValueError("'dep_parents' attribute must be a list of ints")
# Also ensure that we are using CoreNLP-native indexing
# (root=0, 1-base word indexes)!
b = min(dep_parents)
if b != 0:
dep_parents = list(map(lambda j: j - b, dep_parents))
# Parse recursively
root = corenlp_to_xmltree_sub(s, dep_parents, 0)
# Often the return tree will have several roots, where one is the actual
# root and the rest are just singletons not included in the dep tree
# parse...
# We optionally remove these singletons and then collapse the root if only
# one child left.
if prune_root:
for c in root:
if len(c) == 0:
root.remove(c)
if len(root) == 1:
root = root.findall("./*")[0]
return XMLTree(root, words=s["words"]) |
def db_temp_from_wb_rh(wet_bulb, rel_humid, b_press=101325):
"""Dry Bulb Temperature (C) and humidity_ratio at at wet_bulb (C),
rel_humid (%) and Pressure b_press (Pa).
Formula is only valid for rel_humid == 0 or rel_humid == 100.
"""
assert rel_humid == 0 or rel_humid == 100, 'formula is only valid for' \
' rel_humid == 0 or rel_humid == 100'
humidity_ratio = humid_ratio_from_db_rh(wet_bulb, rel_humid, b_press)
hr_saturation = humid_ratio_from_db_rh(wet_bulb, 100, b_press)
db_temp = wet_bulb + (((hr_saturation - humidity_ratio) * 2260000) / (1005))
return db_temp, humidity_ratio | Dry Bulb Temperature (C) and humidity_ratio at at wet_bulb (C),
rel_humid (%) and Pressure b_press (Pa).
Formula is only valid for rel_humid == 0 or rel_humid == 100. | Below is the the instruction that describes the task:
### Input:
Dry Bulb Temperature (C) and humidity_ratio at at wet_bulb (C),
rel_humid (%) and Pressure b_press (Pa).
Formula is only valid for rel_humid == 0 or rel_humid == 100.
### Response:
def db_temp_from_wb_rh(wet_bulb, rel_humid, b_press=101325):
"""Dry Bulb Temperature (C) and humidity_ratio at at wet_bulb (C),
rel_humid (%) and Pressure b_press (Pa).
Formula is only valid for rel_humid == 0 or rel_humid == 100.
"""
assert rel_humid == 0 or rel_humid == 100, 'formula is only valid for' \
' rel_humid == 0 or rel_humid == 100'
humidity_ratio = humid_ratio_from_db_rh(wet_bulb, rel_humid, b_press)
hr_saturation = humid_ratio_from_db_rh(wet_bulb, 100, b_press)
db_temp = wet_bulb + (((hr_saturation - humidity_ratio) * 2260000) / (1005))
return db_temp, humidity_ratio |
def glob(self, filename):
"""Returns a list of files that match the given pattern(s)."""
if isinstance(filename, six.string_types):
return [
# Convert the filenames to string from bytes.
compat.as_str_any(matching_filename)
for matching_filename in py_glob.glob(
compat.as_bytes(filename))
]
else:
return [
# Convert the filenames to string from bytes.
compat.as_str_any(matching_filename)
for single_filename in filename
for matching_filename in py_glob.glob(
compat.as_bytes(single_filename))
] | Returns a list of files that match the given pattern(s). | Below is the the instruction that describes the task:
### Input:
Returns a list of files that match the given pattern(s).
### Response:
def glob(self, filename):
"""Returns a list of files that match the given pattern(s)."""
if isinstance(filename, six.string_types):
return [
# Convert the filenames to string from bytes.
compat.as_str_any(matching_filename)
for matching_filename in py_glob.glob(
compat.as_bytes(filename))
]
else:
return [
# Convert the filenames to string from bytes.
compat.as_str_any(matching_filename)
for single_filename in filename
for matching_filename in py_glob.glob(
compat.as_bytes(single_filename))
] |
def Tethering_bind(self, port):
"""
Function path: Tethering.bind
Domain: Tethering
Method name: bind
Parameters:
Required arguments:
'port' (type: integer) -> Port number to bind.
No return value.
Description: Request browser port binding.
"""
assert isinstance(port, (int,)
), "Argument 'port' must be of type '['int']'. Received type: '%s'" % type(
port)
subdom_funcs = self.synchronous_command('Tethering.bind', port=port)
return subdom_funcs | Function path: Tethering.bind
Domain: Tethering
Method name: bind
Parameters:
Required arguments:
'port' (type: integer) -> Port number to bind.
No return value.
Description: Request browser port binding. | Below is the the instruction that describes the task:
### Input:
Function path: Tethering.bind
Domain: Tethering
Method name: bind
Parameters:
Required arguments:
'port' (type: integer) -> Port number to bind.
No return value.
Description: Request browser port binding.
### Response:
def Tethering_bind(self, port):
"""
Function path: Tethering.bind
Domain: Tethering
Method name: bind
Parameters:
Required arguments:
'port' (type: integer) -> Port number to bind.
No return value.
Description: Request browser port binding.
"""
assert isinstance(port, (int,)
), "Argument 'port' must be of type '['int']'. Received type: '%s'" % type(
port)
subdom_funcs = self.synchronous_command('Tethering.bind', port=port)
return subdom_funcs |
def get(self, name):
"""
Get workspace infos from name.
Return None if workspace doesn't exists.
"""
ws_list = self.list()
return ws_list[name] if name in ws_list else None | Get workspace infos from name.
Return None if workspace doesn't exists. | Below is the the instruction that describes the task:
### Input:
Get workspace infos from name.
Return None if workspace doesn't exists.
### Response:
def get(self, name):
"""
Get workspace infos from name.
Return None if workspace doesn't exists.
"""
ws_list = self.list()
return ws_list[name] if name in ws_list else None |
def get_som_clusters(self):
"""!
@brief Returns clusters with SOM neurons that encode input features in line with result of synchronization in the second (Sync) layer.
@return (list) List of clusters that are represented by lists of indexes of neurons that encode input data.
@see process()
@see get_clusters()
"""
sync_clusters = self._analyser.allocate_clusters();
# Decode it to indexes of SOM neurons
som_clusters = list();
for oscillators in sync_clusters:
cluster = list();
for index_oscillator in oscillators:
index_neuron = self._som_osc_table[index_oscillator];
cluster.append(index_neuron);
som_clusters.append(cluster);
return som_clusters; | !
@brief Returns clusters with SOM neurons that encode input features in line with result of synchronization in the second (Sync) layer.
@return (list) List of clusters that are represented by lists of indexes of neurons that encode input data.
@see process()
@see get_clusters() | Below is the the instruction that describes the task:
### Input:
!
@brief Returns clusters with SOM neurons that encode input features in line with result of synchronization in the second (Sync) layer.
@return (list) List of clusters that are represented by lists of indexes of neurons that encode input data.
@see process()
@see get_clusters()
### Response:
def get_som_clusters(self):
"""!
@brief Returns clusters with SOM neurons that encode input features in line with result of synchronization in the second (Sync) layer.
@return (list) List of clusters that are represented by lists of indexes of neurons that encode input data.
@see process()
@see get_clusters()
"""
sync_clusters = self._analyser.allocate_clusters();
# Decode it to indexes of SOM neurons
som_clusters = list();
for oscillators in sync_clusters:
cluster = list();
for index_oscillator in oscillators:
index_neuron = self._som_osc_table[index_oscillator];
cluster.append(index_neuron);
som_clusters.append(cluster);
return som_clusters; |
async def start(self, zone_id: int, time: int) -> dict:
"""Start a program."""
return await self._request(
'post', 'zone/{0}/start'.format(zone_id), json={'time': time}) | Start a program. | Below is the the instruction that describes the task:
### Input:
Start a program.
### Response:
async def start(self, zone_id: int, time: int) -> dict:
"""Start a program."""
return await self._request(
'post', 'zone/{0}/start'.format(zone_id), json={'time': time}) |
def inflate_context_tuple(ast_rootpath, root_env):
"""Instantiate a Tuple from a TupleNode.
Walking the AST tree upwards, evaluate from the root down again.
"""
with util.LogTime('inflate_context_tuple'):
# We only need to look at tuple members going down.
inflated = ast_rootpath[0].eval(root_env)
current = inflated
env = root_env
try:
for node in ast_rootpath[1:]:
if is_tuple_member_node(node):
assert framework.is_tuple(current)
with util.LogTime('into tuple'):
thunk, env = inflated.get_thunk_env(node.name)
current = framework.eval(thunk, env)
elif framework.is_list(current):
with util.LogTime('eval thing'):
current = framework.eval(node, env)
if framework.is_tuple(current):
inflated = current
except (gcl.EvaluationError, ast.UnparseableAccess):
# Eat evaluation error, probably means the rightmost tuplemember wasn't complete.
# Return what we have so far.
pass
return inflated | Instantiate a Tuple from a TupleNode.
Walking the AST tree upwards, evaluate from the root down again. | Below is the the instruction that describes the task:
### Input:
Instantiate a Tuple from a TupleNode.
Walking the AST tree upwards, evaluate from the root down again.
### Response:
def inflate_context_tuple(ast_rootpath, root_env):
"""Instantiate a Tuple from a TupleNode.
Walking the AST tree upwards, evaluate from the root down again.
"""
with util.LogTime('inflate_context_tuple'):
# We only need to look at tuple members going down.
inflated = ast_rootpath[0].eval(root_env)
current = inflated
env = root_env
try:
for node in ast_rootpath[1:]:
if is_tuple_member_node(node):
assert framework.is_tuple(current)
with util.LogTime('into tuple'):
thunk, env = inflated.get_thunk_env(node.name)
current = framework.eval(thunk, env)
elif framework.is_list(current):
with util.LogTime('eval thing'):
current = framework.eval(node, env)
if framework.is_tuple(current):
inflated = current
except (gcl.EvaluationError, ast.UnparseableAccess):
# Eat evaluation error, probably means the rightmost tuplemember wasn't complete.
# Return what we have so far.
pass
return inflated |
def rollback(self):
""" Do journal rollback """
# Close the journal for writing, if this is an automatic rollback following a crash,
# the file descriptor will not be open, so don't need to do anything.
if self.journal != None: self.journal.close()
self.journal = None
# Read the journal
journ_list = []
with open(self.j_file) as fle:
for l in fle: journ_list.append(json.loads(l))
journ_subtract = deque(reversed(journ_list))
for j_itm in reversed(journ_list):
try: self.do_action({'do' : j_itm}, False)
except IOError: pass
# As each item is completed remove it from the journal file, in case
# something fails during the rollback we can pick up where it stopped.
journ_subtract.popleft()
with open(self.j_file, 'w') as f:
for data in list(journ_subtract):
f.write(json.dumps(data) + "\n")
f.flush()
# Rollback is complete so delete the journal file
os.remove(self.j_file) | Do journal rollback | Below is the the instruction that describes the task:
### Input:
Do journal rollback
### Response:
def rollback(self):
""" Do journal rollback """
# Close the journal for writing, if this is an automatic rollback following a crash,
# the file descriptor will not be open, so don't need to do anything.
if self.journal != None: self.journal.close()
self.journal = None
# Read the journal
journ_list = []
with open(self.j_file) as fle:
for l in fle: journ_list.append(json.loads(l))
journ_subtract = deque(reversed(journ_list))
for j_itm in reversed(journ_list):
try: self.do_action({'do' : j_itm}, False)
except IOError: pass
# As each item is completed remove it from the journal file, in case
# something fails during the rollback we can pick up where it stopped.
journ_subtract.popleft()
with open(self.j_file, 'w') as f:
for data in list(journ_subtract):
f.write(json.dumps(data) + "\n")
f.flush()
# Rollback is complete so delete the journal file
os.remove(self.j_file) |
def not_files(self):
"""Returns a list of all arguments that aren't files/globs."""
_args = []
for arg in self.all:
if not len(_expand_path(arg)):
if not os.path.exists(arg):
_args.append(arg)
return ArgsList(_args, no_argv=True) | Returns a list of all arguments that aren't files/globs. | Below is the the instruction that describes the task:
### Input:
Returns a list of all arguments that aren't files/globs.
### Response:
def not_files(self):
"""Returns a list of all arguments that aren't files/globs."""
_args = []
for arg in self.all:
if not len(_expand_path(arg)):
if not os.path.exists(arg):
_args.append(arg)
return ArgsList(_args, no_argv=True) |
def report(mount):
'''
Report on quotas for a specific volume
CLI Example:
.. code-block:: bash
salt '*' quota.report /media/data
'''
ret = {mount: {}}
ret[mount]['User Quotas'] = _parse_quota(mount, '-u')
ret[mount]['Group Quotas'] = _parse_quota(mount, '-g')
return ret | Report on quotas for a specific volume
CLI Example:
.. code-block:: bash
salt '*' quota.report /media/data | Below is the the instruction that describes the task:
### Input:
Report on quotas for a specific volume
CLI Example:
.. code-block:: bash
salt '*' quota.report /media/data
### Response:
def report(mount):
'''
Report on quotas for a specific volume
CLI Example:
.. code-block:: bash
salt '*' quota.report /media/data
'''
ret = {mount: {}}
ret[mount]['User Quotas'] = _parse_quota(mount, '-u')
ret[mount]['Group Quotas'] = _parse_quota(mount, '-g')
return ret |
async def self_check(cls):
"""
Check that the configuration is correct
- Presence of "token" in the settings
- Presence of "BERNARD_BASE_URL" in the global configuration
"""
# noinspection PyTypeChecker
async for check in super(Telegram, cls).self_check():
yield check
s = cls.settings()
try:
assert isinstance(s['token'], str)
except (KeyError, TypeError, AssertionError):
yield HealthCheckFail(
'00005',
'Missing "token" for Telegram platform. You can obtain one by'
'registering your bot in Telegram.',
)
if not hasattr(settings, 'BERNARD_BASE_URL'):
yield HealthCheckFail(
'00005',
'"BERNARD_BASE_URL" cannot be found in the configuration. The'
'Telegram platform needs it because it uses it to '
'automatically register its hook.'
)
if not hasattr(settings, 'WEBVIEW_SECRET_KEY'):
yield HealthCheckFail(
'00005',
'"WEBVIEW_SECRET_KEY" cannot be found in the configuration. '
'It is required in order to be able to create secure postback '
'URLs.'
) | Check that the configuration is correct
- Presence of "token" in the settings
- Presence of "BERNARD_BASE_URL" in the global configuration | Below is the the instruction that describes the task:
### Input:
Check that the configuration is correct
- Presence of "token" in the settings
- Presence of "BERNARD_BASE_URL" in the global configuration
### Response:
async def self_check(cls):
"""
Check that the configuration is correct
- Presence of "token" in the settings
- Presence of "BERNARD_BASE_URL" in the global configuration
"""
# noinspection PyTypeChecker
async for check in super(Telegram, cls).self_check():
yield check
s = cls.settings()
try:
assert isinstance(s['token'], str)
except (KeyError, TypeError, AssertionError):
yield HealthCheckFail(
'00005',
'Missing "token" for Telegram platform. You can obtain one by'
'registering your bot in Telegram.',
)
if not hasattr(settings, 'BERNARD_BASE_URL'):
yield HealthCheckFail(
'00005',
'"BERNARD_BASE_URL" cannot be found in the configuration. The'
'Telegram platform needs it because it uses it to '
'automatically register its hook.'
)
if not hasattr(settings, 'WEBVIEW_SECRET_KEY'):
yield HealthCheckFail(
'00005',
'"WEBVIEW_SECRET_KEY" cannot be found in the configuration. '
'It is required in order to be able to create secure postback '
'URLs.'
) |
def load_configs(self, filename):
"""Load configurations from a file with four columns: a b m n
"""
configs = np.loadtxt(filename)
self.add_to_configs(configs) | Load configurations from a file with four columns: a b m n | Below is the the instruction that describes the task:
### Input:
Load configurations from a file with four columns: a b m n
### Response:
def load_configs(self, filename):
"""Load configurations from a file with four columns: a b m n
"""
configs = np.loadtxt(filename)
self.add_to_configs(configs) |
def change_and_save(obj, update_only_changed_fields=False, save_kwargs=None, **changed_fields):
"""
Changes a given `changed_fields` on object, saves it and returns changed object.
"""
from chamber.models import SmartModel
change(obj, **changed_fields)
if update_only_changed_fields and not isinstance(obj, SmartModel):
raise TypeError('update_only_changed_fields can be used only with SmartModel')
save_kwargs = save_kwargs if save_kwargs is not None else {}
if update_only_changed_fields:
save_kwargs['update_only_changed_fields'] = True
obj.save(**save_kwargs)
return obj | Changes a given `changed_fields` on object, saves it and returns changed object. | Below is the the instruction that describes the task:
### Input:
Changes a given `changed_fields` on object, saves it and returns changed object.
### Response:
def change_and_save(obj, update_only_changed_fields=False, save_kwargs=None, **changed_fields):
"""
Changes a given `changed_fields` on object, saves it and returns changed object.
"""
from chamber.models import SmartModel
change(obj, **changed_fields)
if update_only_changed_fields and not isinstance(obj, SmartModel):
raise TypeError('update_only_changed_fields can be used only with SmartModel')
save_kwargs = save_kwargs if save_kwargs is not None else {}
if update_only_changed_fields:
save_kwargs['update_only_changed_fields'] = True
obj.save(**save_kwargs)
return obj |
def einfo(self, db=None):
"""query the einfo endpoint
:param db: string (optional)
:rtype: EInfo or EInfoDB object
If db is None, the reply is a list of databases, which is returned
in an EInfo object (which has a databases() method).
If db is not None, the reply is information about the specified
database, which is returned in an EInfoDB object. (Version 2.0
data is automatically requested.)
"""
if db is None:
return EInfoResult(self._qs.einfo()).dblist
return EInfoResult(self._qs.einfo({'db': db, 'version': '2.0'})).dbinfo | query the einfo endpoint
:param db: string (optional)
:rtype: EInfo or EInfoDB object
If db is None, the reply is a list of databases, which is returned
in an EInfo object (which has a databases() method).
If db is not None, the reply is information about the specified
database, which is returned in an EInfoDB object. (Version 2.0
data is automatically requested.) | Below is the the instruction that describes the task:
### Input:
query the einfo endpoint
:param db: string (optional)
:rtype: EInfo or EInfoDB object
If db is None, the reply is a list of databases, which is returned
in an EInfo object (which has a databases() method).
If db is not None, the reply is information about the specified
database, which is returned in an EInfoDB object. (Version 2.0
data is automatically requested.)
### Response:
def einfo(self, db=None):
"""query the einfo endpoint
:param db: string (optional)
:rtype: EInfo or EInfoDB object
If db is None, the reply is a list of databases, which is returned
in an EInfo object (which has a databases() method).
If db is not None, the reply is information about the specified
database, which is returned in an EInfoDB object. (Version 2.0
data is automatically requested.)
"""
if db is None:
return EInfoResult(self._qs.einfo()).dblist
return EInfoResult(self._qs.einfo({'db': db, 'version': '2.0'})).dbinfo |
async def register(*address_list, cluster=None, loop=None):
"""Start Raft node (server)
Args:
address_list — 127.0.0.1:8000 [, 127.0.0.1:8001 ...]
cluster — [127.0.0.1:8001, 127.0.0.1:8002, ...]
"""
loop = loop or asyncio.get_event_loop()
for address in address_list:
host, port = address.rsplit(':', 1)
node = Node(address=(host, int(port)), loop=loop)
await node.start()
for address in cluster:
host, port = address.rsplit(':', 1)
port = int(port)
if (host, port) != (node.host, node.port):
node.update_cluster((host, port)) | Start Raft node (server)
Args:
address_list — 127.0.0.1:8000 [, 127.0.0.1:8001 ...]
cluster — [127.0.0.1:8001, 127.0.0.1:8002, ...] | Below is the the instruction that describes the task:
### Input:
Start Raft node (server)
Args:
address_list — 127.0.0.1:8000 [, 127.0.0.1:8001 ...]
cluster — [127.0.0.1:8001, 127.0.0.1:8002, ...]
### Response:
async def register(*address_list, cluster=None, loop=None):
"""Start Raft node (server)
Args:
address_list — 127.0.0.1:8000 [, 127.0.0.1:8001 ...]
cluster — [127.0.0.1:8001, 127.0.0.1:8002, ...]
"""
loop = loop or asyncio.get_event_loop()
for address in address_list:
host, port = address.rsplit(':', 1)
node = Node(address=(host, int(port)), loop=loop)
await node.start()
for address in cluster:
host, port = address.rsplit(':', 1)
port = int(port)
if (host, port) != (node.host, node.port):
node.update_cluster((host, port)) |
def revert_prompt(self,
old_prompt_name,
new_expect=None):
"""Reverts the prompt to the previous value (passed-in).
It should be fairly rare to need this. Most of the time you would just
exit a subshell rather than resetting the prompt.
- old_prompt_name -
- new_expect -
- child - See send()
"""
shutit = self.shutit
expect = new_expect or self.default_expect
# v the space is intentional, to avoid polluting bash history.
self.send(ShutItSendSpec(self,
send=(' PS1="${PS1_%s}" && unset PS1_%s') % (old_prompt_name, old_prompt_name),
expect=expect,
check_exit=False,
fail_on_empty_before=False,
echo=False,
loglevel=logging.DEBUG,
ignore_background=True))
if not new_expect:
shutit.log('Resetting default expect to default', level=logging.DEBUG)
shutit.set_default_shutit_pexpect_session_expect()
_ = self.init_pexpect_session_environment(old_prompt_name) | Reverts the prompt to the previous value (passed-in).
It should be fairly rare to need this. Most of the time you would just
exit a subshell rather than resetting the prompt.
- old_prompt_name -
- new_expect -
- child - See send() | Below is the the instruction that describes the task:
### Input:
Reverts the prompt to the previous value (passed-in).
It should be fairly rare to need this. Most of the time you would just
exit a subshell rather than resetting the prompt.
- old_prompt_name -
- new_expect -
- child - See send()
### Response:
def revert_prompt(self,
old_prompt_name,
new_expect=None):
"""Reverts the prompt to the previous value (passed-in).
It should be fairly rare to need this. Most of the time you would just
exit a subshell rather than resetting the prompt.
- old_prompt_name -
- new_expect -
- child - See send()
"""
shutit = self.shutit
expect = new_expect or self.default_expect
# v the space is intentional, to avoid polluting bash history.
self.send(ShutItSendSpec(self,
send=(' PS1="${PS1_%s}" && unset PS1_%s') % (old_prompt_name, old_prompt_name),
expect=expect,
check_exit=False,
fail_on_empty_before=False,
echo=False,
loglevel=logging.DEBUG,
ignore_background=True))
if not new_expect:
shutit.log('Resetting default expect to default', level=logging.DEBUG)
shutit.set_default_shutit_pexpect_session_expect()
_ = self.init_pexpect_session_environment(old_prompt_name) |
def registerDirs(self,json_dirs):
"""
Registers multiple new server directories.
Inputs:
json_dirs - Array of Server Directories in JSON format.
"""
url = self._url + "/directories/registerDirs"
params = {
"f" : "json",
"directories" : json_dirs
}
res = self._post(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
return res | Registers multiple new server directories.
Inputs:
json_dirs - Array of Server Directories in JSON format. | Below is the the instruction that describes the task:
### Input:
Registers multiple new server directories.
Inputs:
json_dirs - Array of Server Directories in JSON format.
### Response:
def registerDirs(self,json_dirs):
"""
Registers multiple new server directories.
Inputs:
json_dirs - Array of Server Directories in JSON format.
"""
url = self._url + "/directories/registerDirs"
params = {
"f" : "json",
"directories" : json_dirs
}
res = self._post(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
return res |
def bhp2pascal(bhp, cfm, fan_tot_eff):
"""return inputs for E+ in pascal and m3/s"""
inh2o = bhp * 6356.0 * fan_tot_eff / cfm
pascal = inh2o2pascal(inh2o)
m3s = cfm2m3s(cfm)
return pascal, m3s | return inputs for E+ in pascal and m3/s | Below is the the instruction that describes the task:
### Input:
return inputs for E+ in pascal and m3/s
### Response:
def bhp2pascal(bhp, cfm, fan_tot_eff):
"""return inputs for E+ in pascal and m3/s"""
inh2o = bhp * 6356.0 * fan_tot_eff / cfm
pascal = inh2o2pascal(inh2o)
m3s = cfm2m3s(cfm)
return pascal, m3s |
def itruediv(a, b):
"Same as a /= b."
if type(a) == int or type(a) == long:
a = float(a)
a /= b
return a | Same as a /= b. | Below is the the instruction that describes the task:
### Input:
Same as a /= b.
### Response:
def itruediv(a, b):
"Same as a /= b."
if type(a) == int or type(a) == long:
a = float(a)
a /= b
return a |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.