code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def elXpath(self, xpath, dom=None):
    """Check whether an element is present, located by XPath.

    The original docstring claimed "by css", but the code clearly calls
    ``is_element_present_by_xpath``.

    Args:
        xpath: XPath expression identifying the element.
        dom: scope to search in; defaults to ``self.browser``.

    Returns:
        The result of ``expect(...)`` wrapping the presence check.
    """
    if dom is None:
        dom = self.browser
    return expect(dom.is_element_present_by_xpath, args=[xpath])
### Input:
check if element is present by css
### Response:
def elXpath(self, xpath, dom=None):
"""check if element is present by css"""
if dom is None:
dom = self.browser
return expect(dom.is_element_present_by_xpath, args=[xpath]) |
def _check_function(self):
''' make some basic checks on the function to make sure it is valid'''
# note, callable is valid for Python 2 and Python 3.2 onwards but
# not inbetween
if not callable(self._function):
raise RuntimeError(
"provided function '{0}' is not callable".
format(str(self._function)))
from inspect import getargspec
arg_info = getargspec(self._function)
if len(arg_info.args) != 1:
print str(arg_info)
raise RuntimeError(
"provided function should have one argument but found "
"{0}".format(len(arg_info.args))) | make some basic checks on the function to make sure it is valid | Below is the the instruction that describes the task:
### Input:
make some basic checks on the function to make sure it is valid
### Response:
def _check_function(self):
''' make some basic checks on the function to make sure it is valid'''
# note, callable is valid for Python 2 and Python 3.2 onwards but
# not inbetween
if not callable(self._function):
raise RuntimeError(
"provided function '{0}' is not callable".
format(str(self._function)))
from inspect import getargspec
arg_info = getargspec(self._function)
if len(arg_info.args) != 1:
print str(arg_info)
raise RuntimeError(
"provided function should have one argument but found "
"{0}".format(len(arg_info.args))) |
def _reverse(self):
""" reverse patch direction (this doesn't touch filenames) """
for p in self.items:
for h in p.hunks:
h.startsrc, h.starttgt = h.starttgt, h.startsrc
h.linessrc, h.linestgt = h.linestgt, h.linessrc
for i,line in enumerate(h.text):
# need to use line[0:1] here, because line[0]
# returns int instead of bytes on Python 3
if line[0:1] == b'+':
h.text[i] = b'-' + line[1:]
elif line[0:1] == b'-':
h.text[i] = b'+' +line[1:] | reverse patch direction (this doesn't touch filenames) | Below is the the instruction that describes the task:
### Input:
reverse patch direction (this doesn't touch filenames)
### Response:
def _reverse(self):
""" reverse patch direction (this doesn't touch filenames) """
for p in self.items:
for h in p.hunks:
h.startsrc, h.starttgt = h.starttgt, h.startsrc
h.linessrc, h.linestgt = h.linestgt, h.linessrc
for i,line in enumerate(h.text):
# need to use line[0:1] here, because line[0]
# returns int instead of bytes on Python 3
if line[0:1] == b'+':
h.text[i] = b'-' + line[1:]
elif line[0:1] == b'-':
h.text[i] = b'+' +line[1:] |
def export_keys(output_path, stash, passphrase, backend):
    """Export all keys to a file.

    Resolves the stash from the given backend/passphrase, then writes it
    out; a GhostError terminates the process via ``sys.exit``.
    """
    stash = _get_stash(backend, stash, passphrase)
    try:
        click.echo('Exporting stash to {0}...'.format(output_path))
        stash.export(output_path=output_path)
        click.echo('Export complete!')
    except GhostError as error:
        # Exit with the error as the process status message.
        sys.exit(error)
### Input:
Export all keys to a file
### Response:
def export_keys(output_path, stash, passphrase, backend):
"""Export all keys to a file
"""
stash = _get_stash(backend, stash, passphrase)
try:
click.echo('Exporting stash to {0}...'.format(output_path))
stash.export(output_path=output_path)
click.echo('Export complete!')
except GhostError as ex:
sys.exit(ex) |
def _add_missing_schema_attributes(self):
    '''
    Adds any missed schema attributes to the _attributes list.

    The attributes can be class attributes and they won't be
    included in the _attributes list automatically.
    '''
    for attr in (name for name in dir(self) if not name.startswith('__')):
        # Fix: the original looked the attribute up twice and left
        # ``attr_val`` unused; look it up once and reuse it.
        attr_val = getattr(self, attr)
        if isinstance(attr_val, SchemaItem) and attr not in self._attributes:
            self._attributes.append(attr)
The attributes can be class attributes and they won't be
included in the _attributes list automatically | Below is the the instruction that describes the task:
### Input:
Adds any missed schema attributes to the _attributes list
The attributes can be class attributes and they won't be
included in the _attributes list automatically
### Response:
def _add_missing_schema_attributes(self):
'''
Adds any missed schema attributes to the _attributes list
The attributes can be class attributes and they won't be
included in the _attributes list automatically
'''
for attr in [attr for attr in dir(self) if not attr.startswith('__')]:
attr_val = getattr(self, attr)
if isinstance(getattr(self, attr), SchemaItem) and \
attr not in self._attributes:
self._attributes.append(attr) |
def clear(self):
    """
    Clears the registry
    """
    with self.__svc_lock:
        # Wipe every service/bundle bookkeeping structure while holding
        # the lock, in the same order as before.
        for store in (
                self.__svc_registry,
                self.__svc_factories,
                self.__svc_specs,
                self.__bundle_svc,
                self.__bundle_imports,
                self.__factory_usage,
                self.__pending_services):
            store.clear()
### Input:
Clears the registry
### Response:
def clear(self):
"""
Clears the registry
"""
with self.__svc_lock:
self.__svc_registry.clear()
self.__svc_factories.clear()
self.__svc_specs.clear()
self.__bundle_svc.clear()
self.__bundle_imports.clear()
self.__factory_usage.clear()
self.__pending_services.clear() |
def _ftp_nlst(self, dir_name):
    """Variant of `self.ftp.nlst()` that supports encoding-fallback."""
    assert compat.is_native(dir_name)
    collected = []
    # Gather the raw NLST response line by line; the status argument is
    # ignored, only the payload line is kept.
    self._ftp_retrlines_native(
        "NLST " + dir_name,
        lambda status, line: collected.append(line),
        self.encoding)
    return collected
### Input:
Variant of `self.ftp.nlst()` that supports encoding-fallback.
### Response:
def _ftp_nlst(self, dir_name):
"""Variant of `self.ftp.nlst()` that supports encoding-fallback."""
assert compat.is_native(dir_name)
lines = []
def _add_line(status, line):
lines.append(line)
cmd = "NLST " + dir_name
self._ftp_retrlines_native(cmd, _add_line, self.encoding)
# print(cmd, lines)
return lines |
def extract_source_params(src):
    """
    Extract params from source object.

    For each (key, param, vtype) in BASE_PARAMS the value is taken from
    the element's attributes first, then from its child nodes; vtype
    "c" keeps the raw string, "f" converts to float, anything else (or a
    missing key) maps to None.
    """
    tags = get_taglist(src)
    _missing = object()  # sentinel: distinguishes "absent" from None text
    data = []
    for key, param, vtype in BASE_PARAMS:
        if key in src.attrib:
            raw = src.attrib[key]
        elif key in tags:
            raw = src.nodes[tags.index(key)].text
        else:
            raw = _missing
        if raw is _missing:
            data.append((param, None))
        elif vtype == "c":
            data.append((param, raw))
        elif vtype == "f":
            data.append((param, float(raw)))
        else:
            data.append((param, None))
    return dict(data)
### Input:
Extract params from source object.
### Response:
def extract_source_params(src):
"""
Extract params from source object.
"""
tags = get_taglist(src)
data = []
for key, param, vtype in BASE_PARAMS:
if key in src.attrib:
if vtype == "c":
data.append((param, src.attrib[key]))
elif vtype == "f":
data.append((param, float(src.attrib[key])))
else:
data.append((param, None))
elif key in tags:
if vtype == "c":
data.append((param, src.nodes[tags.index(key)].text))
elif vtype == "f":
data.append((param, float(src.nodes[tags.index(key)].text)))
else:
data.append((param, None))
else:
data.append((param, None))
return dict(data) |
def _choice_getter(self):
"""
Return a function object suitable for the "get" side of the property
descriptor.
"""
def get_group_member_element(obj):
return obj.first_child_found_in(*self._member_nsptagnames)
get_group_member_element.__doc__ = (
'Return the child element belonging to this element group, or '
'|None| if no member child is present.'
)
return get_group_member_element | Return a function object suitable for the "get" side of the property
descriptor. | Below is the the instruction that describes the task:
### Input:
Return a function object suitable for the "get" side of the property
descriptor.
### Response:
def _choice_getter(self):
"""
Return a function object suitable for the "get" side of the property
descriptor.
"""
def get_group_member_element(obj):
return obj.first_child_found_in(*self._member_nsptagnames)
get_group_member_element.__doc__ = (
'Return the child element belonging to this element group, or '
'|None| if no member child is present.'
)
return get_group_member_element |
def save_figure(self, event=None, panel='top'):
    """Save the named panel's figure image to a file."""
    # Delegate to the panel's own save implementation.
    target = self.get_panel(panel)
    target.save_figure(event=event)
### Input:
save figure image to file
### Response:
def save_figure(self, event=None, panel='top'):
""" save figure image to file"""
panel = self.get_panel(panel)
panel.save_figure(event=event) |
def find_project_directory(start_path) -> str:
    """ Locate top-level project directory """
    start_path = os.path.realpath(start_path)
    candidate = os.path.join(start_path, ModelConfig.PROJECT_FILE_NAME)
    # Guard clauses instead of nested if/else: found -> return; at the
    # filesystem root -> give up; otherwise recurse into the parent.
    if os.path.exists(candidate):
        return start_path
    parent = os.path.realpath(os.path.join(start_path, '..'))
    if os.path.realpath(start_path) == parent:
        raise RuntimeError(f"Couldn't find project file starting from {start_path}")
    return ModelConfig.find_project_directory(parent)
### Input:
Locate top-level project directory
### Response:
def find_project_directory(start_path) -> str:
""" Locate top-level project directory """
start_path = os.path.realpath(start_path)
possible_name = os.path.join(start_path, ModelConfig.PROJECT_FILE_NAME)
if os.path.exists(possible_name):
return start_path
else:
up_path = os.path.realpath(os.path.join(start_path, '..'))
if os.path.realpath(start_path) == up_path:
raise RuntimeError(f"Couldn't find project file starting from {start_path}")
else:
return ModelConfig.find_project_directory(up_path) |
def compute_two_dimensional_near_isotropic_downsampling_scales(
        size,
        voxel_size,
        max_scales=float('inf'),
        max_downsampling=DEFAULT_MAX_DOWNSAMPLING,
        max_downsampled_size=DEFAULT_MAX_DOWNSAMPLED_SIZE):
    """Compute a list of successive downsampling factors for 2-d tiles."""
    max_scales = min(max_scales, 10)

    # Independently compute 2-d downsamplings for the XY, XZ and YZ planes
    # with a high number of max_scales, ignoring the other criteria (they
    # are applied when truncating below).
    plane_dims = [[0, 1], [0, 2], [1, 2]]
    scales_transpose = [
        compute_near_isotropic_downsampling_scales(
            size=size,
            voxel_size=voxel_size,
            dimensions_to_downsample=dims,
            max_scales=max_scales,
            max_downsampling=float('inf'),
            max_downsampled_size=0, ) for dims in plane_dims
    ]

    size = np.array(size)

    def scale_satisfies_criteria(scale):
        return np.prod(scale) < max_downsampling and (size / scale).max() > max_downsampled_size

    # Truncate all lists of scales to the same length, stopping once the
    # stopping criteria is reached for every plane.
    scales = [((1, ) * 3, ) * 3]
    for i in range(1, max_scales):
        cur_scales = tuple(scales_transpose[d][i] for d in range(3))
        if not any(scale_satisfies_criteria(s) for s in cur_scales):
            break
        scales.append(cur_scales)
    return scales
### Input:
Compute a list of successive downsampling factors for 2-d tiles.
### Response:
def compute_two_dimensional_near_isotropic_downsampling_scales(
size,
voxel_size,
max_scales=float('inf'),
max_downsampling=DEFAULT_MAX_DOWNSAMPLING,
max_downsampled_size=DEFAULT_MAX_DOWNSAMPLED_SIZE):
"""Compute a list of successive downsampling factors for 2-d tiles."""
max_scales = min(max_scales, 10)
# First compute a set of 2-d downsamplings for XY, XZ, and YZ with a high
# number of max_scales, and ignoring other criteria.
scales_transpose = [
compute_near_isotropic_downsampling_scales(
size=size,
voxel_size=voxel_size,
dimensions_to_downsample=dimensions_to_downsample,
max_scales=max_scales,
max_downsampling=float('inf'),
max_downsampled_size=0, ) for dimensions_to_downsample in [[0, 1], [0, 2], [1, 2]]
]
# Truncate all list of scales to the same length, once the stopping criteria
# is reached for all values of dimensions_to_downsample.
scales = [((1, ) * 3, ) * 3]
size = np.array(size)
def scale_satisfies_criteria(scale):
return np.prod(scale) < max_downsampling and (size / scale).max() > max_downsampled_size
for i in range(1, max_scales):
cur_scales = tuple(scales_transpose[d][i] for d in range(3))
if all(not scale_satisfies_criteria(scale) for scale in cur_scales):
break
scales.append(cur_scales)
return scales |
def revnet(inputs, hparams, reuse=None):
  """Uses Tensor2Tensor memory optimized RevNet block to build a RevNet.

  Args:
    inputs: [NxHxWx3] tensor of input images to the model.
    hparams: HParams object carrying the per-block configuration lists
      (num_channels, num_layers_per_block, first_batch_norm, strides,
      bottleneck), the initial-block settings (num_channels_init_block,
      init_kernel_size, init_maxpool, init_stride) and the dimensionality
      switch ``dim``.
    reuse: Whether to reuse the default variable scope.

  Returns:
    [batch_size, hidden_dim] pre-logits tensor from the bottleneck RevNet.
  """
  is_training = hparams.mode == tf.estimator.ModeKeys.TRAIN
  with tf.variable_scope('RevNet', reuse=reuse):
    # Initial stem: produces the two residual streams x1 and x2.
    x1, x2 = init(inputs,
                  num_channels=hparams.num_channels_init_block,
                  dim=hparams.dim,
                  kernel_size=hparams.init_kernel_size,
                  maxpool=hparams.init_maxpool,
                  stride=hparams.init_stride,
                  training=is_training)
    # Stack of reversible units, one group of settings per block.
    for block_num in range(len(hparams.num_layers_per_block)):
      x1, x2 = unit(x1, x2, block_num,
                    dim=hparams.dim,
                    training=is_training,
                    depth=hparams.num_channels[block_num],
                    num_layers=hparams.num_layers_per_block[block_num],
                    first_batch_norm=hparams.first_batch_norm[block_num],
                    stride=hparams.strides[block_num],
                    bottleneck=hparams.bottleneck)
    return final_block(x1, x2, dim=hparams.dim, training=is_training)
Args:
inputs: [NxHxWx3] tensor of input images to the model.
hparams: HParams object that contains the following parameters,
in addition to the parameters contained in the basic_params1() object in
the common_hparams module:
num_channels_first - A Python list where each element represents the
depth of the first and third convolutional layers in the bottleneck
residual unit for a given block.
num_channels_second - A Python list where each element represents the
depth of the second convolutional layer in the bottleneck residual
unit for a given block.
num_layers_per_block - A Python list containing the number of RevNet
layers for each block.
first_batch_norm - A Python list containing booleans representing the
presence of a batch norm layer at the beginning of a given block.
strides - A Python list containing integers representing the stride of
the residual function for each block.
num_channels_init_block - An integer representing the number of channels
for the convolutional layer in the initial block.
dimension - A string (either "2d" or "3d") that decides if the RevNet is
2-dimensional or 3-dimensional.
reuse: Whether to reuse the default variable scope.
Returns:
[batch_size, hidden_dim] pre-logits tensor from the bottleneck RevNet. | Below is the the instruction that describes the task:
### Input:
Uses Tensor2Tensor memory optimized RevNet block to build a RevNet.
Args:
inputs: [NxHxWx3] tensor of input images to the model.
hparams: HParams object that contains the following parameters,
in addition to the parameters contained in the basic_params1() object in
the common_hparams module:
num_channels_first - A Python list where each element represents the
depth of the first and third convolutional layers in the bottleneck
residual unit for a given block.
num_channels_second - A Python list where each element represents the
depth of the second convolutional layer in the bottleneck residual
unit for a given block.
num_layers_per_block - A Python list containing the number of RevNet
layers for each block.
first_batch_norm - A Python list containing booleans representing the
presence of a batch norm layer at the beginning of a given block.
strides - A Python list containing integers representing the stride of
the residual function for each block.
num_channels_init_block - An integer representing the number of channels
for the convolutional layer in the initial block.
dimension - A string (either "2d" or "3d") that decides if the RevNet is
2-dimensional or 3-dimensional.
reuse: Whether to reuse the default variable scope.
Returns:
[batch_size, hidden_dim] pre-logits tensor from the bottleneck RevNet.
### Response:
def revnet(inputs, hparams, reuse=None):
"""Uses Tensor2Tensor memory optimized RevNet block to build a RevNet.
Args:
inputs: [NxHxWx3] tensor of input images to the model.
hparams: HParams object that contains the following parameters,
in addition to the parameters contained in the basic_params1() object in
the common_hparams module:
num_channels_first - A Python list where each element represents the
depth of the first and third convolutional layers in the bottleneck
residual unit for a given block.
num_channels_second - A Python list where each element represents the
depth of the second convolutional layer in the bottleneck residual
unit for a given block.
num_layers_per_block - A Python list containing the number of RevNet
layers for each block.
first_batch_norm - A Python list containing booleans representing the
presence of a batch norm layer at the beginning of a given block.
strides - A Python list containing integers representing the stride of
the residual function for each block.
num_channels_init_block - An integer representing the number of channels
for the convolutional layer in the initial block.
dimension - A string (either "2d" or "3d") that decides if the RevNet is
2-dimensional or 3-dimensional.
reuse: Whether to reuse the default variable scope.
Returns:
[batch_size, hidden_dim] pre-logits tensor from the bottleneck RevNet.
"""
training = hparams.mode == tf.estimator.ModeKeys.TRAIN
with tf.variable_scope('RevNet', reuse=reuse):
x1, x2 = init(inputs,
num_channels=hparams.num_channels_init_block,
dim=hparams.dim,
kernel_size=hparams.init_kernel_size,
maxpool=hparams.init_maxpool,
stride=hparams.init_stride,
training=training)
for block_num in range(len(hparams.num_layers_per_block)):
block = {'depth': hparams.num_channels[block_num],
'num_layers': hparams.num_layers_per_block[block_num],
'first_batch_norm': hparams.first_batch_norm[block_num],
'stride': hparams.strides[block_num],
'bottleneck': hparams.bottleneck}
x1, x2 = unit(x1, x2, block_num, dim=hparams.dim, training=training,
**block)
pre_logits = final_block(x1, x2, dim=hparams.dim, training=training)
return pre_logits |
def set_led(self, led, action=None,
            cabinet=Required, frame=Required, board=Required):
    """Set or toggle the state of an LED.

    .. note::
        At the time of writing, LED 7 is only set by the BMP on start-up to
        indicate that the watchdog timer reset the board. After this point,
        the LED is available for use by applications.

    Parameters
    ----------
    led : int or iterable
        Number of the LED or an iterable of LEDs to set the state of (0-7)
    action : bool or None
        State to set the LED to. True for on, False for off, None to
        toggle (default).
    board : int or iterable
        Specifies the board to control the LEDs of. This may also be an
        iterable of multiple boards (in the same frame). The command will
        actually be sent to the first board in the iterable.
    """
    leds = [led] if isinstance(led, int) else led
    boards = [board] if isinstance(board, int) else list(board)
    # The SCP command is addressed to the first board; the rest are
    # selected via the bitmask argument below.
    board = boards[0]
    # Pack a 2-bit action field per LED into arg1.
    arg1 = sum(LEDAction.from_bool(action) << (led * 2) for led in leds)
    # Bitmask of boards to control.
    arg2 = sum(1 << b for b in boards)
    self._send_scp(cabinet, frame, board, SCPCommands.led, arg1=arg1,
                   arg2=arg2, expected_args=0)
.. note::
At the time of writing, LED 7 is only set by the BMP on start-up to
indicate that the watchdog timer reset the board. After this point,
the LED is available for use by applications.
Parameters
----------
led : int or iterable
Number of the LED or an iterable of LEDs to set the state of (0-7)
action : bool or None
State to set the LED to. True for on, False for off, None to
toggle (default).
board : int or iterable
Specifies the board to control the LEDs of. This may also be an
iterable of multiple boards (in the same frame). The command will
actually be sent to the first board in the iterable. | Below is the the instruction that describes the task:
### Input:
Set or toggle the state of an LED.
.. note::
At the time of writing, LED 7 is only set by the BMP on start-up to
indicate that the watchdog timer reset the board. After this point,
the LED is available for use by applications.
Parameters
----------
led : int or iterable
Number of the LED or an iterable of LEDs to set the state of (0-7)
action : bool or None
State to set the LED to. True for on, False for off, None to
toggle (default).
board : int or iterable
Specifies the board to control the LEDs of. This may also be an
iterable of multiple boards (in the same frame). The command will
actually be sent to the first board in the iterable.
### Response:
def set_led(self, led, action=None,
cabinet=Required, frame=Required, board=Required):
"""Set or toggle the state of an LED.
.. note::
At the time of writing, LED 7 is only set by the BMP on start-up to
indicate that the watchdog timer reset the board. After this point,
the LED is available for use by applications.
Parameters
----------
led : int or iterable
Number of the LED or an iterable of LEDs to set the state of (0-7)
action : bool or None
State to set the LED to. True for on, False for off, None to
toggle (default).
board : int or iterable
Specifies the board to control the LEDs of. This may also be an
iterable of multiple boards (in the same frame). The command will
actually be sent to the first board in the iterable.
"""
if isinstance(led, int):
leds = [led]
else:
leds = led
if isinstance(board, int):
boards = [board]
else:
boards = list(board)
board = boards[0]
# LED setting actions
arg1 = sum(LEDAction.from_bool(action) << (led * 2) for led in leds)
# Bitmask of boards to control
arg2 = sum(1 << b for b in boards)
self._send_scp(cabinet, frame, board, SCPCommands.led, arg1=arg1,
arg2=arg2, expected_args=0) |
def unzip(self, directory):
    """
    Write contents of zipfile to directory.

    The destination (and any missing parents) is created by copytree
    itself; an already-existing directory is merged into.
    """
    # Bug fix: the old code pre-created ``directory`` with os.makedirs and
    # then called shutil.copytree, which refuses to copy into an existing
    # destination -- so the call always raised FileExistsError.
    # dirs_exist_ok (Python 3.8+) lets copytree create or reuse the
    # destination itself.
    shutil.copytree(self.src_dir, directory, dirs_exist_ok=True)
### Input:
Write contents of zipfile to directory
### Response:
def unzip(self, directory):
"""
Write contents of zipfile to directory
"""
if not os.path.exists(directory):
os.makedirs(directory)
shutil.copytree(self.src_dir, directory) |
def load_model(itos_filename, classifier_filename, num_classes):
    """Load the classifier and int to string mapping.

    Args:
        itos_filename (str): The filename of the int to string mapping file (usually called itos.pkl)
        classifier_filename (str): The filename of the trained classifier
        num_classes (int): Number of output classes of the classifier head.

    Returns:
        string to int mapping, trained classifer model
    """
    # Load the int -> string vocabulary and invert it into a string -> int
    # mapping; unknown tokens fall back to index 0.
    itos = pickle.load(Path(itos_filename).open('rb'))
    stoi = collections.defaultdict(
        lambda: 0, {str(token): index for index, token in enumerate(itos)})

    # Hyper-parameters: these must match the values used during training.
    bptt, em_sz, nh, nl = 70, 400, 1150, 3
    dps = np.array([0.4, 0.5, 0.05, 0.3, 0.4]) * 0.5
    vocab_size = len(itos)
    model = get_rnn_classifer(
        bptt, 20 * 70, num_classes, vocab_size,
        emb_sz=em_sz, n_hid=nh, n_layers=nl, pad_token=1,
        layers=[em_sz * 3, 50, num_classes], drops=[dps[4], 0.1],
        dropouti=dps[0], wdrop=dps[1], dropoute=dps[2], dropouth=dps[3])

    # Load the trained weights onto CPU and switch to inference mode.
    model.load_state_dict(
        torch.load(classifier_filename,
                   map_location=lambda storage, loc: storage))
    model.reset()
    model.eval()
    return stoi, model
Args:
itos_filename (str): The filename of the int to string mapping file (usually called itos.pkl)
classifier_filename (str): The filename of the trained classifier
Returns:
string to int mapping, trained classifer model | Below is the the instruction that describes the task:
### Input:
Load the classifier and int to string mapping
Args:
itos_filename (str): The filename of the int to string mapping file (usually called itos.pkl)
classifier_filename (str): The filename of the trained classifier
Returns:
string to int mapping, trained classifer model
### Response:
def load_model(itos_filename, classifier_filename, num_classes):
"""Load the classifier and int to string mapping
Args:
itos_filename (str): The filename of the int to string mapping file (usually called itos.pkl)
classifier_filename (str): The filename of the trained classifier
Returns:
string to int mapping, trained classifer model
"""
# load the int to string mapping file
itos = pickle.load(Path(itos_filename).open('rb'))
# turn it into a string to int mapping (which is what we need)
stoi = collections.defaultdict(lambda:0, {str(v):int(k) for k,v in enumerate(itos)})
# these parameters aren't used, but this is the easiest way to get a model
bptt,em_sz,nh,nl = 70,400,1150,3
dps = np.array([0.4,0.5,0.05,0.3,0.4])*0.5
vs = len(itos)
model = get_rnn_classifer(bptt, 20*70, num_classes, vs, emb_sz=em_sz, n_hid=nh, n_layers=nl, pad_token=1,
layers=[em_sz*3, 50, num_classes], drops=[dps[4], 0.1],
dropouti=dps[0], wdrop=dps[1], dropoute=dps[2], dropouth=dps[3])
# load the trained classifier
model.load_state_dict(torch.load(classifier_filename, map_location=lambda storage, loc: storage))
# put the classifier into evaluation mode
model.reset()
model.eval()
return stoi, model |
def get_default_preds():
    """dynamically build autocomplete options based on an external file"""
    graph = ontospy.Ontospy(rdfsschema, text=True, verbose=False,
                            hide_base_schemas=False)
    # Pair each class/property qname with its best available description.
    classes = [(item.qname, item.bestDescription()) for item in graph.all_classes]
    properties = [(item.qname, item.bestDescription()) for item in graph.all_properties]
    commands = [('exit', 'exits the terminal'), ('show', 'show current buffer')]
    return rdfschema + owlschema + classes + properties + commands
### Input:
dynamically build autocomplete options based on an external file
### Response:
def get_default_preds():
"""dynamically build autocomplete options based on an external file"""
g = ontospy.Ontospy(rdfsschema, text=True, verbose=False, hide_base_schemas=False)
classes = [(x.qname, x.bestDescription()) for x in g.all_classes]
properties = [(x.qname, x.bestDescription()) for x in g.all_properties]
commands = [('exit', 'exits the terminal'), ('show', 'show current buffer')]
return rdfschema + owlschema + classes + properties + commands |
def columnSchema(self):
    """
    Returns the schema for the image column.

    :return: a :class:`StructType` for image column,
        ``struct<origin:string, height:int, width:int, nChannels:int, mode:int, data:binary>``.

    .. versionadded:: 2.4.0
    """
    # Lazily fetch the schema from the JVM once, then cache it.
    if self._columnSchema is None:
        ctx = SparkContext._active_spark_context
        jvm_schema = ctx._jvm.org.apache.spark.ml.image.ImageSchema.columnSchema()
        self._columnSchema = _parse_datatype_json_string(jvm_schema.json())
    return self._columnSchema
:return: a :class:`StructType` for image column,
``struct<origin:string, height:int, width:int, nChannels:int, mode:int, data:binary>``.
.. versionadded:: 2.4.0 | Below is the the instruction that describes the task:
### Input:
Returns the schema for the image column.
:return: a :class:`StructType` for image column,
``struct<origin:string, height:int, width:int, nChannels:int, mode:int, data:binary>``.
.. versionadded:: 2.4.0
### Response:
def columnSchema(self):
"""
Returns the schema for the image column.
:return: a :class:`StructType` for image column,
``struct<origin:string, height:int, width:int, nChannels:int, mode:int, data:binary>``.
.. versionadded:: 2.4.0
"""
if self._columnSchema is None:
ctx = SparkContext._active_spark_context
jschema = ctx._jvm.org.apache.spark.ml.image.ImageSchema.columnSchema()
self._columnSchema = _parse_datatype_json_string(jschema.json())
return self._columnSchema |
def add_cookies_to_web_driver(driver, cookies):
    """
    Sets cookies in an existing WebDriver session.

    Returns the driver to allow call chaining.
    """
    # Each cookie is converted to the dict form the WebDriver API expects.
    for item in cookies:
        driver.add_cookie(convert_cookie_to_dict(item))
    return driver
### Input:
Sets cookies in an existing WebDriver session.
### Response:
def add_cookies_to_web_driver(driver, cookies):
"""
Sets cookies in an existing WebDriver session.
"""
for cookie in cookies:
driver.add_cookie(convert_cookie_to_dict(cookie))
return driver |
def send(self, request_body):
    '''Sends request body.'''
    # A falsy body (empty string / None) means "send without payload".
    if request_body:
        self._httprequest.send(request_body)
    else:
        self._httprequest.send()
### Input:
Sends request body.
### Response:
def send(self, request_body):
''' Sends request body. '''
if not request_body:
self._httprequest.send()
else:
self._httprequest.send(request_body) |
def parse_problem_name(name):
    """Determines if problem_name specifies a copy and/or reversal.

    Args:
        name: str, problem name, possibly with suffixes.

    Returns:
        ProblemSpec: namedtuple with ["base_name", "was_reversed", "was_copy"]

    Raises:
        ValueError if name contains multiple suffixes of the same type
        ('_rev' or '_copy'). One of each is ok.
    """
    # Recursively strip one suffix at a time until a base name remains.
    if name.endswith("_rev"):
        spec = parse_problem_name(name[:-len("_rev")])
        if spec.was_reversed:
            # duplicate rev
            raise ValueError(
                "Invalid problem name %s: multiple '_rev' instances" % name)
        return ProblemSpec(spec.base_name, True, spec.was_copy)
    if name.endswith("_copy"):
        spec = parse_problem_name(name[:-len("_copy")])
        if spec.was_copy:
            raise ValueError(
                "Invalid problem_name %s: multiple '_copy' instances" % name)
        return ProblemSpec(spec.base_name, spec.was_reversed, True)
    return ProblemSpec(name, False, False)
Args:
name: str, problem name, possibly with suffixes.
Returns:
ProblemSpec: namedtuple with ["base_name", "was_reversed", "was_copy"]
Raises:
ValueError if name contains multiple suffixes of the same type
('_rev' or '_copy'). One of each is ok. | Below is the the instruction that describes the task:
### Input:
Determines if problem_name specifies a copy and/or reversal.
Args:
name: str, problem name, possibly with suffixes.
Returns:
ProblemSpec: namedtuple with ["base_name", "was_reversed", "was_copy"]
Raises:
ValueError if name contains multiple suffixes of the same type
('_rev' or '_copy'). One of each is ok.
### Response:
def parse_problem_name(name):
"""Determines if problem_name specifies a copy and/or reversal.
Args:
name: str, problem name, possibly with suffixes.
Returns:
ProblemSpec: namedtuple with ["base_name", "was_reversed", "was_copy"]
Raises:
ValueError if name contains multiple suffixes of the same type
('_rev' or '_copy'). One of each is ok.
"""
# Recursively strip tags until we reach a base name.
if name.endswith("_rev"):
base, was_reversed, was_copy = parse_problem_name(name[:-4])
if was_reversed:
# duplicate rev
raise ValueError(
"Invalid problem name %s: multiple '_rev' instances" % name)
return ProblemSpec(base, True, was_copy)
elif name.endswith("_copy"):
base, was_reversed, was_copy = parse_problem_name(name[:-5])
if was_copy:
raise ValueError(
"Invalid problem_name %s: multiple '_copy' instances" % name)
return ProblemSpec(base, was_reversed, True)
else:
return ProblemSpec(name, False, False) |
def system(command, answer=''):
"""commands.getoutput() replacement that also works on windows"""
p = subprocess.Popen(command,
shell=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=MUST_CLOSE_FDS)
i, o, e = (p.stdin, p.stdout, p.stderr)
if answer:
i.write(answer)
i.close()
result = o.read() + e.read()
o.close()
e.close()
return result.decode('utf8') | commands.getoutput() replacement that also works on windows | Below is the the instruction that describes the task:
### Input:
commands.getoutput() replacement that also works on windows
### Response:
def system(command, answer=''):
"""commands.getoutput() replacement that also works on windows"""
p = subprocess.Popen(command,
shell=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=MUST_CLOSE_FDS)
i, o, e = (p.stdin, p.stdout, p.stderr)
if answer:
i.write(answer)
i.close()
result = o.read() + e.read()
o.close()
e.close()
return result.decode('utf8') |
def anonymous_required(func=None, url=None):
"""Required that the user is not logged in."""
url = url or "/"
def _dec(view_func):
@wraps(view_func, assigned=available_attrs(view_func))
def _wrapped_view(request, *args, **kwargs):
if request.user.is_authenticated():
return redirect(url)
else:
return view_func(request, *args, **kwargs)
return _wrapped_view
if func is None:
return _dec
else:
return _dec(func) | Required that the user is not logged in. | Below is the the instruction that describes the task:
### Input:
Required that the user is not logged in.
### Response:
def anonymous_required(func=None, url=None):
"""Required that the user is not logged in."""
url = url or "/"
def _dec(view_func):
@wraps(view_func, assigned=available_attrs(view_func))
def _wrapped_view(request, *args, **kwargs):
if request.user.is_authenticated():
return redirect(url)
else:
return view_func(request, *args, **kwargs)
return _wrapped_view
if func is None:
return _dec
else:
return _dec(func) |
def smart_email_list(self, status="all", client_id=None):
"""Gets the smart email list."""
if client_id is None:
response = self._get(
"/transactional/smartEmail?status=%s" % status)
else:
response = self._get(
"/transactional/smartEmail?status=%s&clientID=%s" % (status, client_id))
return json_to_py(response) | Gets the smart email list. | Below is the the instruction that describes the task:
### Input:
Gets the smart email list.
### Response:
def smart_email_list(self, status="all", client_id=None):
"""Gets the smart email list."""
if client_id is None:
response = self._get(
"/transactional/smartEmail?status=%s" % status)
else:
response = self._get(
"/transactional/smartEmail?status=%s&clientID=%s" % (status, client_id))
return json_to_py(response) |
def update(version=None):
'''
Update the salt minion from the URL defined in opts['update_url']
SaltStack, Inc provides the latest builds here:
update_url: https://repo.saltstack.com/windows/
Be aware that as of 2014-8-11 there's a bug in esky such that only the
latest version available in the update_url can be downloaded and installed.
This feature requires the minion to be running a bdist_esky build.
The version number is optional and will default to the most recent version
available at opts['update_url'].
Returns details about the transaction upon completion.
CLI Examples:
.. code-block:: bash
salt '*' saltutil.update
salt '*' saltutil.update 0.10.3
'''
ret = {}
if not HAS_ESKY:
ret['_error'] = 'Esky not available as import'
return ret
if not getattr(sys, 'frozen', False):
ret['_error'] = 'Minion is not running an Esky build'
return ret
if not __salt__['config.option']('update_url'):
ret['_error'] = '"update_url" not configured on this minion'
return ret
app = esky.Esky(sys.executable, __opts__['update_url'])
oldversion = __grains__['saltversion']
if not version:
try:
version = app.find_update()
except URLError as exc:
ret['_error'] = 'Could not connect to update_url. Error: {0}'.format(exc)
return ret
if not version:
ret['_error'] = 'No updates available'
return ret
try:
app.fetch_version(version)
except EskyVersionError as exc:
ret['_error'] = 'Unable to fetch version {0}. Error: {1}'.format(version, exc)
return ret
try:
app.install_version(version)
except EskyVersionError as exc:
ret['_error'] = 'Unable to install version {0}. Error: {1}'.format(version, exc)
return ret
try:
app.cleanup()
except Exception as exc:
ret['_error'] = 'Unable to cleanup. Error: {0}'.format(exc)
restarted = {}
for service in __opts__['update_restart_services']:
restarted[service] = __salt__['service.restart'](service)
ret['comment'] = 'Updated from {0} to {1}'.format(oldversion, version)
ret['restarted'] = restarted
return ret | Update the salt minion from the URL defined in opts['update_url']
SaltStack, Inc provides the latest builds here:
update_url: https://repo.saltstack.com/windows/
Be aware that as of 2014-8-11 there's a bug in esky such that only the
latest version available in the update_url can be downloaded and installed.
This feature requires the minion to be running a bdist_esky build.
The version number is optional and will default to the most recent version
available at opts['update_url'].
Returns details about the transaction upon completion.
CLI Examples:
.. code-block:: bash
salt '*' saltutil.update
salt '*' saltutil.update 0.10.3 | Below is the the instruction that describes the task:
### Input:
Update the salt minion from the URL defined in opts['update_url']
SaltStack, Inc provides the latest builds here:
update_url: https://repo.saltstack.com/windows/
Be aware that as of 2014-8-11 there's a bug in esky such that only the
latest version available in the update_url can be downloaded and installed.
This feature requires the minion to be running a bdist_esky build.
The version number is optional and will default to the most recent version
available at opts['update_url'].
Returns details about the transaction upon completion.
CLI Examples:
.. code-block:: bash
salt '*' saltutil.update
salt '*' saltutil.update 0.10.3
### Response:
def update(version=None):
'''
Update the salt minion from the URL defined in opts['update_url']
SaltStack, Inc provides the latest builds here:
update_url: https://repo.saltstack.com/windows/
Be aware that as of 2014-8-11 there's a bug in esky such that only the
latest version available in the update_url can be downloaded and installed.
This feature requires the minion to be running a bdist_esky build.
The version number is optional and will default to the most recent version
available at opts['update_url'].
Returns details about the transaction upon completion.
CLI Examples:
.. code-block:: bash
salt '*' saltutil.update
salt '*' saltutil.update 0.10.3
'''
ret = {}
if not HAS_ESKY:
ret['_error'] = 'Esky not available as import'
return ret
if not getattr(sys, 'frozen', False):
ret['_error'] = 'Minion is not running an Esky build'
return ret
if not __salt__['config.option']('update_url'):
ret['_error'] = '"update_url" not configured on this minion'
return ret
app = esky.Esky(sys.executable, __opts__['update_url'])
oldversion = __grains__['saltversion']
if not version:
try:
version = app.find_update()
except URLError as exc:
ret['_error'] = 'Could not connect to update_url. Error: {0}'.format(exc)
return ret
if not version:
ret['_error'] = 'No updates available'
return ret
try:
app.fetch_version(version)
except EskyVersionError as exc:
ret['_error'] = 'Unable to fetch version {0}. Error: {1}'.format(version, exc)
return ret
try:
app.install_version(version)
except EskyVersionError as exc:
ret['_error'] = 'Unable to install version {0}. Error: {1}'.format(version, exc)
return ret
try:
app.cleanup()
except Exception as exc:
ret['_error'] = 'Unable to cleanup. Error: {0}'.format(exc)
restarted = {}
for service in __opts__['update_restart_services']:
restarted[service] = __salt__['service.restart'](service)
ret['comment'] = 'Updated from {0} to {1}'.format(oldversion, version)
ret['restarted'] = restarted
return ret |
def load_alerts(self):
"""
NOTE: use refresh() instead of this, if you are just needing to refresh the alerts list
Gets raw xml (cap) from the Alerts feed, throws it into the parser
and ends up with a list of alerts object, which it stores to self._alerts
"""
self._feed = AlertsFeed(state=self.scope, maxage=self.cachetime)
parser = CapParser(self._feed.raw_cap(), geo=self.geo)
self._alerts = parser.get_alerts() | NOTE: use refresh() instead of this, if you are just needing to refresh the alerts list
Gets raw xml (cap) from the Alerts feed, throws it into the parser
and ends up with a list of alerts object, which it stores to self._alerts | Below is the the instruction that describes the task:
### Input:
NOTE: use refresh() instead of this, if you are just needing to refresh the alerts list
Gets raw xml (cap) from the Alerts feed, throws it into the parser
and ends up with a list of alerts object, which it stores to self._alerts
### Response:
def load_alerts(self):
"""
NOTE: use refresh() instead of this, if you are just needing to refresh the alerts list
Gets raw xml (cap) from the Alerts feed, throws it into the parser
and ends up with a list of alerts object, which it stores to self._alerts
"""
self._feed = AlertsFeed(state=self.scope, maxage=self.cachetime)
parser = CapParser(self._feed.raw_cap(), geo=self.geo)
self._alerts = parser.get_alerts() |
def _parse_merged_entities(self):
"""set self._merged_entities to the longest possible(wrapping) tokens
"""
self._merged_entities = list(filterfalse(
lambda token: self._is_wrapped(token, self.entities),
self.entities)) | set self._merged_entities to the longest possible(wrapping) tokens | Below is the the instruction that describes the task:
### Input:
set self._merged_entities to the longest possible(wrapping) tokens
### Response:
def _parse_merged_entities(self):
"""set self._merged_entities to the longest possible(wrapping) tokens
"""
self._merged_entities = list(filterfalse(
lambda token: self._is_wrapped(token, self.entities),
self.entities)) |
def revoke_auth(preserve_minion_cache=False):
'''
The minion sends a request to the master to revoke its own key.
Note that the minion session will be revoked and the minion may
not be able to return the result of this command back to the master.
If the 'preserve_minion_cache' flag is set to True, the master
cache for this minion will not be removed.
CLI Example:
.. code-block:: bash
salt '*' saltutil.revoke_auth
'''
masters = list()
ret = True
if 'master_uri_list' in __opts__:
for master_uri in __opts__['master_uri_list']:
masters.append(master_uri)
else:
masters.append(__opts__['master_uri'])
for master in masters:
channel = salt.transport.client.ReqChannel.factory(__opts__, master_uri=master)
tok = channel.auth.gen_token(b'salt')
load = {'cmd': 'revoke_auth',
'id': __opts__['id'],
'tok': tok,
'preserve_minion_cache': preserve_minion_cache}
try:
channel.send(load)
except SaltReqTimeoutError:
ret = False
finally:
channel.close()
return ret | The minion sends a request to the master to revoke its own key.
Note that the minion session will be revoked and the minion may
not be able to return the result of this command back to the master.
If the 'preserve_minion_cache' flag is set to True, the master
cache for this minion will not be removed.
CLI Example:
.. code-block:: bash
salt '*' saltutil.revoke_auth | Below is the the instruction that describes the task:
### Input:
The minion sends a request to the master to revoke its own key.
Note that the minion session will be revoked and the minion may
not be able to return the result of this command back to the master.
If the 'preserve_minion_cache' flag is set to True, the master
cache for this minion will not be removed.
CLI Example:
.. code-block:: bash
salt '*' saltutil.revoke_auth
### Response:
def revoke_auth(preserve_minion_cache=False):
'''
The minion sends a request to the master to revoke its own key.
Note that the minion session will be revoked and the minion may
not be able to return the result of this command back to the master.
If the 'preserve_minion_cache' flag is set to True, the master
cache for this minion will not be removed.
CLI Example:
.. code-block:: bash
salt '*' saltutil.revoke_auth
'''
masters = list()
ret = True
if 'master_uri_list' in __opts__:
for master_uri in __opts__['master_uri_list']:
masters.append(master_uri)
else:
masters.append(__opts__['master_uri'])
for master in masters:
channel = salt.transport.client.ReqChannel.factory(__opts__, master_uri=master)
tok = channel.auth.gen_token(b'salt')
load = {'cmd': 'revoke_auth',
'id': __opts__['id'],
'tok': tok,
'preserve_minion_cache': preserve_minion_cache}
try:
channel.send(load)
except SaltReqTimeoutError:
ret = False
finally:
channel.close()
return ret |
def read_client_secrets():
'''for private or protected registries, a client secrets file is required
to be located at .sregistry. If no secrets are found, we use default
of Singularity Hub, and return a dummy secrets.
'''
client_secrets = _default_client_secrets()
# If token file not provided, check environment
secrets = get_secrets_file()
# If exists, load
if secrets is not None:
client_secrets = read_json(secrets)
# Otherwise, initialize
else:
from sregistry.defaults import SREGISTRY_CLIENT_SECRETS
write_json(client_secrets, SREGISTRY_CLIENT_SECRETS)
return client_secrets | for private or protected registries, a client secrets file is required
to be located at .sregistry. If no secrets are found, we use default
of Singularity Hub, and return a dummy secrets. | Below is the the instruction that describes the task:
### Input:
for private or protected registries, a client secrets file is required
to be located at .sregistry. If no secrets are found, we use default
of Singularity Hub, and return a dummy secrets.
### Response:
def read_client_secrets():
'''for private or protected registries, a client secrets file is required
to be located at .sregistry. If no secrets are found, we use default
of Singularity Hub, and return a dummy secrets.
'''
client_secrets = _default_client_secrets()
# If token file not provided, check environment
secrets = get_secrets_file()
# If exists, load
if secrets is not None:
client_secrets = read_json(secrets)
# Otherwise, initialize
else:
from sregistry.defaults import SREGISTRY_CLIENT_SECRETS
write_json(client_secrets, SREGISTRY_CLIENT_SECRETS)
return client_secrets |
def checksum(digits):
"""
Calculate and return control digit for given list of digits based on
ISO7064, MOD 11,10 standard.
"""
remainder = 10
for digit in digits:
remainder = (remainder + digit) % 10
if remainder == 0:
remainder = 10
remainder = (remainder * 2) % 11
control_digit = 11 - remainder
if control_digit == 10:
control_digit = 0
return control_digit | Calculate and return control digit for given list of digits based on
ISO7064, MOD 11,10 standard. | Below is the the instruction that describes the task:
### Input:
Calculate and return control digit for given list of digits based on
ISO7064, MOD 11,10 standard.
### Response:
def checksum(digits):
"""
Calculate and return control digit for given list of digits based on
ISO7064, MOD 11,10 standard.
"""
remainder = 10
for digit in digits:
remainder = (remainder + digit) % 10
if remainder == 0:
remainder = 10
remainder = (remainder * 2) % 11
control_digit = 11 - remainder
if control_digit == 10:
control_digit = 0
return control_digit |
def describe_group(record, region):
"""Attempts to describe group ids."""
account_id = record['account']
group_name = cloudwatch.filter_request_parameters('groupName', record)
vpc_id = cloudwatch.filter_request_parameters('vpcId', record)
group_id = cloudwatch.filter_request_parameters('groupId', record, look_in_response=True)
# Did this get collected already by the poller?
if cloudwatch.get_collected_details(record):
LOG.debug(f"[<--] Received already collected security group data: {record['detail']['collected']}")
return [record['detail']['collected']]
try:
# Always depend on Group ID first:
if group_id: # pylint: disable=R1705
return describe_security_groups(
account_number=account_id,
assume_role=HISTORICAL_ROLE,
region=region,
GroupIds=[group_id]
)['SecurityGroups']
elif vpc_id and group_name:
return describe_security_groups(
account_number=account_id,
assume_role=HISTORICAL_ROLE,
region=region,
Filters=[
{
'Name': 'group-name',
'Values': [group_name]
},
{
'Name': 'vpc-id',
'Values': [vpc_id]
}
]
)['SecurityGroups']
else:
raise Exception('[X] Did not receive Group ID or VPC/Group Name pairs. '
f'We got: ID: {group_id} VPC/Name: {vpc_id}/{group_name}.')
except ClientError as exc:
if exc.response['Error']['Code'] == 'InvalidGroup.NotFound':
return []
raise exc | Attempts to describe group ids. | Below is the the instruction that describes the task:
### Input:
Attempts to describe group ids.
### Response:
def describe_group(record, region):
"""Attempts to describe group ids."""
account_id = record['account']
group_name = cloudwatch.filter_request_parameters('groupName', record)
vpc_id = cloudwatch.filter_request_parameters('vpcId', record)
group_id = cloudwatch.filter_request_parameters('groupId', record, look_in_response=True)
# Did this get collected already by the poller?
if cloudwatch.get_collected_details(record):
LOG.debug(f"[<--] Received already collected security group data: {record['detail']['collected']}")
return [record['detail']['collected']]
try:
# Always depend on Group ID first:
if group_id: # pylint: disable=R1705
return describe_security_groups(
account_number=account_id,
assume_role=HISTORICAL_ROLE,
region=region,
GroupIds=[group_id]
)['SecurityGroups']
elif vpc_id and group_name:
return describe_security_groups(
account_number=account_id,
assume_role=HISTORICAL_ROLE,
region=region,
Filters=[
{
'Name': 'group-name',
'Values': [group_name]
},
{
'Name': 'vpc-id',
'Values': [vpc_id]
}
]
)['SecurityGroups']
else:
raise Exception('[X] Did not receive Group ID or VPC/Group Name pairs. '
f'We got: ID: {group_id} VPC/Name: {vpc_id}/{group_name}.')
except ClientError as exc:
if exc.response['Error']['Code'] == 'InvalidGroup.NotFound':
return []
raise exc |
def _mangle_prefix(res):
""" Mangle prefix result
"""
# fugly cast from large numbers to string to deal with XML-RPC
res['total_addresses'] = unicode(res['total_addresses'])
res['used_addresses'] = unicode(res['used_addresses'])
res['free_addresses'] = unicode(res['free_addresses'])
# postgres has notion of infinite while datetime hasn't, if expires
# is equal to the max datetime we assume it is infinity and instead
# represent that as None
if res['expires'].tzinfo is None:
res['expires'] = pytz.utc.localize(res['expires'])
if res['expires'] == pytz.utc.localize(datetime.datetime.max):
res['expires'] = None
return res | Mangle prefix result | Below is the the instruction that describes the task:
### Input:
Mangle prefix result
### Response:
def _mangle_prefix(res):
""" Mangle prefix result
"""
# fugly cast from large numbers to string to deal with XML-RPC
res['total_addresses'] = unicode(res['total_addresses'])
res['used_addresses'] = unicode(res['used_addresses'])
res['free_addresses'] = unicode(res['free_addresses'])
# postgres has notion of infinite while datetime hasn't, if expires
# is equal to the max datetime we assume it is infinity and instead
# represent that as None
if res['expires'].tzinfo is None:
res['expires'] = pytz.utc.localize(res['expires'])
if res['expires'] == pytz.utc.localize(datetime.datetime.max):
res['expires'] = None
return res |
def extract_variables(href):
"""Return a list of variable names used in a URI template."""
patterns = [re.sub(r'\*|:\d+', '', pattern)
for pattern in re.findall(r'{[\+#\./;\?&]?([^}]+)*}', href)]
variables = []
for pattern in patterns:
for part in pattern.split(","):
if not part in variables:
variables.append(part)
return variables | Return a list of variable names used in a URI template. | Below is the the instruction that describes the task:
### Input:
Return a list of variable names used in a URI template.
### Response:
def extract_variables(href):
"""Return a list of variable names used in a URI template."""
patterns = [re.sub(r'\*|:\d+', '', pattern)
for pattern in re.findall(r'{[\+#\./;\?&]?([^}]+)*}', href)]
variables = []
for pattern in patterns:
for part in pattern.split(","):
if not part in variables:
variables.append(part)
return variables |
def _addProtein(self, proteinId, proteinName, sequence, fastaHeader,
headerInfo, isDecoy=False, isContaminant=False):
"""#TODO"""
proteinEntry = ProteinEntry(
proteinId, proteinName, sequence, fastaHeader, headerInfo,
isDecoy=isDecoy, isContaminant=isContaminant
)
self.proteins[proteinEntry.id] = proteinEntry | #TODO | Below is the the instruction that describes the task:
### Input:
#TODO
### Response:
def _addProtein(self, proteinId, proteinName, sequence, fastaHeader,
headerInfo, isDecoy=False, isContaminant=False):
"""#TODO"""
proteinEntry = ProteinEntry(
proteinId, proteinName, sequence, fastaHeader, headerInfo,
isDecoy=isDecoy, isContaminant=isContaminant
)
self.proteins[proteinEntry.id] = proteinEntry |
def render(self, file_path, **kwargs):
""" Save the content of the .text file in the PDF.
Parameters
----------
file_path: str
Path to the output file.
"""
temp = get_tempfile(suffix='.tex')
self.save_content(temp.name)
try:
self._render_function(temp.name, file_path, output_format='pdf')
except:
log.exception('Error exporting file {} to PDF.'.format(file_path))
raise | Save the content of the .text file in the PDF.
Parameters
----------
file_path: str
Path to the output file. | Below is the the instruction that describes the task:
### Input:
Save the content of the .text file in the PDF.
Parameters
----------
file_path: str
Path to the output file.
### Response:
def render(self, file_path, **kwargs):
""" Save the content of the .text file in the PDF.
Parameters
----------
file_path: str
Path to the output file.
"""
temp = get_tempfile(suffix='.tex')
self.save_content(temp.name)
try:
self._render_function(temp.name, file_path, output_format='pdf')
except:
log.exception('Error exporting file {} to PDF.'.format(file_path))
raise |
def lu_solve(LU, b):
r"""Solve for LU decomposition.
Solve the linear equations :math:`\mathrm A \mathbf x = \mathbf b`,
given the LU factorization of :math:`\mathrm A`.
Args:
LU (array_like): LU decomposition.
b (array_like): Right-hand side.
Returns:
:class:`numpy.ndarray`: The solution to the system
:math:`\mathrm A \mathbf x = \mathbf b`.
See Also
--------
scipy.linalg.lu_factor : LU decomposition.
scipy.linalg.lu_solve : Solve linear equations given LU factorization.
"""
from scipy.linalg import lu_solve as sp_lu_solve
LU = (asarray(LU[0], float), asarray(LU[1], float))
b = asarray(b, float)
return sp_lu_solve(LU, b, check_finite=False) | r"""Solve for LU decomposition.
Solve the linear equations :math:`\mathrm A \mathbf x = \mathbf b`,
given the LU factorization of :math:`\mathrm A`.
Args:
LU (array_like): LU decomposition.
b (array_like): Right-hand side.
Returns:
:class:`numpy.ndarray`: The solution to the system
:math:`\mathrm A \mathbf x = \mathbf b`.
See Also
--------
scipy.linalg.lu_factor : LU decomposition.
scipy.linalg.lu_solve : Solve linear equations given LU factorization. | Below is the the instruction that describes the task:
### Input:
r"""Solve for LU decomposition.
Solve the linear equations :math:`\mathrm A \mathbf x = \mathbf b`,
given the LU factorization of :math:`\mathrm A`.
Args:
LU (array_like): LU decomposition.
b (array_like): Right-hand side.
Returns:
:class:`numpy.ndarray`: The solution to the system
:math:`\mathrm A \mathbf x = \mathbf b`.
See Also
--------
scipy.linalg.lu_factor : LU decomposition.
scipy.linalg.lu_solve : Solve linear equations given LU factorization.
### Response:
def lu_solve(LU, b):
r"""Solve for LU decomposition.
Solve the linear equations :math:`\mathrm A \mathbf x = \mathbf b`,
given the LU factorization of :math:`\mathrm A`.
Args:
LU (array_like): LU decomposition.
b (array_like): Right-hand side.
Returns:
:class:`numpy.ndarray`: The solution to the system
:math:`\mathrm A \mathbf x = \mathbf b`.
See Also
--------
scipy.linalg.lu_factor : LU decomposition.
scipy.linalg.lu_solve : Solve linear equations given LU factorization.
"""
from scipy.linalg import lu_solve as sp_lu_solve
LU = (asarray(LU[0], float), asarray(LU[1], float))
b = asarray(b, float)
return sp_lu_solve(LU, b, check_finite=False) |
def set_file(self, filename):
""" Analyse the file with the captured content """
# Use the file name as prefix if none is given
if self.output_prefix is None:
_, self.output_prefix = os.path.split(filename)
# Check if the file is present, since rdpcap will not do that
if not (os.path.isfile(filename) and os.access(filename, os.R_OK)):
print 'The file \'{0}\' is either not present or not readable. '\
'Exiting!'.format(filename)
sys.exit(1)
try:
packets = rdpcap(filename)
except NameError:
# Due probably to a bug in rdpcap, this kind of error raises a
# NameError, because the exception that is tried to raise, is not
# defined
print 'The file \'{}\' is not a pcap capture file. Exiting!'\
.format(filename)
sys.exit(2)
for number, packet in enumerate(packets):
# See if there is a field called load
self._debug('\nNUMBER {0}'.format(number), no_prefix=True)
try:
# Will cause AttributeError if there is no load
packet.getfieldval('load')
# Get the full load
load = packet.sprintf('%TCP.payload%')
self._debug('PAYLOAD LENGTH {0}'.format(len(load)),
no_prefix=True)
self._debug(load, load=True)
self._parse_load(load)
except AttributeError:
self._debug('LOAD EXCEPTION', no_prefix=True)
if len(self.messages) > 0 and not self.messages[-1].write_closed:
self._debug('DELETE LAST OPEN FILE')
del self.messages[-1]
if self.args.debug_analysis:
sys.exit(0) | Analyse the file with the captured content | Below is the the instruction that describes the task:
### Input:
Analyse the file with the captured content
### Response:
def set_file(self, filename):
""" Analyse the file with the captured content """
# Use the file name as prefix if none is given
if self.output_prefix is None:
_, self.output_prefix = os.path.split(filename)
# Check if the file is present, since rdpcap will not do that
if not (os.path.isfile(filename) and os.access(filename, os.R_OK)):
print 'The file \'{0}\' is either not present or not readable. '\
'Exiting!'.format(filename)
sys.exit(1)
try:
packets = rdpcap(filename)
except NameError:
# Due probably to a bug in rdpcap, this kind of error raises a
# NameError, because the exception that is tried to raise, is not
# defined
print 'The file \'{}\' is not a pcap capture file. Exiting!'\
.format(filename)
sys.exit(2)
for number, packet in enumerate(packets):
# See if there is a field called load
self._debug('\nNUMBER {0}'.format(number), no_prefix=True)
try:
# Will cause AttributeError if there is no load
packet.getfieldval('load')
# Get the full load
load = packet.sprintf('%TCP.payload%')
self._debug('PAYLOAD LENGTH {0}'.format(len(load)),
no_prefix=True)
self._debug(load, load=True)
self._parse_load(load)
except AttributeError:
self._debug('LOAD EXCEPTION', no_prefix=True)
if len(self.messages) > 0 and not self.messages[-1].write_closed:
self._debug('DELETE LAST OPEN FILE')
del self.messages[-1]
if self.args.debug_analysis:
sys.exit(0) |
def ec_geometric_series(p0, T, n):
r"""Compute expected transition counts for Markov chain after n
steps.
Expected counts are computed according to ..math::
E[C_{ij}^{(n)}]=\sum_{k=0}^{n-1} (p_0^t T^{k})_{i} p_{ij}
The sum is computed using the eigenvalue decomposition of T and
applying the expression for a finite geometric series to each of
the eigenvalues.
For small n the computation of the eigenvalue decomposition can be
much more expensive than a direct computation. In this case it is
beneficial to compute the expected counts using successively
computed matrix vector products p_1^t=p_0^t T, ... as increments.
Parameters
----------
p0 : (M,) ndarray
Starting (probability) vector of the chain.
T : (M, M) ndarray
Transition matrix of the chain.
n : int
Number of steps to take from initial state.
Returns
--------
EC : (M, M) ndarray
Expected value for transition counts after N steps.
"""
if (n <= 0):
EC = np.zeros(T.shape)
return EC
else:
R, D, L = rdl_decomposition(T)
w = np.diagonal(D)
L = np.transpose(L)
D_sum = np.diag(geometric_series(w, n - 1))
T_sum = np.dot(np.dot(R, D_sum), np.conjugate(np.transpose(L)))
p_sum = np.dot(p0, T_sum)
EC = p_sum[:, np.newaxis] * T
"""Truncate imginary part - which is zero, but we want real
return values"""
EC = EC.real
return EC | r"""Compute expected transition counts for Markov chain after n
steps.
Expected counts are computed according to ..math::
E[C_{ij}^{(n)}]=\sum_{k=0}^{n-1} (p_0^t T^{k})_{i} p_{ij}
The sum is computed using the eigenvalue decomposition of T and
applying the expression for a finite geometric series to each of
the eigenvalues.
For small n the computation of the eigenvalue decomposition can be
much more expensive than a direct computation. In this case it is
beneficial to compute the expected counts using successively
computed matrix vector products p_1^t=p_0^t T, ... as increments.
Parameters
----------
p0 : (M,) ndarray
Starting (probability) vector of the chain.
T : (M, M) ndarray
Transition matrix of the chain.
n : int
Number of steps to take from initial state.
Returns
--------
EC : (M, M) ndarray
Expected value for transition counts after N steps. | Below is the the instruction that describes the task:
### Input:
r"""Compute expected transition counts for Markov chain after n
steps.
Expected counts are computed according to ..math::
E[C_{ij}^{(n)}]=\sum_{k=0}^{n-1} (p_0^t T^{k})_{i} p_{ij}
The sum is computed using the eigenvalue decomposition of T and
applying the expression for a finite geometric series to each of
the eigenvalues.
For small n the computation of the eigenvalue decomposition can be
much more expensive than a direct computation. In this case it is
beneficial to compute the expected counts using successively
computed matrix vector products p_1^t=p_0^t T, ... as increments.
Parameters
----------
p0 : (M,) ndarray
Starting (probability) vector of the chain.
T : (M, M) ndarray
Transition matrix of the chain.
n : int
Number of steps to take from initial state.
Returns
--------
EC : (M, M) ndarray
Expected value for transition counts after N steps.
### Response:
def ec_geometric_series(p0, T, n):
    r"""Compute expected transition counts for Markov chain after n steps.
    Expected counts are computed according to ..math::
    E[C_{ij}^{(n)}]=\sum_{k=0}^{n-1} (p_0^t T^{k})_{i} p_{ij}
    The sum over powers of T is evaluated via the eigenvalue
    decomposition of T, applying the closed form of a finite geometric
    series to each eigenvalue.  For small n a direct computation via
    successive matrix-vector products p_1^t = p_0^t T, ... can be
    cheaper than the decomposition.
    Parameters
    ----------
    p0 : (M,) ndarray
        Starting (probability) vector of the chain.
    T : (M, M) ndarray
        Transition matrix of the chain.
    n : int
        Number of steps to take from initial state.
    Returns
    --------
    EC : (M, M) ndarray
        Expected value for transition counts after N steps.
    """
    if n <= 0:
        # No steps taken: no transitions can have occurred.
        return np.zeros(T.shape)
    R, D, L = rdl_decomposition(T)
    eigvals = np.diagonal(D)
    L = np.transpose(L)
    # Closed-form sum T^0 + ... + T^(n-1) through the spectral decomposition:
    # apply the finite geometric series to each eigenvalue.
    D_sum = np.diag(geometric_series(eigvals, n - 1))
    T_sum = np.dot(np.dot(R, D_sum), np.conjugate(np.transpose(L)))
    p_sum = np.dot(p0, T_sum)
    # The imaginary part is zero up to round-off; return real-valued counts.
    return (p_sum[:, np.newaxis] * T).real
def csch(x, context=None):
"""
Return the hyperbolic cosecant of x.
"""
return _apply_function_in_current_context(
BigFloat,
mpfr.mpfr_csch,
(BigFloat._implicit_convert(x),),
context,
    ) | Return the hyperbolic cosecant of x. | Below is the instruction that describes the task:
### Input:
Return the hyperbolic cosecant of x.
### Response:
def csch(x, context=None):
    """
    Return the hyperbolic cosecant of x.
    """
    # Convert the argument to a BigFloat first, then dispatch through the
    # shared helper, which applies the (optional) context.
    operand = BigFloat._implicit_convert(x)
    return _apply_function_in_current_context(
        BigFloat, mpfr.mpfr_csch, (operand,), context)
def create(embedding_name, **kwargs):
"""Creates an instance of token embedding.
Creates a token embedding instance by loading embedding vectors from an externally hosted
pre-trained token embedding file, such as those of GloVe and FastText. To get all the valid
`embedding_name` and `source`, use :func:`gluonnlp.embedding.list_sources`.
Parameters
----------
embedding_name : str
The token embedding name (case-insensitive).
kwargs : dict
All other keyword arguments are passed to the initializer of token
embedding class. For example `create(embedding_name='fasttext',
source='wiki.simple', load_ngrams=True)` will return
`FastText(source='wiki.simple', load_ngrams=True)`.
Returns
-------
An instance of :class:`gluonnlp.embedding.TokenEmbedding`:
A token embedding instance that loads embedding vectors from an externally hosted
pre-trained token embedding file.
"""
create_text_embedding = registry.get_create_func(TokenEmbedding, 'token embedding')
return create_text_embedding(embedding_name, **kwargs) | Creates an instance of token embedding.
Creates a token embedding instance by loading embedding vectors from an externally hosted
pre-trained token embedding file, such as those of GloVe and FastText. To get all the valid
`embedding_name` and `source`, use :func:`gluonnlp.embedding.list_sources`.
Parameters
----------
embedding_name : str
The token embedding name (case-insensitive).
kwargs : dict
All other keyword arguments are passed to the initializer of token
embedding class. For example `create(embedding_name='fasttext',
source='wiki.simple', load_ngrams=True)` will return
`FastText(source='wiki.simple', load_ngrams=True)`.
Returns
-------
An instance of :class:`gluonnlp.embedding.TokenEmbedding`:
A token embedding instance that loads embedding vectors from an externally hosted
pre-trained token embedding file. | Below is the instruction that describes the task:
### Input:
Creates an instance of token embedding.
Creates a token embedding instance by loading embedding vectors from an externally hosted
pre-trained token embedding file, such as those of GloVe and FastText. To get all the valid
`embedding_name` and `source`, use :func:`gluonnlp.embedding.list_sources`.
Parameters
----------
embedding_name : str
The token embedding name (case-insensitive).
kwargs : dict
All other keyword arguments are passed to the initializer of token
embedding class. For example `create(embedding_name='fasttext',
source='wiki.simple', load_ngrams=True)` will return
`FastText(source='wiki.simple', load_ngrams=True)`.
Returns
-------
An instance of :class:`gluonnlp.embedding.TokenEmbedding`:
A token embedding instance that loads embedding vectors from an externally hosted
pre-trained token embedding file.
### Response:
def create(embedding_name, **kwargs):
    """Create a token embedding instance by name.
    Looks up the token embedding class registered under `embedding_name`
    (case-insensitive) and instantiates it with `kwargs`; for example
    `create(embedding_name='fasttext', source='wiki.simple', load_ngrams=True)`
    returns `FastText(source='wiki.simple', load_ngrams=True)`.  Use
    :func:`gluonnlp.embedding.list_sources` to list the valid
    `embedding_name` and `source` values.
    Parameters
    ----------
    embedding_name : str
        The token embedding name (case-insensitive).
    kwargs : dict
        Keyword arguments forwarded to the token embedding class initializer.
    Returns
    -------
    An instance of :class:`gluonnlp.embedding.TokenEmbedding`:
        A token embedding instance that loads embedding vectors from an
        externally hosted pre-trained token embedding file.
    """
    factory = registry.get_create_func(TokenEmbedding, 'token embedding')
    return factory(embedding_name, **kwargs)
def has_bounds(self):
"""return True, if any variable is bounded"""
bounds = self.bounds
if bounds in (None, [None, None]):
return False
for ib, bound in enumerate(bounds):
if bound is not None:
sign_ = 2 * ib - 1
for bound_i in bound:
if bound_i is not None and sign_ * bound_i < np.inf:
return True
    return False | return True, if any variable is bounded | Below is the instruction that describes the task:
### Input:
return True, if any variable is bounded
### Response:
def has_bounds(self):
    """return True, if any variable is bounded"""
    bounds = self.bounds
    # `None` or the placeholder [None, None] means "no bounds configured".
    if bounds is None or bounds == [None, None]:
        return False
    for side, bound in enumerate(bounds):
        if bound is None:
            continue
        # side 0 holds lower bounds (sign -1), side 1 upper bounds (sign +1);
        # a bound is effective when its signed value is finite.
        sign_ = 2 * side - 1
        if any(b is not None and sign_ * b < np.inf for b in bound):
            return True
    return False
def read(self, n=1):
"""Return n bytes
:param n: number of bytes to return
:type n: :class:`int`
:return: bytes
:rtype: :class:`bytes`
"""
self.offset += n
return self.data[self.offset - n:self.offset] | Return n bytes
:param n: number of bytes to return
:type n: :class:`int`
:return: bytes
:rtype: :class:`bytes` | Below is the instruction that describes the task:
### Input:
Return n bytes
:param n: number of bytes to return
:type n: :class:`int`
:return: bytes
:rtype: :class:`bytes`
### Response:
def read(self, n=1):
    """Return n bytes
    :param n: number of bytes to return
    :type n: :class:`int`
    :return: bytes
    :rtype: :class:`bytes`
    """
    # Remember where this read starts, then advance the cursor past it.
    start = self.offset
    self.offset = start + n
    return self.data[start:self.offset]
def debug(ftn, txt):
"""Used for debugging."""
if debug_p:
sys.stdout.write("{0}.{1}:{2}\n".format(modname, ftn, txt))
    sys.stdout.flush() | Used for debugging. | Below is the instruction that describes the task:
### Input:
Used for debugging.
### Response:
def debug(ftn, txt):
    """Used for debugging."""
    # Only emit when the module-level debug flag is enabled.
    if not debug_p:
        return
    sys.stdout.write("{0}.{1}:{2}\n".format(modname, ftn, txt))
    sys.stdout.flush()
def _notify_mutated(self, obj, old, hint=None):
''' A method to call when a container is mutated "behind our back"
and we detect it with our |PropertyContainer| wrappers.
Args:
obj (HasProps) :
The object who's container value was mutated
old (object) :
The "old" value of the container
In this case, somewhat weirdly, ``old`` is a copy and the
new value should already be set unless we change it due to
validation.
hint (event hint or None, optional)
An optional update event hint, e.g. ``ColumnStreamedEvent``
(default: None)
Update event hints are usually used at times when better
update performance can be obtained by special-casing in
some way (e.g. streaming or patching column data sources)
Returns:
None
'''
value = self.__get__(obj, obj.__class__)
# re-validate because the contents of 'old' have changed,
# in some cases this could give us a new object for the value
value = self.property.prepare_value(obj, self.name, value)
self._real_set(obj, old, value, hint=hint) | A method to call when a container is mutated "behind our back"
and we detect it with our |PropertyContainer| wrappers.
Args:
obj (HasProps) :
The object who's container value was mutated
old (object) :
The "old" value of the container
In this case, somewhat weirdly, ``old`` is a copy and the
new value should already be set unless we change it due to
validation.
hint (event hint or None, optional)
An optional update event hint, e.g. ``ColumnStreamedEvent``
(default: None)
Update event hints are usually used at times when better
update performance can be obtained by special-casing in
some way (e.g. streaming or patching column data sources)
Returns:
None | Below is the instruction that describes the task:
### Input:
A method to call when a container is mutated "behind our back"
and we detect it with our |PropertyContainer| wrappers.
Args:
obj (HasProps) :
The object who's container value was mutated
old (object) :
The "old" value of the container
In this case, somewhat weirdly, ``old`` is a copy and the
new value should already be set unless we change it due to
validation.
hint (event hint or None, optional)
An optional update event hint, e.g. ``ColumnStreamedEvent``
(default: None)
Update event hints are usually used at times when better
update performance can be obtained by special-casing in
some way (e.g. streaming or patching column data sources)
Returns:
None
### Response:
def _notify_mutated(self, obj, old, hint=None):
    ''' A method to call when a container is mutated "behind our back"
    and we detect it with our |PropertyContainer| wrappers.
    Args:
        obj (HasProps) :
            The object whose container value was mutated
        old (object) :
            The "old" value of the container
            In this case, somewhat weirdly, ``old`` is a copy and the
            new value should already be set unless we change it due to
            validation.
        hint (event hint or None, optional)
            An optional update event hint, e.g. ``ColumnStreamedEvent``
            (default: None)
            Update event hints are usually used at times when better
            update performance can be obtained by special-casing in
            some way (e.g. streaming or patching column data sources)
    Returns:
        None
    '''
    # Read the (already-mutated) current value through the descriptor.
    value = self.__get__(obj, obj.__class__)
    # re-validate because the contents of 'old' have changed,
    # in some cases this could give us a new object for the value
    value = self.property.prepare_value(obj, self.name, value)
    # NOTE(review): _real_set (defined elsewhere) presumably performs the
    # actual assignment and fires change notifications — confirm there.
    self._real_set(obj, old, value, hint=hint)
self._real_set(obj, old, value, hint=hint) |
def _from_arrays(self, x, y, z):
"""
Create VTK rectilinear grid directly from numpy arrays. Each array
gives the uniques coordinates of the mesh along each axial direction.
To help ensure you are using this correctly, we take the unique values
of each argument.
Parameters
----------
x : np.ndarray
Coordinates of the nodes in x direction.
y : np.ndarray
Coordinates of the nodes in y direction.
z : np.ndarray
Coordinates of the nodes in z direction.
"""
x = np.unique(x.ravel())
y = np.unique(y.ravel())
z = np.unique(z.ravel())
# Set the cell spacings and dimensions of the grid
self.SetDimensions(len(x), len(y), len(z))
self.SetXCoordinates(numpy_to_vtk(x))
self.SetYCoordinates(numpy_to_vtk(y))
self.SetZCoordinates(numpy_to_vtk(z)) | Create VTK rectilinear grid directly from numpy arrays. Each array
gives the uniques coordinates of the mesh along each axial direction.
To help ensure you are using this correctly, we take the unique values
of each argument.
Parameters
----------
x : np.ndarray
Coordinates of the nodes in x direction.
y : np.ndarray
Coordinates of the nodes in y direction.
z : np.ndarray
Coordinates of the nodes in z direction. | Below is the instruction that describes the task:
### Input:
Create VTK rectilinear grid directly from numpy arrays. Each array
gives the uniques coordinates of the mesh along each axial direction.
To help ensure you are using this correctly, we take the unique values
of each argument.
Parameters
----------
x : np.ndarray
Coordinates of the nodes in x direction.
y : np.ndarray
Coordinates of the nodes in y direction.
z : np.ndarray
Coordinates of the nodes in z direction.
### Response:
def _from_arrays(self, x, y, z):
    """
    Create VTK rectilinear grid directly from numpy arrays. Each array
    gives the uniques coordinates of the mesh along each axial direction.
    To help ensure you are using this correctly, we take the unique values
    of each argument.
    Parameters
    ----------
    x : np.ndarray
        Coordinates of the nodes in x direction.
    y : np.ndarray
        Coordinates of the nodes in y direction.
    z : np.ndarray
        Coordinates of the nodes in z direction.
    """
    # Flatten each input and keep only its sorted unique coordinates.
    axes = [np.unique(axis.ravel()) for axis in (x, y, z)]
    # Set the cell spacings and dimensions of the grid.
    self.SetDimensions(*(len(coords) for coords in axes))
    setters = (self.SetXCoordinates, self.SetYCoordinates, self.SetZCoordinates)
    for setter, coords in zip(setters, axes):
        setter(numpy_to_vtk(coords))
def CheckBraces(filename, clean_lines, linenum, error):
"""Looks for misplaced braces (e.g. at the end of line).
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum] # get rid of comments and strings
if Match(r'\s*{\s*$', line):
# We allow an open brace to start a line in the case where someone is using
# braces in a block to explicitly create a new scope, which is commonly used
# to control the lifetime of stack-allocated variables. Braces are also
# used for brace initializers inside function calls. We don't detect this
# perfectly: we just don't complain if the last non-whitespace character on
# the previous non-blank line is ',', ';', ':', '(', '{', or '}', or if the
# previous line starts a preprocessor block.
prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
if (not Search(r'[,;:}{(]\s*$', prevline) and
not Match(r'\s*#', prevline)):
error(filename, linenum, 'whitespace/braces', 4,
'{ should almost always be at the end of the previous line')
# An else clause should be on the same line as the preceding closing brace.
if Match(r'\s*else\b\s*(?:if\b|\{|$)', line):
prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
if Match(r'\s*}\s*$', prevline):
error(filename, linenum, 'whitespace/newline', 4,
'An else should appear on the same line as the preceding }')
# If braces come on one side of an else, they should be on both.
# However, we have to worry about "else if" that spans multiple lines!
if Search(r'else if\s*\(', line): # could be multi-line if
brace_on_left = bool(Search(r'}\s*else if\s*\(', line))
# find the ( after the if
pos = line.find('else if')
pos = line.find('(', pos)
if pos > 0:
(endline, _, endpos) = CloseExpression(clean_lines, linenum, pos)
brace_on_right = endline[endpos:].find('{') != -1
if brace_on_left != brace_on_right: # must be brace after if
error(filename, linenum, 'readability/braces', 5,
'If an else has a brace on one side, it should have it on both')
elif Search(r'}\s*else[^{]*$', line) or Match(r'[^}]*else\s*{', line):
error(filename, linenum, 'readability/braces', 5,
'If an else has a brace on one side, it should have it on both')
# Likewise, an else should never have the else clause on the same line
if Search(r'\belse [^\s{]', line) and not Search(r'\belse if\b', line):
error(filename, linenum, 'whitespace/newline', 4,
'Else clause should never be on same line as else (use 2 lines)')
# In the same way, a do/while should never be on one line
if Match(r'\s*do [^\s{]', line):
error(filename, linenum, 'whitespace/newline', 4,
'do/while clauses should not be on a single line')
# Check single-line if/else bodies. The style guide says 'curly braces are not
# required for single-line statements'. We additionally allow multi-line,
# single statements, but we reject anything with more than one semicolon in
# it. This means that the first semicolon after the if should be at the end of
# its line, and the line after that should have an indent level equal to or
# lower than the if. We also check for ambiguous if/else nesting without
# braces.
if_else_match = Search(r'\b(if\s*\(|else\b)', line)
if if_else_match and not Match(r'\s*#', line):
if_indent = GetIndentLevel(line)
endline, endlinenum, endpos = line, linenum, if_else_match.end()
if_match = Search(r'\bif\s*\(', line)
if if_match:
# This could be a multiline if condition, so find the end first.
pos = if_match.end() - 1
(endline, endlinenum, endpos) = CloseExpression(clean_lines, linenum, pos)
# Check for an opening brace, either directly after the if or on the next
# line. If found, this isn't a single-statement conditional.
if (not Match(r'\s*{', endline[endpos:])
and not (Match(r'\s*$', endline[endpos:])
and endlinenum < (len(clean_lines.elided) - 1)
and Match(r'\s*{', clean_lines.elided[endlinenum + 1]))):
while (endlinenum < len(clean_lines.elided)
and ';' not in clean_lines.elided[endlinenum][endpos:]):
endlinenum += 1
endpos = 0
if endlinenum < len(clean_lines.elided):
endline = clean_lines.elided[endlinenum]
# We allow a mix of whitespace and closing braces (e.g. for one-liner
# methods) and a single \ after the semicolon (for macros)
endpos = endline.find(';')
if not Match(r';[\s}]*(\\?)$', endline[endpos:]):
# Semicolon isn't the last character, there's something trailing.
# Output a warning if the semicolon is not contained inside
# a lambda expression.
if not Match(r'^[^{};]*\[[^\[\]]*\][^{}]*\{[^{}]*\}\s*\)*[;,]\s*$',
endline):
error(filename, linenum, 'readability/braces', 4,
'If/else bodies with multiple statements require braces')
elif endlinenum < len(clean_lines.elided) - 1:
# Make sure the next line is dedented
next_line = clean_lines.elided[endlinenum + 1]
next_indent = GetIndentLevel(next_line)
# With ambiguous nested if statements, this will error out on the
# if that *doesn't* match the else, regardless of whether it's the
# inner one or outer one.
if (if_match and Match(r'\s*else\b', next_line)
and next_indent != if_indent):
error(filename, linenum, 'readability/braces', 4,
'Else clause should be indented at the same level as if. '
'Ambiguous nested if/else chains require braces.')
elif next_indent > if_indent:
error(filename, linenum, 'readability/braces', 4,
'If/else bodies with multiple statements require braces') | Looks for misplaced braces (e.g. at the end of line).
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found. | Below is the instruction that describes the task:
### Input:
Looks for misplaced braces (e.g. at the end of line).
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
### Response:
def CheckBraces(filename, clean_lines, linenum, error):
  """Looks for misplaced braces (e.g. at the end of line).

  Flags brace-placement style issues on the given line: open braces
  starting a line, else placement relative to the closing brace,
  one-sided braces around if/else, single-line else and do/while
  bodies, and multi-statement if/else bodies lacking braces.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]  # get rid of comments and strings
  if Match(r'\s*{\s*$', line):
    # We allow an open brace to start a line in the case where someone is using
    # braces in a block to explicitly create a new scope, which is commonly used
    # to control the lifetime of stack-allocated variables.  Braces are also
    # used for brace initializers inside function calls.  We don't detect this
    # perfectly: we just don't complain if the last non-whitespace character on
    # the previous non-blank line is ',', ';', ':', '(', '{', or '}', or if the
    # previous line starts a preprocessor block.
    prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
    if (not Search(r'[,;:}{(]\s*$', prevline) and
        not Match(r'\s*#', prevline)):
      error(filename, linenum, 'whitespace/braces', 4,
            '{ should almost always be at the end of the previous line')
  # An else clause should be on the same line as the preceding closing brace.
  if Match(r'\s*else\b\s*(?:if\b|\{|$)', line):
    prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
    if Match(r'\s*}\s*$', prevline):
      error(filename, linenum, 'whitespace/newline', 4,
            'An else should appear on the same line as the preceding }')
  # If braces come on one side of an else, they should be on both.
  # However, we have to worry about "else if" that spans multiple lines!
  if Search(r'else if\s*\(', line):       # could be multi-line if
    brace_on_left = bool(Search(r'}\s*else if\s*\(', line))
    # find the ( after the if
    pos = line.find('else if')
    pos = line.find('(', pos)
    if pos > 0:
      (endline, _, endpos) = CloseExpression(clean_lines, linenum, pos)
      brace_on_right = endline[endpos:].find('{') != -1
      if brace_on_left != brace_on_right:    # must be brace after if
        error(filename, linenum, 'readability/braces', 5,
              'If an else has a brace on one side, it should have it on both')
  elif Search(r'}\s*else[^{]*$', line) or Match(r'[^}]*else\s*{', line):
    error(filename, linenum, 'readability/braces', 5,
          'If an else has a brace on one side, it should have it on both')
  # Likewise, an else should never have the else clause on the same line
  if Search(r'\belse [^\s{]', line) and not Search(r'\belse if\b', line):
    error(filename, linenum, 'whitespace/newline', 4,
          'Else clause should never be on same line as else (use 2 lines)')
  # In the same way, a do/while should never be on one line
  if Match(r'\s*do [^\s{]', line):
    error(filename, linenum, 'whitespace/newline', 4,
          'do/while clauses should not be on a single line')
  # Check single-line if/else bodies. The style guide says 'curly braces are not
  # required for single-line statements'. We additionally allow multi-line,
  # single statements, but we reject anything with more than one semicolon in
  # it. This means that the first semicolon after the if should be at the end of
  # its line, and the line after that should have an indent level equal to or
  # lower than the if. We also check for ambiguous if/else nesting without
  # braces.
  if_else_match = Search(r'\b(if\s*\(|else\b)', line)
  if if_else_match and not Match(r'\s*#', line):
    if_indent = GetIndentLevel(line)
    endline, endlinenum, endpos = line, linenum, if_else_match.end()
    if_match = Search(r'\bif\s*\(', line)
    if if_match:
      # This could be a multiline if condition, so find the end first.
      pos = if_match.end() - 1
      (endline, endlinenum, endpos) = CloseExpression(clean_lines, linenum, pos)
    # Check for an opening brace, either directly after the if or on the next
    # line. If found, this isn't a single-statement conditional.
    if (not Match(r'\s*{', endline[endpos:])
        and not (Match(r'\s*$', endline[endpos:])
                 and endlinenum < (len(clean_lines.elided) - 1)
                 and Match(r'\s*{', clean_lines.elided[endlinenum + 1]))):
      # Advance to the line that holds the first ';' of the conditional body.
      while (endlinenum < len(clean_lines.elided)
             and ';' not in clean_lines.elided[endlinenum][endpos:]):
        endlinenum += 1
        endpos = 0
      if endlinenum < len(clean_lines.elided):
        endline = clean_lines.elided[endlinenum]
        # We allow a mix of whitespace and closing braces (e.g. for one-liner
        # methods) and a single \ after the semicolon (for macros)
        endpos = endline.find(';')
        if not Match(r';[\s}]*(\\?)$', endline[endpos:]):
          # Semicolon isn't the last character, there's something trailing.
          # Output a warning if the semicolon is not contained inside
          # a lambda expression.
          if not Match(r'^[^{};]*\[[^\[\]]*\][^{}]*\{[^{}]*\}\s*\)*[;,]\s*$',
                       endline):
            error(filename, linenum, 'readability/braces', 4,
                  'If/else bodies with multiple statements require braces')
        elif endlinenum < len(clean_lines.elided) - 1:
          # Make sure the next line is dedented
          next_line = clean_lines.elided[endlinenum + 1]
          next_indent = GetIndentLevel(next_line)
          # With ambiguous nested if statements, this will error out on the
          # if that *doesn't* match the else, regardless of whether it's the
          # inner one or outer one.
          if (if_match and Match(r'\s*else\b', next_line)
              and next_indent != if_indent):
            error(filename, linenum, 'readability/braces', 4,
                  'Else clause should be indented at the same level as if. '
                  'Ambiguous nested if/else chains require braces.')
          elif next_indent > if_indent:
            error(filename, linenum, 'readability/braces', 4,
                  'If/else bodies with multiple statements require braces')
def ctype_class(self):
"""Summary
Returns:
TYPE: Description
"""
def vec_factory(elemType):
"""Summary
Args:
elemType (TYPE): Description
Returns:
TYPE: Description
"""
class Vec(Structure):
"""Summary
"""
_fields_ = [
("ptr", POINTER(elemType.ctype_class)),
("size", c_long),
]
return Vec
if self.elemType not in WeldVec._singletons:
WeldVec._singletons[self.elemType] = vec_factory(self.elemType)
return WeldVec._singletons[self.elemType] | Summary
Returns:
TYPE: Description | Below is the instruction that describes the task:
### Input:
Summary
Returns:
TYPE: Description
### Response:
def ctype_class(self):
    """Return the ctypes Structure class describing this vector type.
    The returned class mirrors the vector's C layout: a pointer to the
    element storage (``ptr``) plus a signed long length (``size``).
    Classes are memoized per element type in ``WeldVec._singletons`` so
    repeated lookups for the same element type share one Structure class.
    Returns:
        type: A ctypes ``Structure`` subclass with ``ptr`` and ``size``
        fields.
    """
    def vec_factory(elemType):
        """Build the ``Structure`` subclass for vectors of *elemType*.
        Args:
            elemType: Element type object exposing a ``ctype_class``.
        Returns:
            type: The newly created ``Vec`` Structure subclass.
        """
        class Vec(Structure):
            """C-compatible layout of a vector of ``elemType``: a data
            pointer plus an element count."""
            _fields_ = [
                ("ptr", POINTER(elemType.ctype_class)),
                ("size", c_long),
            ]
        return Vec
    # Memoize: build the Structure class at most once per element type.
    if self.elemType not in WeldVec._singletons:
        WeldVec._singletons[self.elemType] = vec_factory(self.elemType)
    return WeldVec._singletons[self.elemType]
def process_corpus(self):
"""Q.process_corpus() -- processes the queries defined by us,
by tokenizing, stemming, and removing stop words.
"""
for doc in self.corpus_list:
doc = wt(doc)
sentence = []
for word in doc:
if word not in self.stop_words and word not in self.punctuation:
word = self.stemmer.stem(word)
sentence.append(word)
self.processed_corpus.append(sentence) | Q.process_corpus() -- processes the queries defined by us,
by tokenizing, stemming, and removing stop words. | Below is the instruction that describes the task:
### Input:
Q.process_corpus() -- processes the queries defined by us,
by tokenizing, stemming, and removing stop words.
### Response:
def process_corpus(self):
    """Q.process_corpus() -- processes the queries defined by us,
    by tokenizing, stemming, and removing stop words.
    """
    for document in self.corpus_list:
        # Tokenize, drop stop words / punctuation, then stem the survivors.
        cleaned = [
            self.stemmer.stem(token)
            for token in wt(document)
            if token not in self.stop_words and token not in self.punctuation
        ]
        self.processed_corpus.append(cleaned)
def getTopologyInfo(self, topologyName, cluster, role, environ):
"""
Returns the JSON representation of a topology
by its name, cluster, environ, and an optional role parameter.
Raises exception if no such topology is found.
"""
# Iterate over the values to filter the desired topology.
for (topology_name, _), topologyInfo in self.topologyInfos.items():
executionState = topologyInfo["execution_state"]
if (topologyName == topology_name and
cluster == executionState["cluster"] and
environ == executionState["environ"]):
# If role is specified, first try to match "role" field. If "role" field
# does not exist, try to match "submission_user" field.
if not role or executionState.get("role") == role:
return topologyInfo
if role is not None:
Log.info("Could not find topology info for topology: %s," \
"cluster: %s, role: %s, and environ: %s",
topologyName, cluster, role, environ)
else:
Log.info("Could not find topology info for topology: %s," \
"cluster: %s and environ: %s", topologyName, cluster, environ)
raise Exception("No topology found") | Returns the JSON representation of a topology
by its name, cluster, environ, and an optional role parameter.
Raises exception if no such topology is found. | Below is the instruction that describes the task:
### Input:
Returns the JSON representation of a topology
by its name, cluster, environ, and an optional role parameter.
Raises exception if no such topology is found.
### Response:
def getTopologyInfo(self, topologyName, cluster, role, environ):
    """
    Returns the JSON representation of a topology
    by its name, cluster, environ, and an optional role parameter.
    Raises exception if no such topology is found.
    """
    for (name, _), info in self.topologyInfos.items():
        state = info["execution_state"]
        # Skip entries that do not match on name, cluster and environ.
        if (name != topologyName
                or state["cluster"] != cluster
                or state["environ"] != environ):
            continue
        # If role is specified, first try to match "role" field. If "role"
        # field does not exist, try to match "submission_user" field.
        if not role or state.get("role") == role:
            return info
    if role is not None:
        Log.info("Could not find topology info for topology: %s," \
                 "cluster: %s, role: %s, and environ: %s",
                 topologyName, cluster, role, environ)
    else:
        Log.info("Could not find topology info for topology: %s," \
                 "cluster: %s and environ: %s", topologyName, cluster, environ)
    raise Exception("No topology found")
def update_fixed(self, kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps, kwargs_cosmo, lens_add_fixed=[],
source_add_fixed=[], lens_light_add_fixed=[], ps_add_fixed=[], cosmo_add_fixed=[], lens_remove_fixed=[],
source_remove_fixed=[], lens_light_remove_fixed=[], ps_remove_fixed=[], cosmo_remove_fixed=[]):
"""
adds the values of the keyword arguments that are stated in the _add_fixed to the existing fixed arguments.
:param kwargs_lens:
:param kwargs_source:
:param kwargs_lens_light:
:param kwargs_ps:
:param kwargs_cosmo:
:param lens_add_fixed:
:param source_add_fixed:
:param lens_light_add_fixed:
:param ps_add_fixed:
:param cosmo_add_fixed:
:return: updated kwargs fixed
"""
lens_fixed = self._add_fixed(kwargs_lens, self._lens_fixed, lens_add_fixed)
lens_fixed = self._remove_fixed(lens_fixed, lens_remove_fixed)
source_fixed = self._add_fixed(kwargs_source, self._source_fixed, source_add_fixed)
source_fixed = self._remove_fixed(source_fixed, source_remove_fixed)
lens_light_fixed = self._add_fixed(kwargs_lens_light, self._lens_light_fixed, lens_light_add_fixed)
lens_light_fixed = self._remove_fixed(lens_light_fixed, lens_light_remove_fixed)
ps_fixed = self._add_fixed(kwargs_ps, self._ps_fixed, ps_add_fixed)
ps_fixed = self._remove_fixed(ps_fixed, ps_remove_fixed)
cosmo_fixed = copy.deepcopy(self._cosmo_fixed)
for param_name in cosmo_add_fixed:
if param_name in cosmo_fixed:
pass
else:
cosmo_fixed[param_name] = kwargs_cosmo[param_name]
for param_name in cosmo_remove_fixed:
if param_name in cosmo_fixed:
del cosmo_fixed[param_name]
self._lens_fixed, self._source_fixed, self._lens_light_fixed, self._ps_fixed, self._cosmo_fixed = lens_fixed, source_fixed, lens_light_fixed, ps_fixed, cosmo_fixed | adds the values of the keyword arguments that are stated in the _add_fixed to the existing fixed arguments.
:param kwargs_lens:
:param kwargs_source:
:param kwargs_lens_light:
:param kwargs_ps:
:param kwargs_cosmo:
:param lens_add_fixed:
:param source_add_fixed:
:param lens_light_add_fixed:
:param ps_add_fixed:
:param cosmo_add_fixed:
:return: updated kwargs fixed | Below is the instruction that describes the task:
### Input:
adds the values of the keyword arguments that are stated in the _add_fixed to the existing fixed arguments.
:param kwargs_lens:
:param kwargs_source:
:param kwargs_lens_light:
:param kwargs_ps:
:param kwargs_cosmo:
:param lens_add_fixed:
:param source_add_fixed:
:param lens_light_add_fixed:
:param ps_add_fixed:
:param cosmo_add_fixed:
:return: updated kwargs fixed
### Response:
def update_fixed(self, kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps, kwargs_cosmo, lens_add_fixed=[],
source_add_fixed=[], lens_light_add_fixed=[], ps_add_fixed=[], cosmo_add_fixed=[], lens_remove_fixed=[],
source_remove_fixed=[], lens_light_remove_fixed=[], ps_remove_fixed=[], cosmo_remove_fixed=[]):
"""
adds the values of the keyword arguments that are stated in the _add_fixed to the existing fixed arguments.
:param kwargs_lens:
:param kwargs_source:
:param kwargs_lens_light:
:param kwargs_ps:
:param kwargs_cosmo:
:param lens_add_fixed:
:param source_add_fixed:
:param lens_light_add_fixed:
:param ps_add_fixed:
:param cosmo_add_fixed:
:return: updated kwargs fixed
"""
lens_fixed = self._add_fixed(kwargs_lens, self._lens_fixed, lens_add_fixed)
lens_fixed = self._remove_fixed(lens_fixed, lens_remove_fixed)
source_fixed = self._add_fixed(kwargs_source, self._source_fixed, source_add_fixed)
source_fixed = self._remove_fixed(source_fixed, source_remove_fixed)
lens_light_fixed = self._add_fixed(kwargs_lens_light, self._lens_light_fixed, lens_light_add_fixed)
lens_light_fixed = self._remove_fixed(lens_light_fixed, lens_light_remove_fixed)
ps_fixed = self._add_fixed(kwargs_ps, self._ps_fixed, ps_add_fixed)
ps_fixed = self._remove_fixed(ps_fixed, ps_remove_fixed)
cosmo_fixed = copy.deepcopy(self._cosmo_fixed)
for param_name in cosmo_add_fixed:
if param_name in cosmo_fixed:
pass
else:
cosmo_fixed[param_name] = kwargs_cosmo[param_name]
for param_name in cosmo_remove_fixed:
if param_name in cosmo_fixed:
del cosmo_fixed[param_name]
self._lens_fixed, self._source_fixed, self._lens_light_fixed, self._ps_fixed, self._cosmo_fixed = lens_fixed, source_fixed, lens_light_fixed, ps_fixed, cosmo_fixed |
def Call(func_name, args=None, prefix=None):
"""A function call"""
node = Node(syms.power, [func_name, ArgList(args)])
if prefix is not None:
node.prefix = prefix
return node | A function call | Below is the the instruction that describes the task:
### Input:
A function call
### Response:
def Call(func_name, args=None, prefix=None):
"""A function call"""
node = Node(syms.power, [func_name, ArgList(args)])
if prefix is not None:
node.prefix = prefix
return node |
def _set_peer(self, v, load=False):
"""
Setter method for peer, mapped from YANG variable /ntp/peer (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_peer is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_peer() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("peer_ip",peer.peer, yang_name="peer", rest_name="peer", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='peer-ip', extensions={u'tailf-common': {u'cli-suppress-key-sort': None, u'info': u'Configure NTP peer', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'sort-priority': u'36', u'cli-suppress-list-no': None, u'cli-full-no': None, u'cli-compact-syntax': None, u'cli-suppress-key-abbreviation': None, u'callpoint': u'ntp-peer'}}), is_container='list', yang_name="peer", rest_name="peer", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-suppress-key-sort': None, u'info': u'Configure NTP peer', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'sort-priority': u'36', u'cli-suppress-list-no': None, u'cli-full-no': None, u'cli-compact-syntax': None, u'cli-suppress-key-abbreviation': None, u'callpoint': u'ntp-peer'}}, namespace='urn:brocade.com:mgmt:brocade-ntp', defining_module='brocade-ntp', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """peer must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("peer_ip",peer.peer, yang_name="peer", rest_name="peer", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='peer-ip', extensions={u'tailf-common': {u'cli-suppress-key-sort': None, u'info': u'Configure NTP peer', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'sort-priority': u'36', u'cli-suppress-list-no': None, u'cli-full-no': None, u'cli-compact-syntax': None, u'cli-suppress-key-abbreviation': None, u'callpoint': u'ntp-peer'}}), is_container='list', yang_name="peer", rest_name="peer", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-suppress-key-sort': None, u'info': u'Configure NTP peer', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'sort-priority': u'36', u'cli-suppress-list-no': None, u'cli-full-no': None, u'cli-compact-syntax': None, u'cli-suppress-key-abbreviation': None, u'callpoint': u'ntp-peer'}}, namespace='urn:brocade.com:mgmt:brocade-ntp', defining_module='brocade-ntp', yang_type='list', is_config=True)""",
})
self.__peer = t
if hasattr(self, '_set'):
self._set() | Setter method for peer, mapped from YANG variable /ntp/peer (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_peer is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_peer() directly. | Below is the the instruction that describes the task:
### Input:
Setter method for peer, mapped from YANG variable /ntp/peer (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_peer is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_peer() directly.
### Response:
def _set_peer(self, v, load=False):
"""
Setter method for peer, mapped from YANG variable /ntp/peer (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_peer is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_peer() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("peer_ip",peer.peer, yang_name="peer", rest_name="peer", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='peer-ip', extensions={u'tailf-common': {u'cli-suppress-key-sort': None, u'info': u'Configure NTP peer', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'sort-priority': u'36', u'cli-suppress-list-no': None, u'cli-full-no': None, u'cli-compact-syntax': None, u'cli-suppress-key-abbreviation': None, u'callpoint': u'ntp-peer'}}), is_container='list', yang_name="peer", rest_name="peer", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-suppress-key-sort': None, u'info': u'Configure NTP peer', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'sort-priority': u'36', u'cli-suppress-list-no': None, u'cli-full-no': None, u'cli-compact-syntax': None, u'cli-suppress-key-abbreviation': None, u'callpoint': u'ntp-peer'}}, namespace='urn:brocade.com:mgmt:brocade-ntp', defining_module='brocade-ntp', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """peer must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("peer_ip",peer.peer, yang_name="peer", rest_name="peer", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='peer-ip', extensions={u'tailf-common': {u'cli-suppress-key-sort': None, u'info': u'Configure NTP peer', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'sort-priority': u'36', u'cli-suppress-list-no': None, u'cli-full-no': None, u'cli-compact-syntax': None, u'cli-suppress-key-abbreviation': None, u'callpoint': u'ntp-peer'}}), is_container='list', yang_name="peer", rest_name="peer", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-suppress-key-sort': None, u'info': u'Configure NTP peer', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'sort-priority': u'36', u'cli-suppress-list-no': None, u'cli-full-no': None, u'cli-compact-syntax': None, u'cli-suppress-key-abbreviation': None, u'callpoint': u'ntp-peer'}}, namespace='urn:brocade.com:mgmt:brocade-ntp', defining_module='brocade-ntp', yang_type='list', is_config=True)""",
})
self.__peer = t
if hasattr(self, '_set'):
self._set() |
def set_(key, value, service=None, profile=None): # pylint: disable=W0613
'''
Set a key/value pair in the REST interface
'''
return query(key, value, service, profile) | Set a key/value pair in the REST interface | Below is the the instruction that describes the task:
### Input:
Set a key/value pair in the REST interface
### Response:
def set_(key, value, service=None, profile=None): # pylint: disable=W0613
'''
Set a key/value pair in the REST interface
'''
return query(key, value, service, profile) |
def start(logger, full_id, fetch=True, env=None, volumes=None, cpus=None, memory=None, gpu_devices=None, offline=False):
"""
Starts the job with all logging of a job_id
"""
owner, name, id = unpack_full_job_id(full_id)
if isinstance(sys.stdout, GeneralLogger):
# we don't want to have stuff written to stdout before in job's log
sys.stdout.clear_buffer()
job_backend = JobBackend(model_name=owner + '/' + name)
if fetch:
job_backend.fetch(id)
job_backend.restart(id)
job_backend.start(collect_system=False, offline=offline)
job_backend.set_status('PREPARE', add_section=False)
job = job_backend.get_job_model()
if not cpus:
cpus = job.get_cpu()
if not memory:
memory = job.get_memory()
if not gpu_devices and job.get_gpu():
# if requested 2 GPUs and we have 3 GPUs with id [0,1,2], gpus should be [0,1]
gpu_devices = []
for i in range(0, job.get_gpu()):
gpu_devices.append(i)
start_command(logger, job_backend, env, volumes, cpus=cpus, memory=memory, gpu_devices=gpu_devices, offline=offline) | Starts the job with all logging of a job_id | Below is the the instruction that describes the task:
### Input:
Starts the job with all logging of a job_id
### Response:
def start(logger, full_id, fetch=True, env=None, volumes=None, cpus=None, memory=None, gpu_devices=None, offline=False):
"""
Starts the job with all logging of a job_id
"""
owner, name, id = unpack_full_job_id(full_id)
if isinstance(sys.stdout, GeneralLogger):
# we don't want to have stuff written to stdout before in job's log
sys.stdout.clear_buffer()
job_backend = JobBackend(model_name=owner + '/' + name)
if fetch:
job_backend.fetch(id)
job_backend.restart(id)
job_backend.start(collect_system=False, offline=offline)
job_backend.set_status('PREPARE', add_section=False)
job = job_backend.get_job_model()
if not cpus:
cpus = job.get_cpu()
if not memory:
memory = job.get_memory()
if not gpu_devices and job.get_gpu():
# if requested 2 GPUs and we have 3 GPUs with id [0,1,2], gpus should be [0,1]
gpu_devices = []
for i in range(0, job.get_gpu()):
gpu_devices.append(i)
start_command(logger, job_backend, env, volumes, cpus=cpus, memory=memory, gpu_devices=gpu_devices, offline=offline) |
def unflatten(d, splitter='tuple', inverse=False):
"""Unflatten dict-like object.
Parameters
----------
d: dict-like object
The dict that will be unflattened.
splitter: {'tuple', 'path', function} (default: 'tuple')
The key splitting method. If a function is given, the function will be
used to split.
'tuple': Use each element in the tuple key as the key of the unflattened dict.
'path': Use ``pathlib.Path.parts`` to split keys.
inverse: bool (default: False)
Whether you want to invert the key and value before flattening.
Returns
-------
unflattened_dict: dict
"""
if isinstance(splitter, str):
splitter = SPLITTER_DICT[splitter]
unflattened_dict = {}
for flat_key, value in six.viewitems(d):
if inverse:
flat_key, value = value, flat_key
key_tuple = splitter(flat_key)
nested_set_dict(unflattened_dict, key_tuple, value)
return unflattened_dict | Unflatten dict-like object.
Parameters
----------
d: dict-like object
The dict that will be unflattened.
splitter: {'tuple', 'path', function} (default: 'tuple')
The key splitting method. If a function is given, the function will be
used to split.
'tuple': Use each element in the tuple key as the key of the unflattened dict.
'path': Use ``pathlib.Path.parts`` to split keys.
inverse: bool (default: False)
Whether you want to invert the key and value before flattening.
Returns
-------
unflattened_dict: dict | Below is the the instruction that describes the task:
### Input:
Unflatten dict-like object.
Parameters
----------
d: dict-like object
The dict that will be unflattened.
splitter: {'tuple', 'path', function} (default: 'tuple')
The key splitting method. If a function is given, the function will be
used to split.
'tuple': Use each element in the tuple key as the key of the unflattened dict.
'path': Use ``pathlib.Path.parts`` to split keys.
inverse: bool (default: False)
Whether you want to invert the key and value before flattening.
Returns
-------
unflattened_dict: dict
### Response:
def unflatten(d, splitter='tuple', inverse=False):
"""Unflatten dict-like object.
Parameters
----------
d: dict-like object
The dict that will be unflattened.
splitter: {'tuple', 'path', function} (default: 'tuple')
The key splitting method. If a function is given, the function will be
used to split.
'tuple': Use each element in the tuple key as the key of the unflattened dict.
'path': Use ``pathlib.Path.parts`` to split keys.
inverse: bool (default: False)
Whether you want to invert the key and value before flattening.
Returns
-------
unflattened_dict: dict
"""
if isinstance(splitter, str):
splitter = SPLITTER_DICT[splitter]
unflattened_dict = {}
for flat_key, value in six.viewitems(d):
if inverse:
flat_key, value = value, flat_key
key_tuple = splitter(flat_key)
nested_set_dict(unflattened_dict, key_tuple, value)
return unflattened_dict |
def type_str(tp, assumed_globals=None, update_assumed_globals=None,
implicit_globals=None, bound_Generic=None, bound_typevars=None):
"""Generates a nicely readable string representation of the given type.
The returned representation is workable as a source code string and would
reconstruct the given type if handed to eval, provided that globals/locals
are configured appropriately (e.g. assumes that various types from typing
have been imported).
Used as type-formatting backend of ptypes' code generator abilities
in modules typelogger and stubfile_2_converter.
If tp contains unbound TypeVars and bound_Generic is provided, this
function attempts to retrieve corresponding values for the unbound TypeVars
from bound_Generic.
For semantics of assumed_globals and update_assumed_globals see
_tp_relfq_name. Its doc applies to every argument or result contained in
tp (recursively) and to tp itself.
"""
if assumed_globals is None and update_assumed_globals is None:
if implicit_globals is None:
implicit_globals = set()
else:
implicit_globals = implicit_globals.copy()
implicit_globals.add(sys.modules['typing'])
implicit_globals.add(sys.modules['__main__'])
if isinstance(tp, tuple):
return '('+', '.join([type_str(tp0, assumed_globals, update_assumed_globals,
implicit_globals, bound_Generic, bound_typevars) for tp0 in tp])+')'
try:
return type_str(tp.__orig_class__, assumed_globals, update_assumed_globals,
implicit_globals, bound_Generic, bound_typevars)
except AttributeError:
pass
tp = _match_stub_type(tp)
if isinstance(tp, TypeVar):
prm = None
if not bound_typevars is None:
try:
prm = bound_typevars[tp]
except:
pass
if prm is None and not bound_typevars is None and tp in bound_typevars:
prm = bound_typevars[tp]
if prm is None and not bound_Generic is None:
prm = get_arg_for_TypeVar(tp, bound_Generic)
if not prm is None:
return type_str(prm, assumed_globals, update_assumed_globals,
implicit_globals, bound_Generic, bound_typevars)
return tp.__name__
elif isinstance(tp, ForwardRef):
return "'%s'" % tp.__forward_arg__
elif isclass(tp) and not is_Generic(tp) \
and not hasattr(typing, tp.__name__):
tp_name = _tp_relfq_name(tp, None, assumed_globals, update_assumed_globals,
implicit_globals)
prm = ''
if hasattr(tp, '__args__') and not tp.__args__ is None:
params = [type_str(param, assumed_globals, update_assumed_globals,
implicit_globals, bound_Generic, bound_typevars) for param in tp.__args__]
prm = '[%s]'%', '.join(params)
return tp_name+prm
elif is_Union(tp):
prms = get_Union_params(tp)
params = [type_str(param, assumed_globals, update_assumed_globals,
implicit_globals, bound_Generic, bound_typevars) for param in prms]
# See: https://github.com/Stewori/pytypes/issues/44
if pytypes.canonical_type_str:
params = sorted(params)
return '%s[%s]'%(_tp_relfq_name(Union, 'Union', assumed_globals,
update_assumed_globals, implicit_globals), ', '.join(params))
elif is_Tuple(tp):
prms = get_Tuple_params(tp)
tpl_params = [type_str(param, assumed_globals, update_assumed_globals,
implicit_globals, bound_Generic, bound_typevars) for param in prms]
return '%s[%s]'%(_tp_relfq_name(Tuple, 'Tuple', assumed_globals,
update_assumed_globals, implicit_globals), ', '.join(tpl_params))
elif hasattr(tp, '__args__'):
tp_name = _tp_relfq_name(tp, None, assumed_globals, update_assumed_globals,
implicit_globals)
if tp.__args__ is None:
if hasattr(tp, '__parameters__') and \
hasattr(tp, '__origin__') and tp.__origin__ is Generic and \
not tp.__parameters__ is None and len(tp.__parameters__) > 0:
args = tp.__parameters__
else:
return tp_name
else:
args = tp.__args__
params = [type_str(param, assumed_globals, update_assumed_globals,
implicit_globals, bound_Generic, bound_typevars) for param in args]
if hasattr(tp, '__result__'):
return '%s[[%s], %s]'%(tp_name, ', '.join(params),
type_str(tp.__result__, assumed_globals, update_assumed_globals,
implicit_globals, bound_Generic, bound_typevars))
elif is_Callable(tp):
return '%s[[%s], %s]'%(tp_name, ', '.join(params[:-1]),
type_str(params[-1], assumed_globals, update_assumed_globals,
implicit_globals, bound_Generic, bound_typevars))
else:
return '%s[%s]'%(tp_name, ', '.join(params))
elif hasattr(tp, '__name__'):
result = _tp_relfq_name(tp, None, assumed_globals, update_assumed_globals,
implicit_globals)
elif tp is Any:
# In Python 3.6 Any does not have __name__.
result = _tp_relfq_name(tp, 'Any', assumed_globals, update_assumed_globals,
implicit_globals)
else:
# Todo: Care for other special types from typing where necessary.
result = str(tp)
if not implicit_globals is None:
for s in implicit_globals:
result = result.replace(s.__name__+'.', '')
return result | Generates a nicely readable string representation of the given type.
The returned representation is workable as a source code string and would
reconstruct the given type if handed to eval, provided that globals/locals
are configured appropriately (e.g. assumes that various types from typing
have been imported).
Used as type-formatting backend of ptypes' code generator abilities
in modules typelogger and stubfile_2_converter.
If tp contains unbound TypeVars and bound_Generic is provided, this
function attempts to retrieve corresponding values for the unbound TypeVars
from bound_Generic.
For semantics of assumed_globals and update_assumed_globals see
_tp_relfq_name. Its doc applies to every argument or result contained in
tp (recursively) and to tp itself. | Below is the the instruction that describes the task:
### Input:
Generates a nicely readable string representation of the given type.
The returned representation is workable as a source code string and would
reconstruct the given type if handed to eval, provided that globals/locals
are configured appropriately (e.g. assumes that various types from typing
have been imported).
Used as type-formatting backend of ptypes' code generator abilities
in modules typelogger and stubfile_2_converter.
If tp contains unbound TypeVars and bound_Generic is provided, this
function attempts to retrieve corresponding values for the unbound TypeVars
from bound_Generic.
For semantics of assumed_globals and update_assumed_globals see
_tp_relfq_name. Its doc applies to every argument or result contained in
tp (recursively) and to tp itself.
### Response:
def type_str(tp, assumed_globals=None, update_assumed_globals=None,
implicit_globals=None, bound_Generic=None, bound_typevars=None):
"""Generates a nicely readable string representation of the given type.
The returned representation is workable as a source code string and would
reconstruct the given type if handed to eval, provided that globals/locals
are configured appropriately (e.g. assumes that various types from typing
have been imported).
Used as type-formatting backend of ptypes' code generator abilities
in modules typelogger and stubfile_2_converter.
If tp contains unbound TypeVars and bound_Generic is provided, this
function attempts to retrieve corresponding values for the unbound TypeVars
from bound_Generic.
For semantics of assumed_globals and update_assumed_globals see
_tp_relfq_name. Its doc applies to every argument or result contained in
tp (recursively) and to tp itself.
"""
if assumed_globals is None and update_assumed_globals is None:
if implicit_globals is None:
implicit_globals = set()
else:
implicit_globals = implicit_globals.copy()
implicit_globals.add(sys.modules['typing'])
implicit_globals.add(sys.modules['__main__'])
if isinstance(tp, tuple):
return '('+', '.join([type_str(tp0, assumed_globals, update_assumed_globals,
implicit_globals, bound_Generic, bound_typevars) for tp0 in tp])+')'
try:
return type_str(tp.__orig_class__, assumed_globals, update_assumed_globals,
implicit_globals, bound_Generic, bound_typevars)
except AttributeError:
pass
tp = _match_stub_type(tp)
if isinstance(tp, TypeVar):
prm = None
if not bound_typevars is None:
try:
prm = bound_typevars[tp]
except:
pass
if prm is None and not bound_typevars is None and tp in bound_typevars:
prm = bound_typevars[tp]
if prm is None and not bound_Generic is None:
prm = get_arg_for_TypeVar(tp, bound_Generic)
if not prm is None:
return type_str(prm, assumed_globals, update_assumed_globals,
implicit_globals, bound_Generic, bound_typevars)
return tp.__name__
elif isinstance(tp, ForwardRef):
return "'%s'" % tp.__forward_arg__
elif isclass(tp) and not is_Generic(tp) \
and not hasattr(typing, tp.__name__):
tp_name = _tp_relfq_name(tp, None, assumed_globals, update_assumed_globals,
implicit_globals)
prm = ''
if hasattr(tp, '__args__') and not tp.__args__ is None:
params = [type_str(param, assumed_globals, update_assumed_globals,
implicit_globals, bound_Generic, bound_typevars) for param in tp.__args__]
prm = '[%s]'%', '.join(params)
return tp_name+prm
elif is_Union(tp):
prms = get_Union_params(tp)
params = [type_str(param, assumed_globals, update_assumed_globals,
implicit_globals, bound_Generic, bound_typevars) for param in prms]
# See: https://github.com/Stewori/pytypes/issues/44
if pytypes.canonical_type_str:
params = sorted(params)
return '%s[%s]'%(_tp_relfq_name(Union, 'Union', assumed_globals,
update_assumed_globals, implicit_globals), ', '.join(params))
elif is_Tuple(tp):
prms = get_Tuple_params(tp)
tpl_params = [type_str(param, assumed_globals, update_assumed_globals,
implicit_globals, bound_Generic, bound_typevars) for param in prms]
return '%s[%s]'%(_tp_relfq_name(Tuple, 'Tuple', assumed_globals,
update_assumed_globals, implicit_globals), ', '.join(tpl_params))
elif hasattr(tp, '__args__'):
tp_name = _tp_relfq_name(tp, None, assumed_globals, update_assumed_globals,
implicit_globals)
if tp.__args__ is None:
if hasattr(tp, '__parameters__') and \
hasattr(tp, '__origin__') and tp.__origin__ is Generic and \
not tp.__parameters__ is None and len(tp.__parameters__) > 0:
args = tp.__parameters__
else:
return tp_name
else:
args = tp.__args__
params = [type_str(param, assumed_globals, update_assumed_globals,
implicit_globals, bound_Generic, bound_typevars) for param in args]
if hasattr(tp, '__result__'):
return '%s[[%s], %s]'%(tp_name, ', '.join(params),
type_str(tp.__result__, assumed_globals, update_assumed_globals,
implicit_globals, bound_Generic, bound_typevars))
elif is_Callable(tp):
return '%s[[%s], %s]'%(tp_name, ', '.join(params[:-1]),
type_str(params[-1], assumed_globals, update_assumed_globals,
implicit_globals, bound_Generic, bound_typevars))
else:
return '%s[%s]'%(tp_name, ', '.join(params))
elif hasattr(tp, '__name__'):
result = _tp_relfq_name(tp, None, assumed_globals, update_assumed_globals,
implicit_globals)
elif tp is Any:
# In Python 3.6 Any does not have __name__.
result = _tp_relfq_name(tp, 'Any', assumed_globals, update_assumed_globals,
implicit_globals)
else:
# Todo: Care for other special types from typing where necessary.
result = str(tp)
if not implicit_globals is None:
for s in implicit_globals:
result = result.replace(s.__name__+'.', '')
return result |
def bed_to_interval(orig_bed, bam_file):
"""Add header and format BED bait and target files for Picard if necessary.
"""
with open(orig_bed) as in_handle:
line = in_handle.readline()
if line.startswith("@"):
yield orig_bed
else:
with pysam.Samfile(bam_file, "rb") as bam_handle:
header = bam_handle.text
with tmpfile(dir=os.path.dirname(orig_bed), prefix="picardbed") as tmp_bed:
with open(tmp_bed, "w") as out_handle:
out_handle.write(header)
with open(orig_bed) as in_handle:
for i, line in enumerate(in_handle):
parts = line.rstrip().split("\t")
if len(parts) == 4:
chrom, start, end, name = parts
strand = "+"
elif len(parts) >= 3:
chrom, start, end = parts[:3]
strand = "+"
name = "r%s" % i
out = [chrom, start, end, strand, name]
out_handle.write("\t".join(out) + "\n")
yield tmp_bed | Add header and format BED bait and target files for Picard if necessary. | Below is the the instruction that describes the task:
### Input:
Add header and format BED bait and target files for Picard if necessary.
### Response:
def bed_to_interval(orig_bed, bam_file):
"""Add header and format BED bait and target files for Picard if necessary.
"""
with open(orig_bed) as in_handle:
line = in_handle.readline()
if line.startswith("@"):
yield orig_bed
else:
with pysam.Samfile(bam_file, "rb") as bam_handle:
header = bam_handle.text
with tmpfile(dir=os.path.dirname(orig_bed), prefix="picardbed") as tmp_bed:
with open(tmp_bed, "w") as out_handle:
out_handle.write(header)
with open(orig_bed) as in_handle:
for i, line in enumerate(in_handle):
parts = line.rstrip().split("\t")
if len(parts) == 4:
chrom, start, end, name = parts
strand = "+"
elif len(parts) >= 3:
chrom, start, end = parts[:3]
strand = "+"
name = "r%s" % i
out = [chrom, start, end, strand, name]
out_handle.write("\t".join(out) + "\n")
yield tmp_bed |
def edf_greenhall(alpha, d, m, N, overlapping=False, modified=False, verbose=False):
""" returns Equivalent degrees of freedom
Parameters
----------
alpha: int
noise type, +2...-4
d: int
1 first-difference variance
2 Allan variance
3 Hadamard variance
require alpha+2*d>1
m: int
averaging factor
tau = m*tau0 = m*(1/rate)
N: int
number of phase observations (length of time-series)
overlapping: bool
True for oadev, ohdev
modified: bool
True for mdev, tdev
Returns
-------
edf: float
Equivalent degrees of freedom
Greenhall, Riley, 2004
https://ntrs.nasa.gov/archive/nasa/casi.ntrs.nasa.gov/20050061319.pdf
UNCERTAINTY OF STABILITY VARIANCES BASED ON FINITE DIFFERENCES
Notes
-----
Used for the following deviations (see http://www.wriley.com/CI2.pdf page 8)
adev()
oadev()
mdev()
tdev()
hdev()
ohdev()
"""
if modified:
F = 1 # F filter factor, 1 modified variance, m unmodified variance
else:
F = int(m)
if overlapping:
S = int(m) # S stride factor, 1 nonoverlapped estimator, m overlapped estimator (estimator stride = tau/S )
else:
S = 1
assert(alpha+2*d > 1.0)
L = m/F+m*d # length of filter applied to phase samples
M = 1 + np.floor(S*(N-L) / m)
J = min(M, (d+1)*S)
J_max = 100
r = M/S
if int(F) == 1 and modified: # case 1, modified variances, all alpha
if J <= J_max:
inv_edf = (1.0/(pow(greenhall_sz(0, 1, alpha, d), 2)*M))* \
greenhall_BasicSum(J, M, S, 1, alpha, d)
if verbose:
print("case 1.1 edf= %3f" % float(1.0/inv_edf))
return 1.0/inv_edf
elif r > d+1:
(a0, a1) = greenhall_table1(alpha, d)
inv_edf = (1.0/r)*(a0-a1/r)
if verbose:
print("case 1.2 edf= %3f" % float(1.0/inv_edf))
return 1.0/inv_edf
else:
m_prime = J_max/r
inv_edf = (1.0/(pow(greenhall_sz(0, F, alpha, d), 2)*J_max))* \
greenhall_BasicSum(J_max, J_max, m_prime, 1, alpha, d)
if verbose:
print("case 1.3 edf= %3f" % float(1.0/inv_edf))
return 1.0/inv_edf
elif int(F) == int(m) and int(alpha) <= 0 and not modified:
# case 2, unmodified variances, alpha <= 0
if J <= J_max:
if m*(d+1) <= J_max:
m_prime = m
variant = "a"
else:
m_prime = float('inf')
variant = "b"
inv_edf = (1.0/(pow(greenhall_sz(0, m_prime, alpha, d), 2)*M))* \
greenhall_BasicSum(J, M, S, m_prime, alpha, d)
if verbose:
print("case 2.1%s edf= %3f" % (variant, float(1.0/inv_edf)))
return 1.0/inv_edf
elif r > d+1:
(a0, a1) = greenhall_table2(alpha, d)
inv_edf = (1.0/r)*(a0-a1/r)
if verbose:
print("case 2.2 edf= %3f" % float(1.0/inv_edf))
return 1.0/inv_edf
else:
m_prime = J_max/r
inv_edf = (1.0/(pow(greenhall_sz(0, float('inf'), alpha, d), 2)*J_max))* \
greenhall_BasicSum(J_max, J_max, m_prime, float('inf'), alpha, d)
if verbose:
print("case 2.3 edf= %3f" % float(1.0/inv_edf))
return 1.0/inv_edf
elif int(F) == int(m) and int(alpha) == 1 and not modified:
# case 3, unmodified variances, alpha=1
if J <= J_max:
inv_edf = (1.0/(pow(greenhall_sz(0, m, 1, d), 2)*M))* \
greenhall_BasicSum(J, M, S, m, 1, d) # note: m<1e6 to avoid roundoff
if verbose:
print("case 3.1 edf= %3f" % float(1.0/inv_edf))
return 1.0/inv_edf
elif r > d+1:
(a0, a1) = greenhall_table2(alpha, d)
(b0, b1) = greenhall_table3(alpha, d)
inv_edf = (1.0/(pow(b0+b1*np.log(m), 2)*r))*(a0-a1/r)
if verbose:
print("case 3.2 edf= %3f" % float(1.0/inv_edf))
return 1.0/inv_edf
else:
m_prime = J_max/r
(b0, b1) = greenhall_table3(alpha, d)
inv_edf = (1.0/(pow(b0+b1*np.log(m), 2)*J_max))* \
greenhall_BasicSum(J_max, J_max, m_prime, m_prime, 1, d)
if verbose:
print("case 3.3 edf= %3f" % float(1.0/inv_edf))
return 1.0/inv_edf
elif int(F) == int(m) and int(alpha) == 2 and not modified:
# case 4, unmodified variances, alpha=2
K = np.ceil(r)
if K <= d:
raise NotImplementedError # FIXME: add formula from the paper here!
else:
a0 = scipy.special.binom(4*d, 2*d) / pow(scipy.special.binom(2*d, d), 2)
a1 = d/2.0
inv_edf = (1.0/M)*(a0-a1/r)
if verbose:
print("case 4.2 edf= %3f" % float(1.0/inv_edf))
return 1.0/inv_edf
print("greenhall_edf() no matching case!")
raise NotImplementedError | returns Equivalent degrees of freedom
Parameters
----------
alpha: int
noise type, +2...-4
d: int
1 first-difference variance
2 Allan variance
3 Hadamard variance
require alpha+2*d>1
m: int
averaging factor
tau = m*tau0 = m*(1/rate)
N: int
number of phase observations (length of time-series)
overlapping: bool
True for oadev, ohdev
modified: bool
True for mdev, tdev
Returns
-------
edf: float
Equivalent degrees of freedom
Greenhall, Riley, 2004
https://ntrs.nasa.gov/archive/nasa/casi.ntrs.nasa.gov/20050061319.pdf
UNCERTAINTY OF STABILITY VARIANCES BASED ON FINITE DIFFERENCES
Notes
-----
Used for the following deviations (see http://www.wriley.com/CI2.pdf page 8)
adev()
oadev()
mdev()
tdev()
hdev()
ohdev() | Below is the the instruction that describes the task:
### Input:
returns Equivalent degrees of freedom
Parameters
----------
alpha: int
noise type, +2...-4
d: int
1 first-difference variance
2 Allan variance
3 Hadamard variance
require alpha+2*d>1
m: int
averaging factor
tau = m*tau0 = m*(1/rate)
N: int
number of phase observations (length of time-series)
overlapping: bool
True for oadev, ohdev
modified: bool
True for mdev, tdev
Returns
-------
edf: float
Equivalent degrees of freedom
Greenhall, Riley, 2004
https://ntrs.nasa.gov/archive/nasa/casi.ntrs.nasa.gov/20050061319.pdf
UNCERTAINTY OF STABILITY VARIANCES BASED ON FINITE DIFFERENCES
Notes
-----
Used for the following deviations (see http://www.wriley.com/CI2.pdf page 8)
adev()
oadev()
mdev()
tdev()
hdev()
ohdev()
### Response:
def edf_greenhall(alpha, d, m, N, overlapping=False, modified=False, verbose=False):
    """ returns Equivalent degrees of freedom

    Parameters
    ----------
    alpha: int
        noise type, +2...-4
    d: int
        1 first-difference variance
        2 Allan variance
        3 Hadamard variance
        require alpha+2*d>1
    m: int
        averaging factor
        tau = m*tau0 = m*(1/rate)
    N: int
        number of phase observations (length of time-series)
    overlapping: bool
        True for oadev, ohdev
    modified: bool
        True for mdev, tdev

    Returns
    -------
    edf: float
        Equivalent degrees of freedom

    Greenhall, Riley, 2004
    https://ntrs.nasa.gov/archive/nasa/casi.ntrs.nasa.gov/20050061319.pdf
    UNCERTAINTY OF STABILITY VARIANCES BASED ON FINITE DIFFERENCES

    Notes
    -----
    Used for the following deviations (see http://www.wriley.com/CI2.pdf page 8)
    adev()
    oadev()
    mdev()
    tdev()
    hdev()
    ohdev()
    """
    if modified:
        F = 1 # F filter factor, 1 modified variance, m unmodified variance
    else:
        F = int(m)
    if overlapping:
        S = int(m) # S stride factor, 1 nonoverlapped estimator, m overlapped estimator (estimator stride = tau/S )
    else:
        S = 1
    # Greenhall's formulas require alpha + 2d > 1 for the sums to converge.
    assert(alpha+2*d > 1.0)
    L = m/F+m*d # length of filter applied to phase samples
    M = 1 + np.floor(S*(N-L) / m)  # number of summands in the variance estimate
    J = min(M, (d+1)*S)
    # BasicSum is evaluated directly only up to J_max terms; beyond that the
    # paper's table-based asymptotic approximations are used instead.
    J_max = 100
    r = M/S  # effective number of "stride units"; drives the asymptotic formulas
    if int(F) == 1 and modified: # case 1, modified variances, all alpha
        if J <= J_max:
            inv_edf = (1.0/(pow(greenhall_sz(0, 1, alpha, d), 2)*M))* \
                       greenhall_BasicSum(J, M, S, 1, alpha, d)
            if verbose:
                print("case 1.1 edf= %3f" % float(1.0/inv_edf))
            return 1.0/inv_edf
        elif r > d+1:
            # asymptotic form: edf ~ r / (a0 - a1/r)
            (a0, a1) = greenhall_table1(alpha, d)
            inv_edf = (1.0/r)*(a0-a1/r)
            if verbose:
                print("case 1.2 edf= %3f" % float(1.0/inv_edf))
            return 1.0/inv_edf
        else:
            # intermediate regime: rescale so BasicSum stays at J_max terms
            m_prime = J_max/r
            inv_edf = (1.0/(pow(greenhall_sz(0, F, alpha, d), 2)*J_max))* \
                       greenhall_BasicSum(J_max, J_max, m_prime, 1, alpha, d)
            if verbose:
                print("case 1.3 edf= %3f" % float(1.0/inv_edf))
            return 1.0/inv_edf
    elif int(F) == int(m) and int(alpha) <= 0 and not modified:
        # case 2, unmodified variances, alpha <= 0
        if J <= J_max:
            if m*(d+1) <= J_max:
                m_prime = m
                variant = "a"
            else:
                # large m behaves as the m -> infinity limit
                m_prime = float('inf')
                variant = "b"
            inv_edf = (1.0/(pow(greenhall_sz(0, m_prime, alpha, d), 2)*M))* \
                       greenhall_BasicSum(J, M, S, m_prime, alpha, d)
            if verbose:
                print("case 2.1%s edf= %3f" % (variant, float(1.0/inv_edf)))
            return 1.0/inv_edf
        elif r > d+1:
            (a0, a1) = greenhall_table2(alpha, d)
            inv_edf = (1.0/r)*(a0-a1/r)
            if verbose:
                print("case 2.2 edf= %3f" % float(1.0/inv_edf))
            return 1.0/inv_edf
        else:
            m_prime = J_max/r
            inv_edf = (1.0/(pow(greenhall_sz(0, float('inf'), alpha, d), 2)*J_max))* \
                       greenhall_BasicSum(J_max, J_max, m_prime, float('inf'), alpha, d)
            if verbose:
                print("case 2.3 edf= %3f" % float(1.0/inv_edf))
            return 1.0/inv_edf
    elif int(F) == int(m) and int(alpha) == 1 and not modified:
        # case 3, unmodified variances, alpha=1
        if J <= J_max:
            inv_edf = (1.0/(pow(greenhall_sz(0, m, 1, d), 2)*M))* \
                       greenhall_BasicSum(J, M, S, m, 1, d) # note: m<1e6 to avoid roundoff
            if verbose:
                print("case 3.1 edf= %3f" % float(1.0/inv_edf))
            return 1.0/inv_edf
        elif r > d+1:
            # flicker FM: the variance scale carries a log(m) correction (table 3)
            (a0, a1) = greenhall_table2(alpha, d)
            (b0, b1) = greenhall_table3(alpha, d)
            inv_edf = (1.0/(pow(b0+b1*np.log(m), 2)*r))*(a0-a1/r)
            if verbose:
                print("case 3.2 edf= %3f" % float(1.0/inv_edf))
            return 1.0/inv_edf
        else:
            m_prime = J_max/r
            (b0, b1) = greenhall_table3(alpha, d)
            inv_edf = (1.0/(pow(b0+b1*np.log(m), 2)*J_max))* \
                       greenhall_BasicSum(J_max, J_max, m_prime, m_prime, 1, d)
            if verbose:
                print("case 3.3 edf= %3f" % float(1.0/inv_edf))
            return 1.0/inv_edf
    elif int(F) == int(m) and int(alpha) == 2 and not modified:
        # case 4, unmodified variances, alpha=2
        K = np.ceil(r)
        if K <= d:
            raise NotImplementedError # FIXME: add formula from the paper here!
        else:
            # closed form via binomial coefficients (white PM noise)
            a0 = scipy.special.binom(4*d, 2*d) / pow(scipy.special.binom(2*d, d), 2)
            a1 = d/2.0
            inv_edf = (1.0/M)*(a0-a1/r)
            if verbose:
                print("case 4.2 edf= %3f" % float(1.0/inv_edf))
            return 1.0/inv_edf
    # no case matched the (alpha, d, modified) combination
    # NOTE(review): message says "greenhall_edf" but the function is edf_greenhall
    print("greenhall_edf() no matching case!")
    raise NotImplementedError
def ndmeshgrid(*arrs):
    """Return a mesh grid for N dimensions.

    The input are N arrays, each of which contains the values along one axis of
    the coordinate system. The arrays do not have to have the same number of
    entries. The function returns arrays that can be fed into numpy functions
    so that they produce values for *all* points spanned by the axes *arrs*.

    Original from
    http://stackoverflow.com/questions/1827489/numpy-meshgrid-in-3d and fixed.

    .. SeeAlso: :func:`numpy.meshgrid` for the 2D case.
    """
    #arrs = tuple(reversed(arrs)) <-- wrong on stackoverflow.com
    arrs = tuple(arrs)
    lens = list(map(len, arrs))
    dim = len(arrs)
    # Note: the original accumulated the product of all lengths into an unused
    # variable that was immediately shadowed by the inner loop; that dead code
    # has been removed.
    ans = []
    for i, arr in enumerate(arrs):
        # Reshape the i-th axis array to have length 1 along every other axis...
        slc = [1] * dim
        slc[i] = lens[i]
        arr2 = numpy.asanyarray(arr).reshape(slc)
        # ...then repeat it along all the other axes to fill the full grid.
        for j, sz in enumerate(lens):
            if j != i:
                arr2 = arr2.repeat(sz, axis=j)
        ans.append(arr2)
    return tuple(ans)
The input are N arrays, each of which contains the values along one axis of
the coordinate system. The arrays do not have to have the same number of
entries. The function returns arrays that can be fed into numpy functions
so that they produce values for *all* points spanned by the axes *arrs*.
Original from
http://stackoverflow.com/questions/1827489/numpy-meshgrid-in-3d and fixed.
.. SeeAlso: :func:`numpy.meshgrid` for the 2D case. | Below is the instruction that describes the task:
### Input:
Return a mesh grid for N dimensions.
The input are N arrays, each of which contains the values along one axis of
the coordinate system. The arrays do not have to have the same number of
entries. The function returns arrays that can be fed into numpy functions
so that they produce values for *all* points spanned by the axes *arrs*.
Original from
http://stackoverflow.com/questions/1827489/numpy-meshgrid-in-3d and fixed.
.. SeeAlso: :func:`numpy.meshgrid` for the 2D case.
### Response:
def ndmeshgrid(*arrs):
"""Return a mesh grid for N dimensions.
The input are N arrays, each of which contains the values along one axis of
the coordinate system. The arrays do not have to have the same number of
entries. The function returns arrays that can be fed into numpy functions
so that they produce values for *all* points spanned by the axes *arrs*.
Original from
http://stackoverflow.com/questions/1827489/numpy-meshgrid-in-3d and fixed.
.. SeeAlso: :func:`numpy.meshgrid` for the 2D case.
"""
#arrs = tuple(reversed(arrs)) <-- wrong on stackoverflow.com
arrs = tuple(arrs)
lens = list(map(len, arrs))
dim = len(arrs)
sz = 1
for s in lens:
sz *= s
ans = []
for i, arr in enumerate(arrs):
slc = [1] * dim
slc[i] = lens[i]
arr2 = numpy.asanyarray(arr).reshape(slc)
for j, sz in enumerate(lens):
if j != i:
arr2 = arr2.repeat(sz, axis=j)
ans.append(arr2)
return tuple(ans) |
def to_local_name(acs, attr):
    """
    :param acs: List of AttributeConverter instances
    :param attr: an Attribute instance
    :return: The local attribute name
    """
    # Ask each converter in turn; the first truthy conversion wins.
    for converter in acs:
        local_name = converter.from_format(attr)
        if not local_name:
            continue
        return local_name
    # No converter recognized the attribute; fall back to its friendly name.
    return attr.friendly_name
:param attr: an Attribute instance
:return: The local attribute name | Below is the instruction that describes the task:
### Input:
:param acs: List of AttributeConverter instances
:param attr: an Attribute instance
:return: The local attribute name
### Response:
def to_local_name(acs, attr):
"""
:param acs: List of AttributeConverter instances
:param attr: an Attribute instance
:return: The local attribute name
"""
for aconv in acs:
lattr = aconv.from_format(attr)
if lattr:
return lattr
return attr.friendly_name |
def vswitch_query(self, vswitch_name):
    """Check the virtual switch status

    :param str vswitch_name: the name of the virtual switch
    :returns: Dictionary describing virtual switch info
    :rtype: dict
    """
    # Any SDK base error raised by the query is logged and re-raised with
    # this action description attached.
    with zvmutils.log_and_reraise_sdkbase_error(
            "get virtual switch information"):
        return self._networkops.vswitch_query(vswitch_name)
:param str vswitch_name: the name of the virtual switch
:returns: Dictionary describing virtual switch info
:rtype: dict | Below is the the instruction that describes the task:
### Input:
Check the virtual switch status
:param str vswitch_name: the name of the virtual switch
:returns: Dictionary describing virtual switch info
:rtype: dict
### Response:
def vswitch_query(self, vswitch_name):
"""Check the virtual switch status
:param str vswitch_name: the name of the virtual switch
:returns: Dictionary describing virtual switch info
:rtype: dict
"""
action = "get virtual switch information"
with zvmutils.log_and_reraise_sdkbase_error(action):
return self._networkops.vswitch_query(vswitch_name) |
def sync(self):
    """Retrieve zones from ElkM1"""
    # Request alarm-by-zone, definitions, partitions and statuses, then the
    # zone text descriptions.
    for request in (az_encode(), zd_encode(), zp_encode(), zs_encode()):
        self.elk.send(request)
    self.get_descriptions(TextDescriptions.ZONE.value)
### Input:
Retrieve zones from ElkM1
### Response:
def sync(self):
"""Retrieve zones from ElkM1"""
self.elk.send(az_encode())
self.elk.send(zd_encode())
self.elk.send(zp_encode())
self.elk.send(zs_encode())
self.get_descriptions(TextDescriptions.ZONE.value) |
def elem_wrap(self, tree, debug=False, root_id=None):
    """takes a DGParentedTree and puts a nucleus or satellite on top,
    depending on the nuclearity of the root element of the tree.
    """
    if root_id is None:
        root_id = tree.root_id
    # Choose the wrapper based on the root element's nuclearity.
    is_nucleus = self.elem_dict[root_id]['nuclearity'] == 'nucleus'
    wrapper = n_wrap if is_nucleus else s_wrap
    return wrapper(tree, debug=debug, root_id=root_id)
depending on the nuclearity of the root element of the tree. | Below is the the instruction that describes the task:
### Input:
takes a DGParentedTree and puts a nucleus or satellite on top,
depending on the nuclearity of the root element of the tree.
### Response:
def elem_wrap(self, tree, debug=False, root_id=None):
"""takes a DGParentedTree and puts a nucleus or satellite on top,
depending on the nuclearity of the root element of the tree.
"""
if root_id is None:
root_id = tree.root_id
elem = self.elem_dict[root_id]
if elem['nuclearity'] == 'nucleus':
return n_wrap(tree, debug=debug, root_id=root_id)
else:
return s_wrap(tree, debug=debug, root_id=root_id) |
def create_jail(name, arch, version="9.0-RELEASE"):
    '''
    Creates a new poudriere jail if one does not exist

    *NOTE* creating a new jail will take some time the master is not hanging

    CLI Example:

    .. code-block:: bash

        salt '*' poudriere.create_jail 90amd64 amd64
    '''
    # A poudriere config file must exist before a jail can be created.
    _check_config_exists()
    # Nothing to do if the jail already exists.
    if is_jail(name):
        return '{0} already exists'.format(name)
    __salt__['cmd.run'](
        'poudriere jails -c -j {0} -v {1} -a {2}'.format(name, version, arch))
    # Make jail pkgng aware
    make_pkgng_aware(name)
    # Report whether the jail actually came into existence.
    if is_jail(name):
        return 'Created jail {0}'.format(name)
    return 'Issue creating jail {0}'.format(name)
*NOTE* creating a new jail will take some time the master is not hanging
CLI Example:
.. code-block:: bash
salt '*' poudriere.create_jail 90amd64 amd64 | Below is the the instruction that describes the task:
### Input:
Creates a new poudriere jail if one does not exist
*NOTE* creating a new jail will take some time the master is not hanging
CLI Example:
.. code-block:: bash
salt '*' poudriere.create_jail 90amd64 amd64
### Response:
def create_jail(name, arch, version="9.0-RELEASE"):
'''
Creates a new poudriere jail if one does not exist
*NOTE* creating a new jail will take some time the master is not hanging
CLI Example:
.. code-block:: bash
salt '*' poudriere.create_jail 90amd64 amd64
'''
# Config file must be on system to create a poudriere jail
_check_config_exists()
# Check if the jail is there
if is_jail(name):
return '{0} already exists'.format(name)
cmd = 'poudriere jails -c -j {0} -v {1} -a {2}'.format(name, version, arch)
__salt__['cmd.run'](cmd)
# Make jail pkgng aware
make_pkgng_aware(name)
# Make sure the jail was created
if is_jail(name):
return 'Created jail {0}'.format(name)
return 'Issue creating jail {0}'.format(name) |
def _split_what(what):
    """
    Returns a tuple of `frozenset`s of classes and attributes.
    """
    classes = frozenset(item for item in what if isclass(item))
    attributes = frozenset(
        item for item in what if isinstance(item, Attribute))
    return classes, attributes
### Input:
Returns a tuple of `frozenset`s of classes and attributes.
### Response:
def _split_what(what):
"""
Returns a tuple of `frozenset`s of classes and attributes.
"""
return (
frozenset(cls for cls in what if isclass(cls)),
frozenset(cls for cls in what if isinstance(cls, Attribute)),
) |
def patch_resource(self, session, json_data, api_type, obj_id):
    """
    Replacement of resource values.

    Validates the JSON:API payload against the mapped model, patches any
    supplied relationships and attributes, commits, and returns the
    refreshed resource document.

    :param session: SQLAlchemy session
    :param json_data: Request JSON Data
    :param api_type: Type of the resource
    :param obj_id: ID of the resource
    :raises BadRequestError: malformed payload, or id/type mismatch
    :raises ValidationError: the database or a setter rejected the values
    """
    model = self._fetch_model(api_type)
    resource = self._fetch_resource(session, api_type, obj_id,
                                    Permissions.EDIT)
    self._check_json_data(json_data)
    orm_desc_keys = resource.__mapper__.all_orm_descriptors.keys()
    # JSON:API requires both `type` and `id`, matching the target resource.
    if not ({'type', 'id'} <= set(json_data['data'].keys())):
        raise BadRequestError('Missing type or id')
    if str(json_data['data']['id']) != str(resource.id):
        raise BadRequestError('IDs do not match')
    if json_data['data']['type'] != resource.__jsonapi_type__:
        raise BadRequestError('Type does not match')
    json_data['data'].setdefault('relationships', {})
    json_data['data'].setdefault('attributes', {})
    # Reject relationship keys that do not map onto the model.
    missing_keys = set(json_data['data'].get('relationships', {}).keys()) \
        - set(resource.__jsonapi_map_to_py__.keys())
    if missing_keys:
        raise BadRequestError(
            '{} not relationships for {}.{}'.format(
                ', '.join(list(missing_keys)),
                model.__jsonapi_type__, resource.id))
    attrs_to_ignore = {'__mapper__', 'id'}
    session.add(resource)
    try:
        # Patch each supplied relationship; its local FK columns (and the
        # relationship key itself) are excluded from the attribute pass.
        for key, relationship in resource.__mapper__.relationships.items():
            api_key = resource.__jsonapi_map_to_api__[key]
            attrs_to_ignore |= set(relationship.local_columns) | {key}
            if api_key not in json_data['data']['relationships'].keys():
                continue
            self.patch_relationship(
                session, json_data['data']['relationships'][api_key],
                model.__jsonapi_type__, resource.id, api_key)
        # Translate the payload's API attribute names to python names and
        # make sure they all exist on the model.
        data_keys = set(map((
            lambda x: resource.__jsonapi_map_to_py__.get(x, None)),
            json_data['data']['attributes'].keys()))
        model_keys = set(orm_desc_keys) - attrs_to_ignore
        if not data_keys <= model_keys:
            raise BadRequestError(
                '{} not attributes for {}.{}'.format(
                    ', '.join(list(data_keys - model_keys)),
                    model.__jsonapi_type__, resource.id))
        for key in data_keys & model_keys:
            setter = get_attr_desc(resource, key, AttributeActions.SET)
            setter(resource, json_data['data']['attributes'][resource.__jsonapi_map_to_api__[key]])  # NOQA
        session.commit()
    except IntegrityError as e:
        session.rollback()
        raise ValidationError(str(e.orig))
    except AssertionError as e:  # pragma: no cover
        session.rollback()
        # BUGFIX: AssertionError has no `.msg` attribute; the original
        # `e.msg` would raise AttributeError while handling the failure.
        raise ValidationError(str(e))
    except TypeError:
        session.rollback()
        raise ValidationError('Incompatible data type')
    return self.get_resource(
        session, {}, model.__jsonapi_type__, resource.id)
:param session: SQLAlchemy session
:param json_data: Request JSON Data
:param api_type: Type of the resource
:param obj_id: ID of the resource | Below is the the instruction that describes the task:
### Input:
Replacement of resource values.
:param session: SQLAlchemy session
:param json_data: Request JSON Data
:param api_type: Type of the resource
:param obj_id: ID of the resource
### Response:
def patch_resource(self, session, json_data, api_type, obj_id):
"""
Replacement of resource values.
:param session: SQLAlchemy session
:param json_data: Request JSON Data
:param api_type: Type of the resource
:param obj_id: ID of the resource
"""
model = self._fetch_model(api_type)
resource = self._fetch_resource(session, api_type, obj_id,
Permissions.EDIT)
self._check_json_data(json_data)
orm_desc_keys = resource.__mapper__.all_orm_descriptors.keys()
if not ({'type', 'id'} <= set(json_data['data'].keys())):
raise BadRequestError('Missing type or id')
if str(json_data['data']['id']) != str(resource.id):
raise BadRequestError('IDs do not match')
if json_data['data']['type'] != resource.__jsonapi_type__:
raise BadRequestError('Type does not match')
json_data['data'].setdefault('relationships', {})
json_data['data'].setdefault('attributes', {})
missing_keys = set(json_data['data'].get('relationships', {}).keys()) \
- set(resource.__jsonapi_map_to_py__.keys())
if missing_keys:
raise BadRequestError(
'{} not relationships for {}.{}'.format(
', '.join(list(missing_keys)),
model.__jsonapi_type__, resource.id))
attrs_to_ignore = {'__mapper__', 'id'}
session.add(resource)
try:
for key, relationship in resource.__mapper__.relationships.items():
api_key = resource.__jsonapi_map_to_api__[key]
attrs_to_ignore |= set(relationship.local_columns) | {key}
if api_key not in json_data['data']['relationships'].keys():
continue
self.patch_relationship(
session, json_data['data']['relationships'][api_key],
model.__jsonapi_type__, resource.id, api_key)
data_keys = set(map((
lambda x: resource.__jsonapi_map_to_py__.get(x, None)),
json_data['data']['attributes'].keys()))
model_keys = set(orm_desc_keys) - attrs_to_ignore
if not data_keys <= model_keys:
raise BadRequestError(
'{} not attributes for {}.{}'.format(
', '.join(list(data_keys - model_keys)),
model.__jsonapi_type__, resource.id))
for key in data_keys & model_keys:
setter = get_attr_desc(resource, key, AttributeActions.SET)
setter(resource, json_data['data']['attributes'][resource.__jsonapi_map_to_api__[key]]) # NOQA
session.commit()
except IntegrityError as e:
session.rollback()
raise ValidationError(str(e.orig))
except AssertionError as e:
# pragma: no cover
session.rollback()
raise ValidationError(e.msg)
except TypeError as e:
session.rollback()
raise ValidationError('Incompatible data type')
return self.get_resource(
session, {}, model.__jsonapi_type__, resource.id) |
def gettext(message):
    """
    Translate the 'message' string. It uses the current thread to find the
    translation object to use. If no current translation is activated, the
    message will be run through the default translation object.
    """
    global _default
    # Lazily build the fallback translation on first use.
    _default = _default or translation(DEFAULT_LANGUAGE)
    # Prefer the translation activated for the current thread, if any.
    active_translation = getattr(_active, 'value', _default)
    return active_translation.gettext(message)
translation object to use. If no current translation is activated, the
message will be run through the default translation object. | Below is the the instruction that describes the task:
### Input:
Translate the 'message' string. It uses the current thread to find the
translation object to use. If no current translation is activated, the
message will be run through the default translation object.
### Response:
def gettext(message):
"""
Translate the 'message' string. It uses the current thread to find the
translation object to use. If no current translation is activated, the
message will be run through the default translation object.
"""
global _default
_default = _default or translation(DEFAULT_LANGUAGE)
translation_object = getattr(_active, 'value', _default)
result = translation_object.gettext(message)
return result |
def upload_pgp_keys():
    """ upload and/or update the PGP keys for editors, import them into PGP"""
    get_vars()
    # Remote staging directory for the uploaded public keys.
    upload_target = '/tmp/pgp_pubkeys.tmp'
    with fab.settings(fab.hide('running')):
        # Recreate a clean staging directory on the remote host.
        fab.run('rm -rf %s' % upload_target)
        fab.run('mkdir %s' % upload_target)
        local_key_path = path.join(fab.env['config_base'], fab.env.instance.config['local_pgpkey_path'])
        # NOTE(review): this literal has no placeholders, so .format(**AV)
        # is a no-op -- confirm whether a template was intended here.
        remote_key_path = '/var/briefkasten/pgp_pubkeys/'.format(**AV)
        # Push the local key files into the remote staging directory.
        rsync('-av', local_key_path, '{host_string}:%s' % upload_target)
        # The GnuPG home must be owned by the app user and private.
        fab.run('chown -R %s %s' % (AV['appuser'], remote_key_path))
        fab.run('chmod 700 %s' % remote_key_path)
        # Import the staged keys into the keyring at remote_key_path.
        with fab.shell_env(GNUPGHOME=remote_key_path):
            fab.sudo('''gpg --import %s/*.*''' % upload_target,
                     user=AV['appuser'], shell_escape=False)
        # Clean up the staging directory.
        fab.run('rm -rf %s' % upload_target)
### Input:
upload and/or update the PGP keys for editors, import them into PGP
### Response:
def upload_pgp_keys():
""" upload and/or update the PGP keys for editors, import them into PGP"""
get_vars()
upload_target = '/tmp/pgp_pubkeys.tmp'
with fab.settings(fab.hide('running')):
fab.run('rm -rf %s' % upload_target)
fab.run('mkdir %s' % upload_target)
local_key_path = path.join(fab.env['config_base'], fab.env.instance.config['local_pgpkey_path'])
remote_key_path = '/var/briefkasten/pgp_pubkeys/'.format(**AV)
rsync('-av', local_key_path, '{host_string}:%s' % upload_target)
fab.run('chown -R %s %s' % (AV['appuser'], remote_key_path))
fab.run('chmod 700 %s' % remote_key_path)
with fab.shell_env(GNUPGHOME=remote_key_path):
fab.sudo('''gpg --import %s/*.*''' % upload_target,
user=AV['appuser'], shell_escape=False)
fab.run('rm -rf %s' % upload_target) |
def top_corr(self, df):
    """Print per-tag frequency counts and, for each tag, its most
    correlated tags (correlation > 0.2, top 5), using ANSI colors."""
    # Column sums = occurrence count per tag.
    tag_freq = df.sum()
    # In-place descending sort.
    # NOTE(review): Series.sort()/iteritems() are legacy pandas/Python 2
    # APIs -- presumably this targets pandas < 0.20; confirm.
    tag_freq.sort(ascending=False)
    # fillna(1) treats undefined correlations as perfect self-correlation.
    corr = df.corr().fillna(1)
    corr_dict = corr.to_dict()
    for tag, count in tag_freq.iteritems():
        # Trailing comma keeps the correlated tags on the same line.
        print ' %s%s: %s%s%s (' % (color.Green, tag, color.LightBlue, count, color.Normal),
        # Sort this tag's correlations, strongest first.
        tag_corrs = sorted(corr_dict[tag].iteritems(), key=operator.itemgetter(1), reverse=True)
        for corr_tag, value in tag_corrs[:5]:
            if corr_tag != tag and (value > .2):
                print '%s%s:%s%.1f' % (color.Green, corr_tag, color.LightBlue, value),
        print '%s)' % color.Normal
### Input:
Give aggregation counts and correlations
### Response:
def top_corr(self, df):
"""Give aggregation counts and correlations"""
tag_freq = df.sum()
tag_freq.sort(ascending=False)
corr = df.corr().fillna(1)
corr_dict = corr.to_dict()
for tag, count in tag_freq.iteritems():
print ' %s%s: %s%s%s (' % (color.Green, tag, color.LightBlue, count, color.Normal),
tag_corrs = sorted(corr_dict[tag].iteritems(), key=operator.itemgetter(1), reverse=True)
for corr_tag, value in tag_corrs[:5]:
if corr_tag != tag and (value > .2):
print '%s%s:%s%.1f' % (color.Green, corr_tag, color.LightBlue, value),
print '%s)' % color.Normal |
def CoarsePepper(p=0, size_px=None, size_percent=None, per_channel=False, min_size=4, name=None, deterministic=False,
                 random_state=None):
    """
    Adds coarse pepper noise to an image, i.e. rectangles that contain noisy black-ish pixels.

    dtype support::

        See ``imgaug.augmenters.arithmetic.ReplaceElementwise``.

    Parameters
    ----------
    p : float or tuple of float or list of float or imgaug.parameters.StochasticParameter, optional
        Probability of changing a pixel to pepper noise.

            * If a float, then that value will be used for all images as the
              probability.
            * If a tuple ``(a, b)``, then a probability will be sampled per image
              from the range ``a <= x <= b.``
            * If a list, then a random value will be sampled from that list
              per image.
            * If a StochasticParameter, then this parameter will be used as
              the *mask*, i.e. it is expected to contain values between
              0.0 and 1.0, where 1.0 means that pepper is to be added
              at that location.

    size_px : int or tuple of int or imgaug.parameters.StochasticParameter, optional
        The size of the lower resolution image from which to sample the noise
        mask in absolute pixel dimensions.

            * If an integer, then that size will be used for both height and
              width. E.g. a value of 3 would lead to a ``3x3`` mask, which is then
              upsampled to ``HxW``, where ``H`` is the image height and ``W`` the
              image width.
            * If a tuple ``(a, b)``, then two values ``M``, ``N`` will be sampled from the
              range ``[a..b]`` and the mask will be generated at size ``MxN``, then
              upsampled to ``HxW``.
            * If a StochasticParameter, then this parameter will be used to
              determine the sizes. It is expected to be discrete.

    size_percent : float or tuple of float or imgaug.parameters.StochasticParameter, optional
        The size of the lower resolution image from which to sample the noise
        mask *in percent* of the input image.

            * If a float, then that value will be used as the percentage of the
              height and width (relative to the original size). E.g. for value
              p, the mask will be sampled from ``(p*H)x(p*W)`` and later upsampled
              to ``HxW``.
            * If a tuple ``(a, b)``, then two values ``m``, ``n`` will be sampled from the
              interval ``(a, b)`` and used as the percentages, i.e the mask size
              will be ``(m*H)x(n*W)``.
            * If a StochasticParameter, then this parameter will be used to
              sample the percentage values. It is expected to be continuous.

    per_channel : bool or float, optional
        Whether to use the same value (is dropped / is not dropped)
        for all channels of a pixel (False) or to sample a new value for each
        channel (True).
        If this value is a float ``p``, then for ``p`` percent of all images
        `per_channel` will be treated as True, otherwise as False.

    min_size : int, optional
        Minimum size of the low resolution mask, both width and height. If
        `size_percent` or `size_px` leads to a lower value than this, `min_size`
        will be used instead. This should never have a value of less than 2,
        otherwise one may end up with a 1x1 low resolution mask, leading easily
        to the whole image being replaced.

    name : None or str, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    deterministic : bool, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    random_state : None or int or numpy.random.RandomState, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    Examples
    --------
    >>> aug = iaa.CoarsePepper(0.05, size_percent=(0.01, 0.1))

    Replaces 5 percent of all pixels with pepper in an image that has
    1 to 10 percent of the input image size, then upscales the results
    to the input image size, leading to large rectangular areas being replaced.
    """
    # Normalize `p` into a stochastic parameter producing the per-pixel mask.
    mask = iap.handle_probability_param(p, "p", tuple_to_uniform=True, list_to_choice=True)
    # Sample the mask at a lower resolution (rectangles appear when it is
    # upscaled); exactly one of size_px / size_percent must be given.
    if size_px is not None:
        mask_low = iap.FromLowerResolution(other_param=mask, size_px=size_px, min_size=min_size)
    elif size_percent is not None:
        mask_low = iap.FromLowerResolution(other_param=mask, size_percent=size_percent, min_size=min_size)
    else:
        raise Exception("Either size_px or size_percent must be set.")
    # Beta(0.5, 0.5) shifted to [-0.5, 0.5], then sign-forced so the +0.5
    # lands in [0.0, 0.5] -- i.e. only dark ("pepper") replacement values;
    # *255 scales them to the uint8 intensity range.
    replacement01 = iap.ForceSign(
        iap.Beta(0.5, 0.5) - 0.5,
        positive=False,
        mode="invert"
    ) + 0.5
    replacement = replacement01 * 255
    if name is None:
        name = "Unnamed%s" % (ia.caller_name(),)
    return ReplaceElementwise(
        mask=mask_low,
        replacement=replacement,
        per_channel=per_channel,
        name=name,
        deterministic=deterministic,
        random_state=random_state
    )
dtype support::
See ``imgaug.augmenters.arithmetic.ReplaceElementwise``.
Parameters
----------
p : float or tuple of float or list of float or imgaug.parameters.StochasticParameter, optional
Probability of changing a pixel to pepper noise.
* If a float, then that value will be used for all images as the
probability.
* If a tuple ``(a, b)``, then a probability will be sampled per image
from the range ``a <= x <= b.``
* If a list, then a random value will be sampled from that list
per image.
* If a StochasticParameter, then this parameter will be used as
the *mask*, i.e. it is expected to contain values between
0.0 and 1.0, where 1.0 means that pepper is to be added
at that location.
size_px : int or tuple of int or imgaug.parameters.StochasticParameter, optional
The size of the lower resolution image from which to sample the noise
mask in absolute pixel dimensions.
* If an integer, then that size will be used for both height and
width. E.g. a value of 3 would lead to a ``3x3`` mask, which is then
upsampled to ``HxW``, where ``H`` is the image size and W the image width.
* If a tuple ``(a, b)``, then two values ``M``, ``N`` will be sampled from the
range ``[a..b]`` and the mask will be generated at size ``MxN``, then
upsampled to ``HxW``.
* If a StochasticParameter, then this parameter will be used to
determine the sizes. It is expected to be discrete.
size_percent : float or tuple of float or imgaug.parameters.StochasticParameter, optional
The size of the lower resolution image from which to sample the noise
mask *in percent* of the input image.
* If a float, then that value will be used as the percentage of the
height and width (relative to the original size). E.g. for value
p, the mask will be sampled from ``(p*H)x(p*W)`` and later upsampled
to ``HxW``.
* If a tuple ``(a, b)``, then two values ``m``, ``n`` will be sampled from the
interval ``(a, b)`` and used as the percentages, i.e the mask size
will be ``(m*H)x(n*W)``.
* If a StochasticParameter, then this parameter will be used to
sample the percentage values. It is expected to be continuous.
per_channel : bool or float, optional
Whether to use the same value (is dropped / is not dropped)
for all channels of a pixel (False) or to sample a new value for each
channel (True).
If this value is a float ``p``, then for ``p`` percent of all images
`per_channel` will be treated as True, otherwise as False.
min_size : int, optional
Minimum size of the low resolution mask, both width and height. If
`size_percent` or `size_px` leads to a lower value than this, `min_size`
will be used instead. This should never have a value of less than 2,
otherwise one may end up with a 1x1 low resolution mask, leading easily
to the whole image being replaced.
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
deterministic : bool, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or numpy.random.RandomState, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
Examples
--------
>>> aug = iaa.CoarsePepper(0.05, size_percent=(0.01, 0.1))
Replaces 5 percent of all pixels with pepper in an image that has
1 to 10 percent of the input image size, then upscales the results
to the input image size, leading to large rectangular areas being replaced. | Below is the the instruction that describes the task:
### Input:
Adds coarse pepper noise to an image, i.e. rectangles that contain noisy black-ish pixels.
dtype support::
See ``imgaug.augmenters.arithmetic.ReplaceElementwise``.
Parameters
----------
p : float or tuple of float or list of float or imgaug.parameters.StochasticParameter, optional
Probability of changing a pixel to pepper noise.
* If a float, then that value will be used for all images as the
probability.
* If a tuple ``(a, b)``, then a probability will be sampled per image
from the range ``a <= x <= b.``
* If a list, then a random value will be sampled from that list
per image.
* If a StochasticParameter, then this parameter will be used as
the *mask*, i.e. it is expected to contain values between
0.0 and 1.0, where 1.0 means that pepper is to be added
at that location.
size_px : int or tuple of int or imgaug.parameters.StochasticParameter, optional
The size of the lower resolution image from which to sample the noise
mask in absolute pixel dimensions.
* If an integer, then that size will be used for both height and
width. E.g. a value of 3 would lead to a ``3x3`` mask, which is then
upsampled to ``HxW``, where ``H`` is the image size and W the image width.
* If a tuple ``(a, b)``, then two values ``M``, ``N`` will be sampled from the
range ``[a..b]`` and the mask will be generated at size ``MxN``, then
upsampled to ``HxW``.
* If a StochasticParameter, then this parameter will be used to
determine the sizes. It is expected to be discrete.
size_percent : float or tuple of float or imgaug.parameters.StochasticParameter, optional
The size of the lower resolution image from which to sample the noise
mask *in percent* of the input image.
* If a float, then that value will be used as the percentage of the
height and width (relative to the original size). E.g. for value
p, the mask will be sampled from ``(p*H)x(p*W)`` and later upsampled
to ``HxW``.
* If a tuple ``(a, b)``, then two values ``m``, ``n`` will be sampled from the
interval ``(a, b)`` and used as the percentages, i.e the mask size
will be ``(m*H)x(n*W)``.
* If a StochasticParameter, then this parameter will be used to
sample the percentage values. It is expected to be continuous.
per_channel : bool or float, optional
Whether to use the same value (is dropped / is not dropped)
for all channels of a pixel (False) or to sample a new value for each
channel (True).
If this value is a float ``p``, then for ``p`` percent of all images
`per_channel` will be treated as True, otherwise as False.
min_size : int, optional
Minimum size of the low resolution mask, both width and height. If
`size_percent` or `size_px` leads to a lower value than this, `min_size`
will be used instead. This should never have a value of less than 2,
otherwise one may end up with a 1x1 low resolution mask, leading easily
to the whole image being replaced.
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
deterministic : bool, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or numpy.random.RandomState, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
Examples
--------
>>> aug = iaa.CoarsePepper(0.05, size_percent=(0.01, 0.1))
Replaces 5 percent of all pixels with pepper in an image that has
1 to 10 percent of the input image size, then upscales the results
to the input image size, leading to large rectangular areas being replaced.
### Response:
def CoarsePepper(p=0, size_px=None, size_percent=None, per_channel=False, min_size=4, name=None, deterministic=False,
random_state=None):
"""
Adds coarse pepper noise to an image, i.e. rectangles that contain noisy black-ish pixels.
dtype support::
See ``imgaug.augmenters.arithmetic.ReplaceElementwise``.
Parameters
----------
p : float or tuple of float or list of float or imgaug.parameters.StochasticParameter, optional
Probability of changing a pixel to pepper noise.
* If a float, then that value will be used for all images as the
probability.
* If a tuple ``(a, b)``, then a probability will be sampled per image
from the range ``a <= x <= b.``
* If a list, then a random value will be sampled from that list
per image.
* If a StochasticParameter, then this parameter will be used as
the *mask*, i.e. it is expected to contain values between
0.0 and 1.0, where 1.0 means that pepper is to be added
at that location.
size_px : int or tuple of int or imgaug.parameters.StochasticParameter, optional
The size of the lower resolution image from which to sample the noise
mask in absolute pixel dimensions.
* If an integer, then that size will be used for both height and
width. E.g. a value of 3 would lead to a ``3x3`` mask, which is then
upsampled to ``HxW``, where ``H`` is the image size and W the image width.
* If a tuple ``(a, b)``, then two values ``M``, ``N`` will be sampled from the
range ``[a..b]`` and the mask will be generated at size ``MxN``, then
upsampled to ``HxW``.
* If a StochasticParameter, then this parameter will be used to
determine the sizes. It is expected to be discrete.
size_percent : float or tuple of float or imgaug.parameters.StochasticParameter, optional
The size of the lower resolution image from which to sample the noise
mask *in percent* of the input image.
* If a float, then that value will be used as the percentage of the
height and width (relative to the original size). E.g. for value
p, the mask will be sampled from ``(p*H)x(p*W)`` and later upsampled
to ``HxW``.
* If a tuple ``(a, b)``, then two values ``m``, ``n`` will be sampled from the
interval ``(a, b)`` and used as the percentages, i.e the mask size
will be ``(m*H)x(n*W)``.
* If a StochasticParameter, then this parameter will be used to
sample the percentage values. It is expected to be continuous.
per_channel : bool or float, optional
Whether to use the same value (is dropped / is not dropped)
for all channels of a pixel (False) or to sample a new value for each
channel (True).
If this value is a float ``p``, then for ``p`` percent of all images
`per_channel` will be treated as True, otherwise as False.
min_size : int, optional
Minimum size of the low resolution mask, both width and height. If
`size_percent` or `size_px` leads to a lower value than this, `min_size`
will be used instead. This should never have a value of less than 2,
otherwise one may end up with a 1x1 low resolution mask, leading easily
to the whole image being replaced.
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
deterministic : bool, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or numpy.random.RandomState, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
Examples
--------
>>> aug = iaa.CoarsePepper(0.05, size_percent=(0.01, 0.1))
Replaces 5 percent of all pixels with pepper in an image that has
1 to 10 percent of the input image size, then upscales the results
to the input image size, leading to large rectangular areas being replaced.
"""
mask = iap.handle_probability_param(p, "p", tuple_to_uniform=True, list_to_choice=True)
if size_px is not None:
mask_low = iap.FromLowerResolution(other_param=mask, size_px=size_px, min_size=min_size)
elif size_percent is not None:
mask_low = iap.FromLowerResolution(other_param=mask, size_percent=size_percent, min_size=min_size)
else:
raise Exception("Either size_px or size_percent must be set.")
replacement01 = iap.ForceSign(
iap.Beta(0.5, 0.5) - 0.5,
positive=False,
mode="invert"
) + 0.5
replacement = replacement01 * 255
if name is None:
name = "Unnamed%s" % (ia.caller_name(),)
return ReplaceElementwise(
mask=mask_low,
replacement=replacement,
per_channel=per_channel,
name=name,
deterministic=deterministic,
random_state=random_state
) |
def cli(env):
"""List all active quotes on an account"""
table = formatting.Table([
'Id', 'Name', 'Created', 'Expiration', 'Status', 'Package Name', 'Package Id'
])
table.align['Name'] = 'l'
table.align['Package Name'] = 'r'
table.align['Package Id'] = 'l'
manager = ordering.OrderingManager(env.client)
items = manager.get_quotes()
for item in items:
package = item['order']['items'][0]['package']
table.add_row([
item.get('id'),
item.get('name'),
clean_time(item.get('createDate')),
clean_time(item.get('modifyDate')),
item.get('status'),
package.get('keyName'),
package.get('id')
])
env.fout(table) | List all active quotes on an account | Below is the the instruction that describes the task:
### Input:
List all active quotes on an account
### Response:
def cli(env):
"""List all active quotes on an account"""
table = formatting.Table([
'Id', 'Name', 'Created', 'Expiration', 'Status', 'Package Name', 'Package Id'
])
table.align['Name'] = 'l'
table.align['Package Name'] = 'r'
table.align['Package Id'] = 'l'
manager = ordering.OrderingManager(env.client)
items = manager.get_quotes()
for item in items:
package = item['order']['items'][0]['package']
table.add_row([
item.get('id'),
item.get('name'),
clean_time(item.get('createDate')),
clean_time(item.get('modifyDate')),
item.get('status'),
package.get('keyName'),
package.get('id')
])
env.fout(table) |
def HasColumn(self, table_name, column_name):
"""Determines if a specific column exists.
Args:
table_name (str): name of the table.
column_name (str): name of the column.
Returns:
bool: True if the column exists.
Raises:
IOError: if the database file is not opened.
OSError: if the database file is not opened.
"""
if not self._connection:
raise IOError('Not opened.')
if not column_name:
return False
table_name = table_name.lower()
column_names = self._column_names_per_table.get(table_name, None)
if column_names is None:
column_names = []
self._cursor.execute(self._HAS_COLUMN_QUERY.format(table_name))
for row in self._cursor.fetchall():
if not row[1]:
continue
row_column_name = row[1]
if isinstance(row_column_name, bytes):
row_column_name = row_column_name.decode('utf-8')
column_names.append(row_column_name.lower())
self._column_names_per_table[table_name] = column_names
column_name = column_name.lower()
return column_name in column_names | Determines if a specific column exists.
Args:
table_name (str): name of the table.
column_name (str): name of the column.
Returns:
bool: True if the column exists.
Raises:
IOError: if the database file is not opened.
OSError: if the database file is not opened. | Below is the the instruction that describes the task:
### Input:
Determines if a specific column exists.
Args:
table_name (str): name of the table.
column_name (str): name of the column.
Returns:
bool: True if the column exists.
Raises:
IOError: if the database file is not opened.
OSError: if the database file is not opened.
### Response:
def HasColumn(self, table_name, column_name):
"""Determines if a specific column exists.
Args:
table_name (str): name of the table.
column_name (str): name of the column.
Returns:
bool: True if the column exists.
Raises:
IOError: if the database file is not opened.
OSError: if the database file is not opened.
"""
if not self._connection:
raise IOError('Not opened.')
if not column_name:
return False
table_name = table_name.lower()
column_names = self._column_names_per_table.get(table_name, None)
if column_names is None:
column_names = []
self._cursor.execute(self._HAS_COLUMN_QUERY.format(table_name))
for row in self._cursor.fetchall():
if not row[1]:
continue
row_column_name = row[1]
if isinstance(row_column_name, bytes):
row_column_name = row_column_name.decode('utf-8')
column_names.append(row_column_name.lower())
self._column_names_per_table[table_name] = column_names
column_name = column_name.lower()
return column_name in column_names |
def _parse_sequence_tag(self):
'''Parses the sequence and atomic mass.'''
#main_tags = self._dom.getElementsByTagName("uniprot")
#assert(len(main_tags) == 1)
#entry_tags = main_tags[0].getElementsByTagName("entry")
#assert(len(entry_tags) == 1)
#entry_tags[0]
entry_tag = self.entry_tag
# only get sequence tags that are direct children of the entry tag (sequence tags can also be children of entry.comment.conflict)
sequence_tags = [child for child in entry_tag.childNodes if child.nodeType == child.ELEMENT_NODE and child.tagName == 'sequence']
assert(len(sequence_tags) == 1)
sequence_tag = sequence_tags[0]
# atomic mass, sequence, CRC64 digest
self.atomic_mass = float(sequence_tag.getAttribute("mass"))
self.sequence = "".join(sequence_tag.firstChild.nodeValue.strip().split("\n"))
self.sequence_length = int(sequence_tag.getAttribute("length"))
self.CRC64Digest = sequence_tag.getAttribute("checksum") | Parses the sequence and atomic mass. | Below is the the instruction that describes the task:
### Input:
Parses the sequence and atomic mass.
### Response:
def _parse_sequence_tag(self):
'''Parses the sequence and atomic mass.'''
#main_tags = self._dom.getElementsByTagName("uniprot")
#assert(len(main_tags) == 1)
#entry_tags = main_tags[0].getElementsByTagName("entry")
#assert(len(entry_tags) == 1)
#entry_tags[0]
entry_tag = self.entry_tag
# only get sequence tags that are direct children of the entry tag (sequence tags can also be children of entry.comment.conflict)
sequence_tags = [child for child in entry_tag.childNodes if child.nodeType == child.ELEMENT_NODE and child.tagName == 'sequence']
assert(len(sequence_tags) == 1)
sequence_tag = sequence_tags[0]
# atomic mass, sequence, CRC64 digest
self.atomic_mass = float(sequence_tag.getAttribute("mass"))
self.sequence = "".join(sequence_tag.firstChild.nodeValue.strip().split("\n"))
self.sequence_length = int(sequence_tag.getAttribute("length"))
self.CRC64Digest = sequence_tag.getAttribute("checksum") |
def gen_lines_from_textfiles(
files: Iterable[TextIO]) -> Generator[str, None, None]:
"""
Generates lines from file-like objects.
Args:
files: iterable of :class:`TextIO` objects
Yields:
each line of all the files
"""
for file in files:
for line in file:
yield line | Generates lines from file-like objects.
Args:
files: iterable of :class:`TextIO` objects
Yields:
each line of all the files | Below is the the instruction that describes the task:
### Input:
Generates lines from file-like objects.
Args:
files: iterable of :class:`TextIO` objects
Yields:
each line of all the files
### Response:
def gen_lines_from_textfiles(
files: Iterable[TextIO]) -> Generator[str, None, None]:
"""
Generates lines from file-like objects.
Args:
files: iterable of :class:`TextIO` objects
Yields:
each line of all the files
"""
for file in files:
for line in file:
yield line |
def total_seconds(td):
"""For those with older versions of Python, a pure-Python
implementation of Python 2.7's :meth:`~datetime.timedelta.total_seconds`.
Args:
td (datetime.timedelta): The timedelta to convert to seconds.
Returns:
float: total number of seconds
>>> td = timedelta(days=4, seconds=33)
>>> total_seconds(td)
345633.0
"""
a_milli = 1000000.0
td_ds = td.seconds + (td.days * 86400) # 24 * 60 * 60
td_micro = td.microseconds + (td_ds * a_milli)
return td_micro / a_milli | For those with older versions of Python, a pure-Python
implementation of Python 2.7's :meth:`~datetime.timedelta.total_seconds`.
Args:
td (datetime.timedelta): The timedelta to convert to seconds.
Returns:
float: total number of seconds
>>> td = timedelta(days=4, seconds=33)
>>> total_seconds(td)
345633.0 | Below is the the instruction that describes the task:
### Input:
For those with older versions of Python, a pure-Python
implementation of Python 2.7's :meth:`~datetime.timedelta.total_seconds`.
Args:
td (datetime.timedelta): The timedelta to convert to seconds.
Returns:
float: total number of seconds
>>> td = timedelta(days=4, seconds=33)
>>> total_seconds(td)
345633.0
### Response:
def total_seconds(td):
"""For those with older versions of Python, a pure-Python
implementation of Python 2.7's :meth:`~datetime.timedelta.total_seconds`.
Args:
td (datetime.timedelta): The timedelta to convert to seconds.
Returns:
float: total number of seconds
>>> td = timedelta(days=4, seconds=33)
>>> total_seconds(td)
345633.0
"""
a_milli = 1000000.0
td_ds = td.seconds + (td.days * 86400) # 24 * 60 * 60
td_micro = td.microseconds + (td_ds * a_milli)
return td_micro / a_milli |
def calc_log_size(request, calc_id):
"""
Get the current number of lines in the log
"""
try:
response_data = logs.dbcmd('get_log_size', calc_id)
except dbapi.NotFound:
return HttpResponseNotFound()
return HttpResponse(content=json.dumps(response_data), content_type=JSON) | Get the current number of lines in the log | Below is the the instruction that describes the task:
### Input:
Get the current number of lines in the log
### Response:
def calc_log_size(request, calc_id):
"""
Get the current number of lines in the log
"""
try:
response_data = logs.dbcmd('get_log_size', calc_id)
except dbapi.NotFound:
return HttpResponseNotFound()
return HttpResponse(content=json.dumps(response_data), content_type=JSON) |
def _handle_status(self, key, value):
"""Parse a status code from the attached GnuPG process.
:raises: :exc:`~exceptions.ValueError` if the status message is unknown.
"""
if key in (
"USERID_HINT",
"NEED_PASSPHRASE",
"BAD_PASSPHRASE",
"GOOD_PASSPHRASE",
"MISSING_PASSPHRASE",
"PINENTRY_LAUNCHED",
"BEGIN_SIGNING",
"CARDCTRL",
"INV_SGNR",
"SIGEXPIRED",
"KEY_CONSIDERED",
):
self.status = key.replace("_", " ").lower()
elif key == "SIG_CREATED":
(self.sig_type, self.sig_algo, self.sig_hash_algo,
self.what, self.timestamp, self.fingerprint) = value.split()
elif key == "KEYEXPIRED":
self.status = "skipped signing key, key expired"
if (value is not None) and (len(value) > 0):
self.status += " on {}".format(str(value))
elif key == "KEYREVOKED":
self.status = "skipped signing key, key revoked"
if (value is not None) and (len(value) > 0):
self.status += " on {}".format(str(value))
elif key == "NODATA":
self.status = nodata(value)
elif key == "PROGRESS":
self.status = progress(value.split(' ', 1)[0])
else:
raise ValueError("Unknown status message: %r" % key) | Parse a status code from the attached GnuPG process.
:raises: :exc:`~exceptions.ValueError` if the status message is unknown. | Below is the the instruction that describes the task:
### Input:
Parse a status code from the attached GnuPG process.
:raises: :exc:`~exceptions.ValueError` if the status message is unknown.
### Response:
def _handle_status(self, key, value):
"""Parse a status code from the attached GnuPG process.
:raises: :exc:`~exceptions.ValueError` if the status message is unknown.
"""
if key in (
"USERID_HINT",
"NEED_PASSPHRASE",
"BAD_PASSPHRASE",
"GOOD_PASSPHRASE",
"MISSING_PASSPHRASE",
"PINENTRY_LAUNCHED",
"BEGIN_SIGNING",
"CARDCTRL",
"INV_SGNR",
"SIGEXPIRED",
"KEY_CONSIDERED",
):
self.status = key.replace("_", " ").lower()
elif key == "SIG_CREATED":
(self.sig_type, self.sig_algo, self.sig_hash_algo,
self.what, self.timestamp, self.fingerprint) = value.split()
elif key == "KEYEXPIRED":
self.status = "skipped signing key, key expired"
if (value is not None) and (len(value) > 0):
self.status += " on {}".format(str(value))
elif key == "KEYREVOKED":
self.status = "skipped signing key, key revoked"
if (value is not None) and (len(value) > 0):
self.status += " on {}".format(str(value))
elif key == "NODATA":
self.status = nodata(value)
elif key == "PROGRESS":
self.status = progress(value.split(' ', 1)[0])
else:
raise ValueError("Unknown status message: %r" % key) |
def poll_parser(poll):
"""
Parses a poll object
"""
if __is_deleted(poll):
return deleted_parser(poll)
if poll['type'] not in poll_types:
raise Exception('Not a poll type')
return Poll(
poll['id'],
poll['by'],
__check_key('kids', poll), # poll and pollopt differ this property
__check_key('parts', poll), # poll and pollopt differ this property
poll['score'],
poll['text'],
poll['time'],
poll['title'],
poll['type'],
) | Parses a poll object | Below is the the instruction that describes the task:
### Input:
Parses a poll object
### Response:
def poll_parser(poll):
"""
Parses a poll object
"""
if __is_deleted(poll):
return deleted_parser(poll)
if poll['type'] not in poll_types:
raise Exception('Not a poll type')
return Poll(
poll['id'],
poll['by'],
__check_key('kids', poll), # poll and pollopt differ this property
__check_key('parts', poll), # poll and pollopt differ this property
poll['score'],
poll['text'],
poll['time'],
poll['title'],
poll['type'],
) |
def get_items_of_credit_note_per_page(self, credit_note_id, per_page=1000, page=1):
"""
Get items of credit note per page
:param credit_note_id: the credit note id
:param per_page: How many objects per page. Default: 1000
:param page: Which page. Default: 1
:return: list
"""
return self._get_resource_per_page(
resource=CREDIT_NOTE_ITEMS,
per_page=per_page,
page=page,
params={'credit_note_id': credit_note_id},
) | Get items of credit note per page
:param credit_note_id: the credit note id
:param per_page: How many objects per page. Default: 1000
:param page: Which page. Default: 1
:return: list | Below is the the instruction that describes the task:
### Input:
Get items of credit note per page
:param credit_note_id: the credit note id
:param per_page: How many objects per page. Default: 1000
:param page: Which page. Default: 1
:return: list
### Response:
def get_items_of_credit_note_per_page(self, credit_note_id, per_page=1000, page=1):
"""
Get items of credit note per page
:param credit_note_id: the credit note id
:param per_page: How many objects per page. Default: 1000
:param page: Which page. Default: 1
:return: list
"""
return self._get_resource_per_page(
resource=CREDIT_NOTE_ITEMS,
per_page=per_page,
page=page,
params={'credit_note_id': credit_note_id},
) |
def get_input_files(dirname, *ext):
"""Returns files in passed directory, filtered by extension.
- dirname - path to input directory
- *ext - list of arguments describing permitted file extensions
"""
filelist = [f for f in os.listdir(dirname) if
os.path.splitext(f)[-1] in ext]
return [os.path.join(dirname, f) for f in filelist] | Returns files in passed directory, filtered by extension.
- dirname - path to input directory
- *ext - list of arguments describing permitted file extensions | Below is the the instruction that describes the task:
### Input:
Returns files in passed directory, filtered by extension.
- dirname - path to input directory
- *ext - list of arguments describing permitted file extensions
### Response:
def get_input_files(dirname, *ext):
"""Returns files in passed directory, filtered by extension.
- dirname - path to input directory
- *ext - list of arguments describing permitted file extensions
"""
filelist = [f for f in os.listdir(dirname) if
os.path.splitext(f)[-1] in ext]
return [os.path.join(dirname, f) for f in filelist] |
def _init_datastore_v3_stub(self, **stub_kwargs):
"""Initializes the datastore stub using nosegae config magic"""
task_args = dict(datastore_file=self._data_path)
task_args.update(stub_kwargs)
self.testbed.init_datastore_v3_stub(**task_args) | Initializes the datastore stub using nosegae config magic | Below is the the instruction that describes the task:
### Input:
Initializes the datastore stub using nosegae config magic
### Response:
def _init_datastore_v3_stub(self, **stub_kwargs):
"""Initializes the datastore stub using nosegae config magic"""
task_args = dict(datastore_file=self._data_path)
task_args.update(stub_kwargs)
self.testbed.init_datastore_v3_stub(**task_args) |
def get_pretty_format(self, include_id=True, max_name_length=0,
abbreviate=True):
"""Returns a nicely formatted string with the GO term information.
Parameters
----------
include_id: bool, optional
Include the GO term ID.
max_name_length: int, optional
Truncate the formatted string so that its total length does not
exceed this value.
abbreviate: bool, optional
Do not use abberviations (see ``_abbrev``) to shorten the GO term
name.
Returns
-------
str
The formatted string.
"""
name = self.name
if abbreviate:
for abb in self._abbrev:
name = re.sub(abb[0], abb[1], name)
if 3 <= max_name_length < len(name):
name = name[:(max_name_length-3)] + '...'
if include_id:
return "%s: %s (%s)" % (self.domain_short, name, self.id)
else:
return "%s: %s" % (self.domain_short, name) | Returns a nicely formatted string with the GO term information.
Parameters
----------
include_id: bool, optional
Include the GO term ID.
max_name_length: int, optional
Truncate the formatted string so that its total length does not
exceed this value.
abbreviate: bool, optional
Do not use abberviations (see ``_abbrev``) to shorten the GO term
name.
Returns
-------
str
The formatted string. | Below is the the instruction that describes the task:
### Input:
Returns a nicely formatted string with the GO term information.
Parameters
----------
include_id: bool, optional
Include the GO term ID.
max_name_length: int, optional
Truncate the formatted string so that its total length does not
exceed this value.
abbreviate: bool, optional
Do not use abberviations (see ``_abbrev``) to shorten the GO term
name.
Returns
-------
str
The formatted string.
### Response:
def get_pretty_format(self, include_id=True, max_name_length=0,
abbreviate=True):
"""Returns a nicely formatted string with the GO term information.
Parameters
----------
include_id: bool, optional
Include the GO term ID.
max_name_length: int, optional
Truncate the formatted string so that its total length does not
exceed this value.
abbreviate: bool, optional
Do not use abberviations (see ``_abbrev``) to shorten the GO term
name.
Returns
-------
str
The formatted string.
"""
name = self.name
if abbreviate:
for abb in self._abbrev:
name = re.sub(abb[0], abb[1], name)
if 3 <= max_name_length < len(name):
name = name[:(max_name_length-3)] + '...'
if include_id:
return "%s: %s (%s)" % (self.domain_short, name, self.id)
else:
return "%s: %s" % (self.domain_short, name) |
def UpdateInfo(self):
'''Updates information about the virtual machine. This information is associated with
the VMGuestLibHandle.
VMGuestLib_UpdateInfo requires similar CPU resources to a system call and
therefore can affect performance. If you are concerned about performance, minimize
the number of calls to VMGuestLib_UpdateInfo.
If your program uses multiple threads, each thread must use a different handle.
Otherwise, you must implement a locking scheme around update calls. The vSphere
Guest API does not implement internal locking around access with a handle.'''
ret = vmGuestLib.VMGuestLib_UpdateInfo(self.handle.value)
if ret != VMGUESTLIB_ERROR_SUCCESS: raise VMGuestLibException(ret) | Updates information about the virtual machine. This information is associated with
the VMGuestLibHandle.
VMGuestLib_UpdateInfo requires similar CPU resources to a system call and
therefore can affect performance. If you are concerned about performance, minimize
the number of calls to VMGuestLib_UpdateInfo.
If your program uses multiple threads, each thread must use a different handle.
Otherwise, you must implement a locking scheme around update calls. The vSphere
Guest API does not implement internal locking around access with a handle. | Below is the the instruction that describes the task:
### Input:
Updates information about the virtual machine. This information is associated with
the VMGuestLibHandle.
VMGuestLib_UpdateInfo requires similar CPU resources to a system call and
therefore can affect performance. If you are concerned about performance, minimize
the number of calls to VMGuestLib_UpdateInfo.
If your program uses multiple threads, each thread must use a different handle.
Otherwise, you must implement a locking scheme around update calls. The vSphere
Guest API does not implement internal locking around access with a handle.
### Response:
def UpdateInfo(self):
'''Updates information about the virtual machine. This information is associated with
the VMGuestLibHandle.
VMGuestLib_UpdateInfo requires similar CPU resources to a system call and
therefore can affect performance. If you are concerned about performance, minimize
the number of calls to VMGuestLib_UpdateInfo.
If your program uses multiple threads, each thread must use a different handle.
Otherwise, you must implement a locking scheme around update calls. The vSphere
Guest API does not implement internal locking around access with a handle.'''
ret = vmGuestLib.VMGuestLib_UpdateInfo(self.handle.value)
if ret != VMGUESTLIB_ERROR_SUCCESS: raise VMGuestLibException(ret) |
def get_padding_lengths(self) -> Dict[str, Dict[str, int]]:
"""
Returns a dictionary of padding lengths, keyed by field name. Each ``Field`` returns a
mapping from padding keys to actual lengths, and we just key that dictionary by field name.
"""
lengths = {}
for field_name, field in self.fields.items():
lengths[field_name] = field.get_padding_lengths()
return lengths | Returns a dictionary of padding lengths, keyed by field name. Each ``Field`` returns a
mapping from padding keys to actual lengths, and we just key that dictionary by field name. | Below is the the instruction that describes the task:
### Input:
Returns a dictionary of padding lengths, keyed by field name. Each ``Field`` returns a
mapping from padding keys to actual lengths, and we just key that dictionary by field name.
### Response:
def get_padding_lengths(self) -> Dict[str, Dict[str, int]]:
"""
Returns a dictionary of padding lengths, keyed by field name. Each ``Field`` returns a
mapping from padding keys to actual lengths, and we just key that dictionary by field name.
"""
lengths = {}
for field_name, field in self.fields.items():
lengths[field_name] = field.get_padding_lengths()
return lengths |
def _apply_changes(self):
"""Serialize our changes back to the PDF in memory
Depending how we are initialized, leave our metadata mark and producer.
"""
if self.mark:
self[QName(XMP_NS_XMP, 'MetadataDate')] = datetime.now().isoformat()
self[QName(XMP_NS_PDF, 'Producer')] = 'pikepdf ' + pikepdf_version
xml = self._get_xml_bytes()
self._pdf.Root.Metadata = Stream(self._pdf, xml)
self._pdf.Root.Metadata[Name.Type] = Name.Metadata
self._pdf.Root.Metadata[Name.Subtype] = Name.XML
if self.sync_docinfo:
self._update_docinfo() | Serialize our changes back to the PDF in memory
Depending how we are initialized, leave our metadata mark and producer. | Below is the the instruction that describes the task:
### Input:
Serialize our changes back to the PDF in memory
Depending how we are initialized, leave our metadata mark and producer.
### Response:
def _apply_changes(self):
"""Serialize our changes back to the PDF in memory
Depending how we are initialized, leave our metadata mark and producer.
"""
if self.mark:
self[QName(XMP_NS_XMP, 'MetadataDate')] = datetime.now().isoformat()
self[QName(XMP_NS_PDF, 'Producer')] = 'pikepdf ' + pikepdf_version
xml = self._get_xml_bytes()
self._pdf.Root.Metadata = Stream(self._pdf, xml)
self._pdf.Root.Metadata[Name.Type] = Name.Metadata
self._pdf.Root.Metadata[Name.Subtype] = Name.XML
if self.sync_docinfo:
self._update_docinfo() |
def DNN(input_shape,
dense_layers,
output_layer=[1, 'sigmoid'],
optimizer='adam',
loss='binary_crossentropy'):
"""Summary
Args:
input_shape (list): The shape of the input layer
targets (int): Number of targets
dense_layers (list): Dense layer descriptor [fully_connected]
optimizer (str or object optional): Keras optimizer as string or keras optimizer
Returns:
TYPE: model, build_arguments
"""
inputs = Input(shape=input_shape)
dense = inputs
for i, d in enumerate(dense_layers):
dense = Dense(d, activation='relu')(dense)
dense = BatchNormalization()(dense)
dense = Dropout(0.3)(dense)
output = Dense(output_layer[0], activation=output_layer[1])(dense)
model = Model(inputs=inputs, outputs=output)
model.compile(loss=loss, optimizer=optimizer, metrics=['accuracy'])
return model | Summary
Args:
input_shape (list): The shape of the input layer
targets (int): Number of targets
dense_layers (list): Dense layer descriptor [fully_connected]
optimizer (str or object optional): Keras optimizer as string or keras optimizer
Returns:
TYPE: model, build_arguments | Below is the the instruction that describes the task:
### Input:
Summary
Args:
input_shape (list): The shape of the input layer
targets (int): Number of targets
dense_layers (list): Dense layer descriptor [fully_connected]
optimizer (str or object optional): Keras optimizer as string or keras optimizer
Returns:
TYPE: model, build_arguments
### Response:
def DNN(input_shape,
        dense_layers,
        output_layer=[1, 'sigmoid'],
        optimizer='adam',
        loss='binary_crossentropy'):
    """Build and compile a fully-connected Keras classifier.

    Args:
        input_shape (list): The shape of the input layer.
        dense_layers (list): Unit counts for the hidden Dense layers,
            one entry per hidden block.
        output_layer (list, optional): ``[units, activation]`` for the
            output Dense layer. Defaults to ``[1, 'sigmoid']``.
        optimizer (str or keras optimizer, optional): Optimizer passed to
            ``model.compile``. Defaults to ``'adam'``.
        loss (str, optional): Loss function passed to ``model.compile``.
            Defaults to ``'binary_crossentropy'``.

    Returns:
        keras.Model: the compiled model.
    """
    inputs = Input(shape=input_shape)
    dense = inputs
    for i, d in enumerate(dense_layers):
        # Each hidden block: Dense(relu) -> BatchNorm -> Dropout(0.3).
        dense = Dense(d, activation='relu')(dense)
        dense = BatchNormalization()(dense)
        dense = Dropout(0.3)(dense)
    output = Dense(output_layer[0], activation=output_layer[1])(dense)
    model = Model(inputs=inputs, outputs=output)
    model.compile(loss=loss, optimizer=optimizer, metrics=['accuracy'])
    return model
def __add_min_max_value(
parser,
basename,
default_min,
default_max,
initial,
help_template):
"""
Generates parser entries for options
with a min, max, and default value.
Args:
parser: the parser to use.
basename: the base option name. Generated options will have flags
--basename-min, --basename-max, and --basename.
default_min: the default min value
default_max: the default max value
initial: the default initial value
help_template: the help string template.
$mmi will be replaced with min, max, or initial.
$name will be replaced with basename.
"""
help_template = Template(help_template)
parser.add(
'--{0}-min'.format(basename),
default=default_min,
type=float,
required=False,
help=help_template.substitute(mmi='min', name=basename))
parser.add(
'--{0}-max'.format(basename),
default=default_max,
type=float,
required=False,
help=help_template.substitute(mmi='max', name=basename))
parser.add(
'--{0}'.format(basename),
default=initial,
type=float,
required=False,
help=help_template.substitute(mmi='initial', name=basename)) | Generates parser entries for options
with a min, max, and default value.
Args:
parser: the parser to use.
basename: the base option name. Generated options will have flags
--basename-min, --basename-max, and --basename.
default_min: the default min value
default_max: the default max value
initial: the default initial value
help_template: the help string template.
$mmi will be replaced with min, max, or initial.
$name will be replaced with basename. | Below is the the instruction that describes the task:
### Input:
Generates parser entries for options
with a min, max, and default value.
Args:
parser: the parser to use.
basename: the base option name. Generated options will have flags
--basename-min, --basename-max, and --basename.
default_min: the default min value
default_max: the default max value
initial: the default initial value
help_template: the help string template.
$mmi will be replaced with min, max, or initial.
$name will be replaced with basename.
### Response:
def __add_min_max_value(
        parser,
        basename,
        default_min,
        default_max,
        initial,
        help_template):
    """Register ``--<basename>-min``, ``--<basename>-max`` and ``--<basename>``.

    Args:
        parser: the parser to register the options on.
        basename: base option name; generated flags are --basename-min,
            --basename-max, and --basename.
        default_min: default value for the min option.
        default_max: default value for the max option.
        initial: default value for the plain (initial) option.
        help_template: help string template. ``$mmi`` is replaced with
            min, max, or initial, and ``$name`` with basename.
    """
    tpl = Template(help_template)
    variants = (
        ('-min', default_min, 'min'),
        ('-max', default_max, 'max'),
        ('', initial, 'initial'),
    )
    for suffix, default, mmi in variants:
        parser.add(
            '--{0}{1}'.format(basename, suffix),
            default=default,
            type=float,
            required=False,
            help=tpl.substitute(mmi=mmi, name=basename))
def validate_uuid(value):
""" UUID 128-bit validator """
if value and not isinstance(value, UUID):
try:
return UUID(str(value), version=4)
except (AttributeError, ValueError):
raise ValidationError('not a valid UUID')
return value | UUID 128-bit validator | Below is the the instruction that describes the task:
### Input:
UUID 128-bit validator
### Response:
def validate_uuid(value):
    """Validate that *value* is, or parses as, a 128-bit UUID.

    Falsy values and existing :class:`UUID` instances pass through
    unchanged; anything else is parsed, raising ``ValidationError``
    on failure.
    """
    if not value or isinstance(value, UUID):
        return value
    try:
        return UUID(str(value), version=4)
    except (AttributeError, ValueError):
        raise ValidationError('not a valid UUID')
def update_shot_browser(self, project, releasetype):
"""Update the shot browser to the given project
:param releasetype: the releasetype for the model
:type releasetype: :data:`djadapter.RELEASETYPES`
:param project: the project of the shots
:type project: :class:`djadapter.models.Project`
:returns: None
:rtype: None
:raises: None
"""
if project is None:
self.shotbrws.set_model(None)
return
shotmodel = self.create_shot_model(project, releasetype)
self.shotbrws.set_model(shotmodel) | Update the shot browser to the given project
:param releasetype: the releasetype for the model
:type releasetype: :data:`djadapter.RELEASETYPES`
:param project: the project of the shots
:type project: :class:`djadapter.models.Project`
:returns: None
:rtype: None
:raises: None | Below is the the instruction that describes the task:
### Input:
Update the shot browser to the given project
:param releasetype: the releasetype for the model
:type releasetype: :data:`djadapter.RELEASETYPES`
:param project: the project of the shots
:type project: :class:`djadapter.models.Project`
:returns: None
:rtype: None
:raises: None
### Response:
def update_shot_browser(self, project, releasetype):
    """Point the shot browser at the shots of the given project.

    :param releasetype: the releasetype for the model
    :type releasetype: :data:`djadapter.RELEASETYPES`
    :param project: the project of the shots, or ``None`` to clear
    :type project: :class:`djadapter.models.Project`
    :returns: None
    :rtype: None
    :raises: None
    """
    # Without a project there is nothing to show -> clear the model.
    if project is None:
        self.shotbrws.set_model(None)
        return
    self.shotbrws.set_model(self.create_shot_model(project, releasetype))
def get_all(self):
"""Returns all the timings, sorted in decreasing order.
Each value is a dict: { path: <path>, timing: <timing in seconds> }
"""
return [{'label': x[0], 'timing': x[1], 'is_tool': x[0] in self._tool_labels}
for x in sorted(self._timings_by_path.items(), key=lambda x: x[1], reverse=True)] | Returns all the timings, sorted in decreasing order.
Each value is a dict: { path: <path>, timing: <timing in seconds> } | Below is the the instruction that describes the task:
### Input:
Returns all the timings, sorted in decreasing order.
Each value is a dict: { path: <path>, timing: <timing in seconds> }
### Response:
def get_all(self):
    """Return every timing as a dict, sorted by timing in decreasing order.

    Each entry has keys ``label``, ``timing`` (seconds) and ``is_tool``
    (whether the label belongs to the known tool labels).
    """
    ordered = sorted(self._timings_by_path.items(),
                     key=lambda item: item[1], reverse=True)
    return [
        {'label': label,
         'timing': timing,
         'is_tool': label in self._tool_labels}
        for label, timing in ordered
    ]
def _mark_image_file_deleted(cls, mapper, connection, target):
"""When the session flushes, marks images as deleted.
The files of this marked images will be actually deleted
in the image storage when the ongoing transaction succeeds.
If it fails the :attr:`_deleted_images` queue will be just
empty.
"""
cls._deleted_images.add((target, get_current_store())) | When the session flushes, marks images as deleted.
The files of this marked images will be actually deleted
in the image storage when the ongoing transaction succeeds.
If it fails the :attr:`_deleted_images` queue will be just
empty. | Below is the the instruction that describes the task:
### Input:
When the session flushes, marks images as deleted.
The files of this marked images will be actually deleted
in the image storage when the ongoing transaction succeeds.
If it fails the :attr:`_deleted_images` queue will be just
empty.
### Response:
def _mark_image_file_deleted(cls, mapper, connection, target):
    """When the session flushes, marks images as deleted.

    The files of this marked images will be actually deleted
    in the image storage when the ongoing transaction succeeds.
    If it fails the :attr:`_deleted_images` queue will be just
    empty.
    """
    # Queue the image together with the store that holds its file, so the
    # post-commit hook knows which backend to delete from. ``mapper`` and
    # ``connection`` are required by the SQLAlchemy event signature but
    # are unused here.
    cls._deleted_images.add((target, get_current_store()))
def get_result_as_array(self):
"""
Returns the float values converted into strings using
the parameters given at initialisation, as a numpy array
"""
if self.formatter is not None:
return np.array([self.formatter(x) for x in self.values])
if self.fixed_width:
threshold = get_option("display.chop_threshold")
else:
threshold = None
# if we have a fixed_width, we'll need to try different float_format
def format_values_with(float_format):
formatter = self._value_formatter(float_format, threshold)
# default formatter leaves a space to the left when formatting
# floats, must be consistent for left-justifying NaNs (GH #25061)
if self.justify == 'left':
na_rep = ' ' + self.na_rep
else:
na_rep = self.na_rep
# separate the wheat from the chaff
values = self.values
is_complex = is_complex_dtype(values)
mask = isna(values)
if hasattr(values, 'to_dense'): # sparse numpy ndarray
values = values.to_dense()
values = np.array(values, dtype='object')
values[mask] = na_rep
imask = (~mask).ravel()
values.flat[imask] = np.array([formatter(val)
for val in values.ravel()[imask]])
if self.fixed_width:
if is_complex:
return _trim_zeros_complex(values, na_rep)
else:
return _trim_zeros_float(values, na_rep)
return values
# There is a special default string when we are fixed-width
# The default is otherwise to use str instead of a formatting string
if self.float_format is None:
if self.fixed_width:
float_format = partial('{value: .{digits:d}f}'.format,
digits=self.digits)
else:
float_format = self.float_format
else:
float_format = lambda value: self.float_format % value
formatted_values = format_values_with(float_format)
if not self.fixed_width:
return formatted_values
# we need do convert to engineering format if some values are too small
# and would appear as 0, or if some values are too big and take too
# much space
if len(formatted_values) > 0:
maxlen = max(len(x) for x in formatted_values)
too_long = maxlen > self.digits + 6
else:
too_long = False
with np.errstate(invalid='ignore'):
abs_vals = np.abs(self.values)
# this is pretty arbitrary for now
# large values: more that 8 characters including decimal symbol
# and first digit, hence > 1e6
has_large_values = (abs_vals > 1e6).any()
has_small_values = ((abs_vals < 10**(-self.digits)) &
(abs_vals > 0)).any()
if has_small_values or (too_long and has_large_values):
float_format = partial('{value: .{digits:d}e}'.format,
digits=self.digits)
formatted_values = format_values_with(float_format)
return formatted_values | Returns the float values converted into strings using
the parameters given at initialisation, as a numpy array | Below is the the instruction that describes the task:
### Input:
Returns the float values converted into strings using
the parameters given at initialisation, as a numpy array
### Response:
def get_result_as_array(self):
    """
    Returns the float values converted into strings using
    the parameters given at initialisation, as a numpy array
    """
    if self.formatter is not None:
        # An explicit user-supplied formatter overrides everything else.
        return np.array([self.formatter(x) for x in self.values])

    if self.fixed_width:
        threshold = get_option("display.chop_threshold")
    else:
        threshold = None

    # if we have a fixed_width, we'll need to try different float_format
    def format_values_with(float_format):
        formatter = self._value_formatter(float_format, threshold)

        # default formatter leaves a space to the left when formatting
        # floats, must be consistent for left-justifying NaNs (GH #25061)
        if self.justify == 'left':
            na_rep = ' ' + self.na_rep
        else:
            na_rep = self.na_rep

        # separate the wheat from the chaff
        values = self.values
        is_complex = is_complex_dtype(values)
        mask = isna(values)
        if hasattr(values, 'to_dense'):  # sparse numpy ndarray
            values = values.to_dense()
        values = np.array(values, dtype='object')
        values[mask] = na_rep
        imask = (~mask).ravel()
        # Format only the non-NA positions; NA slots keep na_rep.
        values.flat[imask] = np.array([formatter(val)
                                       for val in values.ravel()[imask]])

        if self.fixed_width:
            if is_complex:
                return _trim_zeros_complex(values, na_rep)
            else:
                return _trim_zeros_float(values, na_rep)

        return values

    # There is a special default string when we are fixed-width
    # The default is otherwise to use str instead of a formatting string
    if self.float_format is None:
        if self.fixed_width:
            float_format = partial('{value: .{digits:d}f}'.format,
                                   digits=self.digits)
        else:
            float_format = self.float_format
    else:
        float_format = lambda value: self.float_format % value

    formatted_values = format_values_with(float_format)

    if not self.fixed_width:
        return formatted_values

    # we need do convert to engineering format if some values are too small
    # and would appear as 0, or if some values are too big and take too
    # much space
    if len(formatted_values) > 0:
        maxlen = max(len(x) for x in formatted_values)
        too_long = maxlen > self.digits + 6
    else:
        too_long = False

    with np.errstate(invalid='ignore'):
        abs_vals = np.abs(self.values)
        # this is pretty arbitrary for now
        # large values: more that 8 characters including decimal symbol
        # and first digit, hence > 1e6
        has_large_values = (abs_vals > 1e6).any()
        has_small_values = ((abs_vals < 10**(-self.digits)) &
                            (abs_vals > 0)).any()

        if has_small_values or (too_long and has_large_values):
            # Re-render everything in scientific notation for consistency.
            float_format = partial('{value: .{digits:d}e}'.format,
                                   digits=self.digits)
            formatted_values = format_values_with(float_format)

    return formatted_values
def read_prov(self, document_id=None):
"""
Load the provenance of this document
.. note::
This method is called automatically if needed when the :py:meth:`prov` property is accessed. Manual use of
this method is unusual.
:param document_id: (optional) set the document id if this is an :py:meth:`abstract` document
:return: :py:class:`prov.model.ProvDocument
"""
if document_id:
if not self.abstract:
raise ImmutableDocumentException()
self._id = document_id
if self.abstract:
raise AbstractDocumentException()
self._prov = self._api.get_document_prov(self.id)
return self._prov | Load the provenance of this document
.. note::
This method is called automatically if needed when the :py:meth:`prov` property is accessed. Manual use of
this method is unusual.
:param document_id: (optional) set the document id if this is an :py:meth:`abstract` document
:return: :py:class:`prov.model.ProvDocument | Below is the the instruction that describes the task:
### Input:
Load the provenance of this document
.. note::
This method is called automatically if needed when the :py:meth:`prov` property is accessed. Manual use of
this method is unusual.
:param document_id: (optional) set the document id if this is an :py:meth:`abstract` document
:return: :py:class:`prov.model.ProvDocument
### Response:
def read_prov(self, document_id=None):
    """
    Load the provenance of this document

    .. note::
        This method is called automatically if needed when the :py:meth:`prov` property is accessed. Manual use of
        this method is unusual.

    :param document_id: (optional) set the document id if this is an :py:meth:`abstract` document
    :return: :py:class:`prov.model.ProvDocument`
    """
    if document_id:
        # A document id may only be assigned while the document is still
        # abstract; concrete documents are immutable.
        if not self.abstract:
            raise ImmutableDocumentException()
        self._id = document_id
    if self.abstract:
        # Provenance can only be fetched for a document with a known id.
        raise AbstractDocumentException()
    # Fetch from the API and cache on the instance.
    self._prov = self._api.get_document_prov(self.id)
    return self._prov
def earwax(self):
'''Makes audio easier to listen to on headphones. Adds ‘cues’ to 44.1kHz
stereo audio so that when listened to on headphones the stereo image is
moved from inside your head (standard for headphones) to outside and in
front of the listener (standard for speakers).
Warning: Will only work properly on 44.1kHz stereo audio!
'''
effect_args = ['earwax']
self.effects.extend(effect_args)
self.effects_log.append('earwax')
return self | Makes audio easier to listen to on headphones. Adds ‘cues’ to 44.1kHz
stereo audio so that when listened to on headphones the stereo image is
moved from inside your head (standard for headphones) to outside and in
front of the listener (standard for speakers).
Warning: Will only work properly on 44.1kHz stereo audio! | Below is the the instruction that describes the task:
### Input:
Makes audio easier to listen to on headphones. Adds ‘cues’ to 44.1kHz
stereo audio so that when listened to on headphones the stereo image is
moved from inside your head (standard for headphones) to outside and in
front of the listener (standard for speakers).
Warning: Will only work properly on 44.1kHz stereo audio!
### Response:
def earwax(self):
    """Apply SoX's ``earwax`` effect for headphone listening.

    Adds 'cues' to 44.1kHz stereo audio so that, when listened to on
    headphones, the stereo image moves from inside the head (standard
    for headphones) to outside and in front of the listener (standard
    for speakers).

    Warning: Will only work properly on 44.1kHz stereo audio!
    """
    self.effects.append('earwax')
    self.effects_log.append('earwax')
    return self
def binned_entropy(x, max_bins):
"""
First bins the values of x into max_bins equidistant bins.
Then calculates the value of
.. math::
- \\sum_{k=0}^{min(max\\_bins, len(x))} p_k log(p_k) \\cdot \\mathbf{1}_{(p_k > 0)}
where :math:`p_k` is the percentage of samples in bin :math:`k`.
:param x: the time series to calculate the feature of
:type x: numpy.ndarray
:param max_bins: the maximal number of bins
:type max_bins: int
:return: the value of this feature
:return type: float
"""
if not isinstance(x, (np.ndarray, pd.Series)):
x = np.asarray(x)
hist, bin_edges = np.histogram(x, bins=max_bins)
probs = hist / x.size
return - np.sum(p * np.math.log(p) for p in probs if p != 0) | First bins the values of x into max_bins equidistant bins.
Then calculates the value of
.. math::
- \\sum_{k=0}^{min(max\\_bins, len(x))} p_k log(p_k) \\cdot \\mathbf{1}_{(p_k > 0)}
where :math:`p_k` is the percentage of samples in bin :math:`k`.
:param x: the time series to calculate the feature of
:type x: numpy.ndarray
:param max_bins: the maximal number of bins
:type max_bins: int
:return: the value of this feature
:return type: float | Below is the the instruction that describes the task:
### Input:
First bins the values of x into max_bins equidistant bins.
Then calculates the value of
.. math::
- \\sum_{k=0}^{min(max\\_bins, len(x))} p_k log(p_k) \\cdot \\mathbf{1}_{(p_k > 0)}
where :math:`p_k` is the percentage of samples in bin :math:`k`.
:param x: the time series to calculate the feature of
:type x: numpy.ndarray
:param max_bins: the maximal number of bins
:type max_bins: int
:return: the value of this feature
:return type: float
### Response:
def binned_entropy(x, max_bins):
    """
    First bins the values of x into max_bins equidistant bins.
    Then calculates the value of

    .. math::

        - \\sum_{k=0}^{min(max\\_bins, len(x))} p_k log(p_k) \\cdot \\mathbf{1}_{(p_k > 0)}

    where :math:`p_k` is the percentage of samples in bin :math:`k`.

    :param x: the time series to calculate the feature of
    :type x: numpy.ndarray
    :param max_bins: the maximal number of bins
    :type max_bins: int
    :return: the value of this feature
    :return type: float
    """
    if not isinstance(x, (np.ndarray, pd.Series)):
        x = np.asarray(x)
    hist, bin_edges = np.histogram(x, bins=max_bins)
    probs = hist / x.size
    # Drop empty bins before taking the log. The vectorized expression
    # replaces the original np.sum(<generator>) / np.math.log usage, both
    # of which are deprecated (and removed in recent NumPy releases).
    probs = probs[probs > 0]
    return -np.sum(probs * np.log(probs))
def delete_device_subscriptions(self, device_id):
"""Removes a device's subscriptions
:param device_id: ID of the device (Required)
:returns: None
"""
api = self._get_api(mds.SubscriptionsApi)
return api.delete_endpoint_subscriptions(device_id) | Removes a device's subscriptions
:param device_id: ID of the device (Required)
:returns: None | Below is the the instruction that describes the task:
### Input:
Removes a device's subscriptions
:param device_id: ID of the device (Required)
:returns: None
### Response:
def delete_device_subscriptions(self, device_id):
    """Removes a device's subscriptions

    :param device_id: ID of the device (Required)
    :returns: None
    """
    api = self._get_api(mds.SubscriptionsApi)
    # The underlying mds API still uses the older "endpoint" terminology
    # for what the public API calls a device.
    return api.delete_endpoint_subscriptions(device_id)
def status(self):
"""
Poll YubiKey for status.
"""
data = self._read()
self._status = YubiKeyUSBHIDStatus(data)
return self._status | Poll YubiKey for status. | Below is the the instruction that describes the task:
### Input:
Poll YubiKey for status.
### Response:
def status(self):
    """
    Poll YubiKey for status.

    Reads a status report over USB HID, parses it, and caches the
    result on the instance before returning it.

    :returns: the parsed status
    :rtype: YubiKeyUSBHIDStatus
    """
    data = self._read()
    self._status = YubiKeyUSBHIDStatus(data)
    return self._status
def segment_radial_dist(seg, pos):
'''Return the radial distance of a tree segment to a given point
The radial distance is the euclidian distance between the mid-point of
the segment and the point in question.
Parameters:
seg: tree segment
pos: origin to which distances are measured. It must have at lease 3
components. The first 3 components are (x, y, z).
'''
return point_dist(pos, np.divide(np.add(seg[0], seg[1]), 2.0)) | Return the radial distance of a tree segment to a given point
The radial distance is the euclidian distance between the mid-point of
the segment and the point in question.
Parameters:
seg: tree segment
pos: origin to which distances are measured. It must have at lease 3
components. The first 3 components are (x, y, z). | Below is the the instruction that describes the task:
### Input:
Return the radial distance of a tree segment to a given point
The radial distance is the euclidian distance between the mid-point of
the segment and the point in question.
Parameters:
seg: tree segment
pos: origin to which distances are measured. It must have at lease 3
components. The first 3 components are (x, y, z).
### Response:
def segment_radial_dist(seg, pos):
    '''Return the radial distance of a tree segment to a given point

    The radial distance is the euclidian distance between the mid-point of
    the segment and the point in question.

    Parameters:
        seg: tree segment
        pos: origin to which distances are measured. It must have at lease 3
            components. The first 3 components are (x, y, z).
    '''
    midpoint = np.divide(np.add(seg[0], seg[1]), 2.0)
    return point_dist(pos, midpoint)
def get_calling_namespaces():
"""Return the locals and globals for the function that called
into this module in the current call stack."""
try: 1//0
except ZeroDivisionError:
# Don't start iterating with the current stack-frame to
# prevent creating reference cycles (f_back is safe).
frame = sys.exc_info()[2].tb_frame.f_back
# Find the first frame that *isn't* from this file. This means
# that we expect all of the SCons frames that implement an Export()
# or SConscript() call to be in this file, so that we can identify
# the first non-Script.SConscript frame as the user's local calling
# environment, and the locals and globals dictionaries from that
# frame as the calling namespaces. See the comment below preceding
# the DefaultEnvironmentCall block for even more explanation.
while frame.f_globals.get("__name__") == __name__:
frame = frame.f_back
return frame.f_locals, frame.f_globals | Return the locals and globals for the function that called
into this module in the current call stack. | Below is the the instruction that describes the task:
### Input:
Return the locals and globals for the function that called
into this module in the current call stack.
### Response:
def get_calling_namespaces():
    """Return the locals and globals for the function that called
    into this module in the current call stack."""
    # Deliberately raise so sys.exc_info() hands us a traceback whose
    # frame points at this function, without calling sys._getframe().
    try: 1//0
    except ZeroDivisionError:
        # Don't start iterating with the current stack-frame to
        # prevent creating reference cycles (f_back is safe).
        frame = sys.exc_info()[2].tb_frame.f_back

    # Find the first frame that *isn't* from this file. This means
    # that we expect all of the SCons frames that implement an Export()
    # or SConscript() call to be in this file, so that we can identify
    # the first non-Script.SConscript frame as the user's local calling
    # environment, and the locals and globals dictionaries from that
    # frame as the calling namespaces. See the comment below preceding
    # the DefaultEnvironmentCall block for even more explanation.
    while frame.f_globals.get("__name__") == __name__:
        frame = frame.f_back

    return frame.f_locals, frame.f_globals
def list(gandi, domain, zone_id, output, format, limit):
"""List DNS zone records for a domain."""
options = {
'items_per_page': limit,
}
output_keys = ['name', 'type', 'value', 'ttl']
if not zone_id:
result = gandi.domain.info(domain)
zone_id = result['zone_id']
if not zone_id:
gandi.echo('No zone records found, domain %s doesn\'t seems to be '
'managed at Gandi.' % domain)
return
records = gandi.record.list(zone_id, options)
if not output and not format:
for num, rec in enumerate(records):
if num:
gandi.separator_line()
output_generic(gandi, rec, output_keys, justify=12)
elif output:
zone_filename = domain + "_" + str(zone_id)
if os.path.isfile(zone_filename):
open(zone_filename, 'w').close()
for record in records:
format_record = ('%s %s IN %s %s' %
(record['name'], record['ttl'],
record['type'], record['value']))
with open(zone_filename, 'ab') as zone_file:
zone_file.write(format_record + '\n')
gandi.echo('Your zone file have been writen in %s' % zone_filename)
elif format:
if format == 'text':
for record in records:
format_record = ('%s %s IN %s %s' %
(record['name'], record['ttl'],
record['type'], record['value']))
gandi.echo(format_record)
if format == 'json':
format_record = json.dumps(records, sort_keys=True,
indent=4, separators=(',', ': '))
gandi.echo(format_record)
return records | List DNS zone records for a domain. | Below is the the instruction that describes the task:
### Input:
List DNS zone records for a domain.
### Response:
def list(gandi, domain, zone_id, output, format, limit):
    """List DNS zone records for a domain."""
    options = {
        'items_per_page': limit,
    }
    output_keys = ['name', 'type', 'value', 'ttl']
    # Resolve the zone id from the domain when not supplied explicitly.
    if not zone_id:
        result = gandi.domain.info(domain)
        zone_id = result['zone_id']
    if not zone_id:
        gandi.echo('No zone records found, domain %s doesn\'t seems to be '
                   'managed at Gandi.' % domain)
        return
    records = gandi.record.list(zone_id, options)
    if not output and not format:
        # Default: pretty-print each record, separated by a rule line.
        for num, rec in enumerate(records):
            if num:
                gandi.separator_line()
            output_generic(gandi, rec, output_keys, justify=12)
    elif output:
        # Dump the records to a zone file named <domain>_<zone_id>.
        zone_filename = domain + "_" + str(zone_id)
        if os.path.isfile(zone_filename):
            # Truncate any pre-existing file before appending below.
            open(zone_filename, 'w').close()
        for record in records:
            format_record = ('%s %s IN %s %s' %
                             (record['name'], record['ttl'],
                              record['type'], record['value']))
            # NOTE(review): writing a str to a file opened in binary append
            # mode ('ab') only works on Python 2 -- confirm target runtime.
            # The file is also reopened once per record; hoisting open()
            # out of the loop would be cheaper.
            with open(zone_filename, 'ab') as zone_file:
                zone_file.write(format_record + '\n')
        gandi.echo('Your zone file have been writen in %s' % zone_filename)
    elif format:
        if format == 'text':
            for record in records:
                format_record = ('%s %s IN %s %s' %
                                 (record['name'], record['ttl'],
                                  record['type'], record['value']))
                gandi.echo(format_record)
        if format == 'json':
            format_record = json.dumps(records, sort_keys=True,
                                       indent=4, separators=(',', ': '))
            gandi.echo(format_record)
    return records
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.