code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def _load_version(cls, unpickler, version):
"""
An function to load an object with a specific version of the class.
Parameters
----------
pickler : file
A GLUnpickler file handle.
version : int
A version number as maintained by the class writer.
"""
obj = unpickler.load()
return TransformerChain(obj._state["steps"]) | An function to load an object with a specific version of the class.
Parameters
----------
pickler : file
A GLUnpickler file handle.
version : int
A version number as maintained by the class writer. | Below is the the instruction that describes the task:
### Input:
An function to load an object with a specific version of the class.
Parameters
----------
pickler : file
A GLUnpickler file handle.
version : int
A version number as maintained by the class writer.
### Response:
def _load_version(cls, unpickler, version):
"""
An function to load an object with a specific version of the class.
Parameters
----------
pickler : file
A GLUnpickler file handle.
version : int
A version number as maintained by the class writer.
"""
obj = unpickler.load()
return TransformerChain(obj._state["steps"]) |
def del_param(self, param, header='content-type', requote=True):
"""Remove the given parameter completely from the Content-Type header.
The header will be re-written in place without the parameter or its
value. All values will be quoted as necessary unless requote is
False. Optional header specifies an alternative to the Content-Type
header.
"""
if header not in self:
return
new_ctype = ''
for p, v in self.get_params(header=header, unquote=requote):
if p.lower() != param.lower():
if not new_ctype:
new_ctype = _formatparam(p, v, requote)
else:
new_ctype = SEMISPACE.join([new_ctype,
_formatparam(p, v, requote)])
if new_ctype != self.get(header):
del self[header]
self[header] = new_ctype | Remove the given parameter completely from the Content-Type header.
The header will be re-written in place without the parameter or its
value. All values will be quoted as necessary unless requote is
False. Optional header specifies an alternative to the Content-Type
header. | Below is the the instruction that describes the task:
### Input:
Remove the given parameter completely from the Content-Type header.
The header will be re-written in place without the parameter or its
value. All values will be quoted as necessary unless requote is
False. Optional header specifies an alternative to the Content-Type
header.
### Response:
def del_param(self, param, header='content-type', requote=True):
"""Remove the given parameter completely from the Content-Type header.
The header will be re-written in place without the parameter or its
value. All values will be quoted as necessary unless requote is
False. Optional header specifies an alternative to the Content-Type
header.
"""
if header not in self:
return
new_ctype = ''
for p, v in self.get_params(header=header, unquote=requote):
if p.lower() != param.lower():
if not new_ctype:
new_ctype = _formatparam(p, v, requote)
else:
new_ctype = SEMISPACE.join([new_ctype,
_formatparam(p, v, requote)])
if new_ctype != self.get(header):
del self[header]
self[header] = new_ctype |
def update_container(self, container, metadata, **kwargs):
"""Update container metadata
:param container: container name (Container is equivalent to
Bucket term in Amazon).
:param metadata(dict): additional metadata to include in the request.
:param **kwargs(dict): extend args for specific driver.
"""
LOG.debug('update_object() with %s is success.', self.driver)
return self.driver.update_container(container, metadata, **kwargs) | Update container metadata
:param container: container name (Container is equivalent to
Bucket term in Amazon).
:param metadata(dict): additional metadata to include in the request.
:param **kwargs(dict): extend args for specific driver. | Below is the the instruction that describes the task:
### Input:
Update container metadata
:param container: container name (Container is equivalent to
Bucket term in Amazon).
:param metadata(dict): additional metadata to include in the request.
:param **kwargs(dict): extend args for specific driver.
### Response:
def update_container(self, container, metadata, **kwargs):
"""Update container metadata
:param container: container name (Container is equivalent to
Bucket term in Amazon).
:param metadata(dict): additional metadata to include in the request.
:param **kwargs(dict): extend args for specific driver.
"""
LOG.debug('update_object() with %s is success.', self.driver)
return self.driver.update_container(container, metadata, **kwargs) |
def add_rows(self, list_of_rows):
"""Add a list of rows to the DataFrameCache class"""
for row in list_of_rows:
self.row_deque.append(row)
self.time_deque.append(time.time())
# Update the data structure
self.update() | Add a list of rows to the DataFrameCache class | Below is the the instruction that describes the task:
### Input:
Add a list of rows to the DataFrameCache class
### Response:
def add_rows(self, list_of_rows):
"""Add a list of rows to the DataFrameCache class"""
for row in list_of_rows:
self.row_deque.append(row)
self.time_deque.append(time.time())
# Update the data structure
self.update() |
def getBitmap(self):
""" Captures screen area of this region, at least the part that is on the screen
Returns image as numpy array
"""
return PlatformManager.getBitmapFromRect(self.x, self.y, self.w, self.h) | Captures screen area of this region, at least the part that is on the screen
Returns image as numpy array | Below is the the instruction that describes the task:
### Input:
Captures screen area of this region, at least the part that is on the screen
Returns image as numpy array
### Response:
def getBitmap(self):
""" Captures screen area of this region, at least the part that is on the screen
Returns image as numpy array
"""
return PlatformManager.getBitmapFromRect(self.x, self.y, self.w, self.h) |
def eoq(I,F,h,d,w,W,a0,aK,K):
"""eoq -- multi-item capacitated economic ordering quantity model
Parameters:
- I: set of items
- F[i]: ordering cost for item i
- h[i]: holding cost for item i
- d[i]: demand for item i
- w[i]: unit weight for item i
- W: capacity (limit on order quantity)
- a0: lower bound on the cycle time (x axis)
- aK: upper bound on the cycle time (x axis)
- K: number of linear pieces to use in the approximation
Returns a model, ready to be solved.
"""
# construct points for piecewise-linear relation, store in a,b
a,b = {},{}
delta = float(aK-a0)/K
for i in I:
for k in range(K):
T = a0 + delta*k
a[i,k] = T # abscissa: cycle time
b[i,k] = F[i]/T + h[i]*d[i]*T/2. # ordinate: (convex) cost for this cycle time
model = Model("multi-item, capacitated EOQ")
x,c,w_ = {},{},{}
for i in I:
x[i] = model.addVar(vtype="C", name="x(%s)"%i) # cycle time for item i
c[i] = model.addVar(vtype="C", name="c(%s)"%i) # total cost for item i
for k in range(K):
w_[i,k] = model.addVar(ub=1, vtype="C", name="w(%s,%s)"%(i,k)) #todo ??
for i in I:
model.addCons(quicksum(w_[i,k] for k in range(K)) == 1)
model.addCons(quicksum(a[i,k]*w_[i,k] for k in range(K)) == x[i])
model.addCons(quicksum(b[i,k]*w_[i,k] for k in range(K)) == c[i])
model.addCons(quicksum(w[i]*d[i]*x[i] for i in I) <= W)
model.setObjective(quicksum(c[i] for i in I), "minimize")
model.data = x,w
return model | eoq -- multi-item capacitated economic ordering quantity model
Parameters:
- I: set of items
- F[i]: ordering cost for item i
- h[i]: holding cost for item i
- d[i]: demand for item i
- w[i]: unit weight for item i
- W: capacity (limit on order quantity)
- a0: lower bound on the cycle time (x axis)
- aK: upper bound on the cycle time (x axis)
- K: number of linear pieces to use in the approximation
Returns a model, ready to be solved. | Below is the the instruction that describes the task:
### Input:
eoq -- multi-item capacitated economic ordering quantity model
Parameters:
- I: set of items
- F[i]: ordering cost for item i
- h[i]: holding cost for item i
- d[i]: demand for item i
- w[i]: unit weight for item i
- W: capacity (limit on order quantity)
- a0: lower bound on the cycle time (x axis)
- aK: upper bound on the cycle time (x axis)
- K: number of linear pieces to use in the approximation
Returns a model, ready to be solved.
### Response:
def eoq(I,F,h,d,w,W,a0,aK,K):
"""eoq -- multi-item capacitated economic ordering quantity model
Parameters:
- I: set of items
- F[i]: ordering cost for item i
- h[i]: holding cost for item i
- d[i]: demand for item i
- w[i]: unit weight for item i
- W: capacity (limit on order quantity)
- a0: lower bound on the cycle time (x axis)
- aK: upper bound on the cycle time (x axis)
- K: number of linear pieces to use in the approximation
Returns a model, ready to be solved.
"""
# construct points for piecewise-linear relation, store in a,b
a,b = {},{}
delta = float(aK-a0)/K
for i in I:
for k in range(K):
T = a0 + delta*k
a[i,k] = T # abscissa: cycle time
b[i,k] = F[i]/T + h[i]*d[i]*T/2. # ordinate: (convex) cost for this cycle time
model = Model("multi-item, capacitated EOQ")
x,c,w_ = {},{},{}
for i in I:
x[i] = model.addVar(vtype="C", name="x(%s)"%i) # cycle time for item i
c[i] = model.addVar(vtype="C", name="c(%s)"%i) # total cost for item i
for k in range(K):
w_[i,k] = model.addVar(ub=1, vtype="C", name="w(%s,%s)"%(i,k)) #todo ??
for i in I:
model.addCons(quicksum(w_[i,k] for k in range(K)) == 1)
model.addCons(quicksum(a[i,k]*w_[i,k] for k in range(K)) == x[i])
model.addCons(quicksum(b[i,k]*w_[i,k] for k in range(K)) == c[i])
model.addCons(quicksum(w[i]*d[i]*x[i] for i in I) <= W)
model.setObjective(quicksum(c[i] for i in I), "minimize")
model.data = x,w
return model |
def create_user_task(sender=None, body=None, **kwargs): # pylint: disable=unused-argument
"""
Create a :py:class:`UserTaskStatus` record for each :py:class:`UserTaskMixin`.
Also creates a :py:class:`UserTaskStatus` for each chain, chord, or group containing
the new :py:class:`UserTaskMixin`.
"""
try:
task_class = import_string(sender)
except ImportError:
return
if issubclass(task_class.__class__, UserTaskMixin):
arguments_dict = task_class.arguments_as_dict(*body['args'], **body['kwargs'])
user_id = _get_user_id(arguments_dict)
task_id = body['id']
if body.get('callbacks', []):
return _create_chain_entry(user_id, task_id, task_class, body['args'], body['kwargs'], body['callbacks'])
if body.get('chord', None):
return _create_chord_entry(task_id, task_class, body, user_id)
parent = _get_or_create_group_parent(body, user_id)
name = task_class.generate_name(arguments_dict)
total_steps = task_class.calculate_total_steps(arguments_dict)
UserTaskStatus.objects.get_or_create(
task_id=task_id, defaults={'user_id': user_id, 'parent': parent, 'name': name, 'task_class': sender,
'total_steps': total_steps})
if parent:
parent.increment_total_steps(total_steps) | Create a :py:class:`UserTaskStatus` record for each :py:class:`UserTaskMixin`.
Also creates a :py:class:`UserTaskStatus` for each chain, chord, or group containing
the new :py:class:`UserTaskMixin`. | Below is the the instruction that describes the task:
### Input:
Create a :py:class:`UserTaskStatus` record for each :py:class:`UserTaskMixin`.
Also creates a :py:class:`UserTaskStatus` for each chain, chord, or group containing
the new :py:class:`UserTaskMixin`.
### Response:
def create_user_task(sender=None, body=None, **kwargs): # pylint: disable=unused-argument
"""
Create a :py:class:`UserTaskStatus` record for each :py:class:`UserTaskMixin`.
Also creates a :py:class:`UserTaskStatus` for each chain, chord, or group containing
the new :py:class:`UserTaskMixin`.
"""
try:
task_class = import_string(sender)
except ImportError:
return
if issubclass(task_class.__class__, UserTaskMixin):
arguments_dict = task_class.arguments_as_dict(*body['args'], **body['kwargs'])
user_id = _get_user_id(arguments_dict)
task_id = body['id']
if body.get('callbacks', []):
return _create_chain_entry(user_id, task_id, task_class, body['args'], body['kwargs'], body['callbacks'])
if body.get('chord', None):
return _create_chord_entry(task_id, task_class, body, user_id)
parent = _get_or_create_group_parent(body, user_id)
name = task_class.generate_name(arguments_dict)
total_steps = task_class.calculate_total_steps(arguments_dict)
UserTaskStatus.objects.get_or_create(
task_id=task_id, defaults={'user_id': user_id, 'parent': parent, 'name': name, 'task_class': sender,
'total_steps': total_steps})
if parent:
parent.increment_total_steps(total_steps) |
def display_weyl(decomps):
"""Construct and display 3D plot of canonical coordinates"""
tx, ty, tz = list(zip(*decomps))
rcParams['axes.labelsize'] = 24
rcParams['font.family'] = 'serif'
rcParams['font.serif'] = ['Computer Modern Roman']
rcParams['text.usetex'] = True
fig = pyplot.figure()
ax = Axes3D(fig)
ax.scatter(tx, ty, tz)
ax.plot((1,), (1,), (1,))
ax.plot((0, 1, 1/2, 0, 1/2, 1, 1/2, 1/2),
(0, 0, 1/2, 0, 1/2, 0, 1/2, 1/2),
(0, 0, 0, 0, 1/2, 0, 0, 1/2))
ax.plot((0, 1/2, 1, 1/2, 1/2),
(0, 1/4, 0, 1/4, 1/2),
(0, 1/4, 0, 1/4, 0))
points = [(0, 0, 0), (1/4, 0, 0), (1/2, 0, 0), (3/4, 0, 0), (1, 0, 0),
(1/4, 1/4, 0), (1/2, 1/4, 0), (3/4, 1/4, 0), (1/2, 1/2, 0),
(1/4, 1/4, 1/4), (1/2, 1/4, 1/4), (3/4, 1/4, 1/4),
(1/2, 1/2, 1/4), (1/2, 1/2, 1/2)]
ax.scatter(*zip(*points))
eps = 0.04
ax.text(0, 0, 0-2*eps, 'I', ha='center')
ax.text(1, 0, 0-2*eps, 'I', ha='center')
ax.text(1/2, 1/2, 0-2*eps, 'iSWAP', ha='center')
ax.text(1/2, 1/2, 1/2+eps, 'SWAP', ha='center')
ax.text(1/2, 0, 0-2*eps, 'CNOT', ha='center')
# More coordinate labels
# ax.text(1/4-eps, 1/4, 1/4, '$\sqrt{SWAP}$', ha='right')
# ax.text(3/4+eps, 1/4, 1/4, '$\sqrt{SWAP}^\dagger$', ha='left')
# ax.text(1/4, 0, 0-2*eps, '$\sqrt{{CNOT}}$', ha='center')
# ax.text(3/4, 0, 0-2*eps, '$\sqrt{{CNOT}}$', ha='center')
# ax.text(1/2, 1/4, 0-2*eps, 'B', ha='center')
# ax.text(1/2, 1/4, 1/4+eps, 'ECP', ha='center')
# ax.text(1/4, 1/4, 0-2*eps, '$\sqrt{iSWAP}$', ha='center')
# ax.text(3/4, 1/4, 0-2*eps, '$\sqrt{iSWAP}$', ha='center')
# ax.text(1/2, 1/2+eps, 1/4, 'PSWAP(1/2)', ha='left')
ax.set_xlim(0, 1)
ax.set_ylim(-1/4, 3/4)
ax.set_zlim(-1/4, 3/4)
# Get rid of the panes
ax.w_xaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
ax.w_yaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
ax.w_zaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
# Get rid of the spines
ax.w_xaxis.line.set_color((1.0, 1.0, 1.0, 0.0))
ax.w_yaxis.line.set_color((1.0, 1.0, 1.0, 0.0))
ax.w_zaxis.line.set_color((1.0, 1.0, 1.0, 0.0))
# Get rid of the ticks
ax.set_xticks([])
ax.set_yticks([])
ax.set_zticks([])
pyplot.show() | Construct and display 3D plot of canonical coordinates | Below is the the instruction that describes the task:
### Input:
Construct and display 3D plot of canonical coordinates
### Response:
def display_weyl(decomps):
"""Construct and display 3D plot of canonical coordinates"""
tx, ty, tz = list(zip(*decomps))
rcParams['axes.labelsize'] = 24
rcParams['font.family'] = 'serif'
rcParams['font.serif'] = ['Computer Modern Roman']
rcParams['text.usetex'] = True
fig = pyplot.figure()
ax = Axes3D(fig)
ax.scatter(tx, ty, tz)
ax.plot((1,), (1,), (1,))
ax.plot((0, 1, 1/2, 0, 1/2, 1, 1/2, 1/2),
(0, 0, 1/2, 0, 1/2, 0, 1/2, 1/2),
(0, 0, 0, 0, 1/2, 0, 0, 1/2))
ax.plot((0, 1/2, 1, 1/2, 1/2),
(0, 1/4, 0, 1/4, 1/2),
(0, 1/4, 0, 1/4, 0))
points = [(0, 0, 0), (1/4, 0, 0), (1/2, 0, 0), (3/4, 0, 0), (1, 0, 0),
(1/4, 1/4, 0), (1/2, 1/4, 0), (3/4, 1/4, 0), (1/2, 1/2, 0),
(1/4, 1/4, 1/4), (1/2, 1/4, 1/4), (3/4, 1/4, 1/4),
(1/2, 1/2, 1/4), (1/2, 1/2, 1/2)]
ax.scatter(*zip(*points))
eps = 0.04
ax.text(0, 0, 0-2*eps, 'I', ha='center')
ax.text(1, 0, 0-2*eps, 'I', ha='center')
ax.text(1/2, 1/2, 0-2*eps, 'iSWAP', ha='center')
ax.text(1/2, 1/2, 1/2+eps, 'SWAP', ha='center')
ax.text(1/2, 0, 0-2*eps, 'CNOT', ha='center')
# More coordinate labels
# ax.text(1/4-eps, 1/4, 1/4, '$\sqrt{SWAP}$', ha='right')
# ax.text(3/4+eps, 1/4, 1/4, '$\sqrt{SWAP}^\dagger$', ha='left')
# ax.text(1/4, 0, 0-2*eps, '$\sqrt{{CNOT}}$', ha='center')
# ax.text(3/4, 0, 0-2*eps, '$\sqrt{{CNOT}}$', ha='center')
# ax.text(1/2, 1/4, 0-2*eps, 'B', ha='center')
# ax.text(1/2, 1/4, 1/4+eps, 'ECP', ha='center')
# ax.text(1/4, 1/4, 0-2*eps, '$\sqrt{iSWAP}$', ha='center')
# ax.text(3/4, 1/4, 0-2*eps, '$\sqrt{iSWAP}$', ha='center')
# ax.text(1/2, 1/2+eps, 1/4, 'PSWAP(1/2)', ha='left')
ax.set_xlim(0, 1)
ax.set_ylim(-1/4, 3/4)
ax.set_zlim(-1/4, 3/4)
# Get rid of the panes
ax.w_xaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
ax.w_yaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
ax.w_zaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
# Get rid of the spines
ax.w_xaxis.line.set_color((1.0, 1.0, 1.0, 0.0))
ax.w_yaxis.line.set_color((1.0, 1.0, 1.0, 0.0))
ax.w_zaxis.line.set_color((1.0, 1.0, 1.0, 0.0))
# Get rid of the ticks
ax.set_xticks([])
ax.set_yticks([])
ax.set_zticks([])
pyplot.show() |
def parse_arguments(argv):
"""Parse the command line arguments."""
parser = argparse.ArgumentParser(
description=('Train a regression or classification model. Note that if '
'using a DNN model, --layer-size1=NUM, --layer-size2=NUM, '
'should be used. '))
# I/O file parameters
parser.add_argument('--train-data-paths', type=str, action='append',
required=True)
parser.add_argument('--eval-data-paths', type=str, action='append',
required=True)
parser.add_argument('--job-dir', type=str, required=True)
parser.add_argument('--preprocess-output-dir',
type=str,
required=True,
help=('Output folder of preprocessing. Should contain the'
' schema file, and numerical stats and vocab files.'
' Path must be on GCS if running'
' cloud training.'))
parser.add_argument('--transforms-file',
type=str,
required=True,
help=('File describing the the transforms to apply on '
'each column'))
# HP parameters
parser.add_argument('--learning-rate', type=float, default=0.01,
help='tf.train.AdamOptimizer learning rate')
parser.add_argument('--epsilon', type=float, default=0.0005,
help='tf.train.AdamOptimizer epsilon')
# --layer_size See below
# Model problems
parser.add_argument('--model-type',
choices=['linear_classification', 'linear_regression',
'dnn_classification', 'dnn_regression'],
required=True)
parser.add_argument('--top-n',
type=int,
default=1,
help=('For classification problems, the output graph '
'will contain the labels and scores for the top '
'n classes.'))
# Training input parameters
parser.add_argument('--max-steps', type=int, default=5000,
help='Maximum number of training steps to perform.')
parser.add_argument('--num-epochs',
type=int,
help=('Maximum number of training data epochs on which '
'to train. If both --max-steps and --num-epochs '
'are specified, the training job will run for '
'--max-steps or --num-epochs, whichever occurs '
'first. If unspecified will run for --max-steps.'))
parser.add_argument('--train-batch-size', type=int, default=1000)
parser.add_argument('--eval-batch-size', type=int, default=1000)
parser.add_argument('--min-eval-frequency', type=int, default=100,
help=('Minimum number of training steps between '
'evaluations'))
# other parameters
parser.add_argument('--save-checkpoints-secs', type=int, default=600,
help=('How often the model should be checkpointed/saved '
'in seconds'))
args, remaining_args = parser.parse_known_args(args=argv[1:])
# All HP parambeters must be unique, so we need to support an unknown number
# of --layer_size1=10 --layer_size2=10 ...
# Look at remaining_args for layer_size\d+ to get the layer info.
# Get number of layers
pattern = re.compile('layer-size(\d+)')
num_layers = 0
for other_arg in remaining_args:
match = re.search(pattern, other_arg)
if match:
num_layers = max(num_layers, int(match.group(1)))
# Build a new parser so we catch unknown args and missing layer_sizes.
parser = argparse.ArgumentParser()
for i in range(num_layers):
parser.add_argument('--layer-size%s' % str(i + 1), type=int, required=True)
layer_args = vars(parser.parse_args(args=remaining_args))
layer_sizes = []
for i in range(num_layers):
key = 'layer_size%s' % str(i + 1)
layer_sizes.append(layer_args[key])
assert len(layer_sizes) == num_layers
args.layer_sizes = layer_sizes
return args | Parse the command line arguments. | Below is the the instruction that describes the task:
### Input:
Parse the command line arguments.
### Response:
def parse_arguments(argv):
"""Parse the command line arguments."""
parser = argparse.ArgumentParser(
description=('Train a regression or classification model. Note that if '
'using a DNN model, --layer-size1=NUM, --layer-size2=NUM, '
'should be used. '))
# I/O file parameters
parser.add_argument('--train-data-paths', type=str, action='append',
required=True)
parser.add_argument('--eval-data-paths', type=str, action='append',
required=True)
parser.add_argument('--job-dir', type=str, required=True)
parser.add_argument('--preprocess-output-dir',
type=str,
required=True,
help=('Output folder of preprocessing. Should contain the'
' schema file, and numerical stats and vocab files.'
' Path must be on GCS if running'
' cloud training.'))
parser.add_argument('--transforms-file',
type=str,
required=True,
help=('File describing the the transforms to apply on '
'each column'))
# HP parameters
parser.add_argument('--learning-rate', type=float, default=0.01,
help='tf.train.AdamOptimizer learning rate')
parser.add_argument('--epsilon', type=float, default=0.0005,
help='tf.train.AdamOptimizer epsilon')
# --layer_size See below
# Model problems
parser.add_argument('--model-type',
choices=['linear_classification', 'linear_regression',
'dnn_classification', 'dnn_regression'],
required=True)
parser.add_argument('--top-n',
type=int,
default=1,
help=('For classification problems, the output graph '
'will contain the labels and scores for the top '
'n classes.'))
# Training input parameters
parser.add_argument('--max-steps', type=int, default=5000,
help='Maximum number of training steps to perform.')
parser.add_argument('--num-epochs',
type=int,
help=('Maximum number of training data epochs on which '
'to train. If both --max-steps and --num-epochs '
'are specified, the training job will run for '
'--max-steps or --num-epochs, whichever occurs '
'first. If unspecified will run for --max-steps.'))
parser.add_argument('--train-batch-size', type=int, default=1000)
parser.add_argument('--eval-batch-size', type=int, default=1000)
parser.add_argument('--min-eval-frequency', type=int, default=100,
help=('Minimum number of training steps between '
'evaluations'))
# other parameters
parser.add_argument('--save-checkpoints-secs', type=int, default=600,
help=('How often the model should be checkpointed/saved '
'in seconds'))
args, remaining_args = parser.parse_known_args(args=argv[1:])
# All HP parambeters must be unique, so we need to support an unknown number
# of --layer_size1=10 --layer_size2=10 ...
# Look at remaining_args for layer_size\d+ to get the layer info.
# Get number of layers
pattern = re.compile('layer-size(\d+)')
num_layers = 0
for other_arg in remaining_args:
match = re.search(pattern, other_arg)
if match:
num_layers = max(num_layers, int(match.group(1)))
# Build a new parser so we catch unknown args and missing layer_sizes.
parser = argparse.ArgumentParser()
for i in range(num_layers):
parser.add_argument('--layer-size%s' % str(i + 1), type=int, required=True)
layer_args = vars(parser.parse_args(args=remaining_args))
layer_sizes = []
for i in range(num_layers):
key = 'layer_size%s' % str(i + 1)
layer_sizes.append(layer_args[key])
assert len(layer_sizes) == num_layers
args.layer_sizes = layer_sizes
return args |
def plot_phens_circles(phen_grid, **kwargs):
"""
Plots phenotypes represented as concentric circles. Each circle represents
one task that the phenotype can perform, with larger circles representing
more complex tasks.
Arguments: phen_grid - a 2D array of strings representing binary numbers
kwargs:
palette - a seaborn palette (list of RGB values) indicating
how to color values. Will be converted to a continuous
colormap if necessary
denom - the maximum value of numbers in the grid (only used
if the grid actually contains numbers). This is used
to normalize values and use the full dynamic range of
the color pallete.
TODO: come up with way to represent organisms that don't do any tasks.
"""
denom, palette = get_kwargs(phen_grid, kwargs, True)
n_tasks = len(palette)
grid = phen_grid
for i in range(len(grid)):
for j in range(len(grid[i])):
if grid[i][j] != -1 and int(grid[i][j], 2) != -1 and \
int(grid[i][j], 2) != 0:
first = True
b_ind = grid[i][j].find("b")
phen = grid[i][j][b_ind+1:]
for k in range(len(phen)):
if int(phen[k]) == 1:
plt.gca().add_patch(
plt.Circle(
(j, i), radius=(n_tasks - k)*.05,
lw=.1 if first else 0, ec="black",
facecolor=palette[k], zorder=2+k))
first = False
elif int(grid[i][j], 2) == 0:
plt.gca().add_patch(
plt.Circle(
(j, i), radius=(n_tasks)*.05,
lw=.1, ec="black",
facecolor="grey", zorder=2)) | Plots phenotypes represented as concentric circles. Each circle represents
one task that the phenotype can perform, with larger circles representing
more complex tasks.
Arguments: phen_grid - a 2D array of strings representing binary numbers
kwargs:
palette - a seaborn palette (list of RGB values) indicating
how to color values. Will be converted to a continuous
colormap if necessary
denom - the maximum value of numbers in the grid (only used
if the grid actually contains numbers). This is used
to normalize values and use the full dynamic range of
the color pallete.
TODO: come up with way to represent organisms that don't do any tasks. | Below is the the instruction that describes the task:
### Input:
Plots phenotypes represented as concentric circles. Each circle represents
one task that the phenotype can perform, with larger circles representing
more complex tasks.
Arguments: phen_grid - a 2D array of strings representing binary numbers
kwargs:
palette - a seaborn palette (list of RGB values) indicating
how to color values. Will be converted to a continuous
colormap if necessary
denom - the maximum value of numbers in the grid (only used
if the grid actually contains numbers). This is used
to normalize values and use the full dynamic range of
the color pallete.
TODO: come up with way to represent organisms that don't do any tasks.
### Response:
def plot_phens_circles(phen_grid, **kwargs):
"""
Plots phenotypes represented as concentric circles. Each circle represents
one task that the phenotype can perform, with larger circles representing
more complex tasks.
Arguments: phen_grid - a 2D array of strings representing binary numbers
kwargs:
palette - a seaborn palette (list of RGB values) indicating
how to color values. Will be converted to a continuous
colormap if necessary
denom - the maximum value of numbers in the grid (only used
if the grid actually contains numbers). This is used
to normalize values and use the full dynamic range of
the color pallete.
TODO: come up with way to represent organisms that don't do any tasks.
"""
denom, palette = get_kwargs(phen_grid, kwargs, True)
n_tasks = len(palette)
grid = phen_grid
for i in range(len(grid)):
for j in range(len(grid[i])):
if grid[i][j] != -1 and int(grid[i][j], 2) != -1 and \
int(grid[i][j], 2) != 0:
first = True
b_ind = grid[i][j].find("b")
phen = grid[i][j][b_ind+1:]
for k in range(len(phen)):
if int(phen[k]) == 1:
plt.gca().add_patch(
plt.Circle(
(j, i), radius=(n_tasks - k)*.05,
lw=.1 if first else 0, ec="black",
facecolor=palette[k], zorder=2+k))
first = False
elif int(grid[i][j], 2) == 0:
plt.gca().add_patch(
plt.Circle(
(j, i), radius=(n_tasks)*.05,
lw=.1, ec="black",
facecolor="grey", zorder=2)) |
def _encode_timestamp(name, value, dummy0, dummy1):
"""Encode bson.timestamp.Timestamp."""
return b"\x11" + name + _PACK_TIMESTAMP(value.inc, value.time) | Encode bson.timestamp.Timestamp. | Below is the the instruction that describes the task:
### Input:
Encode bson.timestamp.Timestamp.
### Response:
def _encode_timestamp(name, value, dummy0, dummy1):
"""Encode bson.timestamp.Timestamp."""
return b"\x11" + name + _PACK_TIMESTAMP(value.inc, value.time) |
def OnRangeSelected(self, event):
"""Event handler for grid selection"""
# If grid editing is disabled then pyspread is in selection mode
if not self.grid.IsEditable():
selection = self.grid.selection
row, col, __ = self.grid.sel_mode_cursor
if (row, col) in selection:
self.grid.ClearSelection()
else:
self.grid.SetGridCursor(row, col)
post_command_event(self.grid, self.grid.SelectionMsg,
selection=selection) | Event handler for grid selection | Below is the the instruction that describes the task:
### Input:
Event handler for grid selection
### Response:
def OnRangeSelected(self, event):
"""Event handler for grid selection"""
# If grid editing is disabled then pyspread is in selection mode
if not self.grid.IsEditable():
selection = self.grid.selection
row, col, __ = self.grid.sel_mode_cursor
if (row, col) in selection:
self.grid.ClearSelection()
else:
self.grid.SetGridCursor(row, col)
post_command_event(self.grid, self.grid.SelectionMsg,
selection=selection) |
def _settings(self) -> Settings:
"""
Return the actual settings object, or create it if missing.
"""
if self.__dict__['__settings'] is None:
self.__dict__['__settings'] = Settings()
for file_path in self._get_files():
if file_path:
# noinspection PyProtectedMember
self.__dict__['__settings']._load(file_path)
return self.__dict__['__settings'] | Return the actual settings object, or create it if missing. | Below is the the instruction that describes the task:
### Input:
Return the actual settings object, or create it if missing.
### Response:
def _settings(self) -> Settings:
"""
Return the actual settings object, or create it if missing.
"""
if self.__dict__['__settings'] is None:
self.__dict__['__settings'] = Settings()
for file_path in self._get_files():
if file_path:
# noinspection PyProtectedMember
self.__dict__['__settings']._load(file_path)
return self.__dict__['__settings'] |
def _get_veths(net_data):
'''
Parse the nic setup inside lxc conf tuples back to a dictionary indexed by
network interface
'''
if isinstance(net_data, dict):
net_data = list(net_data.items())
nics = salt.utils.odict.OrderedDict()
current_nic = salt.utils.odict.OrderedDict()
no_names = True
for item in net_data:
if item and isinstance(item, dict):
item = list(item.items())[0]
# skip LXC configuration comment lines, and play only with tuples conf
elif isinstance(item, six.string_types):
# deal with reflection of commented lxc configs
sitem = item.strip()
if sitem.startswith('#') or not sitem:
continue
elif '=' in item:
item = tuple([a.strip() for a in item.split('=', 1)])
if item[0] == 'lxc.network.type':
current_nic = salt.utils.odict.OrderedDict()
if item[0] == 'lxc.network.name':
no_names = False
nics[item[1].strip()] = current_nic
current_nic[item[0].strip()] = item[1].strip()
# if not ethernet card name has been collected, assuming we collected
# data for eth0
if no_names and current_nic:
nics[DEFAULT_NIC] = current_nic
return nics | Parse the nic setup inside lxc conf tuples back to a dictionary indexed by
network interface | Below is the the instruction that describes the task:
### Input:
Parse the nic setup inside lxc conf tuples back to a dictionary indexed by
network interface
### Response:
def _get_veths(net_data):
'''
Parse the nic setup inside lxc conf tuples back to a dictionary indexed by
network interface
'''
if isinstance(net_data, dict):
net_data = list(net_data.items())
nics = salt.utils.odict.OrderedDict()
current_nic = salt.utils.odict.OrderedDict()
no_names = True
for item in net_data:
if item and isinstance(item, dict):
item = list(item.items())[0]
# skip LXC configuration comment lines, and play only with tuples conf
elif isinstance(item, six.string_types):
# deal with reflection of commented lxc configs
sitem = item.strip()
if sitem.startswith('#') or not sitem:
continue
elif '=' in item:
item = tuple([a.strip() for a in item.split('=', 1)])
if item[0] == 'lxc.network.type':
current_nic = salt.utils.odict.OrderedDict()
if item[0] == 'lxc.network.name':
no_names = False
nics[item[1].strip()] = current_nic
current_nic[item[0].strip()] = item[1].strip()
# if not ethernet card name has been collected, assuming we collected
# data for eth0
if no_names and current_nic:
nics[DEFAULT_NIC] = current_nic
return nics |
def exec_(argv): # never returns
"""Wrapper to os.execv which shows the command and runs any atexit handlers (for coverage's sake).
Like os.execv, this function never returns.
"""
# info('EXEC' + colorize(argv)) # TODO: debug logging by environment variable
# in python3, sys.exitfunc has gone away, and atexit._run_exitfuncs seems to be the only pubic-ish interface
# https://hg.python.org/cpython/file/3.4/Modules/atexitmodule.c#l289
import atexit
atexit._run_exitfuncs()
from os import execv
execv(argv[0], argv) | Wrapper to os.execv which shows the command and runs any atexit handlers (for coverage's sake).
Like os.execv, this function never returns. | Below is the instruction that describes the task:
### Input:
Wrapper to os.execv which shows the command and runs any atexit handlers (for coverage's sake).
Like os.execv, this function never returns.
### Response:
def exec_(argv): # never returns
"""Wrapper to os.execv which shows the command and runs any atexit handlers (for coverage's sake).
Like os.execv, this function never returns.
"""
# info('EXEC' + colorize(argv)) # TODO: debug logging by environment variable
# in python3, sys.exitfunc has gone away, and atexit._run_exitfuncs seems to be the only pubic-ish interface
# https://hg.python.org/cpython/file/3.4/Modules/atexitmodule.c#l289
import atexit
atexit._run_exitfuncs()
from os import execv
execv(argv[0], argv) |
def _to_autoassign(self):
"""Save :class:`~nmrstarlib.plsimulator.PeakList` into AutoAssign-formatted string.
:return: Peak list representation in AutoAssign format.
:rtype: :py:class:`str`
"""
autoassign_str = "#Index\t\t{}\t\tIntensity\t\tWorkbook\n".format(
"\t\t".join([str(i + 1) + "Dim" for i in range(len(self.labels))]))
for peak_idx, peak in enumerate(self):
dimensions_str = "\t\t".join([str(chemshift) for chemshift in peak.chemshifts_list])
autoassign_str += "{}\t\t{}\t\t{}\t\t{}\n".format(peak_idx+1, dimensions_str, 0, self.spectrum_name)
return autoassign_str | Save :class:`~nmrstarlib.plsimulator.PeakList` into AutoAssign-formatted string.
:return: Peak list representation in AutoAssign format.
:rtype: :py:class:`str` | Below is the instruction that describes the task:
### Input:
Save :class:`~nmrstarlib.plsimulator.PeakList` into AutoAssign-formatted string.
:return: Peak list representation in AutoAssign format.
:rtype: :py:class:`str`
### Response:
def _to_autoassign(self):
"""Save :class:`~nmrstarlib.plsimulator.PeakList` into AutoAssign-formatted string.
:return: Peak list representation in AutoAssign format.
:rtype: :py:class:`str`
"""
autoassign_str = "#Index\t\t{}\t\tIntensity\t\tWorkbook\n".format(
"\t\t".join([str(i + 1) + "Dim" for i in range(len(self.labels))]))
for peak_idx, peak in enumerate(self):
dimensions_str = "\t\t".join([str(chemshift) for chemshift in peak.chemshifts_list])
autoassign_str += "{}\t\t{}\t\t{}\t\t{}\n".format(peak_idx+1, dimensions_str, 0, self.spectrum_name)
return autoassign_str |
def set(self, val):
"""Set the value"""
import time
now = time.time()
expected_value = []
new_val = {}
new_val['timestamp'] = now
if self._value != None:
new_val['last_value'] = self._value
expected_value = ['current_value', str(self._value)]
new_val['current_value'] = val
try:
self.db.put_attributes(self.id, new_val, expected_value=expected_value)
self.timestamp = new_val['timestamp']
except SDBResponseError, e:
if e.status == 409:
raise ValueError, "Sequence out of sync"
else:
                raise | Set the value | Below is the instruction that describes the task:
### Input:
Set the value
### Response:
def set(self, val):
"""Set the value"""
import time
now = time.time()
expected_value = []
new_val = {}
new_val['timestamp'] = now
if self._value != None:
new_val['last_value'] = self._value
expected_value = ['current_value', str(self._value)]
new_val['current_value'] = val
try:
self.db.put_attributes(self.id, new_val, expected_value=expected_value)
self.timestamp = new_val['timestamp']
except SDBResponseError, e:
if e.status == 409:
raise ValueError, "Sequence out of sync"
else:
raise |
def first_plugin_context(self):
"""Returns the context is associated with the first app this plugin was
registered on"""
# Note, because registrations are stored in a set, its not _really_
# the first one, but whichever one it sees first in the set.
first_spf_reg = next(iter(self.registrations))
return self.get_context_from_spf(first_spf_reg) | Returns the context is associated with the first app this plugin was
registered on | Below is the instruction that describes the task:
### Input:
Returns the context is associated with the first app this plugin was
registered on
### Response:
def first_plugin_context(self):
"""Returns the context is associated with the first app this plugin was
registered on"""
# Note, because registrations are stored in a set, its not _really_
# the first one, but whichever one it sees first in the set.
first_spf_reg = next(iter(self.registrations))
return self.get_context_from_spf(first_spf_reg) |
def duration(label, stop_it=True, stop_at=None):
"""Returns duration in seconds for label"""
if label not in labels:
return None
if "duration" in labels[label]:
return Duration(labels[label]["duration"])
if stop_it:
return stop(label, at=stop_at)
else:
        return None | Returns duration in seconds for label | Below is the instruction that describes the task:
### Input:
Returns duration in seconds for label
### Response:
def duration(label, stop_it=True, stop_at=None):
"""Returns duration in seconds for label"""
if label not in labels:
return None
if "duration" in labels[label]:
return Duration(labels[label]["duration"])
if stop_it:
return stop(label, at=stop_at)
else:
return None |
def save(filename, rsrc):
"Save the resource to the source file"
s = pprint.pformat(rsrc)
## s = s.encode("utf8")
    open(filename, "w").write(s) | Save the resource to the source file | Below is the instruction that describes the task:
### Input:
Save the resource to the source file
### Response:
def save(filename, rsrc):
"Save the resource to the source file"
s = pprint.pformat(rsrc)
## s = s.encode("utf8")
open(filename, "w").write(s) |
def _compute_gridspec(self, layout):
"""
Computes the tallest and widest cell for each row and column
by examining the Layouts in the GridSpace. The GridSpec is then
instantiated and the LayoutPlots are configured with the
appropriate embedded layout_types. The first element of the
returned tuple is a dictionary of all the LayoutPlots indexed
by row and column. The second dictionary in the tuple supplies
the grid indicies needed to instantiate the axes for each
LayoutPlot.
"""
layout_items = layout.grid_items()
layout_dimensions = layout.kdims if isinstance(layout, NdLayout) else None
layouts = {}
col_widthratios, row_heightratios = {}, {}
for (r, c) in self.coords:
# Get view at layout position and wrap in AdjointLayout
_, view = layout_items.get((c, r) if self.transpose else (r, c), (None, None))
if isinstance(view, NdLayout):
raise SkipRendering("Cannot render NdLayout nested inside a Layout")
layout_view = view if isinstance(view, AdjointLayout) else AdjointLayout([view])
layouts[(r, c)] = layout_view
# Compute shape of AdjointLayout element
layout_lens = {1:'Single', 2:'Dual', 3:'Triple'}
layout_type = layout_lens[len(layout_view)]
# Get aspects
main = layout_view.main
main = main.last if isinstance(main, HoloMap) else main
main_options = self.lookup_options(main, 'plot').options if main else {}
if main and not isinstance(main_options.get('aspect', 1), basestring):
main_aspect = np.nan if isinstance(main, Empty) else main_options.get('aspect', 1)
main_aspect = self.aspect_weight*main_aspect + 1-self.aspect_weight
else:
main_aspect = np.nan
if layout_type in ['Dual', 'Triple']:
el = layout_view.get('right', None)
eltype = type(el)
if el and eltype in MPLPlot.sideplots:
plot_type = MPLPlot.sideplots[type(el)]
ratio = 0.6*(plot_type.subplot_size+plot_type.border_size)
width_ratios = [4, 4*ratio]
else:
width_ratios = [4, 1]
else:
width_ratios = [4]
inv_aspect = 1./main_aspect if main_aspect else np.NaN
if layout_type in ['Embedded Dual', 'Triple']:
el = layout_view.get('top', None)
eltype = type(el)
if el and eltype in MPLPlot.sideplots:
plot_type = MPLPlot.sideplots[type(el)]
ratio = 0.6*(plot_type.subplot_size+plot_type.border_size)
height_ratios = [4*ratio, 4]
else:
height_ratios = [1, 4]
else:
height_ratios = [4]
if not isinstance(main_aspect, (basestring, type(None))):
width_ratios = [wratio * main_aspect for wratio in width_ratios]
height_ratios = [hratio * inv_aspect for hratio in height_ratios]
layout_shape = (len(width_ratios), len(height_ratios))
# For each row and column record the width and height ratios
# of the LayoutPlot with the most horizontal or vertical splits
# and largest aspect
prev_heights = row_heightratios.get(r, (0, []))
if layout_shape[1] > prev_heights[0]:
row_heightratios[r] = [layout_shape[1], prev_heights[1]]
row_heightratios[r][1].append(height_ratios)
prev_widths = col_widthratios.get(c, (0, []))
if layout_shape[0] > prev_widths[0]:
col_widthratios[c] = (layout_shape[0], prev_widths[1])
col_widthratios[c][1].append(width_ratios)
col_splits = [v[0] for __, v in sorted(col_widthratios.items())]
row_splits = [v[0] for ___, v in sorted(row_heightratios.items())]
widths = np.array([r for col in col_widthratios.values()
for ratios in col[1] for r in ratios])/4
wr_unnormalized = compute_ratios(col_widthratios, False)
hr_list = compute_ratios(row_heightratios)
wr_list = compute_ratios(col_widthratios)
# Compute the number of rows and cols
cols, rows = len(wr_list), len(hr_list)
wr_list = [r if np.isfinite(r) else 1 for r in wr_list]
hr_list = [r if np.isfinite(r) else 1 for r in hr_list]
width = sum([r if np.isfinite(r) else 1 for r in wr_list])
yscale = width/sum([(1/v)*4 if np.isfinite(v) else 4 for v in wr_unnormalized])
if self.absolute_scaling:
width = width*np.nanmax(widths)
xinches, yinches = None, None
if not isinstance(self.fig_inches, (tuple, list)):
xinches = self.fig_inches * width
yinches = xinches/yscale
elif self.fig_inches[0] is None:
xinches = self.fig_inches[1] * yscale
yinches = self.fig_inches[1]
elif self.fig_inches[1] is None:
xinches = self.fig_inches[0]
yinches = self.fig_inches[0] / yscale
if xinches and yinches:
self.handles['fig'].set_size_inches([xinches, yinches])
self.gs = gridspec.GridSpec(rows, cols,
width_ratios=wr_list,
height_ratios=hr_list,
wspace=self.hspace,
hspace=self.vspace)
# Situate all the Layouts in the grid and compute the gridspec
# indices for all the axes required by each LayoutPlot.
gidx = 0
layout_count = 0
tight = self.tight
collapsed_layout = layout.clone(shared_data=False, id=layout.id)
frame_ranges = self.compute_ranges(layout, None, None)
keys = self.keys[:1] if self.dynamic else self.keys
frame_ranges = OrderedDict([(key, self.compute_ranges(layout, key, frame_ranges))
for key in keys])
layout_subplots, layout_axes = {}, {}
for r, c in self.coords:
# Compute the layout type from shape
wsplits = col_splits[c]
hsplits = row_splits[r]
if (wsplits, hsplits) == (1,1):
layout_type = 'Single'
elif (wsplits, hsplits) == (2,1):
layout_type = 'Dual'
elif (wsplits, hsplits) == (1,2):
layout_type = 'Embedded Dual'
elif (wsplits, hsplits) == (2,2):
layout_type = 'Triple'
# Get the AdjoinLayout at the specified coordinate
view = layouts[(r, c)]
positions = AdjointLayoutPlot.layout_dict[layout_type]
# Create temporary subplots to get projections types
# to create the correct subaxes for all plots in the layout
_, _, projs = self._create_subplots(layouts[(r, c)], positions,
None, frame_ranges, create=False)
gidx, gsinds = self.grid_situate(gidx, layout_type, cols)
layout_key, _ = layout_items.get((r, c), (None, None))
if isinstance(layout, NdLayout) and layout_key:
layout_dimensions = OrderedDict(zip(layout_dimensions, layout_key))
# Generate the axes and create the subplots with the appropriate
# axis objects, handling any Empty objects.
obj = layouts[(r, c)]
empty = isinstance(obj.main, Empty)
if view.main is None:
continue
elif empty:
obj = AdjointLayout([])
elif not view.traverse(lambda x: x, [Element]):
self.param.warning('%s is empty, skipping subplot.' % obj.main)
continue
elif self.transpose:
layout_count = (c*self.rows+(r+1))
else:
layout_count += 1
subaxes = [plt.subplot(self.gs[ind], projection=proj)
for ind, proj in zip(gsinds, projs)]
subplot_data = self._create_subplots(obj, positions,
layout_dimensions, frame_ranges,
dict(zip(positions, subaxes)),
num=0 if empty else layout_count)
subplots, adjoint_layout, _ = subplot_data
layout_axes[(r, c)] = subaxes
# Generate the AdjointLayoutsPlot which will coordinate
# plotting of AdjointLayouts in the larger grid
plotopts = self.lookup_options(view, 'plot').options
layout_plot = AdjointLayoutPlot(adjoint_layout, layout_type, subaxes, subplots,
fig=self.handles['fig'], **plotopts)
layout_subplots[(r, c)] = layout_plot
tight = not any(type(p) is GridPlot for p in layout_plot.subplots.values()) and tight
if layout_key:
collapsed_layout[layout_key] = adjoint_layout
# Apply tight layout if enabled and incompatible
# GridPlot isn't present.
if tight:
if isinstance(self.tight_padding, (tuple, list)):
wpad, hpad = self.tight_padding
padding = dict(w_pad=wpad, h_pad=hpad)
else:
padding = dict(w_pad=self.tight_padding, h_pad=self.tight_padding)
self.gs.tight_layout(self.handles['fig'], rect=self.fig_bounds, **padding)
return layout_subplots, layout_axes, collapsed_layout | Computes the tallest and widest cell for each row and column
by examining the Layouts in the GridSpace. The GridSpec is then
instantiated and the LayoutPlots are configured with the
appropriate embedded layout_types. The first element of the
returned tuple is a dictionary of all the LayoutPlots indexed
by row and column. The second dictionary in the tuple supplies
the grid indicies needed to instantiate the axes for each
LayoutPlot. | Below is the instruction that describes the task:
### Input:
Computes the tallest and widest cell for each row and column
by examining the Layouts in the GridSpace. The GridSpec is then
instantiated and the LayoutPlots are configured with the
appropriate embedded layout_types. The first element of the
returned tuple is a dictionary of all the LayoutPlots indexed
by row and column. The second dictionary in the tuple supplies
the grid indicies needed to instantiate the axes for each
LayoutPlot.
### Response:
def _compute_gridspec(self, layout):
"""
Computes the tallest and widest cell for each row and column
by examining the Layouts in the GridSpace. The GridSpec is then
instantiated and the LayoutPlots are configured with the
appropriate embedded layout_types. The first element of the
returned tuple is a dictionary of all the LayoutPlots indexed
by row and column. The second dictionary in the tuple supplies
the grid indicies needed to instantiate the axes for each
LayoutPlot.
"""
layout_items = layout.grid_items()
layout_dimensions = layout.kdims if isinstance(layout, NdLayout) else None
layouts = {}
col_widthratios, row_heightratios = {}, {}
for (r, c) in self.coords:
# Get view at layout position and wrap in AdjointLayout
_, view = layout_items.get((c, r) if self.transpose else (r, c), (None, None))
if isinstance(view, NdLayout):
raise SkipRendering("Cannot render NdLayout nested inside a Layout")
layout_view = view if isinstance(view, AdjointLayout) else AdjointLayout([view])
layouts[(r, c)] = layout_view
# Compute shape of AdjointLayout element
layout_lens = {1:'Single', 2:'Dual', 3:'Triple'}
layout_type = layout_lens[len(layout_view)]
# Get aspects
main = layout_view.main
main = main.last if isinstance(main, HoloMap) else main
main_options = self.lookup_options(main, 'plot').options if main else {}
if main and not isinstance(main_options.get('aspect', 1), basestring):
main_aspect = np.nan if isinstance(main, Empty) else main_options.get('aspect', 1)
main_aspect = self.aspect_weight*main_aspect + 1-self.aspect_weight
else:
main_aspect = np.nan
if layout_type in ['Dual', 'Triple']:
el = layout_view.get('right', None)
eltype = type(el)
if el and eltype in MPLPlot.sideplots:
plot_type = MPLPlot.sideplots[type(el)]
ratio = 0.6*(plot_type.subplot_size+plot_type.border_size)
width_ratios = [4, 4*ratio]
else:
width_ratios = [4, 1]
else:
width_ratios = [4]
inv_aspect = 1./main_aspect if main_aspect else np.NaN
if layout_type in ['Embedded Dual', 'Triple']:
el = layout_view.get('top', None)
eltype = type(el)
if el and eltype in MPLPlot.sideplots:
plot_type = MPLPlot.sideplots[type(el)]
ratio = 0.6*(plot_type.subplot_size+plot_type.border_size)
height_ratios = [4*ratio, 4]
else:
height_ratios = [1, 4]
else:
height_ratios = [4]
if not isinstance(main_aspect, (basestring, type(None))):
width_ratios = [wratio * main_aspect for wratio in width_ratios]
height_ratios = [hratio * inv_aspect for hratio in height_ratios]
layout_shape = (len(width_ratios), len(height_ratios))
# For each row and column record the width and height ratios
# of the LayoutPlot with the most horizontal or vertical splits
# and largest aspect
prev_heights = row_heightratios.get(r, (0, []))
if layout_shape[1] > prev_heights[0]:
row_heightratios[r] = [layout_shape[1], prev_heights[1]]
row_heightratios[r][1].append(height_ratios)
prev_widths = col_widthratios.get(c, (0, []))
if layout_shape[0] > prev_widths[0]:
col_widthratios[c] = (layout_shape[0], prev_widths[1])
col_widthratios[c][1].append(width_ratios)
col_splits = [v[0] for __, v in sorted(col_widthratios.items())]
row_splits = [v[0] for ___, v in sorted(row_heightratios.items())]
widths = np.array([r for col in col_widthratios.values()
for ratios in col[1] for r in ratios])/4
wr_unnormalized = compute_ratios(col_widthratios, False)
hr_list = compute_ratios(row_heightratios)
wr_list = compute_ratios(col_widthratios)
# Compute the number of rows and cols
cols, rows = len(wr_list), len(hr_list)
wr_list = [r if np.isfinite(r) else 1 for r in wr_list]
hr_list = [r if np.isfinite(r) else 1 for r in hr_list]
width = sum([r if np.isfinite(r) else 1 for r in wr_list])
yscale = width/sum([(1/v)*4 if np.isfinite(v) else 4 for v in wr_unnormalized])
if self.absolute_scaling:
width = width*np.nanmax(widths)
xinches, yinches = None, None
if not isinstance(self.fig_inches, (tuple, list)):
xinches = self.fig_inches * width
yinches = xinches/yscale
elif self.fig_inches[0] is None:
xinches = self.fig_inches[1] * yscale
yinches = self.fig_inches[1]
elif self.fig_inches[1] is None:
xinches = self.fig_inches[0]
yinches = self.fig_inches[0] / yscale
if xinches and yinches:
self.handles['fig'].set_size_inches([xinches, yinches])
self.gs = gridspec.GridSpec(rows, cols,
width_ratios=wr_list,
height_ratios=hr_list,
wspace=self.hspace,
hspace=self.vspace)
# Situate all the Layouts in the grid and compute the gridspec
# indices for all the axes required by each LayoutPlot.
gidx = 0
layout_count = 0
tight = self.tight
collapsed_layout = layout.clone(shared_data=False, id=layout.id)
frame_ranges = self.compute_ranges(layout, None, None)
keys = self.keys[:1] if self.dynamic else self.keys
frame_ranges = OrderedDict([(key, self.compute_ranges(layout, key, frame_ranges))
for key in keys])
layout_subplots, layout_axes = {}, {}
for r, c in self.coords:
# Compute the layout type from shape
wsplits = col_splits[c]
hsplits = row_splits[r]
if (wsplits, hsplits) == (1,1):
layout_type = 'Single'
elif (wsplits, hsplits) == (2,1):
layout_type = 'Dual'
elif (wsplits, hsplits) == (1,2):
layout_type = 'Embedded Dual'
elif (wsplits, hsplits) == (2,2):
layout_type = 'Triple'
# Get the AdjoinLayout at the specified coordinate
view = layouts[(r, c)]
positions = AdjointLayoutPlot.layout_dict[layout_type]
# Create temporary subplots to get projections types
# to create the correct subaxes for all plots in the layout
_, _, projs = self._create_subplots(layouts[(r, c)], positions,
None, frame_ranges, create=False)
gidx, gsinds = self.grid_situate(gidx, layout_type, cols)
layout_key, _ = layout_items.get((r, c), (None, None))
if isinstance(layout, NdLayout) and layout_key:
layout_dimensions = OrderedDict(zip(layout_dimensions, layout_key))
# Generate the axes and create the subplots with the appropriate
# axis objects, handling any Empty objects.
obj = layouts[(r, c)]
empty = isinstance(obj.main, Empty)
if view.main is None:
continue
elif empty:
obj = AdjointLayout([])
elif not view.traverse(lambda x: x, [Element]):
self.param.warning('%s is empty, skipping subplot.' % obj.main)
continue
elif self.transpose:
layout_count = (c*self.rows+(r+1))
else:
layout_count += 1
subaxes = [plt.subplot(self.gs[ind], projection=proj)
for ind, proj in zip(gsinds, projs)]
subplot_data = self._create_subplots(obj, positions,
layout_dimensions, frame_ranges,
dict(zip(positions, subaxes)),
num=0 if empty else layout_count)
subplots, adjoint_layout, _ = subplot_data
layout_axes[(r, c)] = subaxes
# Generate the AdjointLayoutsPlot which will coordinate
# plotting of AdjointLayouts in the larger grid
plotopts = self.lookup_options(view, 'plot').options
layout_plot = AdjointLayoutPlot(adjoint_layout, layout_type, subaxes, subplots,
fig=self.handles['fig'], **plotopts)
layout_subplots[(r, c)] = layout_plot
tight = not any(type(p) is GridPlot for p in layout_plot.subplots.values()) and tight
if layout_key:
collapsed_layout[layout_key] = adjoint_layout
# Apply tight layout if enabled and incompatible
# GridPlot isn't present.
if tight:
if isinstance(self.tight_padding, (tuple, list)):
wpad, hpad = self.tight_padding
padding = dict(w_pad=wpad, h_pad=hpad)
else:
padding = dict(w_pad=self.tight_padding, h_pad=self.tight_padding)
self.gs.tight_layout(self.handles['fig'], rect=self.fig_bounds, **padding)
return layout_subplots, layout_axes, collapsed_layout |
def get_under_bridge(self):
"""Return element closest to the adsorbate in the subsurface layer"""
C0 = self.B[-1:] * (3, 3, 1)
ads_pos = C0.positions[4]
C = self.get_subsurface_layer() * (3, 3, 1)
dis = self.B.cell[0][0] * 2
ret = None
for ele in C:
new_dis = np.linalg.norm(ads_pos - ele.position)
if new_dis < dis:
dis = new_dis
ret = ele.symbol
        return ret | Return element closest to the adsorbate in the subsurface layer | Below is the instruction that describes the task:
### Input:
Return element closest to the adsorbate in the subsurface layer
### Response:
def get_under_bridge(self):
"""Return element closest to the adsorbate in the subsurface layer"""
C0 = self.B[-1:] * (3, 3, 1)
ads_pos = C0.positions[4]
C = self.get_subsurface_layer() * (3, 3, 1)
dis = self.B.cell[0][0] * 2
ret = None
for ele in C:
new_dis = np.linalg.norm(ads_pos - ele.position)
if new_dis < dis:
dis = new_dis
ret = ele.symbol
return ret |
def addAttachment(self, filepath):
"""Upload attachment to a workitem
:param filepath: the attachment file path
:return: the :class:`rtcclient.models.Attachment` object
:rtype: rtcclient.models.Attachment
"""
proj_id = self.contextId
fa = self.rtc_obj.getFiledAgainst(self.filedAgainst,
projectarea_id=proj_id)
fa_id = fa.url.split("/")[-1]
headers = copy.deepcopy(self.rtc_obj.headers)
if headers.__contains__("Content-Type"):
headers.__delitem__("Content-Type")
filename = os.path.basename(filepath)
fileh = open(filepath, "rb")
files = {"attach": (filename, fileh, "application/octet-stream")}
params = {"projectId": proj_id,
"multiple": "true",
"category": fa_id}
req_url = "".join([self.rtc_obj.url,
"/service/com.ibm.team.workitem.service.",
"internal.rest.IAttachmentRestService/"])
resp = self.post(req_url,
verify=False,
headers=headers,
proxies=self.rtc_obj.proxies,
params=params,
files=files)
raw_data = xmltodict.parse(resp.content)
json_body = json.loads(raw_data["html"]["body"]["textarea"])
attachment_info = json_body["files"][0]
return self._add_attachment_link(attachment_info) | Upload attachment to a workitem
:param filepath: the attachment file path
:return: the :class:`rtcclient.models.Attachment` object
:rtype: rtcclient.models.Attachment | Below is the instruction that describes the task:
### Input:
Upload attachment to a workitem
:param filepath: the attachment file path
:return: the :class:`rtcclient.models.Attachment` object
:rtype: rtcclient.models.Attachment
### Response:
def addAttachment(self, filepath):
"""Upload attachment to a workitem
:param filepath: the attachment file path
:return: the :class:`rtcclient.models.Attachment` object
:rtype: rtcclient.models.Attachment
"""
proj_id = self.contextId
fa = self.rtc_obj.getFiledAgainst(self.filedAgainst,
projectarea_id=proj_id)
fa_id = fa.url.split("/")[-1]
headers = copy.deepcopy(self.rtc_obj.headers)
if headers.__contains__("Content-Type"):
headers.__delitem__("Content-Type")
filename = os.path.basename(filepath)
fileh = open(filepath, "rb")
files = {"attach": (filename, fileh, "application/octet-stream")}
params = {"projectId": proj_id,
"multiple": "true",
"category": fa_id}
req_url = "".join([self.rtc_obj.url,
"/service/com.ibm.team.workitem.service.",
"internal.rest.IAttachmentRestService/"])
resp = self.post(req_url,
verify=False,
headers=headers,
proxies=self.rtc_obj.proxies,
params=params,
files=files)
raw_data = xmltodict.parse(resp.content)
json_body = json.loads(raw_data["html"]["body"]["textarea"])
attachment_info = json_body["files"][0]
return self._add_attachment_link(attachment_info) |
def _singleton_method(name):
"""Return a function to the `name` method on a singleton `coverage` object.
The singleton object is created the first time one of these functions is
called.
"""
# Disable pylint msg W0612, because a bunch of variables look unused, but
# they're accessed via locals().
# pylint: disable=W0612
def wrapper(*args, **kwargs):
"""Singleton wrapper around a coverage method."""
global _the_coverage
if not _the_coverage:
_the_coverage = coverage(auto_data=True)
return getattr(_the_coverage, name)(*args, **kwargs)
import inspect
meth = getattr(coverage, name)
args, varargs, kw, defaults = inspect.getargspec(meth)
argspec = inspect.formatargspec(args[1:], varargs, kw, defaults)
docstring = meth.__doc__
wrapper.__doc__ = ("""\
A first-use-singleton wrapper around coverage.%(name)s.
This wrapper is provided for backward compatibility with legacy code.
New code should use coverage.%(name)s directly.
%(name)s%(argspec)s:
%(docstring)s
""" % locals()
)
return wrapper | Return a function to the `name` method on a singleton `coverage` object.
The singleton object is created the first time one of these functions is
called. | Below is the instruction that describes the task:
### Input:
Return a function to the `name` method on a singleton `coverage` object.
The singleton object is created the first time one of these functions is
called.
### Response:
def _singleton_method(name):
"""Return a function to the `name` method on a singleton `coverage` object.
The singleton object is created the first time one of these functions is
called.
"""
# Disable pylint msg W0612, because a bunch of variables look unused, but
# they're accessed via locals().
# pylint: disable=W0612
def wrapper(*args, **kwargs):
"""Singleton wrapper around a coverage method."""
global _the_coverage
if not _the_coverage:
_the_coverage = coverage(auto_data=True)
return getattr(_the_coverage, name)(*args, **kwargs)
import inspect
meth = getattr(coverage, name)
args, varargs, kw, defaults = inspect.getargspec(meth)
argspec = inspect.formatargspec(args[1:], varargs, kw, defaults)
docstring = meth.__doc__
wrapper.__doc__ = ("""\
A first-use-singleton wrapper around coverage.%(name)s.
This wrapper is provided for backward compatibility with legacy code.
New code should use coverage.%(name)s directly.
%(name)s%(argspec)s:
%(docstring)s
""" % locals()
)
return wrapper |
def _find_files(dl_paths, publisher, url_dict):
"""Find files corresponding to urls."""
if publisher == 'cnn':
top_dir = os.path.join(dl_paths['cnn_stories'], 'cnn', 'stories')
elif publisher == 'dm':
top_dir = os.path.join(dl_paths['dm_stories'], 'dailymail', 'stories')
else:
logging.fatal('Unsupported publisher: %s', publisher)
files = tf.io.gfile.listdir(top_dir)
ret_files = []
for p in files:
basename = os.path.basename(p)
if basename[0:basename.find('.story')] in url_dict:
ret_files.append(os.path.join(top_dir, p))
  return ret_files | Find files corresponding to urls. | Below is the instruction that describes the task:
### Input:
Find files corresponding to urls.
### Response:
def _find_files(dl_paths, publisher, url_dict):
"""Find files corresponding to urls."""
if publisher == 'cnn':
top_dir = os.path.join(dl_paths['cnn_stories'], 'cnn', 'stories')
elif publisher == 'dm':
top_dir = os.path.join(dl_paths['dm_stories'], 'dailymail', 'stories')
else:
logging.fatal('Unsupported publisher: %s', publisher)
files = tf.io.gfile.listdir(top_dir)
ret_files = []
for p in files:
basename = os.path.basename(p)
if basename[0:basename.find('.story')] in url_dict:
ret_files.append(os.path.join(top_dir, p))
return ret_files |
def render(self, data, accepted_media_type=None, renderer_context=None):
"""
Render the HTML for the browsable API representation.
"""
self.accepted_media_type = accepted_media_type or ''
self.renderer_context = renderer_context or {}
template = loader.get_template(self.template)
context = self.get_context(data, accepted_media_type, renderer_context)
ret = template_render(template, context, request=renderer_context['request'])
# Munge DELETE Response code to allow us to return content
# (Do this *after* we've rendered the template so that we include
# the normal deletion response code in the output)
response = renderer_context['response']
if response.status_code == status.HTTP_204_NO_CONTENT:
response.status_code = status.HTTP_200_OK
        return ret | Render the HTML for the browsable API representation. | Below is the instruction that describes the task:
### Input:
Render the HTML for the browsable API representation.
### Response:
def render(self, data, accepted_media_type=None, renderer_context=None):
"""
Render the HTML for the browsable API representation.
"""
self.accepted_media_type = accepted_media_type or ''
self.renderer_context = renderer_context or {}
template = loader.get_template(self.template)
context = self.get_context(data, accepted_media_type, renderer_context)
ret = template_render(template, context, request=renderer_context['request'])
# Munge DELETE Response code to allow us to return content
# (Do this *after* we've rendered the template so that we include
# the normal deletion response code in the output)
response = renderer_context['response']
if response.status_code == status.HTTP_204_NO_CONTENT:
response.status_code = status.HTTP_200_OK
return ret |
def get_mongoadmins(self):
""" Returns a list of all mongoadmin implementations for the site """
apps = []
for app_name in settings.INSTALLED_APPS:
mongoadmin = "{0}.mongoadmin".format(app_name)
try:
module = import_module(mongoadmin)
except ImportError as e:
if str(e).startswith("No module named"):
continue
raise e
app_store = AppStore(module)
apps.append(dict(
app_name=app_name,
obj=app_store
))
        return apps | Returns a list of all mongoadmin implementations for the site | Below is the instruction that describes the task:
### Input:
Returns a list of all mongoadmin implementations for the site
### Response:
def get_mongoadmins(self):
""" Returns a list of all mongoadmin implementations for the site """
apps = []
for app_name in settings.INSTALLED_APPS:
mongoadmin = "{0}.mongoadmin".format(app_name)
try:
module = import_module(mongoadmin)
except ImportError as e:
if str(e).startswith("No module named"):
continue
raise e
app_store = AppStore(module)
apps.append(dict(
app_name=app_name,
obj=app_store
))
return apps |
def do_quit(self, arg):
"""
quit - close the debugging session
q - close the debugging session
"""
if self.cmdprefix:
raise CmdError("prefix not allowed")
if arg:
raise CmdError("too many arguments")
if self.confirm_quit:
count = self.debug.get_debugee_count()
if count > 0:
if count == 1:
msg = "There's a program still running."
else:
msg = "There are %s programs still running." % count
if not self.ask_user(msg):
return False
self.debuggerExit = True
return True | quit - close the debugging session
q - close the debugging session | Below is the instruction that describes the task:
### Input:
quit - close the debugging session
q - close the debugging session
### Response:
def do_quit(self, arg):
        """
        quit - close the debugging session
        q - close the debugging session
        """
        # No prefix and no arguments are allowed for this command.
        if self.cmdprefix:
            raise CmdError("prefix not allowed")
        if arg:
            raise CmdError("too many arguments")
        if self.confirm_quit:
            # Confirm with the user when programs are still being debugged;
            # a negative answer cancels the quit.
            count = self.debug.get_debugee_count()
            if count > 0:
                if count == 1:
                    msg = "There's a program still running."
                else:
                    msg = "There are %s programs still running." % count
                if not self.ask_user(msg):
                    return False
        self.debuggerExit = True
        # Returning a truthy value ends the cmd.Cmd loop.
        return True |
def get_variable_value_for_variation(self, variable, variation):
    """ Get the variable value for the given variation.
    Args:
      variable: The Variable for which we are getting the value.
      variation: The Variation for which we are getting the variable value.
    Returns:
      The variable value or None if any of the inputs are invalid.
    """
    # Guard: both inputs must be provided and truthy.
    if not variable or not variation:
      return None
    if variation.id not in self.variation_variable_usage_map:
      self.logger.error('Variation with ID "%s" is not in the datafile.' % variation.id)
      return None
    # Get all variable usages for the given variation
    variable_usages = self.variation_variable_usage_map[variation.id]
    # Find usage in given variation
    variable_usage = None
    if variable_usages:
      variable_usage = variable_usages.get(variable.id)
    if variable_usage:
      variable_value = variable_usage.value
      self.logger.info('Value for variable "%s" for variation "%s" is "%s".' % (
        variable.key,
        variation.key,
        variable_value
      ))
    else:
      # Variation does not override this variable: fall back to its default.
      variable_value = variable.defaultValue
      self.logger.info('Variable "%s" is not used in variation "%s". Assigning default value "%s".' % (
        variable.key,
        variation.key,
        variable_value
      ))
    return variable_value | Get the variable value for the given variation.
Args:
variable: The Variable for which we are getting the value.
variation: The Variation for which we are getting the variable value.
Returns:
      The variable value or None if any of the inputs are invalid. | Below is the instruction that describes the task:
### Input:
Get the variable value for the given variation.
Args:
variable: The Variable for which we are getting the value.
variation: The Variation for which we are getting the variable value.
Returns:
The variable value or None if any of the inputs are invalid.
### Response:
def get_variable_value_for_variation(self, variable, variation):
    """ Get the variable value for the given variation.
    Args:
      variable: The Variable for which we are getting the value.
      variation: The Variation for which we are getting the variable value.
    Returns:
      The variable value or None if any of the inputs are invalid.
    """
    # Both arguments are required; missing either one is an invalid input.
    if not variable or not variation:
      return None
    if variation.id not in self.variation_variable_usage_map:
      self.logger.error('Variation with ID "%s" is not in the datafile.' % variation.id)
      return None
    # Get all variable usages for the given variation
    variable_usages = self.variation_variable_usage_map[variation.id]
    # Find usage in given variation
    variable_usage = None
    if variable_usages:
      variable_usage = variable_usages.get(variable.id)
    if variable_usage:
      variable_value = variable_usage.value
      self.logger.info('Value for variable "%s" for variation "%s" is "%s".' % (
        variable.key,
        variation.key,
        variable_value
      ))
    else:
      # No per-variation usage recorded: use the variable's default value.
      variable_value = variable.defaultValue
      self.logger.info('Variable "%s" is not used in variation "%s". Assigning default value "%s".' % (
        variable.key,
        variation.key,
        variable_value
      ))
    return variable_value |
def config_reader(self, config_level=None):
    """
    :return:
        GitConfigParser allowing to read the full git configuration, but not to write it
        The configuration will include values from the system, user and repository
        configuration files.
    :param config_level:
        For possible values, see config_writer method
        If None, all applicable levels will be used. Specify a level in case
        you know which file you wish to read to prevent reading multiple files.
    :note: On windows, system configuration cannot currently be read as the path is
        unknown, instead the global path will be used."""
    # Build the list of config file paths: all known levels, or just the one
    # the caller asked for.
    files = None
    if config_level is None:
        files = [self._get_config_path(f) for f in self.config_level]
    else:
        files = [self._get_config_path(config_level)]
    # Read-only parser over the selected files.
    return GitConfigParser(files, read_only=True) | :return:
GitConfigParser allowing to read the full git configuration, but not to write it
The configuration will include values from the system, user and repository
configuration files.
:param config_level:
For possible values, see config_writer method
If None, all applicable levels will be used. Specify a level in case
you know which file you wish to read to prevent reading multiple files.
:note: On windows, system configuration cannot currently be read as the path is
    unknown, instead the global path will be used. | Below is the instruction that describes the task:
### Input:
:return:
GitConfigParser allowing to read the full git configuration, but not to write it
The configuration will include values from the system, user and repository
configuration files.
:param config_level:
For possible values, see config_writer method
If None, all applicable levels will be used. Specify a level in case
you know which file you wish to read to prevent reading multiple files.
:note: On windows, system configuration cannot currently be read as the path is
unknown, instead the global path will be used.
### Response:
def config_reader(self, config_level=None):
    """
    :return:
        GitConfigParser allowing to read the full git configuration, but not to write it
        The configuration will include values from the system, user and repository
        configuration files.
    :param config_level:
        For possible values, see config_writer method
        If None, all applicable levels will be used. Specify a level in case
        you know which file you wish to read to prevent reading multiple files.
    :note: On windows, system configuration cannot currently be read as the path is
        unknown, instead the global path will be used."""
    # Collect config paths for every applicable level, or only the requested one.
    files = None
    if config_level is None:
        files = [self._get_config_path(f) for f in self.config_level]
    else:
        files = [self._get_config_path(config_level)]
    return GitConfigParser(files, read_only=True) |
def _readPermutationsFile(self, filename, modelDescription):
    """
    Read the permutations file and initialize the following member variables:
    _predictedField: field name of the field we are trying to
    predict
    _permutations: Dict containing the full permutations dictionary.
    _flattenedPermutations: Dict containing the flattened version of
    _permutations. The keys leading to the value in the dict are joined
    with a period to create the new key and permute variables within
    encoders are pulled out of the encoder.
    _encoderNames: keys from self._permutations of only the encoder
    variables.
    _reportKeys: The 'report' list from the permutations file.
    This is a list of the items from each experiment's pickled
    results file that should be included in the final report. The
    format of each item is a string of key names separated by colons,
    each key being one level deeper into the experiment results
    dict. For example, 'key1:key2'.
    _filterFunc: a user-supplied function that can be used to
    filter out specific permutation combinations.
    _optimizeKey: which report key to optimize for
    _maximize: True if we should try and maximize the optimizeKey
    metric. False if we should minimize it.
    _dummyModelParamsFunc: a user-supplied function that can be used to
    artificially generate HTMPredictionModel results. When supplied,
    the model is not actually run through the OPF, but instead is run
    through a "Dummy Model" (nupic.swarming.ModelRunner.
    OPFDummyModelRunner). This function returns the params dict used
    to control various options in the dummy model (the returned metric,
    the execution time, etc.). This is used for hypersearch algorithm
    development.
    Parameters:
    ---------------------------------------------------------
    filename: Name of permutations file
    retval: None
    """
    # Open and execute the permutations file
    vars = {}
    # NOTE(review): execfile runs arbitrary Python from 'filename'
    # (Python 2 API) — the permutations file must be trusted input.
    permFile = execfile(filename, globals(), vars)
    # Read in misc info.
    self._reportKeys = vars.get('report', [])
    self._filterFunc = vars.get('permutationFilter', None)
    self._dummyModelParamsFunc = vars.get('dummyModelParams', None)
    self._predictedField = None # default
    self._predictedFieldEncoder = None # default
    self._fixedFields = None # default
    # The fastSwarm variable, if present, contains the params from a best
    # model from a previous swarm. If present, use info from that to seed
    # a fast swarm
    self._fastSwarmModelParams = vars.get('fastSwarmModelParams', None)
    if self._fastSwarmModelParams is not None:
      encoders = self._fastSwarmModelParams['structuredParams']['modelParams']\
          ['sensorParams']['encoders']
      self._fixedFields = []
      for fieldName in encoders:
        if encoders[fieldName] is not None:
          self._fixedFields.append(fieldName)
    if 'fixedFields' in vars:
      self._fixedFields = vars['fixedFields']
    # Get min number of particles per swarm from either permutations file or
    # config.
    self._minParticlesPerSwarm = vars.get('minParticlesPerSwarm')
    # NOTE(review): '== None' works, but 'is None' is the conventional test.
    if self._minParticlesPerSwarm == None:
      self._minParticlesPerSwarm = Configuration.get(
                                      'nupic.hypersearch.minParticlesPerSwarm')
    self._minParticlesPerSwarm = int(self._minParticlesPerSwarm)
    # Enable logic to kill off speculative swarms when an earlier sprint
    # has found that it contains poorly performing field combination?
    self._killUselessSwarms = vars.get('killUselessSwarms', True)
    # The caller can request that the predicted field ALWAYS be included ("yes")
    # or optionally include ("auto"). The setting of "no" is N/A and ignored
    # because in that case the encoder for the predicted field will not even
    # be present in the permutations file.
    # When set to "yes", this will force the first sprint to try the predicted
    # field only (the legacy mode of swarming).
    # When set to "auto", the first sprint tries all possible fields (one at a
    # time) in the first sprint.
    self._inputPredictedField = vars.get("inputPredictedField", "yes")
    # Try all possible 3-field combinations? Normally, we start with the best
    # 2-field combination as a base. When this flag is set though, we try
    # all possible 3-field combinations which takes longer but can find a
    # better model.
    self._tryAll3FieldCombinations = vars.get('tryAll3FieldCombinations', False)
    # Always include timestamp fields in the 3-field swarms?
    # This is a less compute intensive version of tryAll3FieldCombinations.
    # Instead of trying ALL possible 3 field combinations, it just insures
    # that the timestamp fields (dayOfWeek, timeOfDay, weekend) are never left
    # out when generating the 3-field swarms.
    self._tryAll3FieldCombinationsWTimestamps = vars.get(
        'tryAll3FieldCombinationsWTimestamps', False)
    # Allow the permutations file to override minFieldContribution. This would
    # be set to a negative number for large swarms so that you don't disqualify
    # a field in an early sprint just because it did poorly there. Sometimes,
    # a field that did poorly in an early sprint could help accuracy when
    # added in a later sprint
    minFieldContribution = vars.get('minFieldContribution', None)
    if minFieldContribution is not None:
      self._minFieldContribution = minFieldContribution
    # Allow the permutations file to override maxBranching.
    maxBranching = vars.get('maxFieldBranching', None)
    if maxBranching is not None:
      self._maxBranching = maxBranching
    # Read in the optimization info.
    if 'maximize' in vars:
      self._optimizeKey = vars['maximize']
      self._maximize = True
    elif 'minimize' in vars:
      self._optimizeKey = vars['minimize']
      self._maximize = False
    else:
      # NOTE(review): the '%s' placeholder is never filled in (missing
      # '% filename') — the message will print literally.
      raise RuntimeError("Permutations file '%s' does not include a maximize"
                         " or minimize metric.")
    # The permutations file is the new location for maxModels. The old location,
    # in the jobParams is deprecated.
    maxModels = vars.get('maxModels')
    if maxModels is not None:
      if self._maxModels is None:
        self._maxModels = maxModels
      else:
        raise RuntimeError('It is an error to specify maxModels both in the job'
                           ' params AND in the permutations file.')
    # Figure out if what kind of search this is:
    #
    # If it's a temporal prediction search:
    #   the first sprint has 1 swarm, with just the predicted field
    # elif it's a spatial prediction search:
    #   the first sprint has N swarms, each with predicted field + one
    #   other field.
    # elif it's a classification search:
    #   the first sprint has N swarms, each with 1 field
    inferenceType = modelDescription['modelParams']['inferenceType']
    if not InferenceType.validate(inferenceType):
      raise ValueError("Invalid inference type %s" %inferenceType)
    if inferenceType in [InferenceType.TemporalMultiStep,
                         InferenceType.NontemporalMultiStep]:
      # If it does not have a separate encoder for the predicted field that
      # goes to the classifier, it is a legacy multi-step network
      classifierOnlyEncoder = None
      for encoder in modelDescription["modelParams"]["sensorParams"]\
                    ["encoders"].values():
        if encoder.get("classifierOnly", False) \
           and encoder["fieldname"] == vars.get('predictedField', None):
          classifierOnlyEncoder = encoder
          break
      if classifierOnlyEncoder is None or self._inputPredictedField=="yes":
        # If we don't have a separate encoder for the classifier (legacy
        # MultiStep) or the caller explicitly wants to include the predicted
        # field, then use the legacy temporal search methodology.
        self._searchType = HsSearchType.legacyTemporal
      else:
        self._searchType = HsSearchType.temporal
    elif inferenceType in [InferenceType.TemporalNextStep,
                           InferenceType.TemporalAnomaly]:
      self._searchType = HsSearchType.legacyTemporal
    elif inferenceType in (InferenceType.TemporalClassification,
                            InferenceType.NontemporalClassification):
      self._searchType = HsSearchType.classification
    else:
      raise RuntimeError("Unsupported inference type: %s" % inferenceType)
    # Get the predicted field. Note that even classification experiments
    # have a "predicted" field - which is the field that contains the
    # classification value.
    self._predictedField = vars.get('predictedField', None)
    if self._predictedField is None:
      raise RuntimeError("Permutations file '%s' does not have the required"
                         " 'predictedField' variable" % filename)
    # Read in and validate the permutations dict
    if 'permutations' not in vars:
      raise RuntimeError("Permutations file '%s' does not define permutations" % filename)
    if not isinstance(vars['permutations'], dict):
      # NOTE(review): '%s' placeholder has no argument here either (missing
      # '% filename').
      raise RuntimeError("Permutations file '%s' defines a permutations variable "
                         "but it is not a dict")
    self._encoderNames = []
    self._permutations = vars['permutations']
    self._flattenedPermutations = dict()
    def _flattenPermutations(value, keys):
      if ':' in keys[-1]:
        raise RuntimeError("The permutation variable '%s' contains a ':' "
                           "character, which is not allowed.")
      flatKey = _flattenKeys(keys)
      if isinstance(value, PermuteEncoder):
        self._encoderNames.append(flatKey)
        # If this is the encoder for the predicted field, save its name.
        if value.fieldName == self._predictedField:
          self._predictedFieldEncoder = flatKey
        # Store the flattened representations of the variables within the
        # encoder.
        for encKey, encValue in value.kwArgs.iteritems():
          if isinstance(encValue, PermuteVariable):
            self._flattenedPermutations['%s:%s' % (flatKey, encKey)] = encValue
      elif isinstance(value, PermuteVariable):
        self._flattenedPermutations[flatKey] = value
      else:
        if isinstance(value, PermuteVariable):
          # NOTE(review): this isinstance test can never be true (the elif
          # above already handled PermuteVariable), so this is dead code;
          # if it ever ran, 'key' is undefined here (NameError) — 'flatKey'
          # was likely intended.
          self._flattenedPermutations[key] = value
    rApply(self._permutations, _flattenPermutations) | Read the permutations file and initialize the following member variables:
_predictedField: field name of the field we are trying to
predict
_permutations: Dict containing the full permutations dictionary.
_flattenedPermutations: Dict containing the flattened version of
_permutations. The keys leading to the value in the dict are joined
with a period to create the new key and permute variables within
encoders are pulled out of the encoder.
_encoderNames: keys from self._permutations of only the encoder
variables.
_reportKeys: The 'report' list from the permutations file.
This is a list of the items from each experiment's pickled
results file that should be included in the final report. The
format of each item is a string of key names separated by colons,
each key being one level deeper into the experiment results
dict. For example, 'key1:key2'.
_filterFunc: a user-supplied function that can be used to
filter out specific permutation combinations.
_optimizeKey: which report key to optimize for
_maximize: True if we should try and maximize the optimizeKey
metric. False if we should minimize it.
_dummyModelParamsFunc: a user-supplied function that can be used to
artificially generate HTMPredictionModel results. When supplied,
the model is not actually run through the OPF, but instead is run
through a "Dummy Model" (nupic.swarming.ModelRunner.
OPFDummyModelRunner). This function returns the params dict used
to control various options in the dummy model (the returned metric,
the execution time, etc.). This is used for hypersearch algorithm
development.
Parameters:
---------------------------------------------------------
filename: Name of permutations file
    retval: None | Below is the instruction that describes the task:
### Input:
Read the permutations file and initialize the following member variables:
_predictedField: field name of the field we are trying to
predict
_permutations: Dict containing the full permutations dictionary.
_flattenedPermutations: Dict containing the flattened version of
_permutations. The keys leading to the value in the dict are joined
with a period to create the new key and permute variables within
encoders are pulled out of the encoder.
_encoderNames: keys from self._permutations of only the encoder
variables.
_reportKeys: The 'report' list from the permutations file.
This is a list of the items from each experiment's pickled
results file that should be included in the final report. The
format of each item is a string of key names separated by colons,
each key being one level deeper into the experiment results
dict. For example, 'key1:key2'.
_filterFunc: a user-supplied function that can be used to
filter out specific permutation combinations.
_optimizeKey: which report key to optimize for
_maximize: True if we should try and maximize the optimizeKey
metric. False if we should minimize it.
_dummyModelParamsFunc: a user-supplied function that can be used to
artificially generate HTMPredictionModel results. When supplied,
the model is not actually run through the OPF, but instead is run
through a "Dummy Model" (nupic.swarming.ModelRunner.
OPFDummyModelRunner). This function returns the params dict used
to control various options in the dummy model (the returned metric,
the execution time, etc.). This is used for hypersearch algorithm
development.
Parameters:
---------------------------------------------------------
filename: Name of permutations file
retval: None
### Response:
def _readPermutationsFile(self, filename, modelDescription):
    """
    Read the permutations file and initialize the following member variables:
    _predictedField: field name of the field we are trying to
    predict
    _permutations: Dict containing the full permutations dictionary.
    _flattenedPermutations: Dict containing the flattened version of
    _permutations. The keys leading to the value in the dict are joined
    with a period to create the new key and permute variables within
    encoders are pulled out of the encoder.
    _encoderNames: keys from self._permutations of only the encoder
    variables.
    _reportKeys: The 'report' list from the permutations file.
    This is a list of the items from each experiment's pickled
    results file that should be included in the final report. The
    format of each item is a string of key names separated by colons,
    each key being one level deeper into the experiment results
    dict. For example, 'key1:key2'.
    _filterFunc: a user-supplied function that can be used to
    filter out specific permutation combinations.
    _optimizeKey: which report key to optimize for
    _maximize: True if we should try and maximize the optimizeKey
    metric. False if we should minimize it.
    _dummyModelParamsFunc: a user-supplied function that can be used to
    artificially generate HTMPredictionModel results. When supplied,
    the model is not actually run through the OPF, but instead is run
    through a "Dummy Model" (nupic.swarming.ModelRunner.
    OPFDummyModelRunner). This function returns the params dict used
    to control various options in the dummy model (the returned metric,
    the execution time, etc.). This is used for hypersearch algorithm
    development.
    Parameters:
    ---------------------------------------------------------
    filename: Name of permutations file
    retval: None
    """
    # Open and execute the permutations file
    vars = {}
    # NOTE(review): execfile runs arbitrary Python from 'filename'
    # (Python 2 API) — the permutations file must be trusted input.
    permFile = execfile(filename, globals(), vars)
    # Read in misc info.
    self._reportKeys = vars.get('report', [])
    self._filterFunc = vars.get('permutationFilter', None)
    self._dummyModelParamsFunc = vars.get('dummyModelParams', None)
    self._predictedField = None # default
    self._predictedFieldEncoder = None # default
    self._fixedFields = None # default
    # The fastSwarm variable, if present, contains the params from a best
    # model from a previous swarm. If present, use info from that to seed
    # a fast swarm
    self._fastSwarmModelParams = vars.get('fastSwarmModelParams', None)
    if self._fastSwarmModelParams is not None:
      encoders = self._fastSwarmModelParams['structuredParams']['modelParams']\
          ['sensorParams']['encoders']
      self._fixedFields = []
      for fieldName in encoders:
        if encoders[fieldName] is not None:
          self._fixedFields.append(fieldName)
    if 'fixedFields' in vars:
      self._fixedFields = vars['fixedFields']
    # Get min number of particles per swarm from either permutations file or
    # config.
    self._minParticlesPerSwarm = vars.get('minParticlesPerSwarm')
    # NOTE(review): '== None' works, but 'is None' is the conventional test.
    if self._minParticlesPerSwarm == None:
      self._minParticlesPerSwarm = Configuration.get(
                                      'nupic.hypersearch.minParticlesPerSwarm')
    self._minParticlesPerSwarm = int(self._minParticlesPerSwarm)
    # Enable logic to kill off speculative swarms when an earlier sprint
    # has found that it contains poorly performing field combination?
    self._killUselessSwarms = vars.get('killUselessSwarms', True)
    # The caller can request that the predicted field ALWAYS be included ("yes")
    # or optionally include ("auto"). The setting of "no" is N/A and ignored
    # because in that case the encoder for the predicted field will not even
    # be present in the permutations file.
    # When set to "yes", this will force the first sprint to try the predicted
    # field only (the legacy mode of swarming).
    # When set to "auto", the first sprint tries all possible fields (one at a
    # time) in the first sprint.
    self._inputPredictedField = vars.get("inputPredictedField", "yes")
    # Try all possible 3-field combinations? Normally, we start with the best
    # 2-field combination as a base. When this flag is set though, we try
    # all possible 3-field combinations which takes longer but can find a
    # better model.
    self._tryAll3FieldCombinations = vars.get('tryAll3FieldCombinations', False)
    # Always include timestamp fields in the 3-field swarms?
    # This is a less compute intensive version of tryAll3FieldCombinations.
    # Instead of trying ALL possible 3 field combinations, it just insures
    # that the timestamp fields (dayOfWeek, timeOfDay, weekend) are never left
    # out when generating the 3-field swarms.
    self._tryAll3FieldCombinationsWTimestamps = vars.get(
        'tryAll3FieldCombinationsWTimestamps', False)
    # Allow the permutations file to override minFieldContribution. This would
    # be set to a negative number for large swarms so that you don't disqualify
    # a field in an early sprint just because it did poorly there. Sometimes,
    # a field that did poorly in an early sprint could help accuracy when
    # added in a later sprint
    minFieldContribution = vars.get('minFieldContribution', None)
    if minFieldContribution is not None:
      self._minFieldContribution = minFieldContribution
    # Allow the permutations file to override maxBranching.
    maxBranching = vars.get('maxFieldBranching', None)
    if maxBranching is not None:
      self._maxBranching = maxBranching
    # Read in the optimization info.
    if 'maximize' in vars:
      self._optimizeKey = vars['maximize']
      self._maximize = True
    elif 'minimize' in vars:
      self._optimizeKey = vars['minimize']
      self._maximize = False
    else:
      # NOTE(review): the '%s' placeholder is never filled in (missing
      # '% filename') — the message will print literally.
      raise RuntimeError("Permutations file '%s' does not include a maximize"
                         " or minimize metric.")
    # The permutations file is the new location for maxModels. The old location,
    # in the jobParams is deprecated.
    maxModels = vars.get('maxModels')
    if maxModels is not None:
      if self._maxModels is None:
        self._maxModels = maxModels
      else:
        raise RuntimeError('It is an error to specify maxModels both in the job'
                           ' params AND in the permutations file.')
    # Figure out if what kind of search this is:
    #
    # If it's a temporal prediction search:
    #   the first sprint has 1 swarm, with just the predicted field
    # elif it's a spatial prediction search:
    #   the first sprint has N swarms, each with predicted field + one
    #   other field.
    # elif it's a classification search:
    #   the first sprint has N swarms, each with 1 field
    inferenceType = modelDescription['modelParams']['inferenceType']
    if not InferenceType.validate(inferenceType):
      raise ValueError("Invalid inference type %s" %inferenceType)
    if inferenceType in [InferenceType.TemporalMultiStep,
                         InferenceType.NontemporalMultiStep]:
      # If it does not have a separate encoder for the predicted field that
      # goes to the classifier, it is a legacy multi-step network
      classifierOnlyEncoder = None
      for encoder in modelDescription["modelParams"]["sensorParams"]\
                    ["encoders"].values():
        if encoder.get("classifierOnly", False) \
           and encoder["fieldname"] == vars.get('predictedField', None):
          classifierOnlyEncoder = encoder
          break
      if classifierOnlyEncoder is None or self._inputPredictedField=="yes":
        # If we don't have a separate encoder for the classifier (legacy
        # MultiStep) or the caller explicitly wants to include the predicted
        # field, then use the legacy temporal search methodology.
        self._searchType = HsSearchType.legacyTemporal
      else:
        self._searchType = HsSearchType.temporal
    elif inferenceType in [InferenceType.TemporalNextStep,
                           InferenceType.TemporalAnomaly]:
      self._searchType = HsSearchType.legacyTemporal
    elif inferenceType in (InferenceType.TemporalClassification,
                            InferenceType.NontemporalClassification):
      self._searchType = HsSearchType.classification
    else:
      raise RuntimeError("Unsupported inference type: %s" % inferenceType)
    # Get the predicted field. Note that even classification experiments
    # have a "predicted" field - which is the field that contains the
    # classification value.
    self._predictedField = vars.get('predictedField', None)
    if self._predictedField is None:
      raise RuntimeError("Permutations file '%s' does not have the required"
                         " 'predictedField' variable" % filename)
    # Read in and validate the permutations dict
    if 'permutations' not in vars:
      raise RuntimeError("Permutations file '%s' does not define permutations" % filename)
    if not isinstance(vars['permutations'], dict):
      # NOTE(review): '%s' placeholder has no argument here either (missing
      # '% filename').
      raise RuntimeError("Permutations file '%s' defines a permutations variable "
                         "but it is not a dict")
    self._encoderNames = []
    self._permutations = vars['permutations']
    self._flattenedPermutations = dict()
    def _flattenPermutations(value, keys):
      if ':' in keys[-1]:
        raise RuntimeError("The permutation variable '%s' contains a ':' "
                           "character, which is not allowed.")
      flatKey = _flattenKeys(keys)
      if isinstance(value, PermuteEncoder):
        self._encoderNames.append(flatKey)
        # If this is the encoder for the predicted field, save its name.
        if value.fieldName == self._predictedField:
          self._predictedFieldEncoder = flatKey
        # Store the flattened representations of the variables within the
        # encoder.
        for encKey, encValue in value.kwArgs.iteritems():
          if isinstance(encValue, PermuteVariable):
            self._flattenedPermutations['%s:%s' % (flatKey, encKey)] = encValue
      elif isinstance(value, PermuteVariable):
        self._flattenedPermutations[flatKey] = value
      else:
        if isinstance(value, PermuteVariable):
          # NOTE(review): this isinstance test can never be true (the elif
          # above already handled PermuteVariable), so this is dead code;
          # if it ever ran, 'key' is undefined here (NameError) — 'flatKey'
          # was likely intended.
          self._flattenedPermutations[key] = value
    rApply(self._permutations, _flattenPermutations) |
def load_grid(self, alpha):
        '''Load grid and calculate alpha values from the coverage/2.5.
        '''
        # Load the finite-element grid and electrode layout from the first
        # result directory, then attach a plot manager to it.
        grid = CRGrid.crt_grid(self.dirs[0] + '/grid/elem.dat',
                               self.dirs[0] + '/grid/elec.dat')
        self.plotman = CRPlot.plotManager(grid=grid)
        name = self.dirs[0] + '/inv/coverage.mag'
        # Third column of coverage.mag holds the coverage magnitudes.
        content = np.genfromtxt(name, skip_header=1,
                                skip_footer=1, usecols=([2]))
        abscov = np.abs(content)
        if alpha:
            # Normalize |coverage| by 2.5 and clip at 1; the alpha mask is the
            # complement, so well-covered cells become opaque (mask -> 0).
            normcov = np.divide(abscov, 2.5)
            normcov[np.where(normcov > 1)] = 1
            mask = np.subtract(1, normcov)
            self.alpha = self.plotman.parman.add_data(mask)
        else:
            # No alpha shading requested: use a fully opaque mask of ones.
            self.alpha = self.plotman.parman.add_data(np.ones(len(abscov))) | Load grid and calculate alpha values from the coverage/2.5. | Below is the the instruction that describes the task:
### Input:
Load grid and calculate alpha values from the coverage/2.5.
### Response:
def load_grid(self, alpha):
        '''Load grid and calculate alpha values from the coverage/2.5.
        '''
        # Build the grid from element/electrode files of the first directory.
        grid = CRGrid.crt_grid(self.dirs[0] + '/grid/elem.dat',
                               self.dirs[0] + '/grid/elec.dat')
        self.plotman = CRPlot.plotManager(grid=grid)
        name = self.dirs[0] + '/inv/coverage.mag'
        content = np.genfromtxt(name, skip_header=1,
                                skip_footer=1, usecols=([2]))
        abscov = np.abs(content)
        if alpha:
            # Scale coverage by 2.5, clip to [0, 1], and invert to get the
            # transparency mask (high coverage -> mask near 0 -> opaque).
            normcov = np.divide(abscov, 2.5)
            normcov[np.where(normcov > 1)] = 1
            mask = np.subtract(1, normcov)
            self.alpha = self.plotman.parman.add_data(mask)
        else:
            self.alpha = self.plotman.parman.add_data(np.ones(len(abscov))) |
def b6_verifier(entries, line=None):
    """Raises error if invalid B6/M8 format detected
    Args:
        entries (list): A list of B6Entry instances
        line (int): Line number of first entry
    Raises:
        FormatError: Error when B6/M8 format incorrect with descriptive message
    Example:
        >>> from bio_utils.iterators import b6_iter
        >>> import os
        >>> entries = 'query1\tsubject1\t86.03\t1782\t226\t18\t6038\t7812\t' \
        ... '755762\t753997\t0.0\t1890{0}' \
        ... 'query2\tsubject2\t85.46\t1176\t165\t5\t1213\t2385\t' \
        ... '1154754\t1153582\t0.0\t1219'.format(os.linesep)
        >>> b6_entries = b6_iter(iter(entries.split(os.linesep)))
        >>> b6_verifier(b6_entries)
    """
    # One regex for a full tab-delimited B6/M8 line: 12 fields, numeric
    # except the two leading ID fields.
    regex = r'^.+\t.+\t\d+\.?\d*\t\d+\t\d+\t\d+\t\d+\t\d+\t\d+\t\d+\t' \
            + r'\d+\.?\d*(e-)?\d*\t\d+\.?\d*{0}$'.format(os.linesep)
    delimiter = r'\t'
    for entry in entries:
        try:
            entry_verifier([entry.write()], regex, delimiter)
        except FormatError as error:
            # Format info on what entry error came from
            if line:
                intro = 'Line {0}'.format(str(line))
            elif error.part == 0:
                intro = 'Entry with subject ID {0}'.format(entry.subject)
            else:
                intro = 'Entry with query ID {0}'.format(entry.query)
            # Generate error
            # Map the failing field index (error.part) to a descriptive message.
            if error.part == 0:
                msg = '{0} has no query ID'.format(intro)
            elif error.part == 1:
                msg = '{0} has no subject ID'.format(intro)
            elif error.part == 2:
                msg = '{0} has non-numerical ' \
                      'characters in percent identity'.format(intro)
            elif error.part == 3:
                msg = '{0} has non-numerical ' \
                      'characters in alignment length'.format(intro)
            elif error.part == 4:
                msg = '{0} has non-numerical ' \
                      'characters in mismatches'.format(intro)
            elif error.part == 5:
                msg = '{0} has non-numerical ' \
                      'characters in gaps'.format(intro)
            elif error.part == 6:
                msg = '{0} has non-numerical ' \
                      'characters in query start'.format(intro)
            elif error.part == 7:
                msg = '{0} has non-numerical ' \
                      'characters in query end'.format(intro)
            elif error.part == 8:
                msg = '{0} has non-numerical ' \
                      'characters in subject start'.format(intro)
            elif error.part == 9:
                msg = '{0} has non-numerical ' \
                      'characters in subject end'.format(intro)
            elif error.part == 10:
                msg = '{0} has non-numerical ' \
                      'characters in E-value'.format(intro)
            elif error.part == 11:
                msg = '{0} has non-numerical ' \
                      'characters in bit score'.format(intro)
            else:
                msg = '{0}: Unknown Error: Likely a Bug'.format(intro)
            raise FormatError(message=msg)
        # Track line numbers only when the caller supplied a starting line.
        if line:
            line += 1 | Raises error if invalid B6/M8 format detected
Args:
entries (list): A list of B6Entry instances
line (int): Line number of first entry
Raises:
FormatError: Error when B6/M8 format incorrect with descriptive message
Example:
>>> from bio_utils.iterators import b6_iter
>>> import os
>>> entries = 'query1\tsubject1\t86.03\t1782\t226\t18\t6038\t7812\t' \
... '755762\t753997\t0.0\t1890{0}' \
... 'query2\tsubject2\t85.46\t1176\t165\t5\t1213\t2385\t' \
... '1154754\t1153582\t0.0\t1219'.format(os.linesep)
>>> b6_entries = b6_iter(iter(entries.split(os.linesep)))
        >>> b6_verifier(b6_entries) | Below is the instruction that describes the task:
### Input:
Raises error if invalid B6/M8 format detected
Args:
entries (list): A list of B6Entry instances
line (int): Line number of first entry
Raises:
FormatError: Error when B6/M8 format incorrect with descriptive message
Example:
>>> from bio_utils.iterators import b6_iter
>>> import os
>>> entries = 'query1\tsubject1\t86.03\t1782\t226\t18\t6038\t7812\t' \
... '755762\t753997\t0.0\t1890{0}' \
... 'query2\tsubject2\t85.46\t1176\t165\t5\t1213\t2385\t' \
... '1154754\t1153582\t0.0\t1219'.format(os.linesep)
>>> b6_entries = b6_iter(iter(entries.split(os.linesep)))
>>> b6_verifier(b6_entries)
### Response:
def b6_verifier(entries, line=None):
    """Raises error if invalid B6/M8 format detected

    Args:
        entries (list): A list of B6Entry instances
        line (int): Line number of first entry

    Raises:
        FormatError: Error when B6/M8 format incorrect with descriptive message

    Example:
        >>> from bio_utils.iterators import b6_iter
        >>> import os
        >>> entries = 'query1\tsubject1\t86.03\t1782\t226\t18\t6038\t7812\t' \
        ...           '755762\t753997\t0.0\t1890{0}' \
        ...           'query2\tsubject2\t85.46\t1176\t165\t5\t1213\t2385\t' \
        ...           '1154754\t1153582\t0.0\t1219'.format(os.linesep)
        >>> b6_entries = b6_iter(iter(entries.split(os.linesep)))
        >>> b6_verifier(b6_entries)
    """
    regex = r'^.+\t.+\t\d+\.?\d*\t\d+\t\d+\t\d+\t\d+\t\d+\t\d+\t\d+\t' \
            + r'\d+\.?\d*(e-)?\d*\t\d+\.?\d*{0}$'.format(os.linesep)
    delimiter = r'\t'
    # Map the failing column index reported by entry_verifier to a
    # human-readable reason; replaces a 12-branch elif chain.
    field_messages = {
        0: 'has no query ID',
        1: 'has no subject ID',
        2: 'has non-numerical characters in percent identity',
        3: 'has non-numerical characters in alignment length',
        4: 'has non-numerical characters in mismatches',
        5: 'has non-numerical characters in gaps',
        6: 'has non-numerical characters in query start',
        7: 'has non-numerical characters in query end',
        8: 'has non-numerical characters in subject start',
        9: 'has non-numerical characters in subject end',
        10: 'has non-numerical characters in E-value',
        11: 'has non-numerical characters in bit score',
    }
    for entry in entries:
        try:
            entry_verifier([entry.write()], regex, delimiter)
        except FormatError as error:
            # Identify the offending entry: by line number when known,
            # otherwise by subject ID (query ID missing) or query ID.
            if line:
                intro = 'Line {0}'.format(str(line))
            elif error.part == 0:
                intro = 'Entry with subject ID {0}'.format(entry.subject)
            else:
                intro = 'Entry with query ID {0}'.format(entry.query)
            if error.part in field_messages:
                msg = '{0} {1}'.format(intro, field_messages[error.part])
            else:
                msg = '{0}: Unknown Error: Likely a Bug'.format(intro)
            raise FormatError(message=msg)
        if line:
            line += 1
if line:
line += 1 |
def move_dirty_lock_file(dirty_lock_file, sm_path):
""" Move the dirt_lock file to the sm_path and thereby is not found by auto recovery of backup anymore """
if dirty_lock_file is not None \
and not dirty_lock_file == os.path.join(sm_path, dirty_lock_file.split(os.sep)[-1]):
logger.debug("Move dirty lock from root tmp folder {0} to state machine folder {1}"
"".format(dirty_lock_file, os.path.join(sm_path, dirty_lock_file.split(os.sep)[-1])))
os.rename(dirty_lock_file, os.path.join(sm_path, dirty_lock_file.split(os.sep)[-1])) | Move the dirt_lock file to the sm_path and thereby is not found by auto recovery of backup anymore | Below is the the instruction that describes the task:
### Input:
Move the dirt_lock file to the sm_path and thereby is not found by auto recovery of backup anymore
### Response:
def move_dirty_lock_file(dirty_lock_file, sm_path):
    """Move the dirty-lock file into sm_path so backup auto-recovery no longer finds it.

    :param dirty_lock_file: path of the dirty-lock file, or None (no-op)
    :param sm_path: state machine folder the lock file is moved into
    """
    if dirty_lock_file is None:
        return
    # Compute the destination once instead of re-deriving it three times.
    target = os.path.join(sm_path, os.path.basename(dirty_lock_file))
    if dirty_lock_file == target:
        return  # already inside the state machine folder
    logger.debug("Move dirty lock from root tmp folder {0} to state machine folder {1}"
                 "".format(dirty_lock_file, target))
    os.rename(dirty_lock_file, target)
def kernel_shap_1000_meanref(model, data):
""" Kernel SHAP 1000 mean ref.
color = red_blue_circle(0.5)
linestyle = solid
"""
return lambda X: KernelExplainer(model.predict, kmeans(data, 1)).shap_values(X, nsamples=1000, l1_reg=0) | Kernel SHAP 1000 mean ref.
color = red_blue_circle(0.5)
linestyle = solid | Below is the the instruction that describes the task:
### Input:
Kernel SHAP 1000 mean ref.
color = red_blue_circle(0.5)
linestyle = solid
### Response:
def kernel_shap_1000_meanref(model, data):
    """ Kernel SHAP 1000 mean ref.
    color = red_blue_circle(0.5)
    linestyle = solid
    """
    def explain(X):
        # Background set is the single k-means centroid of ``data``.
        explainer = KernelExplainer(model.predict, kmeans(data, 1))
        return explainer.shap_values(X, nsamples=1000, l1_reg=0)
    return explain
def reductions(fn, seq, acc=None):
"""Return the intermediate values of a reduction
:param fn: a function
:param seq: a sequence
:param acc: the accumulator
:returns: a list
>>> reductions(lambda x, y: x + y, [1, 2, 3])
[1, 3, 6]
>>> reductions(lambda x, y: x + y, [1, 2, 3], 10)
[11, 13, 16]
"""
indexes = xrange(len(seq))
if acc:
return map(lambda i: reduce(lambda x, y: fn(x, y), seq[:i+1], acc), indexes)
else:
return map(lambda i: reduce(lambda x, y: fn(x, y), seq[:i+1]), indexes) | Return the intermediate values of a reduction
:param fn: a function
:param seq: a sequence
:param acc: the accumulator
:returns: a list
>>> reductions(lambda x, y: x + y, [1, 2, 3])
[1, 3, 6]
>>> reductions(lambda x, y: x + y, [1, 2, 3], 10)
[11, 13, 16] | Below is the the instruction that describes the task:
### Input:
Return the intermediate values of a reduction
:param fn: a function
:param seq: a sequence
:param acc: the accumulator
:returns: a list
>>> reductions(lambda x, y: x + y, [1, 2, 3])
[1, 3, 6]
>>> reductions(lambda x, y: x + y, [1, 2, 3], 10)
[11, 13, 16]
### Response:
def reductions(fn, seq, acc=None):
    """Return the intermediate values of a reduction
    :param fn: a function
    :param seq: a sequence
    :param acc: the accumulator
    :returns: a list
    >>> reductions(lambda x, y: x + y, [1, 2, 3])
    [1, 3, 6]
    >>> reductions(lambda x, y: x + y, [1, 2, 3], 10)
    [11, 13, 16]
    """
    # itertools.accumulate yields every partial reduce in a single O(n)
    # pass, replacing the original O(n^2) repeated-reduce and the
    # Python-2-only xrange/list-map idiom.
    if acc is None:
        return list(itertools.accumulate(seq, fn))
    # Seed the reduction with *acc*, but drop the seed itself from the
    # output.  (Testing "acc is None" also fixes the old "if acc:" check,
    # which silently ignored falsy accumulators such as 0.)
    return list(itertools.accumulate(seq, fn, initial=acc))[1:]
def construct_kde(samples_array, use_kombine=False):
"""Constructs a KDE from the given samples.
"""
if use_kombine:
try:
import kombine
except ImportError:
raise ImportError("kombine is not installed.")
# construct the kde
if use_kombine:
kde = kombine.clustered_kde.KDE(samples_array)
else:
kde = scipy.stats.gaussian_kde(samples_array.T)
return kde | Constructs a KDE from the given samples. | Below is the the instruction that describes the task:
### Input:
Constructs a KDE from the given samples.
### Response:
def construct_kde(samples_array, use_kombine=False):
    """Constructs a KDE from the given samples.

    Parameters
    ----------
    samples_array : array
        The samples to build the KDE from.
    use_kombine : bool, optional
        If True, build a ``kombine`` clustered KDE (kombine must be
        installed); otherwise use :func:`scipy.stats.gaussian_kde`.

    Returns
    -------
    The constructed KDE object.
    """
    # Single branch instead of the former duplicated "if use_kombine"
    # checks; kombine is imported lazily since it is an optional dependency.
    if use_kombine:
        try:
            import kombine
        except ImportError:
            raise ImportError("kombine is not installed.")
        return kombine.clustered_kde.KDE(samples_array)
    # scipy's gaussian_kde expects data of shape (ndim, nsamples), hence
    # the transpose.
    return scipy.stats.gaussian_kde(samples_array.T)
def hybrid_forward(self, F, words1, words2, words3): # pylint: disable=arguments-differ, unused-argument
"""Compute analogies for given question words.
Parameters
----------
words1 : Symbol or NDArray
Word indices of first question words. Shape (batch_size, ).
words2 : Symbol or NDArray
Word indices of second question words. Shape (batch_size, ).
words3 : Symbol or NDArray
Word indices of third question words. Shape (batch_size, ).
Returns
-------
predicted_indices : Symbol or NDArray
Indices of predicted analogies of shape (batch_size, k)
"""
return self.analogy(words1, words2, words3) | Compute analogies for given question words.
Parameters
----------
words1 : Symbol or NDArray
Word indices of first question words. Shape (batch_size, ).
words2 : Symbol or NDArray
Word indices of second question words. Shape (batch_size, ).
words3 : Symbol or NDArray
Word indices of third question words. Shape (batch_size, ).
Returns
-------
predicted_indices : Symbol or NDArray
Indices of predicted analogies of shape (batch_size, k) | Below is the the instruction that describes the task:
### Input:
Compute analogies for given question words.
Parameters
----------
words1 : Symbol or NDArray
Word indices of first question words. Shape (batch_size, ).
words2 : Symbol or NDArray
Word indices of second question words. Shape (batch_size, ).
words3 : Symbol or NDArray
Word indices of third question words. Shape (batch_size, ).
Returns
-------
predicted_indices : Symbol or NDArray
Indices of predicted analogies of shape (batch_size, k)
### Response:
def hybrid_forward(self, F, words1, words2, words3): # pylint: disable=arguments-differ, unused-argument
        """Compute analogies for given question words.

        Parameters
        ----------
        F
            Backend namespace supplied by the hybridization machinery;
            unused here (see the ``unused-argument`` pylint disable).
        words1 : Symbol or NDArray
            Word indices of first question words. Shape (batch_size, ).
        words2 : Symbol or NDArray
            Word indices of second question words. Shape (batch_size, ).
        words3 : Symbol or NDArray
            Word indices of third question words. Shape (batch_size, ).

        Returns
        -------
        predicted_indices : Symbol or NDArray
            Indices of predicted analogies of shape (batch_size, k)
        """
        # Delegates entirely to self.analogy; presumably built at
        # construction time — confirm in the enclosing class.
        return self.analogy(words1, words2, words3)
def process_once(self, timeout=0.01):
"""
Handles an event and calls it's handler
Optional arguments:
* timeout=0.01 - Wait for an event until the timeout is reached.
"""
try:
event = self.recv(timeout)
if event:
event_t = event[0]
event_c = event[1]
if event_t == 'JOIN':
self.on_join(event_c[0], event_c[1])
elif event_t == 'PART':
self.on_part(event_c[0], event_c[1], event_c[2])
elif event_t == 'PRIVMSG':
if event_c[1] in self.channels.keys():
self.on_chanmsg(event_c[0], event_c[1], event_c[2])
else:
self.on_privmsg(event_c[0], event_c[2])
elif event_t == 'NOTICE':
if event_c[1] in self.channels.keys():
self.on_channotice(event_c[0], event_c[1], event_c[2])
else:
self.on_privnotice(event_c[0], event_c[2])
elif event_t == 'CTCP':
if event_c[1] in self.channels.keys():
self.on_chanctcp(event_c[0], event_c[1], event_c[2])
else:
self.on_privctcp(event_c[0], event_c[2])
elif event_t == 'CTCP_REPLY':
self.on_ctcp_reply(event_c[0], event_c[2])
elif event_t == 'MODE':
if event_c[0][0] == self.current_nick:
self.on_umode(event_c[1])
else:
self.on_cmode(event_c[0], event_c[1], event_c[2])
elif event_t == 'KICK':
self.on_kick(event_c[0], event_c[1], event_c[2], \
event_c[3])
elif event_t == 'INVITE':
self.on_invite(event_c[0], event_c[2])
elif event_t == 'NICK':
self.on_nick(event_c[0], event_c[1])
elif event_t == 'TOPIC':
self.on_topic(event_c[0], event_c[1], event_c[2])
elif event_t == 'QUIT':
self.on_quit(event_c[0], event_c[1])
elif event_t == 'LUSERS':
self.on_lusers(event_c)
elif event_t == 'ERROR':
self.on_error(event_c[0])
elif event_t == 'UNKNOWN':
self.on_unknown(event_c[0])
except self.LurklibError as exception:
self.on_exception(exception) | Handles an event and calls it's handler
Optional arguments:
* timeout=0.01 - Wait for an event until the timeout is reached. | Below is the the instruction that describes the task:
### Input:
Handles an event and calls it's handler
Optional arguments:
* timeout=0.01 - Wait for an event until the timeout is reached.
### Response:
def process_once(self, timeout=0.01):
        """
        Handles an event and calls its handler
        Optional arguments:
        * timeout=0.01 - Wait for an event until the timeout is reached.
        """
        try:
            # recv returns a (event_type, payload) pair, or a falsy value
            # when nothing arrived within the timeout.
            event = self.recv(timeout)
            if event:
                event_t = event[0]
                event_c = event[1]
                if event_t == 'JOIN':
                    self.on_join(event_c[0], event_c[1])
                elif event_t == 'PART':
                    self.on_part(event_c[0], event_c[1], event_c[2])
                # For PRIVMSG/NOTICE/CTCP the channel vs. private variant is
                # decided by whether the target is a currently joined channel.
                elif event_t == 'PRIVMSG':
                    if event_c[1] in self.channels.keys():
                        self.on_chanmsg(event_c[0], event_c[1], event_c[2])
                    else:
                        self.on_privmsg(event_c[0], event_c[2])
                elif event_t == 'NOTICE':
                    if event_c[1] in self.channels.keys():
                        self.on_channotice(event_c[0], event_c[1], event_c[2])
                    else:
                        self.on_privnotice(event_c[0], event_c[2])
                elif event_t == 'CTCP':
                    if event_c[1] in self.channels.keys():
                        self.on_chanctcp(event_c[0], event_c[1], event_c[2])
                    else:
                        self.on_privctcp(event_c[0], event_c[2])
                elif event_t == 'CTCP_REPLY':
                    self.on_ctcp_reply(event_c[0], event_c[2])
                elif event_t == 'MODE':
                    # A mode change on our own nick is a user-mode change,
                    # anything else is a channel-mode change.
                    if event_c[0][0] == self.current_nick:
                        self.on_umode(event_c[1])
                    else:
                        self.on_cmode(event_c[0], event_c[1], event_c[2])
                elif event_t == 'KICK':
                    self.on_kick(event_c[0], event_c[1], event_c[2], \
                        event_c[3])
                elif event_t == 'INVITE':
                    self.on_invite(event_c[0], event_c[2])
                elif event_t == 'NICK':
                    self.on_nick(event_c[0], event_c[1])
                elif event_t == 'TOPIC':
                    self.on_topic(event_c[0], event_c[1], event_c[2])
                elif event_t == 'QUIT':
                    self.on_quit(event_c[0], event_c[1])
                elif event_t == 'LUSERS':
                    self.on_lusers(event_c)
                elif event_t == 'ERROR':
                    self.on_error(event_c[0])
                elif event_t == 'UNKNOWN':
                    self.on_unknown(event_c[0])
        # Library errors are funneled to a single user-overridable hook
        # instead of propagating out of the event loop.
        except self.LurklibError as exception:
            self.on_exception(exception)
def _start_actions(self):
"""
Start all the actions for the recipes
"""
Global.LOGGER.info("starting actions")
for recipe in Global.CONFIG_MANAGER.recipes:
Global.CONFIG_MANAGER.read_recipe(recipe)
list(map(lambda section: self._start_action_for_section(
section), Global.CONFIG_MANAGER.sections)) | Start all the actions for the recipes | Below is the the instruction that describes the task:
### Input:
Start all the actions for the recipes
### Response:
def _start_actions(self):
        """
        Start all the actions for the recipes.
        """
        Global.LOGGER.info("starting actions")
        # Load every configured recipe first ...
        for recipe in Global.CONFIG_MANAGER.recipes:
            Global.CONFIG_MANAGER.read_recipe(recipe)
        # ... then start one action per section.  A plain loop replaces the
        # former list(map(lambda ...)) which built and discarded a list
        # purely for its side effects.
        for section in Global.CONFIG_MANAGER.sections:
            self._start_action_for_section(section)
def from_graphtool(cls, graph, weight='weight'):
r"""Import a graph from graph-tool.
Edge weights are retrieved as an edge property,
under the name specified by the ``weight`` parameter.
Signals are retrieved from node properties,
and stored in the :attr:`signals` dictionary under the property name.
`N`-dimensional signals that were broken during export are joined.
Parameters
----------
graph : :class:`graph_tool.Graph`
A graph-tool graph object.
weight : string
The edge property that holds the numerical values used as the edge
weights. All edge weights are set to 1 if None, or not found.
Returns
-------
graph : :class:`~pygsp.graphs.Graph`
A PyGSP graph object.
Notes
-----
If the graph has multiple edge connecting the same two nodes, a sum
over the edges is taken to merge them.
See Also
--------
from_networkx : import from NetworkX
load : load from a file
Examples
--------
>>> import graph_tool as gt
>>> graph = gt.Graph(directed=False)
>>> e1 = graph.add_edge(0, 1)
>>> e2 = graph.add_edge(1, 2)
>>> v = graph.add_vertex()
>>> eprop = graph.new_edge_property("double")
>>> eprop[e1] = 0.2
>>> eprop[(1, 2)] = 0.9
>>> graph.edge_properties["weight"] = eprop
>>> vprop = graph.new_vertex_property("double", val=np.nan)
>>> vprop[3] = 3.1416
>>> graph.vertex_properties["sig"] = vprop
>>> graph = graphs.Graph.from_graphtool(graph)
>>> graph.W.toarray()
array([[0. , 0.2, 0. , 0. ],
[0.2, 0. , 0.9, 0. ],
[0. , 0.9, 0. , 0. ],
[0. , 0. , 0. , 0. ]])
>>> graph.signals
{'sig': PropertyArray([ nan, nan, nan, 3.1416])}
"""
gt = _import_graphtool()
import graph_tool.spectral
from .graph import Graph
weight = graph.edge_properties.get(weight, None)
adjacency = gt.spectral.adjacency(graph, weight=weight)
graph_pg = Graph(adjacency.T)
for name, signal in graph.vertex_properties.items():
graph_pg.set_signal(signal.get_array(), name)
graph_pg._join_signals()
return graph_pg | r"""Import a graph from graph-tool.
Edge weights are retrieved as an edge property,
under the name specified by the ``weight`` parameter.
Signals are retrieved from node properties,
and stored in the :attr:`signals` dictionary under the property name.
`N`-dimensional signals that were broken during export are joined.
Parameters
----------
graph : :class:`graph_tool.Graph`
A graph-tool graph object.
weight : string
The edge property that holds the numerical values used as the edge
weights. All edge weights are set to 1 if None, or not found.
Returns
-------
graph : :class:`~pygsp.graphs.Graph`
A PyGSP graph object.
Notes
-----
If the graph has multiple edge connecting the same two nodes, a sum
over the edges is taken to merge them.
See Also
--------
from_networkx : import from NetworkX
load : load from a file
Examples
--------
>>> import graph_tool as gt
>>> graph = gt.Graph(directed=False)
>>> e1 = graph.add_edge(0, 1)
>>> e2 = graph.add_edge(1, 2)
>>> v = graph.add_vertex()
>>> eprop = graph.new_edge_property("double")
>>> eprop[e1] = 0.2
>>> eprop[(1, 2)] = 0.9
>>> graph.edge_properties["weight"] = eprop
>>> vprop = graph.new_vertex_property("double", val=np.nan)
>>> vprop[3] = 3.1416
>>> graph.vertex_properties["sig"] = vprop
>>> graph = graphs.Graph.from_graphtool(graph)
>>> graph.W.toarray()
array([[0. , 0.2, 0. , 0. ],
[0.2, 0. , 0.9, 0. ],
[0. , 0.9, 0. , 0. ],
[0. , 0. , 0. , 0. ]])
>>> graph.signals
{'sig': PropertyArray([ nan, nan, nan, 3.1416])} | Below is the the instruction that describes the task:
### Input:
r"""Import a graph from graph-tool.
Edge weights are retrieved as an edge property,
under the name specified by the ``weight`` parameter.
Signals are retrieved from node properties,
and stored in the :attr:`signals` dictionary under the property name.
`N`-dimensional signals that were broken during export are joined.
Parameters
----------
graph : :class:`graph_tool.Graph`
A graph-tool graph object.
weight : string
The edge property that holds the numerical values used as the edge
weights. All edge weights are set to 1 if None, or not found.
Returns
-------
graph : :class:`~pygsp.graphs.Graph`
A PyGSP graph object.
Notes
-----
If the graph has multiple edge connecting the same two nodes, a sum
over the edges is taken to merge them.
See Also
--------
from_networkx : import from NetworkX
load : load from a file
Examples
--------
>>> import graph_tool as gt
>>> graph = gt.Graph(directed=False)
>>> e1 = graph.add_edge(0, 1)
>>> e2 = graph.add_edge(1, 2)
>>> v = graph.add_vertex()
>>> eprop = graph.new_edge_property("double")
>>> eprop[e1] = 0.2
>>> eprop[(1, 2)] = 0.9
>>> graph.edge_properties["weight"] = eprop
>>> vprop = graph.new_vertex_property("double", val=np.nan)
>>> vprop[3] = 3.1416
>>> graph.vertex_properties["sig"] = vprop
>>> graph = graphs.Graph.from_graphtool(graph)
>>> graph.W.toarray()
array([[0. , 0.2, 0. , 0. ],
[0.2, 0. , 0.9, 0. ],
[0. , 0.9, 0. , 0. ],
[0. , 0. , 0. , 0. ]])
>>> graph.signals
{'sig': PropertyArray([ nan, nan, nan, 3.1416])}
### Response:
def from_graphtool(cls, graph, weight='weight'):
        r"""Import a graph from graph-tool.

        Edge weights are read from the edge property named by ``weight``;
        all weights default to 1 if that property is missing.  Every vertex
        property of the graph-tool graph is attached to the returned graph
        as a signal of the same name, and N-dimensional signals that were
        split on export are joined back together.  Multiple edges between
        the same pair of nodes are merged by summing their weights.

        Parameters
        ----------
        graph : :class:`graph_tool.Graph`
            A graph-tool graph object.
        weight : string
            The edge property that holds the numerical values used as the
            edge weights.

        Returns
        -------
        graph : :class:`~pygsp.graphs.Graph`
            A PyGSP graph object.

        See Also
        --------
        from_networkx : import from NetworkX
        load : load from a file
        """
        gt = _import_graphtool()
        import graph_tool.spectral

        from .graph import Graph

        edge_weight = graph.edge_properties.get(weight, None)
        adjacency = gt.spectral.adjacency(graph, weight=edge_weight)
        pygsp_graph = Graph(adjacency.T)
        for name, vertex_property in graph.vertex_properties.items():
            pygsp_graph.set_signal(vertex_property.get_array(), name)
        pygsp_graph._join_signals()
        return pygsp_graph
def root(self, value):
"""Set new XML tree"""
self._xml = t2s(value)
self._root = value | Set new XML tree | Below is the the instruction that describes the task:
### Input:
Set new XML tree
### Response:
def root(self, value):
    """Set new XML tree.

    Stores *value* as the root element and refreshes the cached serialized
    form in ``self._xml`` so the two stay in sync.
    """
    # t2s presumably serializes the tree to a string — confirm its contract.
    self._xml = t2s(value)
    self._root = value
def update_metadata(self, key, value):
"""Set *key* in the metadata to *value*.
Returns the previous value of *key*, or None if the key was
not previously set.
"""
old_value = self.contents['metadata'].get(key)
self.contents['metadata'][key] = value
self._log('Updated metadata: %s=%s' % (key, value))
return old_value | Set *key* in the metadata to *value*.
Returns the previous value of *key*, or None if the key was
not previously set. | Below is the the instruction that describes the task:
### Input:
Set *key* in the metadata to *value*.
Returns the previous value of *key*, or None if the key was
not previously set.
### Response:
def update_metadata(self, key, value):
        """Set *key* in the metadata to *value*.

        Returns the previous value of *key*, or None if the key was
        not previously set.
        """
        metadata = self.contents['metadata']
        previous = metadata.get(key)
        metadata[key] = value
        self._log('Updated metadata: %s=%s' % (key, value))
        return previous
def hash(filepath, method='sha1', buffer_size=65536):
"""
Calculate a hash of a local file.
Parameters
----------
filepath : str
method : {'sha1', 'md5'}
buffer_size : int, optional (default: 65536 byte = 64 KiB)
in byte
Returns
-------
hash : str
"""
if method == 'sha1':
hash_function = hashlib.sha1()
elif method == 'md5':
hash_function = hashlib.md5()
else:
raise NotImplementedError('Only md5 and sha1 hashes are known, but '
' \'{}\' was specified.'.format(method))
with open(filepath, 'rb') as fp:
while True:
data = fp.read(buffer_size)
if not data:
break
hash_function.update(data)
return hash_function.hexdigest() | Calculate a hash of a local file.
Parameters
----------
filepath : str
method : {'sha1', 'md5'}
buffer_size : int, optional (default: 65536 byte = 64 KiB)
in byte
Returns
-------
hash : str | Below is the the instruction that describes the task:
### Input:
Calculate a hash of a local file.
Parameters
----------
filepath : str
method : {'sha1', 'md5'}
buffer_size : int, optional (default: 65536 byte = 64 KiB)
in byte
Returns
-------
hash : str
### Response:
def hash(filepath, method='sha1', buffer_size=65536):
    """
    Calculate a hash of a local file.

    Parameters
    ----------
    filepath : str
        Path of the file to hash.
    method : str, optional (default: 'sha1')
        Any algorithm name accepted by :func:`hashlib.new`, e.g. 'sha1',
        'md5', 'sha256'.
    buffer_size : int, optional (default: 65536 byte = 64 KiB)
        Chunk size in byte used when reading the file.

    Returns
    -------
    hash : str
        Hex digest of the file contents.
    """
    # hashlib.new() generalizes the former md5/sha1-only dispatch while
    # still raising NotImplementedError for unknown algorithm names.
    try:
        hash_function = hashlib.new(method)
    except ValueError:
        raise NotImplementedError('Hash method \'{}\' is not supported '
                                  'by hashlib.'.format(method))
    # Read in fixed-size chunks so large files use constant memory.
    with open(filepath, 'rb') as fp:
        for chunk in iter(lambda: fp.read(buffer_size), b''):
            hash_function.update(chunk)
    return hash_function.hexdigest()
def get_stats_snmp(self, bulk=False, snmp_oid=None):
"""Update stats using SNMP.
If bulk=True, use a bulk request instead of a get request.
"""
snmp_oid = snmp_oid or {}
from glances.snmp import GlancesSNMPClient
# Init the SNMP request
clientsnmp = GlancesSNMPClient(host=self.args.client,
port=self.args.snmp_port,
version=self.args.snmp_version,
community=self.args.snmp_community)
# Process the SNMP request
ret = {}
if bulk:
# Bulk request
snmpresult = clientsnmp.getbulk_by_oid(0, 10, itervalues(*snmp_oid))
if len(snmp_oid) == 1:
# Bulk command for only one OID
# Note: key is the item indexed but the OID result
for item in snmpresult:
if iterkeys(item)[0].startswith(itervalues(snmp_oid)[0]):
ret[iterkeys(snmp_oid)[0] + iterkeys(item)
[0].split(itervalues(snmp_oid)[0])[1]] = itervalues(item)[0]
else:
# Build the internal dict with the SNMP result
# Note: key is the first item in the snmp_oid
index = 1
for item in snmpresult:
item_stats = {}
item_key = None
for key in iterkeys(snmp_oid):
oid = snmp_oid[key] + '.' + str(index)
if oid in item:
if item_key is None:
item_key = item[oid]
else:
item_stats[key] = item[oid]
if item_stats:
ret[item_key] = item_stats
index += 1
else:
# Simple get request
snmpresult = clientsnmp.get_by_oid(itervalues(*snmp_oid))
# Build the internal dict with the SNMP result
for key in iterkeys(snmp_oid):
ret[key] = snmpresult[snmp_oid[key]]
return ret | Update stats using SNMP.
If bulk=True, use a bulk request instead of a get request. | Below is the the instruction that describes the task:
### Input:
Update stats using SNMP.
If bulk=True, use a bulk request instead of a get request.
### Response:
def get_stats_snmp(self, bulk=False, snmp_oid=None):
        """Update stats using SNMP.

        If bulk=True, use a bulk request instead of a get request.

        :param bulk: use SNMP GETBULK instead of a plain GET
        :param snmp_oid: mapping of stat name -> OID string (defaults to {})
        :return: dict of fetched values; its shape depends on *bulk* and on
            the number of OIDs (see inline comments)
        """
        snmp_oid = snmp_oid or {}
        from glances.snmp import GlancesSNMPClient
        # Init the SNMP request
        clientsnmp = GlancesSNMPClient(host=self.args.client,
                                       port=self.args.snmp_port,
                                       version=self.args.snmp_version,
                                       community=self.args.snmp_community)
        # Process the SNMP request
        ret = {}
        # NOTE(review): iterkeys/itervalues here must be glances' own compat
        # helpers returning indexable sequences — builtin dict iterators
        # would not support the [0] indexing below.  Confirm.
        if bulk:
            # Bulk request
            snmpresult = clientsnmp.getbulk_by_oid(0, 10, itervalues(*snmp_oid))
            if len(snmp_oid) == 1:
                # Bulk command for only one OID
                # Note: key is the item indexed but the OID result
                for item in snmpresult:
                    if iterkeys(item)[0].startswith(itervalues(snmp_oid)[0]):
                        ret[iterkeys(snmp_oid)[0] + iterkeys(item)
                            [0].split(itervalues(snmp_oid)[0])[1]] = itervalues(item)[0]
            else:
                # Build the internal dict with the SNMP result
                # Note: key is the first item in the snmp_oid
                index = 1
                for item in snmpresult:
                    item_stats = {}
                    item_key = None
                    for key in iterkeys(snmp_oid):
                        # Each row of the bulk result is addressed by OID
                        # suffixed with a 1-based row index.
                        oid = snmp_oid[key] + '.' + str(index)
                        if oid in item:
                            if item_key is None:
                                item_key = item[oid]
                            else:
                                item_stats[key] = item[oid]
                    if item_stats:
                        ret[item_key] = item_stats
                    index += 1
        else:
            # Simple get request
            snmpresult = clientsnmp.get_by_oid(itervalues(*snmp_oid))
            # Build the internal dict with the SNMP result
            for key in iterkeys(snmp_oid):
                ret[key] = snmpresult[snmp_oid[key]]
        return ret
def _get_default_language(self):
"""
If a default language has been set, and is still available in
`self.available_languages`, return it and remove it from the list.
If not, simply pop the first available language.
"""
assert hasattr(self, 'available_languages'), \
'No available languages have been generated.'
assert len(self.available_languages) > 0, \
'No available languages to select from.'
if (
settings.DEFAULT_LANGUAGE and
settings.DEFAULT_LANGUAGE in self.available_languages
) or (
'language_code' not in self.form.base_fields
):
# Default language still available
self.available_languages.remove(settings.DEFAULT_LANGUAGE)
return settings.DEFAULT_LANGUAGE
else:
# Select the first item and return it
return self.available_languages.pop(0) | If a default language has been set, and is still available in
`self.available_languages`, return it and remove it from the list.
If not, simply pop the first available language. | Below is the the instruction that describes the task:
### Input:
If a default language has been set, and is still available in
`self.available_languages`, return it and remove it from the list.
If not, simply pop the first available language.
### Response:
def _get_default_language(self):
        """
        If a default language has been set, and is still available in
        `self.available_languages`, return it and remove it from the list.
        If not, simply pop the first available language.
        """
        assert hasattr(self, 'available_languages'), \
            'No available languages have been generated.'
        assert len(self.available_languages) > 0, \
            'No available languages to select from.'
        # NOTE(review): if the second disjunct is true while
        # settings.DEFAULT_LANGUAGE is NOT in available_languages, the
        # remove() below raises ValueError — confirm whether that case can
        # occur for forms without a 'language_code' field.
        if (
            settings.DEFAULT_LANGUAGE and
            settings.DEFAULT_LANGUAGE in self.available_languages
        ) or (
            'language_code' not in self.form.base_fields
        ):
            # Default language still available
            self.available_languages.remove(settings.DEFAULT_LANGUAGE)
            return settings.DEFAULT_LANGUAGE
        else:
            # Select the first item and return it
            return self.available_languages.pop(0)
def default_field_resolver(source, info, **args):
"""Default field resolver.
If a resolve function is not given, then a default resolve behavior is used which
takes the property of the source object of the same name as the field and returns
it as the result, or if it's a function, returns the result of calling that function
while passing along args and context.
For dictionaries, the field names are used as keys, for all other objects they are
used as attribute names.
"""
# Ensure source is a value for which property access is acceptable.
field_name = info.field_name
value = (
source.get(field_name)
if isinstance(source, dict)
else getattr(source, field_name, None)
)
if callable(value):
return value(info, **args)
return value | Default field resolver.
If a resolve function is not given, then a default resolve behavior is used which
takes the property of the source object of the same name as the field and returns
it as the result, or if it's a function, returns the result of calling that function
while passing along args and context.
For dictionaries, the field names are used as keys, for all other objects they are
used as attribute names. | Below is the the instruction that describes the task:
### Input:
Default field resolver.
If a resolve function is not given, then a default resolve behavior is used which
takes the property of the source object of the same name as the field and returns
it as the result, or if it's a function, returns the result of calling that function
while passing along args and context.
For dictionaries, the field names are used as keys, for all other objects they are
used as attribute names.
### Response:
def default_field_resolver(source, info, **args):
    """Default field resolver.

    Resolves a field by looking up ``info.field_name`` on *source*: as a
    key when *source* is a dict, as an attribute otherwise (missing
    entries yield None).  If the looked-up value is callable it is invoked
    with *info* and the field arguments, and its result is returned
    instead.
    """
    field_name = info.field_name
    if isinstance(source, dict):
        value = source.get(field_name)
    else:
        value = getattr(source, field_name, None)
    if callable(value):
        return value(info, **args)
    return value
def pop_all(self):
""" Preserve the context stack by transferring it to a new instance """
ret = ExitStack()
ret._context_stack.append(self._context_stack.pop())
self._context_stack.append([]) | Preserve the context stack by transferring it to a new instance | Below is the the instruction that describes the task:
### Input:
Preserve the context stack by transferring it to a new instance
### Response:
def pop_all(self):
        """ Preserve the context stack by transferring it to a new instance.

        Pops the current top-of-stack frame into a fresh ExitStack, pushes a
        new empty frame onto this instance, and returns the new ExitStack.
        """
        ret = ExitStack()
        ret._context_stack.append(self._context_stack.pop())
        self._context_stack.append([])
        # Bug fix: the freshly built stack was previously dropped (no
        # return), which made the "transfer" invisible to callers.
        return ret
def _find_by(self, key):
"""Find devices."""
by_path = glob.glob('/dev/input/by-{key}/*-event-*'.format(key=key))
for device_path in by_path:
self._parse_device_path(device_path) | Find devices. | Below is the the instruction that describes the task:
### Input:
Find devices.
### Response:
def _find_by(self, key):
"""Find devices."""
by_path = glob.glob('/dev/input/by-{key}/*-event-*'.format(key=key))
for device_path in by_path:
self._parse_device_path(device_path) |
def _export_with_html(self): # pragma: no cover
"Computes substitutions before using nbconvert with preprocessors"
self.export_success = False
try:
tstamp = time.strftime(self.timestamp_format, self._timestamp)
substitutions = {}
for (basename, ext), entry in self._files.items():
(_, info) = entry
html_key = self._replacements.get((basename, ext), None)
if html_key is None: continue
filename = self._format(basename, {'timestamp':tstamp,
'notebook':self.notebook_name})
fpath = filename+(('.%s' % ext) if ext else '')
info = {'src':fpath, 'mime_type':info['mime_type']}
# No mime type
if 'mime_type' not in info: pass
# Not displayable in an HTML tag
elif info['mime_type'] not in self._tags: pass
else:
basename, ext = os.path.splitext(fpath)
truncated = self._truncate_name(basename, ext[1:])
link_html = self._format(self._tags[info['mime_type']],
{'src':truncated,
'mime_type':info['mime_type'],
'css':''})
substitutions[html_key] = (link_html, truncated)
node = self._get_notebook_node()
html = self._generate_html(node, substitutions)
export_filename = self.snapshot_name
# Add the html snapshot
super(NotebookArchive, self).add(filename=export_filename,
data=html, info={'file-ext':'html',
'mime_type':'text/html',
'notebook':self.notebook_name})
# Add cleared notebook
cleared = self._clear_notebook(node)
super(NotebookArchive, self).add(filename=export_filename,
data=cleared, info={'file-ext':'ipynb',
'mime_type':'text/json',
'notebook':self.notebook_name})
# If store cleared_notebook... save here
super(NotebookArchive, self).export(timestamp=self._timestamp,
info={'notebook':self.notebook_name})
except:
self.traceback = traceback.format_exc()
else:
        self.export_success = True | Computes substitutions before using nbconvert with preprocessors | Below is the instruction that describes the task:
### Input:
Computes substitutions before using nbconvert with preprocessors
### Response:
def _export_with_html(self): # pragma: no cover
"Computes substitutions before using nbconvert with preprocessors"
self.export_success = False
try:
tstamp = time.strftime(self.timestamp_format, self._timestamp)
substitutions = {}
for (basename, ext), entry in self._files.items():
(_, info) = entry
html_key = self._replacements.get((basename, ext), None)
if html_key is None: continue
filename = self._format(basename, {'timestamp':tstamp,
'notebook':self.notebook_name})
fpath = filename+(('.%s' % ext) if ext else '')
info = {'src':fpath, 'mime_type':info['mime_type']}
# No mime type
if 'mime_type' not in info: pass
# Not displayable in an HTML tag
elif info['mime_type'] not in self._tags: pass
else:
basename, ext = os.path.splitext(fpath)
truncated = self._truncate_name(basename, ext[1:])
link_html = self._format(self._tags[info['mime_type']],
{'src':truncated,
'mime_type':info['mime_type'],
'css':''})
substitutions[html_key] = (link_html, truncated)
node = self._get_notebook_node()
html = self._generate_html(node, substitutions)
export_filename = self.snapshot_name
# Add the html snapshot
super(NotebookArchive, self).add(filename=export_filename,
data=html, info={'file-ext':'html',
'mime_type':'text/html',
'notebook':self.notebook_name})
# Add cleared notebook
cleared = self._clear_notebook(node)
super(NotebookArchive, self).add(filename=export_filename,
data=cleared, info={'file-ext':'ipynb',
'mime_type':'text/json',
'notebook':self.notebook_name})
# If store cleared_notebook... save here
super(NotebookArchive, self).export(timestamp=self._timestamp,
info={'notebook':self.notebook_name})
except:
self.traceback = traceback.format_exc()
else:
self.export_success = True |
def ls_(active=None, cache=True, path=None):
'''
Return a list of the containers available on the minion
path
path to the container parent directory
default: /var/lib/lxc (system)
.. versionadded:: 2015.8.0
active
If ``True``, return only active (i.e. running) containers
.. versionadded:: 2015.5.0
CLI Example:
.. code-block:: bash
salt '*' lxc.ls
salt '*' lxc.ls active=True
'''
contextvar = 'lxc.ls{0}'.format(path)
if active:
contextvar += '.active'
if cache and (contextvar in __context__):
return __context__[contextvar]
else:
ret = []
cmd = 'lxc-ls'
if path:
cmd += ' -P {0}'.format(pipes.quote(path))
if active:
cmd += ' --active'
output = __salt__['cmd.run_stdout'](cmd, python_shell=False)
for line in output.splitlines():
ret.extend(line.split())
__context__[contextvar] = ret
return ret | Return a list of the containers available on the minion
path
path to the container parent directory
default: /var/lib/lxc (system)
.. versionadded:: 2015.8.0
active
If ``True``, return only active (i.e. running) containers
.. versionadded:: 2015.5.0
CLI Example:
.. code-block:: bash
salt '*' lxc.ls
        salt '*' lxc.ls active=True | Below is the instruction that describes the task:
### Input:
Return a list of the containers available on the minion
path
path to the container parent directory
default: /var/lib/lxc (system)
.. versionadded:: 2015.8.0
active
If ``True``, return only active (i.e. running) containers
.. versionadded:: 2015.5.0
CLI Example:
.. code-block:: bash
salt '*' lxc.ls
salt '*' lxc.ls active=True
### Response:
def ls_(active=None, cache=True, path=None):
'''
Return a list of the containers available on the minion
path
path to the container parent directory
default: /var/lib/lxc (system)
.. versionadded:: 2015.8.0
active
If ``True``, return only active (i.e. running) containers
.. versionadded:: 2015.5.0
CLI Example:
.. code-block:: bash
salt '*' lxc.ls
salt '*' lxc.ls active=True
'''
contextvar = 'lxc.ls{0}'.format(path)
if active:
contextvar += '.active'
if cache and (contextvar in __context__):
return __context__[contextvar]
else:
ret = []
cmd = 'lxc-ls'
if path:
cmd += ' -P {0}'.format(pipes.quote(path))
if active:
cmd += ' --active'
output = __salt__['cmd.run_stdout'](cmd, python_shell=False)
for line in output.splitlines():
ret.extend(line.split())
__context__[contextvar] = ret
return ret |
def _interop_prop_to_py_ast(
ctx: GeneratorContext, node: HostField, is_assigning: bool = False
) -> GeneratedPyAST:
"""Generate a Python AST node for Python interop property access."""
assert node.op == NodeOp.HOST_FIELD
target_ast = gen_py_ast(ctx, node.target)
return GeneratedPyAST(
node=ast.Attribute(
value=target_ast.node,
attr=munge(node.field),
ctx=ast.Store() if is_assigning else ast.Load(),
),
dependencies=target_ast.dependencies,
    ) | Generate a Python AST node for Python interop property access. | Below is the instruction that describes the task:
### Input:
Generate a Python AST node for Python interop property access.
### Response:
def _interop_prop_to_py_ast(
ctx: GeneratorContext, node: HostField, is_assigning: bool = False
) -> GeneratedPyAST:
"""Generate a Python AST node for Python interop property access."""
assert node.op == NodeOp.HOST_FIELD
target_ast = gen_py_ast(ctx, node.target)
return GeneratedPyAST(
node=ast.Attribute(
value=target_ast.node,
attr=munge(node.field),
ctx=ast.Store() if is_assigning else ast.Load(),
),
dependencies=target_ast.dependencies,
) |
def lxml(self) -> HtmlElement:
"""`lxml <http://lxml.de>`_ representation of the
:class:`Element <Element>` or :class:`HTML <HTML>`.
"""
if self._lxml is None:
try:
self._lxml = soup_parse(self.html, features='html.parser')
except ValueError:
self._lxml = lxml.html.fromstring(self.raw_html)
return self._lxml | `lxml <http://lxml.de>`_ representation of the
    :class:`Element <Element>` or :class:`HTML <HTML>`. | Below is the instruction that describes the task:
### Input:
`lxml <http://lxml.de>`_ representation of the
:class:`Element <Element>` or :class:`HTML <HTML>`.
### Response:
def lxml(self) -> HtmlElement:
"""`lxml <http://lxml.de>`_ representation of the
:class:`Element <Element>` or :class:`HTML <HTML>`.
"""
if self._lxml is None:
try:
self._lxml = soup_parse(self.html, features='html.parser')
except ValueError:
self._lxml = lxml.html.fromstring(self.raw_html)
return self._lxml |
def assoc(self, key, value):
'''Returns a new ImmutableDict instance with value associated with key.
The implicit parameter is not modified.'''
copydict = ImmutableDict()
copydict.tree = self.tree.assoc(hash(key), (key, value))
copydict._length = self._length + 1
return copydict | Returns a new ImmutableDict instance with value associated with key.
    The implicit parameter is not modified. | Below is the instruction that describes the task:
### Input:
Returns a new ImmutableDict instance with value associated with key.
The implicit parameter is not modified.
### Response:
def assoc(self, key, value):
'''Returns a new ImmutableDict instance with value associated with key.
The implicit parameter is not modified.'''
copydict = ImmutableDict()
copydict.tree = self.tree.assoc(hash(key), (key, value))
copydict._length = self._length + 1
return copydict |
def generateFromNumbers(self, numbers):
"""
Generate a sequence from a list of numbers.
Note: Any `None` in the list of numbers is considered a reset.
@param numbers (list) List of numbers
@return (list) Generated sequence
"""
sequence = []
for number in numbers:
if number == None:
sequence.append(number)
else:
pattern = self.patternMachine.get(number)
sequence.append(pattern)
return sequence | Generate a sequence from a list of numbers.
Note: Any `None` in the list of numbers is considered a reset.
@param numbers (list) List of numbers
    @return (list) Generated sequence | Below is the instruction that describes the task:
### Input:
Generate a sequence from a list of numbers.
Note: Any `None` in the list of numbers is considered a reset.
@param numbers (list) List of numbers
@return (list) Generated sequence
### Response:
def generateFromNumbers(self, numbers):
"""
Generate a sequence from a list of numbers.
Note: Any `None` in the list of numbers is considered a reset.
@param numbers (list) List of numbers
@return (list) Generated sequence
"""
sequence = []
for number in numbers:
if number == None:
sequence.append(number)
else:
pattern = self.patternMachine.get(number)
sequence.append(pattern)
return sequence |
def format_throughput(available, used=None):
""" Format the read/write throughput for display """
if used is None:
return str(available)
percent = float(used) / available
    return "{0:.0f}/{1:.0f} ({2:.0%})".format(used, available, percent) | Format the read/write throughput for display | Below is the instruction that describes the task:
### Input:
Format the read/write throughput for display
### Response:
def format_throughput(available, used=None):
""" Format the read/write throughput for display """
if used is None:
return str(available)
percent = float(used) / available
return "{0:.0f}/{1:.0f} ({2:.0%})".format(used, available, percent) |
def _build_unicode_character_database(self):
"""
Function for parsing the Unicode character data from the Unicode Character
Database (UCD) and generating a lookup table. For more info on the UCD,
see the following website: https://www.unicode.org/ucd/
"""
filename = "UnicodeData.txt"
current_dir = os.path.abspath(os.path.dirname(__file__))
tag = re.compile(r"<\w+?>")
with codecs.open(os.path.join(current_dir, filename), mode="r", encoding="utf-8") as fp:
for line in fp:
if not line.strip():
continue
data = line.strip().split(";")
# Replace the start/end range markers with their proper derived names.
if data[1].endswith((u"First>", u"Last>")) and _is_derived(int(data[0], 16)):
data[1] = _get_nr_prefix(int(data[0], 16))
if data[1].startswith("HANGUL SYLLABLE"): # For Hangul syllables, use naming rule NR1
data[1] += _get_hangul_syllable_name(int(data[0], 16))
else: # Others should use naming rule NR2
data[1] += data[0]
data[3] = int(data[3]) # Convert the Canonical Combining Class value into an int.
if data[5]: # Convert the contents of the decomposition into characters, preserving tag info.
data[5] = u" ".join([_hexstr_to_unichr(s) if not tag.match(s) else s for s in data[5].split()])
for i in [6, 7, 8]: # Convert the decimal, digit and numeric fields to either ints or fractions.
if data[i]:
if "/" in data[i]:
data[i] = Fraction(data[i])
else:
data[i] = int(data[i])
for i in [12, 13, 14]: # Convert the uppercase, lowercase and titlecase fields to characters.
if data[i]:
data[i] = _hexstr_to_unichr(data[i])
lookup_name = _uax44lm2transform(data[1])
uc_data = UnicodeCharacter(u"U+" + data[0], *data[1:])
self._unicode_character_database[int(data[0], 16)] = uc_data
self._name_database[lookup_name] = uc_data
# Fill out the "compressed" ranges of UnicodeData.txt i.e. fill out the remaining characters per the Name
# Derivation Rules. See the Unicode Standard, ch. 4, section 4.8, Unicode Name Property
for lookup_range, prefix_string in _nr_prefix_strings.items():
exemplar = self._unicode_character_database.__getitem__(lookup_range[0])
for item in lookup_range:
hex_code = _padded_hex(item)
new_name = prefix_string
if prefix_string.startswith("HANGUL SYLLABLE"): # For Hangul, use naming rule NR1
new_name += _get_hangul_syllable_name(item)
else: # Everything else uses naming rule NR2
new_name += hex_code
uc_data = exemplar._replace(code=u"U+" + hex_code, name=new_name)
self._unicode_character_database[item] = uc_data
self._name_database[_uax44lm2transform(new_name)] = uc_data | Function for parsing the Unicode character data from the Unicode Character
Database (UCD) and generating a lookup table. For more info on the UCD,
    see the following website: https://www.unicode.org/ucd/ | Below is the instruction that describes the task:
### Input:
Function for parsing the Unicode character data from the Unicode Character
Database (UCD) and generating a lookup table. For more info on the UCD,
see the following website: https://www.unicode.org/ucd/
### Response:
def _build_unicode_character_database(self):
"""
Function for parsing the Unicode character data from the Unicode Character
Database (UCD) and generating a lookup table. For more info on the UCD,
see the following website: https://www.unicode.org/ucd/
"""
filename = "UnicodeData.txt"
current_dir = os.path.abspath(os.path.dirname(__file__))
tag = re.compile(r"<\w+?>")
with codecs.open(os.path.join(current_dir, filename), mode="r", encoding="utf-8") as fp:
for line in fp:
if not line.strip():
continue
data = line.strip().split(";")
# Replace the start/end range markers with their proper derived names.
if data[1].endswith((u"First>", u"Last>")) and _is_derived(int(data[0], 16)):
data[1] = _get_nr_prefix(int(data[0], 16))
if data[1].startswith("HANGUL SYLLABLE"): # For Hangul syllables, use naming rule NR1
data[1] += _get_hangul_syllable_name(int(data[0], 16))
else: # Others should use naming rule NR2
data[1] += data[0]
data[3] = int(data[3]) # Convert the Canonical Combining Class value into an int.
if data[5]: # Convert the contents of the decomposition into characters, preserving tag info.
data[5] = u" ".join([_hexstr_to_unichr(s) if not tag.match(s) else s for s in data[5].split()])
for i in [6, 7, 8]: # Convert the decimal, digit and numeric fields to either ints or fractions.
if data[i]:
if "/" in data[i]:
data[i] = Fraction(data[i])
else:
data[i] = int(data[i])
for i in [12, 13, 14]: # Convert the uppercase, lowercase and titlecase fields to characters.
if data[i]:
data[i] = _hexstr_to_unichr(data[i])
lookup_name = _uax44lm2transform(data[1])
uc_data = UnicodeCharacter(u"U+" + data[0], *data[1:])
self._unicode_character_database[int(data[0], 16)] = uc_data
self._name_database[lookup_name] = uc_data
# Fill out the "compressed" ranges of UnicodeData.txt i.e. fill out the remaining characters per the Name
# Derivation Rules. See the Unicode Standard, ch. 4, section 4.8, Unicode Name Property
for lookup_range, prefix_string in _nr_prefix_strings.items():
exemplar = self._unicode_character_database.__getitem__(lookup_range[0])
for item in lookup_range:
hex_code = _padded_hex(item)
new_name = prefix_string
if prefix_string.startswith("HANGUL SYLLABLE"): # For Hangul, use naming rule NR1
new_name += _get_hangul_syllable_name(item)
else: # Everything else uses naming rule NR2
new_name += hex_code
uc_data = exemplar._replace(code=u"U+" + hex_code, name=new_name)
self._unicode_character_database[item] = uc_data
self._name_database[_uax44lm2transform(new_name)] = uc_data |
def create_partition(analysis_request, request, analyses, sample_type=None,
container=None, preservation=None, skip_fields=None,
remove_primary_analyses=True):
"""
Creates a partition for the analysis_request (primary) passed in
:param analysis_request: uid/brain/object of IAnalysisRequest type
:param request: the current request object
:param analyses: uids/brains/objects of IAnalysis type
:param sampletype: uid/brain/object of SampleType
:param container: uid/brain/object of Container
:param preservation: uid/brain/object of Preservation
:param skip_fields: names of fields to be skipped on copy from primary
:param remove_primary_analyses: removes the analyses from the parent
:return: the new partition
"""
partition_skip_fields = [
"Analyses",
"Attachment",
"Client",
"Profile",
"Profiles",
"RejectionReasons",
"Remarks",
"ResultsInterpretation",
"ResultsInterpretationDepts",
"Sample",
"Template",
"creation_date",
"id",
"modification_date",
"ParentAnalysisRequest",
"PrimaryAnalysisRequest",
]
if skip_fields:
partition_skip_fields.extend(skip_fields)
partition_skip_fields = list(set(partition_skip_fields))
# Copy field values from the primary analysis request
ar = api.get_object(analysis_request)
record = fields_to_dict(ar, partition_skip_fields)
# Update with values that are partition-specific
record.update({
"InternalUse": True,
"ParentAnalysisRequest": api.get_uid(ar),
})
if sample_type is not None:
record["SampleType"] = sample_type and api.get_uid(sample_type) or ""
if container is not None:
record["Container"] = container and api.get_uid(container) or ""
if preservation is not None:
record["Preservation"] = preservation and api.get_uid(preservation) or ""
# Create the Partition
client = ar.getClient()
analyses = list(set(map(api.get_object, analyses)))
services = map(lambda an: an.getAnalysisService(), analyses)
specs = ar.getSpecification()
specs = specs and specs.getResultsRange() or []
partition = create_analysisrequest(client, request=request, values=record,
analyses=services, specifications=specs)
# Remove analyses from the primary
if remove_primary_analyses:
analyses_ids = map(api.get_id, analyses)
ar.manage_delObjects(analyses_ids)
# Reindex Parent Analysis Request
ar.reindexObject(idxs=["isRootAncestor"])
# Manually set the Date Received to match with its parent. This is
# necessary because crar calls to processForm, so DateReceived is not
# set because the partition has not been received yet
partition.setDateReceived(ar.getDateReceived())
partition.reindexObject(idxs="getDateReceived")
# Force partition to same status as the primary
status = api.get_workflow_status_of(ar)
changeWorkflowState(partition, "bika_ar_workflow", status)
if IReceived.providedBy(ar):
alsoProvides(partition, IReceived)
# And initialize the analyses the partition contains. This is required
# here because the transition "initialize" of analyses rely on a guard,
# so the initialization can only be performed when the sample has been
# received (DateReceived is set)
ActionHandlerPool.get_instance().queue_pool()
for analysis in partition.getAnalyses(full_objects=True):
doActionFor(analysis, "initialize")
ActionHandlerPool.get_instance().resume()
return partition | Creates a partition for the analysis_request (primary) passed in
:param analysis_request: uid/brain/object of IAnalysisRequest type
:param request: the current request object
:param analyses: uids/brains/objects of IAnalysis type
:param sampletype: uid/brain/object of SampleType
:param container: uid/brain/object of Container
:param preservation: uid/brain/object of Preservation
:param skip_fields: names of fields to be skipped on copy from primary
:param remove_primary_analyses: removes the analyses from the parent
    :return: the new partition | Below is the instruction that describes the task:
### Input:
Creates a partition for the analysis_request (primary) passed in
:param analysis_request: uid/brain/object of IAnalysisRequest type
:param request: the current request object
:param analyses: uids/brains/objects of IAnalysis type
:param sampletype: uid/brain/object of SampleType
:param container: uid/brain/object of Container
:param preservation: uid/brain/object of Preservation
:param skip_fields: names of fields to be skipped on copy from primary
:param remove_primary_analyses: removes the analyses from the parent
:return: the new partition
### Response:
def create_partition(analysis_request, request, analyses, sample_type=None,
container=None, preservation=None, skip_fields=None,
remove_primary_analyses=True):
"""
Creates a partition for the analysis_request (primary) passed in
:param analysis_request: uid/brain/object of IAnalysisRequest type
:param request: the current request object
:param analyses: uids/brains/objects of IAnalysis type
:param sampletype: uid/brain/object of SampleType
:param container: uid/brain/object of Container
:param preservation: uid/brain/object of Preservation
:param skip_fields: names of fields to be skipped on copy from primary
:param remove_primary_analyses: removes the analyses from the parent
:return: the new partition
"""
partition_skip_fields = [
"Analyses",
"Attachment",
"Client",
"Profile",
"Profiles",
"RejectionReasons",
"Remarks",
"ResultsInterpretation",
"ResultsInterpretationDepts",
"Sample",
"Template",
"creation_date",
"id",
"modification_date",
"ParentAnalysisRequest",
"PrimaryAnalysisRequest",
]
if skip_fields:
partition_skip_fields.extend(skip_fields)
partition_skip_fields = list(set(partition_skip_fields))
# Copy field values from the primary analysis request
ar = api.get_object(analysis_request)
record = fields_to_dict(ar, partition_skip_fields)
# Update with values that are partition-specific
record.update({
"InternalUse": True,
"ParentAnalysisRequest": api.get_uid(ar),
})
if sample_type is not None:
record["SampleType"] = sample_type and api.get_uid(sample_type) or ""
if container is not None:
record["Container"] = container and api.get_uid(container) or ""
if preservation is not None:
record["Preservation"] = preservation and api.get_uid(preservation) or ""
# Create the Partition
client = ar.getClient()
analyses = list(set(map(api.get_object, analyses)))
services = map(lambda an: an.getAnalysisService(), analyses)
specs = ar.getSpecification()
specs = specs and specs.getResultsRange() or []
partition = create_analysisrequest(client, request=request, values=record,
analyses=services, specifications=specs)
# Remove analyses from the primary
if remove_primary_analyses:
analyses_ids = map(api.get_id, analyses)
ar.manage_delObjects(analyses_ids)
# Reindex Parent Analysis Request
ar.reindexObject(idxs=["isRootAncestor"])
# Manually set the Date Received to match with its parent. This is
# necessary because crar calls to processForm, so DateReceived is not
# set because the partition has not been received yet
partition.setDateReceived(ar.getDateReceived())
partition.reindexObject(idxs="getDateReceived")
# Force partition to same status as the primary
status = api.get_workflow_status_of(ar)
changeWorkflowState(partition, "bika_ar_workflow", status)
if IReceived.providedBy(ar):
alsoProvides(partition, IReceived)
# And initialize the analyses the partition contains. This is required
# here because the transition "initialize" of analyses rely on a guard,
# so the initialization can only be performed when the sample has been
# received (DateReceived is set)
ActionHandlerPool.get_instance().queue_pool()
for analysis in partition.getAnalyses(full_objects=True):
doActionFor(analysis, "initialize")
ActionHandlerPool.get_instance().resume()
return partition |
def _connect(self):
'''
Connect to F5
'''
try:
self.bigIP = f5.BIGIP(hostname=self.lb,
username=self.username,
password=self.password,
fromurl=True,
wsdls=['LocalLB.VirtualServer',
'LocalLB.Pool'])
except Exception:
raise Exception(
'Unable to connect to {0}'.format(self.lb)
)
    return True | Connect to F5 | Below is the instruction that describes the task:
### Input:
Connect to F5
### Response:
def _connect(self):
'''
Connect to F5
'''
try:
self.bigIP = f5.BIGIP(hostname=self.lb,
username=self.username,
password=self.password,
fromurl=True,
wsdls=['LocalLB.VirtualServer',
'LocalLB.Pool'])
except Exception:
raise Exception(
'Unable to connect to {0}'.format(self.lb)
)
return True |
def __multi_arity_fn_to_py_ast( # pylint: disable=too-many-locals
ctx: GeneratorContext,
node: Fn,
methods: Collection[FnMethod],
def_name: Optional[str] = None,
meta_node: Optional[MetaNode] = None,
) -> GeneratedPyAST:
"""Return a Python AST node for a function with multiple arities."""
assert node.op == NodeOp.FN
assert all([method.op == NodeOp.FN_METHOD for method in methods])
lisp_fn_name = node.local.name if node.local is not None else None
py_fn_name = __fn_name(lisp_fn_name) if def_name is None else munge(def_name)
py_fn_node = ast.AsyncFunctionDef if node.is_async else ast.FunctionDef
arity_to_name = {}
rest_arity_name: Optional[str] = None
fn_defs = []
for method in methods:
arity_name = f"{py_fn_name}__arity{'_rest' if method.is_variadic else method.fixed_arity}"
if method.is_variadic:
rest_arity_name = arity_name
else:
arity_to_name[method.fixed_arity] = arity_name
with ctx.new_symbol_table(arity_name), ctx.new_recur_point(
method.loop_id, RecurType.FN, is_variadic=node.is_variadic
):
# Allow named anonymous functions to recursively call themselves
if lisp_fn_name is not None:
ctx.symbol_table.new_symbol(
sym.symbol(lisp_fn_name), py_fn_name, LocalType.FN
)
fn_args, varg, fn_body_ast = __fn_args_to_py_ast(
ctx, method.params, method.body
)
fn_defs.append(
py_fn_node(
name=arity_name,
args=ast.arguments(
args=fn_args,
kwarg=None,
vararg=varg,
kwonlyargs=[],
defaults=[],
kw_defaults=[],
),
body=fn_body_ast,
decorator_list=[_TRAMPOLINE_FN_NAME]
if ctx.recur_point.has_recur
else [],
returns=None,
)
)
dispatch_fn_ast = __multi_arity_dispatch_fn(
ctx,
py_fn_name,
arity_to_name,
default_name=rest_arity_name,
max_fixed_arity=node.max_fixed_arity,
meta_node=meta_node,
is_async=node.is_async,
)
return GeneratedPyAST(
node=dispatch_fn_ast.node,
dependencies=list(chain(fn_defs, dispatch_fn_ast.dependencies)),
    ) | Return a Python AST node for a function with multiple arities. | Below is the instruction that describes the task:
### Input:
Return a Python AST node for a function with multiple arities.
### Response:
def __multi_arity_fn_to_py_ast( # pylint: disable=too-many-locals
ctx: GeneratorContext,
node: Fn,
methods: Collection[FnMethod],
def_name: Optional[str] = None,
meta_node: Optional[MetaNode] = None,
) -> GeneratedPyAST:
"""Return a Python AST node for a function with multiple arities."""
assert node.op == NodeOp.FN
assert all([method.op == NodeOp.FN_METHOD for method in methods])
lisp_fn_name = node.local.name if node.local is not None else None
py_fn_name = __fn_name(lisp_fn_name) if def_name is None else munge(def_name)
py_fn_node = ast.AsyncFunctionDef if node.is_async else ast.FunctionDef
arity_to_name = {}
rest_arity_name: Optional[str] = None
fn_defs = []
for method in methods:
arity_name = f"{py_fn_name}__arity{'_rest' if method.is_variadic else method.fixed_arity}"
if method.is_variadic:
rest_arity_name = arity_name
else:
arity_to_name[method.fixed_arity] = arity_name
with ctx.new_symbol_table(arity_name), ctx.new_recur_point(
method.loop_id, RecurType.FN, is_variadic=node.is_variadic
):
# Allow named anonymous functions to recursively call themselves
if lisp_fn_name is not None:
ctx.symbol_table.new_symbol(
sym.symbol(lisp_fn_name), py_fn_name, LocalType.FN
)
fn_args, varg, fn_body_ast = __fn_args_to_py_ast(
ctx, method.params, method.body
)
fn_defs.append(
py_fn_node(
name=arity_name,
args=ast.arguments(
args=fn_args,
kwarg=None,
vararg=varg,
kwonlyargs=[],
defaults=[],
kw_defaults=[],
),
body=fn_body_ast,
decorator_list=[_TRAMPOLINE_FN_NAME]
if ctx.recur_point.has_recur
else [],
returns=None,
)
)
dispatch_fn_ast = __multi_arity_dispatch_fn(
ctx,
py_fn_name,
arity_to_name,
default_name=rest_arity_name,
max_fixed_arity=node.max_fixed_arity,
meta_node=meta_node,
is_async=node.is_async,
)
return GeneratedPyAST(
node=dispatch_fn_ast.node,
dependencies=list(chain(fn_defs, dispatch_fn_ast.dependencies)),
) |
def line_to_args(self, line):
"""This will convert the line passed into the do_xxx functions into
an array of arguments and handle the Output Redirection Operator.
"""
# Note: using shlex.split causes quoted substrings to stay together.
args = shlex.split(line)
self.redirect_filename = ''
self.redirect_dev = None
redirect_index = -1
if '>' in args:
redirect_index = args.index('>')
elif '>>' in args:
redirect_index = args.index('>>')
if redirect_index >= 0:
if redirect_index + 1 >= len(args):
raise ShellError("> requires a filename")
self.redirect_filename = resolve_path(args[redirect_index + 1])
rmode = auto(get_mode, os.path.dirname(self.redirect_filename))
if not mode_isdir(rmode):
raise ShellError("Unable to redirect to '%s', directory doesn't exist" %
self.redirect_filename)
if args[redirect_index] == '>':
self.redirect_mode = 'w'
if DEBUG:
print('Redirecting (write) to', self.redirect_filename)
else:
self.redirect_mode = 'a'
if DEBUG:
print('Redirecting (append) to', self.redirect_filename)
self.redirect_dev, self.redirect_filename = get_dev_and_path(self.redirect_filename)
try:
if self.redirect_dev is None:
self.stdout = SmartFile(open(self.redirect_filename, self.redirect_mode))
else:
# Redirecting to a remote device. We collect the results locally
# and copy them to the remote device at the end of the command.
self.stdout = SmartFile(tempfile.TemporaryFile(mode='w+'))
except OSError as err:
raise ShellError(err)
del args[redirect_index + 1]
del args[redirect_index]
curr_cmd, _, _ = self.parseline(self.lastcmd)
parser = self.create_argparser(curr_cmd)
if parser:
args = parser.parse_args(args)
return args | This will convert the line passed into the do_xxx functions into
        an array of arguments and handle the Output Redirection Operator. | Below is the instruction that describes the task:
### Input:
This will convert the line passed into the do_xxx functions into
an array of arguments and handle the Output Redirection Operator.
### Response:
def line_to_args(self, line):
"""This will convert the line passed into the do_xxx functions into
an array of arguments and handle the Output Redirection Operator.
"""
# Note: using shlex.split causes quoted substrings to stay together.
args = shlex.split(line)
self.redirect_filename = ''
self.redirect_dev = None
redirect_index = -1
if '>' in args:
redirect_index = args.index('>')
elif '>>' in args:
redirect_index = args.index('>>')
if redirect_index >= 0:
if redirect_index + 1 >= len(args):
raise ShellError("> requires a filename")
self.redirect_filename = resolve_path(args[redirect_index + 1])
rmode = auto(get_mode, os.path.dirname(self.redirect_filename))
if not mode_isdir(rmode):
raise ShellError("Unable to redirect to '%s', directory doesn't exist" %
self.redirect_filename)
if args[redirect_index] == '>':
self.redirect_mode = 'w'
if DEBUG:
print('Redirecting (write) to', self.redirect_filename)
else:
self.redirect_mode = 'a'
if DEBUG:
print('Redirecting (append) to', self.redirect_filename)
self.redirect_dev, self.redirect_filename = get_dev_and_path(self.redirect_filename)
try:
if self.redirect_dev is None:
self.stdout = SmartFile(open(self.redirect_filename, self.redirect_mode))
else:
# Redirecting to a remote device. We collect the results locally
# and copy them to the remote device at the end of the command.
self.stdout = SmartFile(tempfile.TemporaryFile(mode='w+'))
except OSError as err:
raise ShellError(err)
del args[redirect_index + 1]
del args[redirect_index]
curr_cmd, _, _ = self.parseline(self.lastcmd)
parser = self.create_argparser(curr_cmd)
if parser:
args = parser.parse_args(args)
return args |
def step(self):
"""Do a single iteration over all cbpdn and ccmod steps. Those that
are not coupled on the K axis are performed in parallel."""
# If the nproc parameter of __init__ is zero, just iterate
# over the K consensus instances instead of using
# multiprocessing to do the computations in parallel. This is
# useful for debugging and timing comparisons.
if self.nproc == 0:
for k in range(self.xstep.cri.K):
md_step_group(k)
else:
self.pool.map(md_step_group, range(self.xstep.cri.K))
ccmodmd_ystep()
ccmodmd_ustep()
cbpdnmd_setdict() | Do a single iteration over all cbpdn and ccmod steps. Those that
are not coupled on the K axis are performed in parallel. | Below is the instruction that describes the task:
### Input:
Do a single iteration over all cbpdn and ccmod steps. Those that
are not coupled on the K axis are performed in parallel.
### Response:
def step(self):
    """Run one iteration of all cbpdn and ccmod sub-steps. The per-K
    groups are independent along the K axis and are executed in
    parallel unless multiprocessing is disabled."""
    group_indices = range(self.xstep.cri.K)
    if self.nproc == 0:
        # Serial fallback (nproc == 0): handy for debugging and for
        # timing comparisons against the parallel path.
        for idx in group_indices:
            md_step_group(idx)
    else:
        self.pool.map(md_step_group, group_indices)
    # These steps are coupled across K, so they run only after every
    # per-K group has finished.
    ccmodmd_ystep()
    ccmodmd_ustep()
    cbpdnmd_setdict()
def _check_package(pkg_xml, zipfilename, zf):
"""
Helper for ``build_index()``: Perform some checks to make sure that
the given package is consistent.
"""
# The filename must patch the id given in the XML file.
uid = os.path.splitext(os.path.split(zipfilename)[1])[0]
if pkg_xml.get('id') != uid:
raise ValueError('package identifier mismatch (%s vs %s)' %
(pkg_xml.get('id'), uid))
# Zip file must expand to a subdir whose name matches uid.
if sum( (name!=uid and not name.startswith(uid+'/'))
for name in zf.namelist() ):
raise ValueError('Zipfile %s.zip does not expand to a single '
'subdirectory %s/' % (uid, uid)) | Helper for ``build_index()``: Perform some checks to make sure that
the given package is consistent. | Below is the instruction that describes the task:
### Input:
Helper for ``build_index()``: Perform some checks to make sure that
the given package is consistent.
### Response:
def _check_package(pkg_xml, zipfilename, zf):
"""
Helper for ``build_index()``: Perform some checks to make sure that
the given package is consistent.
"""
# The filename must patch the id given in the XML file.
uid = os.path.splitext(os.path.split(zipfilename)[1])[0]
if pkg_xml.get('id') != uid:
raise ValueError('package identifier mismatch (%s vs %s)' %
(pkg_xml.get('id'), uid))
# Zip file must expand to a subdir whose name matches uid.
if sum( (name!=uid and not name.startswith(uid+'/'))
for name in zf.namelist() ):
raise ValueError('Zipfile %s.zip does not expand to a single '
'subdirectory %s/' % (uid, uid)) |
def run(self):
"""
Consume message from the Advanced Message Queuing Protocol
(AMPQ) compliant broker as soon as they are queued, and call
the instance method ``__on_message_received`` that the
inheriting class must have implemented.
"""
self._setUp()
naming = 'skunkworks.lbs.%s.%s.%s' % (settings.ENVIRONMENT_STAGE, \
self.service_name.lower(), self.message_type.lower())
self.channel.queue_declare(queue=naming, durable=True, exclusive=False, auto_delete=False)
self.channel.exchange_declare(exchange=naming, type="direct", durable=True, auto_delete=False,)
self.channel.queue_bind(queue=naming, exchange=naming, routing_key=naming)
self.channel.basic_consume(queue=self.queue, no_ack=True,
callback=self._on_message_received,
consumer_tag=self.__class__.__name__)
try:
while True:
self.channel.wait()
finally:
self.channel.basic_cancel(self.__class__.__name__) | Consume message from the Advanced Message Queuing Protocol
(AMPQ) compliant broker as soon as they are queued, and call
the instance method ``__on_message_received`` that the
inheriting class must have implemented. | Below is the instruction that describes the task:
### Input:
Consume message from the Advanced Message Queuing Protocol
(AMPQ) compliant broker as soon as they are queued, and call
the instance method ``__on_message_received`` that the
inheriting class must have implemented.
### Response:
def run(self):
    """
    Consume messages from the Advanced Message Queuing Protocol
    (AMQP) compliant broker as soon as they are queued, and call
    the instance method ``__on_message_received`` that the
    inheriting class must have implemented.
    """
    self._setUp()
    # Queue, exchange and routing key all share one environment-scoped name.
    naming = 'skunkworks.lbs.%s.%s.%s' % (settings.ENVIRONMENT_STAGE, \
        self.service_name.lower(), self.message_type.lower())
    # Declare a durable queue and a matching direct exchange, then bind them.
    self.channel.queue_declare(queue=naming, durable=True, exclusive=False, auto_delete=False)
    self.channel.exchange_declare(exchange=naming, type="direct", durable=True, auto_delete=False,)
    self.channel.queue_bind(queue=naming, exchange=naming, routing_key=naming)
    # no_ack=True: the broker will not re-queue a message if handling fails.
    # NOTE(review): consumption uses ``self.queue``, not the freshly bound
    # ``naming`` queue -- confirm this is intentional.
    self.channel.basic_consume(queue=self.queue, no_ack=True,
                               callback=self._on_message_received,
                               consumer_tag=self.__class__.__name__)
    try:
        # Block forever, dispatching messages as they arrive.
        while True:
            self.channel.wait()
    finally:
        # Always detach the consumer so the broker stops delivering to us.
        self.channel.basic_cancel(self.__class__.__name__)
def make_aliased_type(cls, other_base):
"""
Factory for making Aliased{Filter,Factor,Classifier}.
"""
docstring = dedent(
"""
A {t} that names another {t}.
Parameters
----------
term : {t}
{{name}}
"""
).format(t=other_base.__name__)
doc = format_docstring(
owner_name=other_base.__name__,
docstring=docstring,
formatters={'name': PIPELINE_ALIAS_NAME_DOC},
)
return type(
'Aliased' + other_base.__name__,
(cls, other_base),
{'__doc__': doc,
'__module__': other_base.__module__},
) | Factory for making Aliased{Filter,Factor,Classifier}. | Below is the instruction that describes the task:
### Input:
Factory for making Aliased{Filter,Factor,Classifier}.
### Response:
def make_aliased_type(cls, other_base):
    """
    Factory for making Aliased{Filter,Factor,Classifier}.
    """
    # Build a numpydoc-style docstring for the generated subclass; the
    # ``{{name}}`` placeholder is filled in by format_docstring below.
    docstring = dedent(
        """
        A {t} that names another {t}.
        Parameters
        ----------
        term : {t}
            {{name}}
        """
    ).format(t=other_base.__name__)
    doc = format_docstring(
        owner_name=other_base.__name__,
        docstring=docstring,
        formatters={'name': PIPELINE_ALIAS_NAME_DOC},
    )
    # Dynamically create e.g. ``AliasedFactor``, inheriting from both the
    # aliasing mixin (``cls``) and the concrete term base class.
    return type(
        'Aliased' + other_base.__name__,
        (cls, other_base),
        {'__doc__': doc,
         '__module__': other_base.__module__},
    )
def _h2ab_s(s):
"""Define the saturated line boundary between Region 4 and 2a-2b, h=f(s)
Parameters
----------
s : float
Specific entropy, [kJ/kgK]
Returns
-------
h : float
Specific enthalpy, [kJ/kg]
Notes
------
Raise :class:`NotImplementedError` if input isn't in limit:
* 5.85 ≤ s ≤ s"(273.15K)
References
----------
IAPWS, Revised Supplementary Release on Backward Equations p(h,s) for
Region 3, Equations as a Function of h and s for the Region Boundaries, and
an Equation Tsat(h,s) for Region 4 of the IAPWS Industrial Formulation 1997
for the Thermodynamic Properties of Water and Steam,
http://www.iapws.org/relguide/Supp-phs3-2014.pdf. Eq 5
Examples
--------
>>> _h2ab_s(7)
2723.729985
>>> _h2ab_s(9)
2511.861477
"""
# Check input parameters
if s < 5.85 or s > 9.155759395:
raise NotImplementedError("Incoming out of bound")
sigma1 = s/5.21
sigma2 = s/9.2
I = [1, 1, 2, 2, 4, 4, 7, 8, 8, 10, 12, 12, 18, 20, 24, 28, 28, 28, 28, 28,
32, 32, 32, 32, 32, 36, 36, 36, 36, 36]
J = [8, 24, 4, 32, 1, 2, 7, 5, 12, 1, 0, 7, 10, 12, 32, 8, 12, 20, 22, 24,
2, 7, 12, 14, 24, 10, 12, 20, 22, 28]
n = [-0.524581170928788e3, -0.926947218142218e7, -0.237385107491666e3,
0.210770155812776e11, -0.239494562010986e2, 0.221802480294197e3,
-0.510472533393438e7, 0.124981396109147e7, 0.200008436996201e10,
-0.815158509791035e3, -0.157612685637523e3, -0.114200422332791e11,
0.662364680776872e16, -0.227622818296144e19, -0.171048081348406e32,
0.660788766938091e16, 0.166320055886021e23, -0.218003784381501e30,
-0.787276140295618e30, 0.151062329700346e32, 0.795732170300541e7,
0.131957647355347e16, -0.325097068299140e24, -0.418600611419248e26,
0.297478906557467e35, -0.953588761745473e20, 0.166957699620939e25,
-0.175407764869978e33, 0.347581490626396e35, -0.710971318427851e39]
suma = 0
for i, j, ni in zip(I, J, n):
suma += ni * (1/sigma1-0.513)**i * (sigma2-0.524)**j
return 2800*exp(suma) | Define the saturated line boundary between Region 4 and 2a-2b, h=f(s)
Parameters
----------
s : float
Specific entropy, [kJ/kgK]
Returns
-------
h : float
Specific enthalpy, [kJ/kg]
Notes
------
Raise :class:`NotImplementedError` if input isn't in limit:
* 5.85 ≤ s ≤ s"(273.15K)
References
----------
IAPWS, Revised Supplementary Release on Backward Equations p(h,s) for
Region 3, Equations as a Function of h and s for the Region Boundaries, and
an Equation Tsat(h,s) for Region 4 of the IAPWS Industrial Formulation 1997
for the Thermodynamic Properties of Water and Steam,
http://www.iapws.org/relguide/Supp-phs3-2014.pdf. Eq 5
Examples
--------
>>> _h2ab_s(7)
2723.729985
>>> _h2ab_s(9)
2511.861477 | Below is the instruction that describes the task:
### Input:
Define the saturated line boundary between Region 4 and 2a-2b, h=f(s)
Parameters
----------
s : float
Specific entropy, [kJ/kgK]
Returns
-------
h : float
Specific enthalpy, [kJ/kg]
Notes
------
Raise :class:`NotImplementedError` if input isn't in limit:
* 5.85 ≤ s ≤ s"(273.15K)
References
----------
IAPWS, Revised Supplementary Release on Backward Equations p(h,s) for
Region 3, Equations as a Function of h and s for the Region Boundaries, and
an Equation Tsat(h,s) for Region 4 of the IAPWS Industrial Formulation 1997
for the Thermodynamic Properties of Water and Steam,
http://www.iapws.org/relguide/Supp-phs3-2014.pdf. Eq 5
Examples
--------
>>> _h2ab_s(7)
2723.729985
>>> _h2ab_s(9)
2511.861477
### Response:
def _h2ab_s(s):
"""Define the saturated line boundary between Region 4 and 2a-2b, h=f(s)
Parameters
----------
s : float
Specific entropy, [kJ/kgK]
Returns
-------
h : float
Specific enthalpy, [kJ/kg]
Notes
------
Raise :class:`NotImplementedError` if input isn't in limit:
* 5.85 ≤ s ≤ s"(273.15K)
References
----------
IAPWS, Revised Supplementary Release on Backward Equations p(h,s) for
Region 3, Equations as a Function of h and s for the Region Boundaries, and
an Equation Tsat(h,s) for Region 4 of the IAPWS Industrial Formulation 1997
for the Thermodynamic Properties of Water and Steam,
http://www.iapws.org/relguide/Supp-phs3-2014.pdf. Eq 5
Examples
--------
>>> _h2ab_s(7)
2723.729985
>>> _h2ab_s(9)
2511.861477
"""
# Check input parameters
if s < 5.85 or s > 9.155759395:
raise NotImplementedError("Incoming out of bound")
sigma1 = s/5.21
sigma2 = s/9.2
I = [1, 1, 2, 2, 4, 4, 7, 8, 8, 10, 12, 12, 18, 20, 24, 28, 28, 28, 28, 28,
32, 32, 32, 32, 32, 36, 36, 36, 36, 36]
J = [8, 24, 4, 32, 1, 2, 7, 5, 12, 1, 0, 7, 10, 12, 32, 8, 12, 20, 22, 24,
2, 7, 12, 14, 24, 10, 12, 20, 22, 28]
n = [-0.524581170928788e3, -0.926947218142218e7, -0.237385107491666e3,
0.210770155812776e11, -0.239494562010986e2, 0.221802480294197e3,
-0.510472533393438e7, 0.124981396109147e7, 0.200008436996201e10,
-0.815158509791035e3, -0.157612685637523e3, -0.114200422332791e11,
0.662364680776872e16, -0.227622818296144e19, -0.171048081348406e32,
0.660788766938091e16, 0.166320055886021e23, -0.218003784381501e30,
-0.787276140295618e30, 0.151062329700346e32, 0.795732170300541e7,
0.131957647355347e16, -0.325097068299140e24, -0.418600611419248e26,
0.297478906557467e35, -0.953588761745473e20, 0.166957699620939e25,
-0.175407764869978e33, 0.347581490626396e35, -0.710971318427851e39]
suma = 0
for i, j, ni in zip(I, J, n):
suma += ni * (1/sigma1-0.513)**i * (sigma2-0.524)**j
return 2800*exp(suma) |
def mean(self, start=None, end=None, mask=None):
"""This calculated the average value of the time series over the given
time range from `start` to `end`, when `mask` is truthy.
"""
return self.distribution(start=start, end=end, mask=mask).mean() | This calculated the average value of the time series over the given
time range from `start` to `end`, when `mask` is truthy. | Below is the instruction that describes the task:
### Input:
This calculated the average value of the time series over the given
time range from `start` to `end`, when `mask` is truthy.
### Response:
def mean(self, start=None, end=None, mask=None):
    """Return the average value of the time series between `start` and
    `end`, restricted to the periods where `mask` is truthy.
    """
    dist = self.distribution(start=start, end=end, mask=mask)
    return dist.mean()
def task_dropped(self):
""" Track (if required) drop event and do the same job as :meth:`.WScheduleRecord.task_dropped`
method do
:return: None
"""
tracker = self.task().tracker_storage()
if tracker is not None and self.track_drop() is True:
details = self.task().event_details(WTrackerEvents.drop)
tracker.register_drop(self.task(), event_details=details)
WScheduleRecord.task_dropped(self) | Track (if required) drop event and do the same job as :meth:`.WScheduleRecord.task_dropped`
method do
:return: None | Below is the instruction that describes the task:
### Input:
Track (if required) drop event and do the same job as :meth:`.WScheduleRecord.task_dropped`
method do
:return: None
### Response:
def task_dropped(self):
    """ Track (if required) the drop event, then do the same job as
    :meth:`.WScheduleRecord.task_dropped` does
    :return: None
    """
    # Drop events are recorded only when a tracker is configured AND
    # drop-tracking is switched on for this record.
    tracker = self.task().tracker_storage()
    if tracker is not None and self.track_drop() is True:
        details = self.task().event_details(WTrackerEvents.drop)
        tracker.register_drop(self.task(), event_details=details)
    # Always run the base-class bookkeeping, tracked or not.
    WScheduleRecord.task_dropped(self)
def deepcopy(self):
"""
Create a deep copy of the segmentation map object.
Returns
-------
imgaug.SegmentationMapOnImage
Deep copy.
"""
segmap = SegmentationMapOnImage(self.arr, shape=self.shape, nb_classes=self.nb_classes)
segmap.input_was = self.input_was
return segmap | Create a deep copy of the segmentation map object.
Returns
-------
imgaug.SegmentationMapOnImage
Deep copy. | Below is the instruction that describes the task:
### Input:
Create a deep copy of the segmentation map object.
Returns
-------
imgaug.SegmentationMapOnImage
Deep copy.
### Response:
def deepcopy(self):
    """
    Create a deep copy of the segmentation map object.
    Returns
    -------
    imgaug.SegmentationMapOnImage
        Deep copy.
    """
    # Rebuild from the raw array; the constructor re-derives internal state.
    # NOTE(review): assumes the constructor copies ``self.arr`` -- otherwise
    # the "deep" copy would share the underlying array; confirm upstream.
    segmap = SegmentationMapOnImage(self.arr, shape=self.shape, nb_classes=self.nb_classes)
    # Preserve the bookkeeping of how the original input was provided.
    segmap.input_was = self.input_was
    return segmap
def parse_bookmark_json (data):
"""Parse complete JSON data for Chromium Bookmarks."""
for entry in data["roots"].values():
for url, name in parse_bookmark_node(entry):
yield url, name | Parse complete JSON data for Chromium Bookmarks. | Below is the instruction that describes the task:
### Input:
Parse complete JSON data for Chromium Bookmarks.
### Response:
def parse_bookmark_json (data):
    """Parse complete JSON data for Chromium bookmarks, yielding
    (url, name) pairs from every bookmark root."""
    roots = data["roots"]
    for node in roots.values():
        for url, name in parse_bookmark_node(node):
            yield url, name
def resolve_sound(self, sound):
"""Function tries to identify a sound in the data.
Notes
-----
The function tries to resolve sounds to take a sound with less complex
features in order to yield the next approximate sound class, if the
transcription data are sound classes.
"""
sound = sound if isinstance(sound, Symbol) else self.system[sound]
if sound.name in self.data:
return self.data[sound.name]['grapheme']
if not sound.type == 'unknownsound':
if sound.type in ['diphthong', 'cluster']:
return self.resolve_sound(sound.from_sound)
name = [
s for s in sound.name.split(' ') if
self.system._feature_values.get(s, '') not in
['laminality', 'ejection', 'tone']]
while len(name) >= 4:
sound = self.system.get(' '.join(name))
if sound and sound.name in self.data:
return self.resolve_sound(sound)
name.pop(0)
raise KeyError(":sc:resolve_sound: No sound could be found.") | Function tries to identify a sound in the data.
Notes
-----
The function tries to resolve sounds to take a sound with less complex
features in order to yield the next approximate sound class, if the
transcription data are sound classes. | Below is the instruction that describes the task:
### Input:
Function tries to identify a sound in the data.
Notes
-----
The function tries to resolve sounds to take a sound with less complex
features in order to yield the next approximate sound class, if the
transcription data are sound classes.
### Response:
def resolve_sound(self, sound):
    """Function tries to identify a sound in the data.
    Notes
    -----
    The function tries to resolve sounds to take a sound with less complex
    features in order to yield the next approximate sound class, if the
    transcription data are sound classes.
    """
    # Accept either a Symbol instance or a grapheme string.
    sound = sound if isinstance(sound, Symbol) else self.system[sound]
    if sound.name in self.data:
        return self.data[sound.name]['grapheme']
    if not sound.type == 'unknownsound':
        # Complex segments fall back to their first component.
        if sound.type in ['diphthong', 'cluster']:
            return self.resolve_sound(sound.from_sound)
        # Drop features that sound-class models typically ignore.
        name = [
            s for s in sound.name.split(' ') if
            self.system._feature_values.get(s, '') not in
            ['laminality', 'ejection', 'tone']]
        # Progressively strip the leading feature until a known sound with
        # at least 4 remaining feature terms is found.
        while len(name) >= 4:
            sound = self.system.get(' '.join(name))
            if sound and sound.name in self.data:
                return self.resolve_sound(sound)
            name.pop(0)
    raise KeyError(":sc:resolve_sound: No sound could be found.")
def get_DIR(self, WD=None):
"""
open dialog box for choosing a working directory
"""
if "-WD" in sys.argv and FIRST_RUN:
ind = sys.argv.index('-WD')
self.WD = sys.argv[ind + 1]
elif not WD: # if no arg was passed in for WD, make a dialog to choose one
dialog = wx.DirDialog(None, "Choose a directory:", defaultPath=self.currentDirectory,
style=wx.DD_DEFAULT_STYLE | wx.DD_NEW_DIR_BUTTON | wx.DD_CHANGE_DIR)
ok = self.show_dlg(dialog)
if ok == wx.ID_OK:
self.WD = dialog.GetPath()
else:
self.WD = os.getcwd()
dialog.Destroy()
self.WD = os.path.realpath(self.WD)
# name measurement file
if self.data_model == 3:
meas_file = 'measurements.txt'
else:
meas_file = 'magic_measurements.txt'
self.magic_file = os.path.join(self.WD, meas_file)
# intialize GUI_log
self.GUI_log = open(os.path.join(self.WD, "thellier_GUI.log"), 'w+')
self.GUI_log.write("starting...\n")
self.GUI_log.close()
self.GUI_log = open(os.path.join(self.WD, "thellier_GUI.log"), 'a')
os.chdir(self.WD)
self.WD = os.getcwd() | open dialog box for choosing a working directory | Below is the instruction that describes the task:
### Input:
open dialog box for choosing a working directory
### Response:
def get_DIR(self, WD=None):
    """
    Open a dialog box for choosing a working directory (or take it from
    the '-WD' command-line flag), then initialize the measurement-file
    path and the GUI log relative to it.
    """
    # A '-WD <path>' command-line flag wins on the first run.
    # NOTE(review): when a truthy ``WD`` argument is supplied, it is never
    # assigned to ``self.WD`` -- this appears to rely on the caller having
    # already set ``self.WD``; confirm.
    if "-WD" in sys.argv and FIRST_RUN:
        ind = sys.argv.index('-WD')
        self.WD = sys.argv[ind + 1]
    elif not WD: # if no arg was passed in for WD, make a dialog to choose one
        dialog = wx.DirDialog(None, "Choose a directory:", defaultPath=self.currentDirectory,
                              style=wx.DD_DEFAULT_STYLE | wx.DD_NEW_DIR_BUTTON | wx.DD_CHANGE_DIR)
        ok = self.show_dlg(dialog)
        if ok == wx.ID_OK:
            self.WD = dialog.GetPath()
        else:
            # Dialog cancelled: fall back to the current directory.
            self.WD = os.getcwd()
        dialog.Destroy()
    self.WD = os.path.realpath(self.WD)
    # name measurement file
    if self.data_model == 3:
        meas_file = 'measurements.txt'
    else:
        meas_file = 'magic_measurements.txt'
    self.magic_file = os.path.join(self.WD, meas_file)
    # initialize GUI_log: truncate any previous log, then reopen for appending
    self.GUI_log = open(os.path.join(self.WD, "thellier_GUI.log"), 'w+')
    self.GUI_log.write("starting...\n")
    self.GUI_log.close()
    self.GUI_log = open(os.path.join(self.WD, "thellier_GUI.log"), 'a')
    os.chdir(self.WD)
    self.WD = os.getcwd()
def populate_translation_fields(sender, kwargs):
"""
When models are created or loaded from fixtures, replicates values
provided for translatable fields to some / all empty translation fields,
according to the current population mode.
Population is performed only on keys (field names) present in kwargs.
Nothing is returned, but passed kwargs dictionary is altered.
With ``mode`` set to:
-- ``all``: fills all translation fields, skipping just those for
which a translated value is also provided;
-- ``default``: fills only the default translation (unless it is
additionally provided);
-- ``required``: like ``default``, but only if the original field is
non-nullable;
At least the ``required`` mode should be used when loading untranslated
fixtures to keep the database consistent (note that Django management
commands are normally forced to run with hardcoded ``en-us`` language
active). The ``default`` mode is useful if you need to ensure fallback
values are available, and ``all`` if you need to have all translations
defined (for example to make lookups / filtering without resorting to
query fallbacks).
"""
populate = mt_settings.AUTO_POPULATE
if not populate:
return
if populate is True:
# What was meant by ``True`` is now called ``all``.
populate = 'all'
opts = translator.get_options_for_model(sender)
for key, val in list(kwargs.items()):
if key in opts.fields:
if populate == 'all':
# Set the value for every language.
for translation_field in opts.fields[key]:
kwargs.setdefault(translation_field.name, val)
elif populate == 'default':
default = build_localized_fieldname(key, mt_settings.DEFAULT_LANGUAGE)
kwargs.setdefault(default, val)
elif populate == 'required':
default = build_localized_fieldname(key, mt_settings.DEFAULT_LANGUAGE)
if not sender._meta.get_field(key).null:
kwargs.setdefault(default, val)
else:
raise AttributeError("Unknown population mode '%s'." % populate) | When models are created or loaded from fixtures, replicates values
provided for translatable fields to some / all empty translation fields,
according to the current population mode.
Population is performed only on keys (field names) present in kwargs.
Nothing is returned, but passed kwargs dictionary is altered.
With ``mode`` set to:
-- ``all``: fills all translation fields, skipping just those for
which a translated value is also provided;
-- ``default``: fills only the default translation (unless it is
additionally provided);
-- ``required``: like ``default``, but only if the original field is
non-nullable;
At least the ``required`` mode should be used when loading untranslated
fixtures to keep the database consistent (note that Django management
commands are normally forced to run with hardcoded ``en-us`` language
active). The ``default`` mode is useful if you need to ensure fallback
values are available, and ``all`` if you need to have all translations
defined (for example to make lookups / filtering without resorting to
query fallbacks). | Below is the instruction that describes the task:
### Input:
When models are created or loaded from fixtures, replicates values
provided for translatable fields to some / all empty translation fields,
according to the current population mode.
Population is performed only on keys (field names) present in kwargs.
Nothing is returned, but passed kwargs dictionary is altered.
With ``mode`` set to:
-- ``all``: fills all translation fields, skipping just those for
which a translated value is also provided;
-- ``default``: fills only the default translation (unless it is
additionally provided);
-- ``required``: like ``default``, but only if the original field is
non-nullable;
At least the ``required`` mode should be used when loading untranslated
fixtures to keep the database consistent (note that Django management
commands are normally forced to run with hardcoded ``en-us`` language
active). The ``default`` mode is useful if you need to ensure fallback
values are available, and ``all`` if you need to have all translations
defined (for example to make lookups / filtering without resorting to
query fallbacks).
### Response:
def populate_translation_fields(sender, kwargs):
    """
    Replicate values given for translatable fields into empty translation
    fields when models are created or loaded from fixtures, according to
    the current population mode.
    Only keys (field names) present in ``kwargs`` are considered; the
    passed ``kwargs`` dictionary is altered in place and nothing is
    returned.
    Modes:
    -- ``all``: fill every translation field (unless a translated value
       is also provided);
    -- ``default``: fill only the default-language translation;
    -- ``required``: like ``default``, but only for non-nullable fields.
    Use at least ``required`` when loading untranslated fixtures to keep
    the database consistent; ``default`` guarantees fallback values, and
    ``all`` guarantees every translation is defined.
    """
    mode = mt_settings.AUTO_POPULATE
    if not mode:
        return
    if mode is True:
        # Boolean ``True`` is the legacy spelling of the ``all`` mode.
        mode = 'all'
    opts = translator.get_options_for_model(sender)
    # Snapshot items() because we mutate kwargs while iterating.
    for field_name, value in list(kwargs.items()):
        if field_name not in opts.fields:
            continue
        if mode == 'all':
            # Seed every language's translation field.
            for translation_field in opts.fields[field_name]:
                kwargs.setdefault(translation_field.name, value)
        elif mode == 'default':
            localized = build_localized_fieldname(field_name, mt_settings.DEFAULT_LANGUAGE)
            kwargs.setdefault(localized, value)
        elif mode == 'required':
            localized = build_localized_fieldname(field_name, mt_settings.DEFAULT_LANGUAGE)
            if not sender._meta.get_field(field_name).null:
                kwargs.setdefault(localized, value)
        else:
            raise AttributeError("Unknown population mode '%s'." % mode)
def create_stream(self, seek=None, writeable=False, update_buffer=True):
"""Prepares the session for streaming of audio/video
and returns a :class:`RTMPStream` object.
:param seek: int, Attempt to seek to this position.
:param writeable: bool, Make the stream writeable instead of readable.
:param update_buffer: bool, When enabled will attempt to speed up
download by telling the server our buffer can
fit the whole stream.
Raises :exc:`RTMPError` if a stream could not be created.
Usage::
>>> stream = conn.create_stream()
>>> data = stream.read(1024)
"""
if writeable:
librtmp.RTMP_EnableWrite(self.rtmp)
# Calling handle_packet() on a connect result causes
# librtmp to send a CreateStream call. This is not always
# desired when using process_packets(), therefore we do it
# here instead.
if self._connect_result:
self.handle_packet(self._connect_result)
if not seek:
seek = 0
res = librtmp.RTMP_ConnectStream(self.rtmp, seek)
if res < 1:
raise RTMPError("Failed to start RTMP playback")
return RTMPStream(self, update_buffer=update_buffer) | Prepares the session for streaming of audio/video
and returns a :class:`RTMPStream` object.
:param seek: int, Attempt to seek to this position.
:param writeable: bool, Make the stream writeable instead of readable.
:param update_buffer: bool, When enabled will attempt to speed up
download by telling the server our buffer can
fit the whole stream.
Raises :exc:`RTMPError` if a stream could not be created.
Usage::
>>> stream = conn.create_stream()
>>> data = stream.read(1024) | Below is the instruction that describes the task:
### Input:
Prepares the session for streaming of audio/video
and returns a :class:`RTMPStream` object.
:param seek: int, Attempt to seek to this position.
:param writeable: bool, Make the stream writeable instead of readable.
:param update_buffer: bool, When enabled will attempt to speed up
download by telling the server our buffer can
fit the whole stream.
Raises :exc:`RTMPError` if a stream could not be created.
Usage::
>>> stream = conn.create_stream()
>>> data = stream.read(1024)
### Response:
def create_stream(self, seek=None, writeable=False, update_buffer=True):
    """Prepares the session for streaming of audio/video
    and returns a :class:`RTMPStream` object.
    :param seek: int, Attempt to seek to this position.
    :param writeable: bool, Make the stream writeable instead of readable.
    :param update_buffer: bool, When enabled will attempt to speed up
                          download by telling the server our buffer can
                          fit the whole stream.
    Raises :exc:`RTMPError` if a stream could not be created.
    Usage::
      >>> stream = conn.create_stream()
      >>> data = stream.read(1024)
    """
    if writeable:
        librtmp.RTMP_EnableWrite(self.rtmp)
    # Calling handle_packet() on a connect result causes
    # librtmp to send a CreateStream call. This is not always
    # desired when using process_packets(), therefore we do it
    # here instead.
    if self._connect_result:
        self.handle_packet(self._connect_result)
    if not seek:
        # librtmp expects 0 for "no seek"; this also covers seek=None.
        seek = 0
    res = librtmp.RTMP_ConnectStream(self.rtmp, seek)
    if res < 1:
        raise RTMPError("Failed to start RTMP playback")
    return RTMPStream(self, update_buffer=update_buffer)
def _ends_with(self, string_to_edit, end): # pylint: disable=no-self-use
"""
Check if string ends with characters in end, if not merge end to string.
:param string_to_edit: string to check and edit.
:param end: str
:return: string_to_edit or string_to_edit + end
"""
if not string_to_edit.endswith(end):
return string_to_edit + end
return string_to_edit | Check if string ends with characters in end, if not merge end to string.
:param string_to_edit: string to check and edit.
:param end: str
:return: string_to_edit or string_to_edit + end | Below is the instruction that describes the task:
### Input:
Check if string ends with characters in end, if not merge end to string.
:param string_to_edit: string to check and edit.
:param end: str
:return: string_to_edit or string_to_edit + end
### Response:
def _ends_with(self, string_to_edit, end): # pylint: disable=no-self-use
"""
Check if string ends with characters in end, if not merge end to string.
:param string_to_edit: string to check and edit.
:param end: str
:return: string_to_edit or string_to_edit + end
"""
if not string_to_edit.endswith(end):
return string_to_edit + end
return string_to_edit |
def roll_dice(self): # Roll dice with current roll
"""
Rolls dicebag and sets last_roll and last_explanation to roll results
:return: Roll results.
"""
roll = roll_dice(self.roll, floats=self.floats, functions=self.functions)
self._last_roll = roll[0]
self._last_explanation = roll[1]
return self.last_roll, self.last_explanation | Rolls dicebag and sets last_roll and last_explanation to roll results
:return: Roll results. | Below is the instruction that describes the task:
### Input:
Rolls dicebag and sets last_roll and last_explanation to roll results
:return: Roll results.
### Response:
def roll_dice(self):
    """
    Roll this dicebag's current expression, caching the outcome in
    last_roll and last_explanation.
    :return: Roll results.
    """
    # NOTE: this invokes the module-level ``roll_dice`` helper, not itself.
    outcome = roll_dice(self.roll, floats=self.floats, functions=self.functions)
    self._last_roll = outcome[0]
    self._last_explanation = outcome[1]
    return self.last_roll, self.last_explanation
def check_perms(self, perms='0600,0400'):
"""Check and enforce the permissions of the config file.
Enforce permission on a provided configuration file. This will check
and see if the permission are set based on the permission octet as
set in the ``perms`` value. ``perms`` is a comma separated list
of acceptable perms in octal form. Defaults permissions to, 0600 and
0400.
:param perms: ``str``
"""
confpath = os.path.realpath(self.config_file)
mode = stat.S_IMODE(os.stat(confpath).st_mode)
if not any([mode == int(i, 8) for i in perms.split(',')]):
msg = (
'To use a configuration file the permissions'
' need to be any of the following "%s"' % perms
)
self.log.fatal(msg)
raise SystemExit(msg)
else:
self.log.info(
'Configuration file [ %s ] has been loaded',
self.config_file
)
return True | Check and enforce the permissions of the config file.
Enforce permission on a provided configuration file. This will check
and see if the permission are set based on the permission octet as
set in the ``perms`` value. ``perms`` is a comma separated list
of acceptable perms in octal form. Defaults permissions to, 0600 and
0400.
:param perms: ``str`` | Below is the instruction that describes the task:
### Input:
Check and enforce the permissions of the config file.
Enforce permission on a provided configuration file. This will check
and see if the permission are set based on the permission octet as
set in the ``perms`` value. ``perms`` is a comma separated list
of acceptable perms in octal form. Defaults permissions to, 0600 and
0400.
:param perms: ``str``
### Response:
def check_perms(self, perms='0600,0400'):
"""Check and enforce the permissions of the config file.
Enforce permission on a provided configuration file. This will check
and see if the permission are set based on the permission octet as
set in the ``perms`` value. ``perms`` is a comma separated list
of acceptable perms in octal form. Defaults permissions to, 0600 and
0400.
:param perms: ``str``
"""
confpath = os.path.realpath(self.config_file)
mode = stat.S_IMODE(os.stat(confpath).st_mode)
if not any([mode == int(i, 8) for i in perms.split(',')]):
msg = (
'To use a configuration file the permissions'
' need to be any of the following "%s"' % perms
)
self.log.fatal(msg)
raise SystemExit(msg)
else:
self.log.info(
'Configuration file [ %s ] has been loaded',
self.config_file
)
return True |
def installed(name, default=False, user=None):
'''
Verify that the specified python is installed with pyenv. pyenv is
installed if necessary.
name
The version of python to install
default : False
Whether to make this python the default.
user: None
The user to run pyenv as.
.. versionadded:: 0.17.0
.. versionadded:: 0.16.0
'''
ret = {'name': name, 'result': None, 'comment': '', 'changes': {}}
if name.startswith('python-'):
name = re.sub(r'^python-', '', name)
if __opts__['test']:
ret['comment'] = 'python {0} is set to be installed'.format(name)
return ret
ret = _check_pyenv(ret, user)
if ret['result'] is False:
if not __salt__['pyenv.install'](user):
ret['comment'] = 'pyenv failed to install'
return ret
else:
return _check_and_install_python(ret, name, default, user=user)
else:
return _check_and_install_python(ret, name, default, user=user) | Verify that the specified python is installed with pyenv. pyenv is
installed if necessary.
name
The version of python to install
default : False
Whether to make this python the default.
user: None
The user to run pyenv as.
.. versionadded:: 0.17.0
    .. versionadded:: 0.16.0 | Below is the instruction that describes the task:
### Input:
Verify that the specified python is installed with pyenv. pyenv is
installed if necessary.
name
The version of python to install
default : False
Whether to make this python the default.
user: None
The user to run pyenv as.
.. versionadded:: 0.17.0
.. versionadded:: 0.16.0
### Response:
def installed(name, default=False, user=None):
'''
Verify that the specified python is installed with pyenv. pyenv is
installed if necessary.
name
The version of python to install
default : False
Whether to make this python the default.
user: None
The user to run pyenv as.
.. versionadded:: 0.17.0
.. versionadded:: 0.16.0
'''
ret = {'name': name, 'result': None, 'comment': '', 'changes': {}}
if name.startswith('python-'):
name = re.sub(r'^python-', '', name)
if __opts__['test']:
ret['comment'] = 'python {0} is set to be installed'.format(name)
return ret
ret = _check_pyenv(ret, user)
if ret['result'] is False:
if not __salt__['pyenv.install'](user):
ret['comment'] = 'pyenv failed to install'
return ret
else:
return _check_and_install_python(ret, name, default, user=user)
else:
return _check_and_install_python(ret, name, default, user=user) |
def get_scorer(scoring, compute=True):
"""Get a scorer from string
Parameters
----------
scoring : str | callable
scoring method as string. If callable it is returned as is.
Returns
-------
scorer : callable
The scorer.
"""
# This is the same as sklearns, only we use our SCORERS dict,
# and don't have back-compat code
if isinstance(scoring, six.string_types):
try:
scorer, kwargs = SCORERS[scoring]
except KeyError:
raise ValueError(
"{} is not a valid scoring value. "
"Valid options are {}".format(scoring, sorted(SCORERS))
)
else:
scorer = scoring
kwargs = {}
kwargs["compute"] = compute
return make_scorer(scorer, **kwargs) | Get a scorer from string
Parameters
----------
scoring : str | callable
scoring method as string. If callable it is returned as is.
Returns
-------
scorer : callable
        The scorer. | Below is the instruction that describes the task:
### Input:
Get a scorer from string
Parameters
----------
scoring : str | callable
scoring method as string. If callable it is returned as is.
Returns
-------
scorer : callable
The scorer.
### Response:
def get_scorer(scoring, compute=True):
"""Get a scorer from string
Parameters
----------
scoring : str | callable
scoring method as string. If callable it is returned as is.
Returns
-------
scorer : callable
The scorer.
"""
# This is the same as sklearns, only we use our SCORERS dict,
# and don't have back-compat code
if isinstance(scoring, six.string_types):
try:
scorer, kwargs = SCORERS[scoring]
except KeyError:
raise ValueError(
"{} is not a valid scoring value. "
"Valid options are {}".format(scoring, sorted(SCORERS))
)
else:
scorer = scoring
kwargs = {}
kwargs["compute"] = compute
return make_scorer(scorer, **kwargs) |
def openable(cls):
"""
A class decorator to fill in certain methods and properties to ensure
a class can be used by :func:`.create_open`.
These instance methods and property will be added, if not defined
by the class:
- ``open(self, initial_msg, seed)``
- ``on_message(self, msg)``
- ``on_close(self, ex)``
- ``close(self, ex=None)``
- property ``listener``
"""
def open(self, initial_msg, seed):
pass
def on_message(self, msg):
raise NotImplementedError()
def on_close(self, ex):
logging.error('on_close() called due to %s: %s', type(ex).__name__, ex)
def close(self, ex=None):
raise ex if ex else exception.StopListening()
@property
def listener(self):
raise NotImplementedError()
def ensure_method(name, fn):
if getattr(cls, name, None) is None:
setattr(cls, name, fn)
# set attribute if no such attribute
ensure_method('open', open)
ensure_method('on_message', on_message)
ensure_method('on_close', on_close)
ensure_method('close', close)
ensure_method('listener', listener)
return cls | A class decorator to fill in certain methods and properties to ensure
a class can be used by :func:`.create_open`.
These instance methods and property will be added, if not defined
by the class:
- ``open(self, initial_msg, seed)``
- ``on_message(self, msg)``
- ``on_close(self, ex)``
- ``close(self, ex=None)``
    - property ``listener`` | Below is the instruction that describes the task:
### Input:
A class decorator to fill in certain methods and properties to ensure
a class can be used by :func:`.create_open`.
These instance methods and property will be added, if not defined
by the class:
- ``open(self, initial_msg, seed)``
- ``on_message(self, msg)``
- ``on_close(self, ex)``
- ``close(self, ex=None)``
- property ``listener``
### Response:
def openable(cls):
"""
A class decorator to fill in certain methods and properties to ensure
a class can be used by :func:`.create_open`.
These instance methods and property will be added, if not defined
by the class:
- ``open(self, initial_msg, seed)``
- ``on_message(self, msg)``
- ``on_close(self, ex)``
- ``close(self, ex=None)``
- property ``listener``
"""
def open(self, initial_msg, seed):
pass
def on_message(self, msg):
raise NotImplementedError()
def on_close(self, ex):
logging.error('on_close() called due to %s: %s', type(ex).__name__, ex)
def close(self, ex=None):
raise ex if ex else exception.StopListening()
@property
def listener(self):
raise NotImplementedError()
def ensure_method(name, fn):
if getattr(cls, name, None) is None:
setattr(cls, name, fn)
# set attribute if no such attribute
ensure_method('open', open)
ensure_method('on_message', on_message)
ensure_method('on_close', on_close)
ensure_method('close', close)
ensure_method('listener', listener)
return cls |
def authorized_create_user(self, identities=None, primary=None, permissions=None):
"""Creates Vingd user (profile & account), links it with the provided
identities (to be verified later), and sets the delegate-user
permissions (creator being the delegate). Returns Vingd user's `huid`
(hashed user id).
Example::
vingd.authorized_create_user(
identities={"facebook": "12312312", "mail": "user@example.com"},
primary="facebook",
permissions=["get.account.balance", "purchase.object"]
)
If `identities` and `primary` are unspecified, a "zombie" ("headless")
account is created (i.e. account with no identities associated,
user-unreachable).
:rtype: ``dict``
:returns: ``{'huid': <huid>}``
:raises GeneralException:
    :resource: ``id/users/``
:access: authorized users with ACL flag ``user.create``
"""
return self.request('post', 'id/users/', json.dumps({
'identities': identities,
'primary_identity': primary,
'delegate_permissions': permissions
})) | Creates Vingd user (profile & account), links it with the provided
identities (to be verified later), and sets the delegate-user
permissions (creator being the delegate). Returns Vingd user's `huid`
(hashed user id).
Example::
vingd.authorized_create_user(
identities={"facebook": "12312312", "mail": "user@example.com"},
primary="facebook",
permissions=["get.account.balance", "purchase.object"]
)
If `identities` and `primary` are unspecified, a "zombie" ("headless")
account is created (i.e. account with no identities associated,
user-unreachable).
:rtype: ``dict``
:returns: ``{'huid': <huid>}``
:raises GeneralException:
    :resource: ``id/users/``
    :access: authorized users with ACL flag ``user.create`` | Below is the instruction that describes the task:
### Input:
Creates Vingd user (profile & account), links it with the provided
identities (to be verified later), and sets the delegate-user
permissions (creator being the delegate). Returns Vingd user's `huid`
(hashed user id).
Example::
vingd.authorized_create_user(
identities={"facebook": "12312312", "mail": "user@example.com"},
primary="facebook",
permissions=["get.account.balance", "purchase.object"]
)
If `identities` and `primary` are unspecified, a "zombie" ("headless")
account is created (i.e. account with no identities associated,
user-unreachable).
:rtype: ``dict``
:returns: ``{'huid': <huid>}``
:raises GeneralException:
    :resource: ``id/users/``
:access: authorized users with ACL flag ``user.create``
### Response:
def authorized_create_user(self, identities=None, primary=None, permissions=None):
"""Creates Vingd user (profile & account), links it with the provided
identities (to be verified later), and sets the delegate-user
permissions (creator being the delegate). Returns Vingd user's `huid`
(hashed user id).
Example::
vingd.authorized_create_user(
identities={"facebook": "12312312", "mail": "user@example.com"},
primary="facebook",
permissions=["get.account.balance", "purchase.object"]
)
If `identities` and `primary` are unspecified, a "zombie" ("headless")
account is created (i.e. account with no identities associated,
user-unreachable).
:rtype: ``dict``
:returns: ``{'huid': <huid>}``
:raises GeneralException:
:resource: ``id/objects/<oid>/purchases``
:access: authorized users with ACL flag ``user.create``
"""
return self.request('post', 'id/users/', json.dumps({
'identities': identities,
'primary_identity': primary,
'delegate_permissions': permissions
})) |
def automatic_slug_renaming(slug, is_slug_safe):
"""Helper to add numbers to slugs"""
if not isinstance(is_slug_safe, collections.Callable):
raise TypeError('is_slug_safe must be callable')
if is_slug_safe(slug):
return slug
count = 2
new_slug = slug + "-" + str(count)
while not is_slug_safe(new_slug):
count = count + 1
new_slug = slug + "-" + str(count)
    return new_slug | Helper to add numbers to slugs | Below is the instruction that describes the task:
### Input:
Helper to add numbers to slugs
### Response:
def automatic_slug_renaming(slug, is_slug_safe):
"""Helper to add numbers to slugs"""
if not isinstance(is_slug_safe, collections.Callable):
raise TypeError('is_slug_safe must be callable')
if is_slug_safe(slug):
return slug
count = 2
new_slug = slug + "-" + str(count)
while not is_slug_safe(new_slug):
count = count + 1
new_slug = slug + "-" + str(count)
return new_slug |
def _to_utc(self, dt):
"""Takes a naive timezone with an localized value and return it formatted
as utc."""
tz = self._get_tz()
loc_dt = tz.localize(dt)
    return loc_dt.astimezone(pytz.utc) | Takes a naive timezone with a localized value and returns it formatted
    as utc. | Below is the instruction that describes the task:
### Input:
Takes a naive timezone with a localized value and returns it formatted
as utc.
### Response:
def _to_utc(self, dt):
"""Takes a naive timezone with an localized value and return it formatted
as utc."""
tz = self._get_tz()
loc_dt = tz.localize(dt)
return loc_dt.astimezone(pytz.utc) |
def setpathcost(self, port, cost):
""" Set port path cost value for STP protocol. """
_runshell([brctlexe, 'setpathcost', self.name, port, str(cost)],
"Could not set path cost in port %s in %s." % (port, self.name)) | Set port path cost value for STP protocol. | Below is the the instruction that describes the task:
### Input:
Set port path cost value for STP protocol.
### Response:
def setpathcost(self, port, cost):
""" Set port path cost value for STP protocol. """
_runshell([brctlexe, 'setpathcost', self.name, port, str(cost)],
"Could not set path cost in port %s in %s." % (port, self.name)) |
def format_table(table, column_names=None, column_specs=None, max_col_width=32,
report_dimensions=False):
''' Table pretty printer.
Expects tables to be given as arrays of arrays.
Example:
print format_table([[1, "2"], [3, "456"]], column_names=['A', 'B'])
'''
if len(table) > 0:
col_widths = [0] * len(list(table)[0])
elif column_specs is not None:
col_widths = [0] * (len(column_specs) + 1)
elif column_names is not None:
col_widths = [0] * len(column_names)
my_column_names = []
if column_specs is not None:
column_names = ['Row']
column_names.extend([col['name'] for col in column_specs])
column_specs = [{'name': 'Row', 'type': 'float'}] + column_specs
if column_names is not None:
for i in range(len(column_names)):
my_col = str(column_names[i])
if len(my_col) > max_col_width:
my_col = my_col[:max_col_width-1] + '…'
my_column_names.append(my_col)
col_widths[i] = max(col_widths[i], len(my_col))
my_table = []
for row in table:
my_row = []
for i in range(len(row)):
my_item = escape_unicode_string(str(row[i]))
if len(my_item) > max_col_width:
my_item = my_item[:max_col_width-1] + '…'
my_row.append(my_item)
col_widths[i] = max(col_widths[i], len(my_item))
my_table.append(my_row)
def border(i):
return WHITE() + i + ENDC()
type_colormap = {'boolean': BLUE(),
'integer': YELLOW(),
'float': WHITE(),
'string': GREEN()}
for i in 'uint8', 'int16', 'uint16', 'int32', 'uint32', 'int64':
type_colormap[i] = type_colormap['integer']
type_colormap['double'] = type_colormap['float']
def col_head(i):
if column_specs is not None:
return BOLD() + type_colormap[column_specs[i]['type']] + column_names[i] + ENDC()
else:
return BOLD() + WHITE() + column_names[i] + ENDC()
formatted_table = [border('┌') + border('┬').join(border('─')*i for i in col_widths) + border('┐')]
if len(my_column_names) > 0:
padded_column_names = [col_head(i) + ' '*(col_widths[i]-len(my_column_names[i])) for i in range(len(my_column_names))]
formatted_table.append(border('│') + border('│').join(padded_column_names) + border('│'))
formatted_table.append(border('├') + border('┼').join(border('─')*i for i in col_widths) + border('┤'))
for row in my_table:
padded_row = [row[i] + ' '*(col_widths[i]-len(row[i])) for i in range(len(row))]
formatted_table.append(border('│') + border('│').join(padded_row) + border('│'))
formatted_table.append(border('└') + border('┴').join(border('─')*i for i in col_widths) + border('┘'))
if report_dimensions:
return '\n'.join(formatted_table), len(formatted_table), sum(col_widths) + len(col_widths) + 1
else:
return '\n'.join(formatted_table) | Table pretty printer.
Expects tables to be given as arrays of arrays.
Example:
    print format_table([[1, "2"], [3, "456"]], column_names=['A', 'B']) | Below is the instruction that describes the task:
### Input:
Table pretty printer.
Expects tables to be given as arrays of arrays.
Example:
print format_table([[1, "2"], [3, "456"]], column_names=['A', 'B'])
### Response:
def format_table(table, column_names=None, column_specs=None, max_col_width=32,
report_dimensions=False):
''' Table pretty printer.
Expects tables to be given as arrays of arrays.
Example:
print format_table([[1, "2"], [3, "456"]], column_names=['A', 'B'])
'''
if len(table) > 0:
col_widths = [0] * len(list(table)[0])
elif column_specs is not None:
col_widths = [0] * (len(column_specs) + 1)
elif column_names is not None:
col_widths = [0] * len(column_names)
my_column_names = []
if column_specs is not None:
column_names = ['Row']
column_names.extend([col['name'] for col in column_specs])
column_specs = [{'name': 'Row', 'type': 'float'}] + column_specs
if column_names is not None:
for i in range(len(column_names)):
my_col = str(column_names[i])
if len(my_col) > max_col_width:
my_col = my_col[:max_col_width-1] + '…'
my_column_names.append(my_col)
col_widths[i] = max(col_widths[i], len(my_col))
my_table = []
for row in table:
my_row = []
for i in range(len(row)):
my_item = escape_unicode_string(str(row[i]))
if len(my_item) > max_col_width:
my_item = my_item[:max_col_width-1] + '…'
my_row.append(my_item)
col_widths[i] = max(col_widths[i], len(my_item))
my_table.append(my_row)
def border(i):
return WHITE() + i + ENDC()
type_colormap = {'boolean': BLUE(),
'integer': YELLOW(),
'float': WHITE(),
'string': GREEN()}
for i in 'uint8', 'int16', 'uint16', 'int32', 'uint32', 'int64':
type_colormap[i] = type_colormap['integer']
type_colormap['double'] = type_colormap['float']
def col_head(i):
if column_specs is not None:
return BOLD() + type_colormap[column_specs[i]['type']] + column_names[i] + ENDC()
else:
return BOLD() + WHITE() + column_names[i] + ENDC()
formatted_table = [border('┌') + border('┬').join(border('─')*i for i in col_widths) + border('┐')]
if len(my_column_names) > 0:
padded_column_names = [col_head(i) + ' '*(col_widths[i]-len(my_column_names[i])) for i in range(len(my_column_names))]
formatted_table.append(border('│') + border('│').join(padded_column_names) + border('│'))
formatted_table.append(border('├') + border('┼').join(border('─')*i for i in col_widths) + border('┤'))
for row in my_table:
padded_row = [row[i] + ' '*(col_widths[i]-len(row[i])) for i in range(len(row))]
formatted_table.append(border('│') + border('│').join(padded_row) + border('│'))
formatted_table.append(border('└') + border('┴').join(border('─')*i for i in col_widths) + border('┘'))
if report_dimensions:
return '\n'.join(formatted_table), len(formatted_table), sum(col_widths) + len(col_widths) + 1
else:
return '\n'.join(formatted_table) |
def __get_canonical_separate(self, input_separate):
"""!
@brief Return unified representation of separation value.
@details It represents list whose size is equal to amount of dynamics, where index of dynamic will show
where it should be displayed.
@param[in] input_separate (bool|list): Input separate representation that should transformed.
@return (list) Indexes where each dynamic should be displayed.
"""
if (isinstance(input_separate, list)):
separate = [0] * len(self.dynamics[0]);
for canvas_index in range(len(input_separate)):
dynamic_indexes = input_separate[canvas_index];
for dynamic_index in dynamic_indexes:
separate[dynamic_index] = canvas_index;
return separate;
elif (input_separate is False):
if (isinstance(self.dynamics[0], list) is True):
return [ self.canvas ] * len(self.dynamics[0]);
else:
return [ self.canvas ];
elif (input_separate is True):
if (isinstance(self.dynamics[0], list) is True):
return range(self.canvas, self.canvas + len(self.dynamics[0]));
else:
return [ self.canvas ];
else:
raise Exception("Incorrect type of argument 'separate' '%s'." % type(input_separate)); | !
@brief Return unified representation of separation value.
@details It represents list whose size is equal to amount of dynamics, where index of dynamic will show
where it should be displayed.
@param[in] input_separate (bool|list): Input separate representation that should transformed.
    @return (list) Indexes where each dynamic should be displayed. | Below is the instruction that describes the task:
### Input:
!
@brief Return unified representation of separation value.
@details It represents list whose size is equal to amount of dynamics, where index of dynamic will show
where it should be displayed.
@param[in] input_separate (bool|list): Input separate representation that should transformed.
@return (list) Indexes where each dynamic should be displayed.
### Response:
def __get_canonical_separate(self, input_separate):
"""!
@brief Return unified representation of separation value.
@details It represents list whose size is equal to amount of dynamics, where index of dynamic will show
where it should be displayed.
@param[in] input_separate (bool|list): Input separate representation that should transformed.
@return (list) Indexes where each dynamic should be displayed.
"""
if (isinstance(input_separate, list)):
separate = [0] * len(self.dynamics[0]);
for canvas_index in range(len(input_separate)):
dynamic_indexes = input_separate[canvas_index];
for dynamic_index in dynamic_indexes:
separate[dynamic_index] = canvas_index;
return separate;
elif (input_separate is False):
if (isinstance(self.dynamics[0], list) is True):
return [ self.canvas ] * len(self.dynamics[0]);
else:
return [ self.canvas ];
elif (input_separate is True):
if (isinstance(self.dynamics[0], list) is True):
return range(self.canvas, self.canvas + len(self.dynamics[0]));
else:
return [ self.canvas ];
else:
raise Exception("Incorrect type of argument 'separate' '%s'." % type(input_separate)); |
def snmp_server_group_notify(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
snmp_server = ET.SubElement(config, "snmp-server", xmlns="urn:brocade.com:mgmt:brocade-snmp")
group = ET.SubElement(snmp_server, "group")
group_name_key = ET.SubElement(group, "group-name")
group_name_key.text = kwargs.pop('group_name')
group_version_key = ET.SubElement(group, "group-version")
group_version_key.text = kwargs.pop('group_version')
notify = ET.SubElement(group, "notify")
notify.text = kwargs.pop('notify')
callback = kwargs.pop('callback', self._callback)
    return callback(config) | Auto Generated Code | Below is the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def snmp_server_group_notify(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
snmp_server = ET.SubElement(config, "snmp-server", xmlns="urn:brocade.com:mgmt:brocade-snmp")
group = ET.SubElement(snmp_server, "group")
group_name_key = ET.SubElement(group, "group-name")
group_name_key.text = kwargs.pop('group_name')
group_version_key = ET.SubElement(group, "group-version")
group_version_key.text = kwargs.pop('group_version')
notify = ET.SubElement(group, "notify")
notify.text = kwargs.pop('notify')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def case(text, casingformat='sentence'):
"""
Change the casing of some text.
:type text: string
:param text: The text to change the casing of.
:type casingformat: string
:param casingformat: The format of casing to apply to the text. Can be 'uppercase', 'lowercase', 'sentence' or 'caterpillar'.
:raises ValueError: Invalid text format specified.
>>> case("HELLO world", "uppercase")
'HELLO WORLD'
"""
# If the lowercase version of the casing format is 'uppercase'
if casingformat.lower() == 'uppercase':
# Return the uppercase version
return str(text.upper())
# If the lowercase version of the casing format is 'lowercase'
elif casingformat.lower() == 'lowercase':
# Return the lowercase version
return str(text.lower())
# If the lowercase version of the casing format is 'sentence'
elif casingformat.lower() == 'sentence':
# Return the sentence case version
return str(text[0].upper()) + str(text[1:])
# If the lowercase version of the casing format is 'caterpillar'
elif casingformat.lower() == 'caterpillar':
# Return the caterpillar case version
return str(text.lower().replace(" ", "_"))
# Raise a warning
raise ValueError("Invalid text format specified.") | Change the casing of some text.
:type text: string
:param text: The text to change the casing of.
:type casingformat: string
:param casingformat: The format of casing to apply to the text. Can be 'uppercase', 'lowercase', 'sentence' or 'caterpillar'.
:raises ValueError: Invalid text format specified.
>>> case("HELLO world", "uppercase")
    'HELLO WORLD' | Below is the instruction that describes the task:
### Input:
Change the casing of some text.
:type text: string
:param text: The text to change the casing of.
:type casingformat: string
:param casingformat: The format of casing to apply to the text. Can be 'uppercase', 'lowercase', 'sentence' or 'caterpillar'.
:raises ValueError: Invalid text format specified.
>>> case("HELLO world", "uppercase")
'HELLO WORLD'
### Response:
def case(text, casingformat='sentence'):
"""
Change the casing of some text.
:type text: string
:param text: The text to change the casing of.
:type casingformat: string
:param casingformat: The format of casing to apply to the text. Can be 'uppercase', 'lowercase', 'sentence' or 'caterpillar'.
:raises ValueError: Invalid text format specified.
>>> case("HELLO world", "uppercase")
'HELLO WORLD'
"""
# If the lowercase version of the casing format is 'uppercase'
if casingformat.lower() == 'uppercase':
# Return the uppercase version
return str(text.upper())
# If the lowercase version of the casing format is 'lowercase'
elif casingformat.lower() == 'lowercase':
# Return the lowercase version
return str(text.lower())
# If the lowercase version of the casing format is 'sentence'
elif casingformat.lower() == 'sentence':
# Return the sentence case version
return str(text[0].upper()) + str(text[1:])
# If the lowercase version of the casing format is 'caterpillar'
elif casingformat.lower() == 'caterpillar':
# Return the caterpillar case version
return str(text.lower().replace(" ", "_"))
# Raise a warning
raise ValueError("Invalid text format specified.") |
def find_duplicate_schedule_items(all_items):
"""Find talks / pages assigned to mulitple schedule items"""
duplicates = []
seen_talks = {}
for item in all_items:
if item.talk and item.talk in seen_talks:
duplicates.append(item)
if seen_talks[item.talk] not in duplicates:
duplicates.append(seen_talks[item.talk])
else:
seen_talks[item.talk] = item
    # We currently allow duplicate pages for cases where we need disjoint
# schedule items, like multiple open space sessions on different
# days and similar cases. This may be revisited later
    return duplicates | Find talks / pages assigned to multiple schedule items | Below is the instruction that describes the task:
### Input:
Find talks / pages assigned to multiple schedule items
### Response:
def find_duplicate_schedule_items(all_items):
"""Find talks / pages assigned to mulitple schedule items"""
duplicates = []
seen_talks = {}
for item in all_items:
if item.talk and item.talk in seen_talks:
duplicates.append(item)
if seen_talks[item.talk] not in duplicates:
duplicates.append(seen_talks[item.talk])
else:
seen_talks[item.talk] = item
    # We currently allow duplicate pages for cases where we need disjoint
# schedule items, like multiple open space sessions on different
# days and similar cases. This may be revisited later
return duplicates |
def calc_phase_space(self, freq, ConvFactor, PeakWidth=10000, FractionOfSampleFreq=1, timeStart=None, timeEnd =None, PointsOfPadding=500, ShowPSD=False):
"""
Calculates the position and velocity (in m) for use in plotting the phase space distribution.
Parameters
----------
freq : float
The frequenecy of the peak (Trapping frequency of the dimension of interest)
ConvFactor : float (or ufloat)
The conversion factor between Volts and Meters
PeakWidth : float, optional
The width of the peak. Defaults to 10KHz
FractionOfSampleFreq : int, optional
The fraction of the sample freq to use to filter the data.
Defaults to 1.
timeStart : float, optional
Starting time for data from which to calculate the phase space.
Defaults to start of time data.
timeEnd : float, optional
Ending time for data from which to calculate the phase space.
Defaults to start of time data.
PointsOfPadding : float, optional
How many points of the data at the beginning and end to disregard for plotting
the phase space, to remove filtering artifacts. Defaults to 500
ShowPSD : bool, optional
Where to show the PSD of the unfiltered and the filtered signal used
to make the phase space plot. Defaults to False.
*args, **kwargs : optional
args and kwargs passed to qplots.joint_plot
Returns
-------
time : ndarray
time corresponding to position and velocity
PosArray : ndarray
Array of position of the particle in time
VelArray : ndarray
Array of velocity of the particle in time
"""
_, Pos, fig, ax = self.filter_data(
freq, FractionOfSampleFreq, PeakWidth, MakeFig=ShowPSD, show_fig=ShowPSD, timeStart=timeStart, timeEnd=timeEnd)
time = self.time.get_array()
if timeStart != None:
StartIndex = _np.where(time == take_closest(time, timeStart))[0][0]
else:
StartIndex = 0
if timeEnd != None:
EndIndex = _np.where(time == take_closest(time, timeEnd))[0][0]
else:
EndIndex = -1
Pos = Pos[PointsOfPadding : -PointsOfPadding+1]
time = time[StartIndex:EndIndex][::FractionOfSampleFreq][PointsOfPadding : -PointsOfPadding+1]
if type(ConvFactor) == _uncertainties.core.Variable:
conv = ConvFactor.n
else:
conv = ConvFactor
PosArray = Pos / conv # converts V to m
VelArray = _np.diff(PosArray) * (self.SampleFreq / FractionOfSampleFreq) # calcs velocity (in m/s) by differtiating position
return time, PosArray, VelArray | Calculates the position and velocity (in m) for use in plotting the phase space distribution.
Parameters
----------
freq : float
The frequenecy of the peak (Trapping frequency of the dimension of interest)
ConvFactor : float (or ufloat)
The conversion factor between Volts and Meters
PeakWidth : float, optional
The width of the peak. Defaults to 10KHz
FractionOfSampleFreq : int, optional
The fraction of the sample freq to use to filter the data.
Defaults to 1.
timeStart : float, optional
Starting time for data from which to calculate the phase space.
Defaults to start of time data.
timeEnd : float, optional
Ending time for data from which to calculate the phase space.
Defaults to start of time data.
PointsOfPadding : float, optional
How many points of the data at the beginning and end to disregard for plotting
the phase space, to remove filtering artifacts. Defaults to 500
ShowPSD : bool, optional
Where to show the PSD of the unfiltered and the filtered signal used
to make the phase space plot. Defaults to False.
*args, **kwargs : optional
args and kwargs passed to qplots.joint_plot
Returns
-------
time : ndarray
time corresponding to position and velocity
PosArray : ndarray
Array of position of the particle in time
VelArray : ndarray
        Array of velocity of the particle in time | Below is the instruction that describes the task:
### Input:
Calculates the position and velocity (in m) for use in plotting the phase space distribution.
Parameters
----------
freq : float
The frequenecy of the peak (Trapping frequency of the dimension of interest)
ConvFactor : float (or ufloat)
The conversion factor between Volts and Meters
PeakWidth : float, optional
The width of the peak. Defaults to 10KHz
FractionOfSampleFreq : int, optional
The fraction of the sample freq to use to filter the data.
Defaults to 1.
timeStart : float, optional
Starting time for data from which to calculate the phase space.
Defaults to start of time data.
timeEnd : float, optional
Ending time for data from which to calculate the phase space.
Defaults to start of time data.
PointsOfPadding : float, optional
How many points of the data at the beginning and end to disregard for plotting
the phase space, to remove filtering artifacts. Defaults to 500
ShowPSD : bool, optional
Where to show the PSD of the unfiltered and the filtered signal used
to make the phase space plot. Defaults to False.
*args, **kwargs : optional
args and kwargs passed to qplots.joint_plot
Returns
-------
time : ndarray
time corresponding to position and velocity
PosArray : ndarray
Array of position of the particle in time
VelArray : ndarray
Array of velocity of the particle in time
### Response:
def calc_phase_space(self, freq, ConvFactor, PeakWidth=10000, FractionOfSampleFreq=1, timeStart=None, timeEnd =None, PointsOfPadding=500, ShowPSD=False):
"""
Calculates the position and velocity (in m) for use in plotting the phase space distribution.
Parameters
----------
freq : float
The frequenecy of the peak (Trapping frequency of the dimension of interest)
ConvFactor : float (or ufloat)
The conversion factor between Volts and Meters
PeakWidth : float, optional
The width of the peak. Defaults to 10KHz
FractionOfSampleFreq : int, optional
The fraction of the sample freq to use to filter the data.
Defaults to 1.
timeStart : float, optional
Starting time for data from which to calculate the phase space.
Defaults to start of time data.
timeEnd : float, optional
Ending time for data from which to calculate the phase space.
Defaults to start of time data.
PointsOfPadding : float, optional
How many points of the data at the beginning and end to disregard for plotting
the phase space, to remove filtering artifacts. Defaults to 500
ShowPSD : bool, optional
Where to show the PSD of the unfiltered and the filtered signal used
to make the phase space plot. Defaults to False.
*args, **kwargs : optional
args and kwargs passed to qplots.joint_plot
Returns
-------
time : ndarray
time corresponding to position and velocity
PosArray : ndarray
Array of position of the particle in time
VelArray : ndarray
Array of velocity of the particle in time
"""
_, Pos, fig, ax = self.filter_data(
freq, FractionOfSampleFreq, PeakWidth, MakeFig=ShowPSD, show_fig=ShowPSD, timeStart=timeStart, timeEnd=timeEnd)
time = self.time.get_array()
if timeStart != None:
StartIndex = _np.where(time == take_closest(time, timeStart))[0][0]
else:
StartIndex = 0
if timeEnd != None:
EndIndex = _np.where(time == take_closest(time, timeEnd))[0][0]
else:
EndIndex = -1
Pos = Pos[PointsOfPadding : -PointsOfPadding+1]
time = time[StartIndex:EndIndex][::FractionOfSampleFreq][PointsOfPadding : -PointsOfPadding+1]
if type(ConvFactor) == _uncertainties.core.Variable:
conv = ConvFactor.n
else:
conv = ConvFactor
PosArray = Pos / conv # converts V to m
VelArray = _np.diff(PosArray) * (self.SampleFreq / FractionOfSampleFreq) # calcs velocity (in m/s) by differtiating position
return time, PosArray, VelArray |
def prepare_search_body(self, should_terms=None, must_terms=None, must_not_terms=None, search_text='', start=None, end=None):
"""
Prepare body for elasticsearch query
Search parameters
^^^^^^^^^^^^^^^^^
These parameters are dictionaries and have format: <term>: [<value 1>, <value 2> ...]
should_terms: it resembles logical OR
must_terms: it resembles logical AND
must_not_terms: it resembles logical NOT
search_text : string
Text for FTS(full text search)
start, end : datetime
Filter for event creation time
"""
self.body = self.SearchBody()
self.body.set_should_terms(should_terms)
self.body.set_must_terms(must_terms)
self.body.set_must_not_terms(must_not_terms)
self.body.set_search_text(search_text)
self.body.set_timestamp_filter(start, end)
self.body.prepare() | Prepare body for elasticsearch query
Search parameters
^^^^^^^^^^^^^^^^^
These parameters are dictionaries and have format: <term>: [<value 1>, <value 2> ...]
should_terms: it resembles logical OR
must_terms: it resembles logical AND
must_not_terms: it resembles logical NOT
search_text : string
Text for FTS(full text search)
start, end : datetime
Filter for event creation time | Below is the the instruction that describes the task:
### Input:
Prepare body for elasticsearch query
Search parameters
^^^^^^^^^^^^^^^^^
These parameters are dictionaries and have format: <term>: [<value 1>, <value 2> ...]
should_terms: it resembles logical OR
must_terms: it resembles logical AND
must_not_terms: it resembles logical NOT
search_text : string
Text for FTS(full text search)
start, end : datetime
Filter for event creation time
### Response:
def prepare_search_body(self, should_terms=None, must_terms=None, must_not_terms=None, search_text='', start=None, end=None):
"""
Prepare body for elasticsearch query
Search parameters
^^^^^^^^^^^^^^^^^
These parameters are dictionaries and have format: <term>: [<value 1>, <value 2> ...]
should_terms: it resembles logical OR
must_terms: it resembles logical AND
must_not_terms: it resembles logical NOT
search_text : string
Text for FTS(full text search)
start, end : datetime
Filter for event creation time
"""
self.body = self.SearchBody()
self.body.set_should_terms(should_terms)
self.body.set_must_terms(must_terms)
self.body.set_must_not_terms(must_not_terms)
self.body.set_search_text(search_text)
self.body.set_timestamp_filter(start, end)
self.body.prepare() |
def ang2disc(nside, lon, lat, radius, inclusive=False, fact=4, nest=False):
"""
Wrap `query_disc` to use lon, lat, and radius in degrees.
"""
vec = ang2vec(lon,lat)
return query_disc(nside,vec,radius,inclusive,fact,nest) | Wrap `query_disc` to use lon, lat, and radius in degrees. | Below is the the instruction that describes the task:
### Input:
Wrap `query_disc` to use lon, lat, and radius in degrees.
### Response:
def ang2disc(nside, lon, lat, radius, inclusive=False, fact=4, nest=False):
"""
Wrap `query_disc` to use lon, lat, and radius in degrees.
"""
vec = ang2vec(lon,lat)
return query_disc(nside,vec,radius,inclusive,fact,nest) |
def set_value(self, key, field, value):
"""Add the state of the key and field"""
self._db.hset(key, field, value) | Add the state of the key and field | Below is the the instruction that describes the task:
### Input:
Add the state of the key and field
### Response:
def set_value(self, key, field, value):
"""Add the state of the key and field"""
self._db.hset(key, field, value) |
def _load_version2(self,filename):
"""load a version 2 control file
"""
self.lcount = 0
self.comments = {}
self.prior_information = self.null_prior
assert os.path.exists(filename), "couldn't find control file {0}".format(filename)
f = open(filename, 'r')
last_section = ""
req_sections = {"* parameter data":False, "* observation data":False,
"* model command line":False,"* model input":False, "* model output":False}
while True:
next_section, section_lines, comments = self._read_section_comments(f, True)
if "* control data" in last_section.lower():
req_sections[last_section] = True
iskeyword = False
if "keyword" in last_section.lower():
iskeyword = True
self.pestpp_options = self.control_data.parse_values_from_lines(section_lines, iskeyword=iskeyword)
if len(self.pestpp_options) > 0:
ppo = self.pestpp_options
svd_opts = ["svdmode","eigthresh","maxsing","eigwrite"]
for svd_opt in svd_opts:
if svd_opt in ppo:
self.svd_data.__setattr__(svd_opt, ppo.pop(svd_opt))
for reg_opt in self.reg_data.should_write:
if reg_opt in ppo:
self.reg_data.__setattr__(reg_opt, ppo.pop(reg_opt))
elif "* parameter groups" in last_section.lower():
req_sections[last_section] = True
self.parameter_groups = self._cast_df_from_lines(next_section, section_lines, self.pargp_fieldnames,
self.pargp_converters, self.pargp_defaults)
self.parameter_groups.index = self.parameter_groups.pargpnme
elif "* parameter data" in last_section.lower():
req_sections[last_section] = True
self.parameter_data = self._cast_df_from_lines(next_section, section_lines, self.par_fieldnames,
self.par_converters, self.par_defaults)
self.parameter_data.index = self.parameter_data.parnme
elif "* observation data" in last_section.lower():
req_sections[last_section] = True
self.observation_data = self._cast_df_from_lines(next_section, section_lines, self.obs_fieldnames,
self.obs_converters, self.obs_defaults)
self.observation_data.index = self.observation_data.obsnme
elif "* model command line" in last_section.lower():
req_sections[last_section] = True
for line in section_lines:
self.model_command.append(line.strip())
elif "* model input" in last_section.lower():
req_sections[last_section] = True
if section_lines[0].strip().split()[0].lower() == "external":
filename = section_lines[0].strip().split()[1]
assert os.path.exists(filename),"Pst.flex_load() external template data file '{0}' not found".format(filename)
df = pd.read_csv(filename)
df.columns = df.columns.str.lower()
assert "pest_file" in df.columns,"Pst.flex_load() external template data file must have 'pest_file' in columns"
assert "model_file" in df.columns, "Pst.flex_load() external template data file must have 'model_file' in columns"
for pfile,mfile in zip(df.pest_file,df.model_file):
self.template_files.append(pfile)
self.input_files.append(mfile)
else:
for iline,line in enumerate(section_lines):
raw = line.split()
self.template_files.append(raw[0])
self.input_files.append(raw[1])
elif "* model output" in last_section.lower():
req_sections[last_section] = True
if section_lines[0].strip().split()[0].lower() == "external":
filename = section_lines[0].strip().split()[1]
assert os.path.exists(filename), "Pst.flex_load() external instruction data file '{0}' not found".format(
filename)
df = pd.read_csv(filename)
df.columns = df.columns.str.lower()
assert "pest_file" in df.columns, "Pst.flex_load() external instruction data file must have 'pest_file' in columns"
assert "model_file" in df.columns, "Pst.flex_load() external instruction data file must have 'model_file' in columns"
for pfile, mfile in zip(df.pest_file, df.model_file):
self.instruction_files.append(pfile)
self.output_files.append(mfile)
else:
for iline, line in enumerate(section_lines):
raw = line.split()
self.instruction_files.append(raw[0])
self.output_files.append(raw[1])
elif "* prior information" in last_section.lower():
req_sections[last_section] = True
self._cast_prior_df_from_lines(section_lines)
elif len(last_section) > 0:
print("Pst._load_version2() warning: unrecognized section: ", last_section)
self.comments[last_section] = section_lines
last_section = next_section
if next_section == None or len(section_lines) == 0:
break
not_found = []
for section,found in req_sections.items():
if not found:
not_found.append(section)
if len(not_found) > 0:
raise Exception("Pst._load_version2() error: the following required sections were"+\
"not found:{0}".format(",".join(not_found))) | load a version 2 control file | Below is the the instruction that describes the task:
### Input:
load a version 2 control file
### Response:
def _load_version2(self,filename):
"""load a version 2 control file
"""
self.lcount = 0
self.comments = {}
self.prior_information = self.null_prior
assert os.path.exists(filename), "couldn't find control file {0}".format(filename)
f = open(filename, 'r')
last_section = ""
req_sections = {"* parameter data":False, "* observation data":False,
"* model command line":False,"* model input":False, "* model output":False}
while True:
next_section, section_lines, comments = self._read_section_comments(f, True)
if "* control data" in last_section.lower():
req_sections[last_section] = True
iskeyword = False
if "keyword" in last_section.lower():
iskeyword = True
self.pestpp_options = self.control_data.parse_values_from_lines(section_lines, iskeyword=iskeyword)
if len(self.pestpp_options) > 0:
ppo = self.pestpp_options
svd_opts = ["svdmode","eigthresh","maxsing","eigwrite"]
for svd_opt in svd_opts:
if svd_opt in ppo:
self.svd_data.__setattr__(svd_opt, ppo.pop(svd_opt))
for reg_opt in self.reg_data.should_write:
if reg_opt in ppo:
self.reg_data.__setattr__(reg_opt, ppo.pop(reg_opt))
elif "* parameter groups" in last_section.lower():
req_sections[last_section] = True
self.parameter_groups = self._cast_df_from_lines(next_section, section_lines, self.pargp_fieldnames,
self.pargp_converters, self.pargp_defaults)
self.parameter_groups.index = self.parameter_groups.pargpnme
elif "* parameter data" in last_section.lower():
req_sections[last_section] = True
self.parameter_data = self._cast_df_from_lines(next_section, section_lines, self.par_fieldnames,
self.par_converters, self.par_defaults)
self.parameter_data.index = self.parameter_data.parnme
elif "* observation data" in last_section.lower():
req_sections[last_section] = True
self.observation_data = self._cast_df_from_lines(next_section, section_lines, self.obs_fieldnames,
self.obs_converters, self.obs_defaults)
self.observation_data.index = self.observation_data.obsnme
elif "* model command line" in last_section.lower():
req_sections[last_section] = True
for line in section_lines:
self.model_command.append(line.strip())
elif "* model input" in last_section.lower():
req_sections[last_section] = True
if section_lines[0].strip().split()[0].lower() == "external":
filename = section_lines[0].strip().split()[1]
assert os.path.exists(filename),"Pst.flex_load() external template data file '{0}' not found".format(filename)
df = pd.read_csv(filename)
df.columns = df.columns.str.lower()
assert "pest_file" in df.columns,"Pst.flex_load() external template data file must have 'pest_file' in columns"
assert "model_file" in df.columns, "Pst.flex_load() external template data file must have 'model_file' in columns"
for pfile,mfile in zip(df.pest_file,df.model_file):
self.template_files.append(pfile)
self.input_files.append(mfile)
else:
for iline,line in enumerate(section_lines):
raw = line.split()
self.template_files.append(raw[0])
self.input_files.append(raw[1])
elif "* model output" in last_section.lower():
req_sections[last_section] = True
if section_lines[0].strip().split()[0].lower() == "external":
filename = section_lines[0].strip().split()[1]
assert os.path.exists(filename), "Pst.flex_load() external instruction data file '{0}' not found".format(
filename)
df = pd.read_csv(filename)
df.columns = df.columns.str.lower()
assert "pest_file" in df.columns, "Pst.flex_load() external instruction data file must have 'pest_file' in columns"
assert "model_file" in df.columns, "Pst.flex_load() external instruction data file must have 'model_file' in columns"
for pfile, mfile in zip(df.pest_file, df.model_file):
self.instruction_files.append(pfile)
self.output_files.append(mfile)
else:
for iline, line in enumerate(section_lines):
raw = line.split()
self.instruction_files.append(raw[0])
self.output_files.append(raw[1])
elif "* prior information" in last_section.lower():
req_sections[last_section] = True
self._cast_prior_df_from_lines(section_lines)
elif len(last_section) > 0:
print("Pst._load_version2() warning: unrecognized section: ", last_section)
self.comments[last_section] = section_lines
last_section = next_section
if next_section == None or len(section_lines) == 0:
break
not_found = []
for section,found in req_sections.items():
if not found:
not_found.append(section)
if len(not_found) > 0:
raise Exception("Pst._load_version2() error: the following required sections were"+\
"not found:{0}".format(",".join(not_found))) |
def possible_completions(self, e): # (M-?)
u"""List the possible completions of the text before point. """
completions = self._get_completions()
self._display_completions(completions)
self.finalize() | u"""List the possible completions of the text before point. | Below is the the instruction that describes the task:
### Input:
u"""List the possible completions of the text before point.
### Response:
def possible_completions(self, e): # (M-?)
u"""List the possible completions of the text before point. """
completions = self._get_completions()
self._display_completions(completions)
self.finalize() |
def hook_param(self, hook, p):
"""Parse a hook parameter"""
hook.listparam.append(p.pair)
return True | Parse a hook parameter | Below is the the instruction that describes the task:
### Input:
Parse a hook parameter
### Response:
def hook_param(self, hook, p):
"""Parse a hook parameter"""
hook.listparam.append(p.pair)
return True |
async def prover_create_credential_req(wallet_handle: int,
prover_did: str,
cred_offer_json: str,
cred_def_json: str,
master_secret_id: str) -> (str, str):
"""
Creates a clam request for the given credential offer.
The method creates a blinded master secret for a master secret identified by a provided name.
The master secret identified by the name must be already stored in the secure wallet (see prover_create_master_secret)
The blinded master secret is a part of the credential request.
:param wallet_handle: wallet handler (created by open_wallet).
:param prover_did: a DID of the prover
:param cred_offer_json: credential offer as a json containing information about the issuer and a credential
:param cred_def_json: credential definition json related to <cred_def_id> in <cred_offer_json>
:param master_secret_id: the id of the master secret stored in the wallet
:return:
cred_req_json: Credential request json for creation of credential by Issuer
{
"prover_did" : string,
"cred_def_id" : string,
// Fields below can depend on Cred Def type
"blinded_ms" : <blinded_master_secret>,
"blinded_ms_correctness_proof" : <blinded_ms_correctness_proof>,
"nonce": string
}
cred_req_metadata_json: Credential request metadata json for processing of received form Issuer credential.
Note: cred_req_metadata_json mustn't be shared with Issuer.
"""
logger = logging.getLogger(__name__)
logger.debug("prover_create_credential_req: >>> wallet_handle: %r, prover_did: %r, cred_offer_json: %r,"
" cred_def_json: %r, master_secret_id: %r",
wallet_handle,
prover_did,
cred_offer_json,
cred_def_json,
master_secret_id)
if not hasattr(prover_create_credential_req, "cb"):
logger.debug("prover_create_credential_req: Creating callback")
prover_create_credential_req.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32, c_char_p, c_char_p))
c_wallet_handle = c_int32(wallet_handle)
c_prover_did = c_char_p(prover_did.encode('utf-8'))
c_cred_offer_json = c_char_p(cred_offer_json.encode('utf-8'))
c_cred_def_json = c_char_p(cred_def_json.encode('utf-8'))
c_master_secret_id = c_char_p(master_secret_id.encode('utf-8'))
(credential_req_json, credential_req_metadata_json) = await do_call('indy_prover_create_credential_req',
c_wallet_handle,
c_prover_did,
c_cred_offer_json,
c_cred_def_json,
c_master_secret_id,
prover_create_credential_req.cb)
credential_req_json = credential_req_json.decode()
credential_req_metadata_json = credential_req_metadata_json.decode()
res = (credential_req_json, credential_req_metadata_json)
logger.debug("prover_create_credential_req: <<< res: %r", res)
return res | Creates a clam request for the given credential offer.
The method creates a blinded master secret for a master secret identified by a provided name.
The master secret identified by the name must be already stored in the secure wallet (see prover_create_master_secret)
The blinded master secret is a part of the credential request.
:param wallet_handle: wallet handler (created by open_wallet).
:param prover_did: a DID of the prover
:param cred_offer_json: credential offer as a json containing information about the issuer and a credential
:param cred_def_json: credential definition json related to <cred_def_id> in <cred_offer_json>
:param master_secret_id: the id of the master secret stored in the wallet
:return:
cred_req_json: Credential request json for creation of credential by Issuer
{
"prover_did" : string,
"cred_def_id" : string,
// Fields below can depend on Cred Def type
"blinded_ms" : <blinded_master_secret>,
"blinded_ms_correctness_proof" : <blinded_ms_correctness_proof>,
"nonce": string
}
cred_req_metadata_json: Credential request metadata json for processing of received form Issuer credential.
Note: cred_req_metadata_json mustn't be shared with Issuer. | Below is the the instruction that describes the task:
### Input:
Creates a clam request for the given credential offer.
The method creates a blinded master secret for a master secret identified by a provided name.
The master secret identified by the name must be already stored in the secure wallet (see prover_create_master_secret)
The blinded master secret is a part of the credential request.
:param wallet_handle: wallet handler (created by open_wallet).
:param prover_did: a DID of the prover
:param cred_offer_json: credential offer as a json containing information about the issuer and a credential
:param cred_def_json: credential definition json related to <cred_def_id> in <cred_offer_json>
:param master_secret_id: the id of the master secret stored in the wallet
:return:
cred_req_json: Credential request json for creation of credential by Issuer
{
"prover_did" : string,
"cred_def_id" : string,
// Fields below can depend on Cred Def type
"blinded_ms" : <blinded_master_secret>,
"blinded_ms_correctness_proof" : <blinded_ms_correctness_proof>,
"nonce": string
}
cred_req_metadata_json: Credential request metadata json for processing of received form Issuer credential.
Note: cred_req_metadata_json mustn't be shared with Issuer.
### Response:
async def prover_create_credential_req(wallet_handle: int,
prover_did: str,
cred_offer_json: str,
cred_def_json: str,
master_secret_id: str) -> (str, str):
"""
Creates a clam request for the given credential offer.
The method creates a blinded master secret for a master secret identified by a provided name.
The master secret identified by the name must be already stored in the secure wallet (see prover_create_master_secret)
The blinded master secret is a part of the credential request.
:param wallet_handle: wallet handler (created by open_wallet).
:param prover_did: a DID of the prover
:param cred_offer_json: credential offer as a json containing information about the issuer and a credential
:param cred_def_json: credential definition json related to <cred_def_id> in <cred_offer_json>
:param master_secret_id: the id of the master secret stored in the wallet
:return:
cred_req_json: Credential request json for creation of credential by Issuer
{
"prover_did" : string,
"cred_def_id" : string,
// Fields below can depend on Cred Def type
"blinded_ms" : <blinded_master_secret>,
"blinded_ms_correctness_proof" : <blinded_ms_correctness_proof>,
"nonce": string
}
cred_req_metadata_json: Credential request metadata json for processing of received form Issuer credential.
Note: cred_req_metadata_json mustn't be shared with Issuer.
"""
logger = logging.getLogger(__name__)
logger.debug("prover_create_credential_req: >>> wallet_handle: %r, prover_did: %r, cred_offer_json: %r,"
" cred_def_json: %r, master_secret_id: %r",
wallet_handle,
prover_did,
cred_offer_json,
cred_def_json,
master_secret_id)
if not hasattr(prover_create_credential_req, "cb"):
logger.debug("prover_create_credential_req: Creating callback")
prover_create_credential_req.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32, c_char_p, c_char_p))
c_wallet_handle = c_int32(wallet_handle)
c_prover_did = c_char_p(prover_did.encode('utf-8'))
c_cred_offer_json = c_char_p(cred_offer_json.encode('utf-8'))
c_cred_def_json = c_char_p(cred_def_json.encode('utf-8'))
c_master_secret_id = c_char_p(master_secret_id.encode('utf-8'))
(credential_req_json, credential_req_metadata_json) = await do_call('indy_prover_create_credential_req',
c_wallet_handle,
c_prover_did,
c_cred_offer_json,
c_cred_def_json,
c_master_secret_id,
prover_create_credential_req.cb)
credential_req_json = credential_req_json.decode()
credential_req_metadata_json = credential_req_metadata_json.decode()
res = (credential_req_json, credential_req_metadata_json)
logger.debug("prover_create_credential_req: <<< res: %r", res)
return res |
def threshold(self, scalars, vmin=None, vmax=None, useCells=False):
"""
Extracts cells where scalar value satisfies threshold criterion.
:param scalars: name of the scalars array.
:type scalars: str, list
:param float vmin: minimum value of the scalar
:param float vmax: maximum value of the scalar
:param bool useCells: if `True`, assume array scalars refers to cells.
.. hint:: |mesh_threshold| |mesh_threshold.py|_
"""
if utils.isSequence(scalars):
self.addPointScalars(scalars, "threshold")
scalars = "threshold"
elif self.scalars(scalars) is None:
colors.printc("~times No scalars found with name", scalars, c=1)
exit()
thres = vtk.vtkThreshold()
thres.SetInputData(self.poly)
if useCells:
asso = vtk.vtkDataObject.FIELD_ASSOCIATION_CELLS
else:
asso = vtk.vtkDataObject.FIELD_ASSOCIATION_POINTS
thres.SetInputArrayToProcess(0, 0, 0, asso, scalars)
if vmin is None and vmax is not None:
thres.ThresholdByLower(vmax)
elif vmax is None and vmin is not None:
thres.ThresholdByUpper(vmin)
else:
thres.ThresholdBetween(vmin, vmax)
thres.Update()
gf = vtk.vtkGeometryFilter()
gf.SetInputData(thres.GetOutput())
gf.Update()
return self.updateMesh(gf.GetOutput()) | Extracts cells where scalar value satisfies threshold criterion.
:param scalars: name of the scalars array.
:type scalars: str, list
:param float vmin: minimum value of the scalar
:param float vmax: maximum value of the scalar
:param bool useCells: if `True`, assume array scalars refers to cells.
.. hint:: |mesh_threshold| |mesh_threshold.py|_ | Below is the the instruction that describes the task:
### Input:
Extracts cells where scalar value satisfies threshold criterion.
:param scalars: name of the scalars array.
:type scalars: str, list
:param float vmin: minimum value of the scalar
:param float vmax: maximum value of the scalar
:param bool useCells: if `True`, assume array scalars refers to cells.
.. hint:: |mesh_threshold| |mesh_threshold.py|_
### Response:
def threshold(self, scalars, vmin=None, vmax=None, useCells=False):
"""
Extracts cells where scalar value satisfies threshold criterion.
:param scalars: name of the scalars array.
:type scalars: str, list
:param float vmin: minimum value of the scalar
:param float vmax: maximum value of the scalar
:param bool useCells: if `True`, assume array scalars refers to cells.
.. hint:: |mesh_threshold| |mesh_threshold.py|_
"""
if utils.isSequence(scalars):
self.addPointScalars(scalars, "threshold")
scalars = "threshold"
elif self.scalars(scalars) is None:
colors.printc("~times No scalars found with name", scalars, c=1)
exit()
thres = vtk.vtkThreshold()
thres.SetInputData(self.poly)
if useCells:
asso = vtk.vtkDataObject.FIELD_ASSOCIATION_CELLS
else:
asso = vtk.vtkDataObject.FIELD_ASSOCIATION_POINTS
thres.SetInputArrayToProcess(0, 0, 0, asso, scalars)
if vmin is None and vmax is not None:
thres.ThresholdByLower(vmax)
elif vmax is None and vmin is not None:
thres.ThresholdByUpper(vmin)
else:
thres.ThresholdBetween(vmin, vmax)
thres.Update()
gf = vtk.vtkGeometryFilter()
gf.SetInputData(thres.GetOutput())
gf.Update()
return self.updateMesh(gf.GetOutput()) |
def dump_code(disassembly, pc = None,
bLowercase = True,
bits = None):
"""
Dump a disassembly. Optionally mark where the program counter is.
@type disassembly: list of tuple( int, int, str, str )
@param disassembly: Disassembly dump as returned by
L{Process.disassemble} or L{Thread.disassemble_around_pc}.
@type pc: int
@param pc: (Optional) Program counter.
@type bLowercase: bool
@param bLowercase: (Optional) If C{True} convert the code to lowercase.
@type bits: int
@param bits:
(Optional) Number of bits of the target architecture.
The default is platform dependent. See: L{HexDump.address_size}
@rtype: str
@return: Text suitable for logging.
"""
if not disassembly:
return ''
table = Table(sep = ' | ')
for (addr, size, code, dump) in disassembly:
if bLowercase:
code = code.lower()
if addr == pc:
addr = ' * %s' % HexDump.address(addr, bits)
else:
addr = ' %s' % HexDump.address(addr, bits)
table.addRow(addr, dump, code)
table.justify(1, 1)
return table.getOutput() | Dump a disassembly. Optionally mark where the program counter is.
@type disassembly: list of tuple( int, int, str, str )
@param disassembly: Disassembly dump as returned by
L{Process.disassemble} or L{Thread.disassemble_around_pc}.
@type pc: int
@param pc: (Optional) Program counter.
@type bLowercase: bool
@param bLowercase: (Optional) If C{True} convert the code to lowercase.
@type bits: int
@param bits:
(Optional) Number of bits of the target architecture.
The default is platform dependent. See: L{HexDump.address_size}
@rtype: str
@return: Text suitable for logging. | Below is the the instruction that describes the task:
### Input:
Dump a disassembly. Optionally mark where the program counter is.
@type disassembly: list of tuple( int, int, str, str )
@param disassembly: Disassembly dump as returned by
L{Process.disassemble} or L{Thread.disassemble_around_pc}.
@type pc: int
@param pc: (Optional) Program counter.
@type bLowercase: bool
@param bLowercase: (Optional) If C{True} convert the code to lowercase.
@type bits: int
@param bits:
(Optional) Number of bits of the target architecture.
The default is platform dependent. See: L{HexDump.address_size}
@rtype: str
@return: Text suitable for logging.
### Response:
def dump_code(disassembly, pc = None,
bLowercase = True,
bits = None):
"""
Dump a disassembly. Optionally mark where the program counter is.
@type disassembly: list of tuple( int, int, str, str )
@param disassembly: Disassembly dump as returned by
L{Process.disassemble} or L{Thread.disassemble_around_pc}.
@type pc: int
@param pc: (Optional) Program counter.
@type bLowercase: bool
@param bLowercase: (Optional) If C{True} convert the code to lowercase.
@type bits: int
@param bits:
(Optional) Number of bits of the target architecture.
The default is platform dependent. See: L{HexDump.address_size}
@rtype: str
@return: Text suitable for logging.
"""
if not disassembly:
return ''
table = Table(sep = ' | ')
for (addr, size, code, dump) in disassembly:
if bLowercase:
code = code.lower()
if addr == pc:
addr = ' * %s' % HexDump.address(addr, bits)
else:
addr = ' %s' % HexDump.address(addr, bits)
table.addRow(addr, dump, code)
table.justify(1, 1)
return table.getOutput() |
def branchlist2branches(data, commdct, branchlist):
"""get branches from the branchlist"""
objkey = 'BranchList'.upper()
theobjects = data.dt[objkey]
fieldlists = []
objnames = [obj[1] for obj in theobjects]
for theobject in theobjects:
fieldlists.append(list(range(2, len(theobject))))
blists = extractfields(data, commdct, objkey, fieldlists)
thebranches = [branches for name, branches in zip(objnames, blists)
if name == branchlist]
return thebranches[0] | get branches from the branchlist | Below is the the instruction that describes the task:
### Input:
get branches from the branchlist
### Response:
def branchlist2branches(data, commdct, branchlist):
"""get branches from the branchlist"""
objkey = 'BranchList'.upper()
theobjects = data.dt[objkey]
fieldlists = []
objnames = [obj[1] for obj in theobjects]
for theobject in theobjects:
fieldlists.append(list(range(2, len(theobject))))
blists = extractfields(data, commdct, objkey, fieldlists)
thebranches = [branches for name, branches in zip(objnames, blists)
if name == branchlist]
return thebranches[0] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.