code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def _set_amz_headers(self):
"""
Sets x-amz-* error response fields from response headers.
"""
if self._response.headers:
# keeping x-amz-id-2 as part of amz_host_id.
if 'x-amz-id-2' in self._response.headers:
self.host_id = self._response.headers['x-amz-id-2']
if 'x-amz-request-id' in self._response.headers:
self.request_id = self._response.headers['x-amz-request-id']
# This is a new undocumented field, set only if available.
if 'x-amz-bucket-region' in self._response.headers:
self.region = self._response.headers['x-amz-bucket-region'] | Sets x-amz-* error response fields from response headers. | Below is the the instruction that describes the task:
### Input:
Sets x-amz-* error response fields from response headers.
### Response:
def _set_amz_headers(self):
"""
Sets x-amz-* error response fields from response headers.
"""
if self._response.headers:
# keeping x-amz-id-2 as part of amz_host_id.
if 'x-amz-id-2' in self._response.headers:
self.host_id = self._response.headers['x-amz-id-2']
if 'x-amz-request-id' in self._response.headers:
self.request_id = self._response.headers['x-amz-request-id']
# This is a new undocumented field, set only if available.
if 'x-amz-bucket-region' in self._response.headers:
self.region = self._response.headers['x-amz-bucket-region'] |
def _build_likelihood(self):
"""
Construct a tensorflow function to compute the bound on the marginal
likelihood.
"""
pX = DiagonalGaussian(self.X_mean, self.X_var)
num_inducing = len(self.feature)
psi0 = tf.reduce_sum(expectation(pX, self.kern))
psi1 = expectation(pX, (self.kern, self.feature))
psi2 = tf.reduce_sum(expectation(pX, (self.kern, self.feature), (self.kern, self.feature)), axis=0)
Kuu = features.Kuu(self.feature, self.kern, jitter=settings.jitter)
L = tf.cholesky(Kuu)
sigma2 = self.likelihood.variance
sigma = tf.sqrt(sigma2)
# Compute intermediate matrices
A = tf.matrix_triangular_solve(L, tf.transpose(psi1), lower=True) / sigma
tmp = tf.matrix_triangular_solve(L, psi2, lower=True)
AAT = tf.matrix_triangular_solve(L, tf.transpose(tmp), lower=True) / sigma2
B = AAT + tf.eye(num_inducing, dtype=settings.float_type)
LB = tf.cholesky(B)
log_det_B = 2. * tf.reduce_sum(tf.log(tf.matrix_diag_part(LB)))
c = tf.matrix_triangular_solve(LB, tf.matmul(A, self.Y), lower=True) / sigma
# KL[q(x) || p(x)]
dX_var = self.X_var if len(self.X_var.get_shape()) == 2 else tf.matrix_diag_part(self.X_var)
NQ = tf.cast(tf.size(self.X_mean), settings.float_type)
D = tf.cast(tf.shape(self.Y)[1], settings.float_type)
KL = -0.5 * tf.reduce_sum(tf.log(dX_var)) \
+ 0.5 * tf.reduce_sum(tf.log(self.X_prior_var)) \
- 0.5 * NQ \
+ 0.5 * tf.reduce_sum((tf.square(self.X_mean - self.X_prior_mean) + dX_var) / self.X_prior_var)
# compute log marginal bound
ND = tf.cast(tf.size(self.Y), settings.float_type)
bound = -0.5 * ND * tf.log(2 * np.pi * sigma2)
bound += -0.5 * D * log_det_B
bound += -0.5 * tf.reduce_sum(tf.square(self.Y)) / sigma2
bound += 0.5 * tf.reduce_sum(tf.square(c))
bound += -0.5 * D * (tf.reduce_sum(psi0) / sigma2 -
tf.reduce_sum(tf.matrix_diag_part(AAT)))
bound -= KL
return bound | Construct a tensorflow function to compute the bound on the marginal
likelihood. | Below is the instruction that describes the task:
### Input:
Construct a tensorflow function to compute the bound on the marginal
likelihood.
### Response:
def _build_likelihood(self):
    """
    Construct a tensorflow function to compute the bound on the marginal
    likelihood.

    Builds a variational lower bound (ELBO) over the outputs Y using
    inducing-point kernel expectations, minus the KL divergence between the
    variational posterior q(X) and the prior p(X) over the latent inputs.
    """
    # Variational posterior over the latent inputs: q(X) = N(X_mean, diag(X_var)).
    pX = DiagonalGaussian(self.X_mean, self.X_var)
    num_inducing = len(self.feature)
    # Kernel expectations ("Psi statistics") under q(X):
    # psi0: sum over data of <k(x_n, x_n)>; psi1: <K_nm>; psi2: sum_n <K_mn K_nm>.
    psi0 = tf.reduce_sum(expectation(pX, self.kern))
    psi1 = expectation(pX, (self.kern, self.feature))
    psi2 = tf.reduce_sum(expectation(pX, (self.kern, self.feature), (self.kern, self.feature)), axis=0)
    # Inducing-point covariance with jitter for numerical stability of the Cholesky.
    Kuu = features.Kuu(self.feature, self.kern, jitter=settings.jitter)
    L = tf.cholesky(Kuu)
    sigma2 = self.likelihood.variance
    sigma = tf.sqrt(sigma2)
    # Compute intermediate matrices
    A = tf.matrix_triangular_solve(L, tf.transpose(psi1), lower=True) / sigma
    tmp = tf.matrix_triangular_solve(L, psi2, lower=True)
    AAT = tf.matrix_triangular_solve(L, tf.transpose(tmp), lower=True) / sigma2
    B = AAT + tf.eye(num_inducing, dtype=settings.float_type)
    LB = tf.cholesky(B)
    # log|B| via the Cholesky factor's diagonal.
    log_det_B = 2. * tf.reduce_sum(tf.log(tf.matrix_diag_part(LB)))
    c = tf.matrix_triangular_solve(LB, tf.matmul(A, self.Y), lower=True) / sigma
    # KL[q(x) || p(x)]
    # X_var may be stored per-dimension or as a full matrix; reduce to the
    # diagonal in the latter case.  NOTE(review): assumes a 2-D X_var already
    # holds diagonal entries -- confirm against the model's parameterization.
    dX_var = self.X_var if len(self.X_var.get_shape()) == 2 else tf.matrix_diag_part(self.X_var)
    NQ = tf.cast(tf.size(self.X_mean), settings.float_type)
    D = tf.cast(tf.shape(self.Y)[1], settings.float_type)
    # Closed-form KL between the diagonal Gaussians q(X) and the prior.
    KL = -0.5 * tf.reduce_sum(tf.log(dX_var)) \
        + 0.5 * tf.reduce_sum(tf.log(self.X_prior_var)) \
        - 0.5 * NQ \
        + 0.5 * tf.reduce_sum((tf.square(self.X_mean - self.X_prior_mean) + dX_var) / self.X_prior_var)
    # compute log marginal bound
    ND = tf.cast(tf.size(self.Y), settings.float_type)
    bound = -0.5 * ND * tf.log(2 * np.pi * sigma2)
    bound += -0.5 * D * log_det_B
    bound += -0.5 * tf.reduce_sum(tf.square(self.Y)) / sigma2
    bound += 0.5 * tf.reduce_sum(tf.square(c))
    # Trace correction terms for the inducing-point approximation.
    bound += -0.5 * D * (tf.reduce_sum(psi0) / sigma2 -
                         tf.reduce_sum(tf.matrix_diag_part(AAT)))
    bound -= KL
    return bound
def list(self, request, project):
''' Routing to /api/project/{project}/seta/job-priorities/
This API can potentially have these consumers:
* Buildbot
* build_system_type=buildbot
* priority=5
* format=json
* TaskCluster (Gecko decision task)
* build_system_type=taskcluster
* format=json
'''
build_system_type = request.query_params.get('build_system_type', '*')
priority = request.query_params.get('priority')
try:
return Response(seta_job_scheduling(project, build_system_type, priority))
except SetaError as e:
return Response(str(e), status=status.HTTP_400_BAD_REQUEST) | Routing to /api/project/{project}/seta/job-priorities/
This API can potentially have these consumers:
* Buildbot
* build_system_type=buildbot
* priority=5
* format=json
* TaskCluster (Gecko decision task)
* build_system_type=taskcluster
* format=json | Below is the the instruction that describes the task:
### Input:
Routing to /api/project/{project}/seta/job-priorities/
This API can potentially have these consumers:
* Buildbot
* build_system_type=buildbot
* priority=5
* format=json
* TaskCluster (Gecko decision task)
* build_system_type=taskcluster
* format=json
### Response:
def list(self, request, project):
    """Handle GET /api/project/{project}/seta/job-priorities/.

    Known consumers of this endpoint:
    * Buildbot: build_system_type=buildbot, priority=5, format=json
    * TaskCluster (Gecko decision task): build_system_type=taskcluster,
      format=json
    """
    params = request.query_params
    build_system_type = params.get('build_system_type', '*')
    priority = params.get('priority')
    try:
        return Response(seta_job_scheduling(project, build_system_type, priority))
    except SetaError as error:
        return Response(str(error), status=status.HTTP_400_BAD_REQUEST)
def compress_dir(path, compression="gz"):
"""
Recursively compresses all files in a directory. Note that this
compresses all files singly, i.e., it does not create a tar archive. For
that, just use Python tarfile class.
Args:
path (str): Path to parent directory.
compression (str): A compression mode. Valid options are "gz" or
"bz2". Defaults to gz.
"""
for parent, subdirs, files in os.walk(path):
for f in files:
compress_file(os.path.join(parent, f), compression=compression) | Recursively compresses all files in a directory. Note that this
compresses all files singly, i.e., it does not create a tar archive. For
that, just use Python tarfile class.
Args:
path (str): Path to parent directory.
compression (str): A compression mode. Valid options are "gz" or
"bz2". Defaults to gz. | Below is the the instruction that describes the task:
### Input:
Recursively compresses all files in a directory. Note that this
compresses all files singly, i.e., it does not create a tar archive. For
that, just use Python tarfile class.
Args:
path (str): Path to parent directory.
compression (str): A compression mode. Valid options are "gz" or
"bz2". Defaults to gz.
### Response:
def compress_dir(path, compression="gz"):
    """
    Recursively compresses every file under ``path`` individually.  No tar
    archive is created; use the stdlib ``tarfile`` module for that.
    Args:
        path (str): Path to parent directory.
        compression (str): A compression mode. Valid options are "gz" or
            "bz2". Defaults to gz.
    """
    for parent, _subdirs, filenames in os.walk(path):
        for filename in filenames:
            compress_file(os.path.join(parent, filename), compression=compression)
def soft_triplet_loss(anchor, positive, negative, extra=True, scope="soft_triplet_loss"):
r"""Loss for triplet networks as described in the paper:
`Deep Metric Learning using Triplet Network
<https://arxiv.org/abs/1412.6622>`_ by Hoffer et al.
It is a softmax loss using :math:`(anchor-positive)^2` and
:math:`(anchor-negative)^2` as logits.
Args:
anchor (tf.Tensor): anchor feature vectors of shape [Batch, N].
positive (tf.Tensor): features of positive match of the same shape.
negative (tf.Tensor): features of negative match of the same shape.
extra (bool): also return distances for pos and neg.
Returns:
tf.Tensor: triplet-loss as scalar (and optionally average_pos_dist, average_neg_dist)
"""
eps = 1e-10
with tf.name_scope(scope):
d_pos = tf.sqrt(tf.reduce_sum(tf.square(anchor - positive), 1) + eps)
d_neg = tf.sqrt(tf.reduce_sum(tf.square(anchor - negative), 1) + eps)
logits = tf.stack([d_pos, d_neg], axis=1)
ones = tf.ones_like(tf.squeeze(d_pos), dtype="int32")
loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=ones))
if extra:
pos_dist = tf.reduce_mean(d_pos, name='pos-dist')
neg_dist = tf.reduce_mean(d_neg, name='neg-dist')
return loss, pos_dist, neg_dist
else:
return loss | r"""Loss for triplet networks as described in the paper:
`Deep Metric Learning using Triplet Network
<https://arxiv.org/abs/1412.6622>`_ by Hoffer et al.
It is a softmax loss using :math:`(anchor-positive)^2` and
:math:`(anchor-negative)^2` as logits.
Args:
anchor (tf.Tensor): anchor feature vectors of shape [Batch, N].
positive (tf.Tensor): features of positive match of the same shape.
negative (tf.Tensor): features of negative match of the same shape.
extra (bool): also return distances for pos and neg.
Returns:
tf.Tensor: triplet-loss as scalar (and optionally average_pos_dist, average_neg_dist) | Below is the the instruction that describes the task:
### Input:
r"""Loss for triplet networks as described in the paper:
`Deep Metric Learning using Triplet Network
<https://arxiv.org/abs/1412.6622>`_ by Hoffer et al.
It is a softmax loss using :math:`(anchor-positive)^2` and
:math:`(anchor-negative)^2` as logits.
Args:
anchor (tf.Tensor): anchor feature vectors of shape [Batch, N].
positive (tf.Tensor): features of positive match of the same shape.
negative (tf.Tensor): features of negative match of the same shape.
extra (bool): also return distances for pos and neg.
Returns:
tf.Tensor: triplet-loss as scalar (and optionally average_pos_dist, average_neg_dist)
### Response:
def soft_triplet_loss(anchor, positive, negative, extra=True, scope="soft_triplet_loss"):
    r"""Loss for triplet networks as described in the paper:
    `Deep Metric Learning using Triplet Network
    <https://arxiv.org/abs/1412.6622>`_ by Hoffer et al.
    It is a softmax loss using :math:`(anchor-positive)^2` and
    :math:`(anchor-negative)^2` as logits.
    Args:
        anchor (tf.Tensor): anchor feature vectors of shape [Batch, N].
        positive (tf.Tensor): features of positive match of the same shape.
        negative (tf.Tensor): features of negative match of the same shape.
        extra (bool): also return distances for pos and neg.
    Returns:
        tf.Tensor: triplet-loss as scalar (and optionally average_pos_dist, average_neg_dist)
    """
    # Small constant keeping sqrt differentiable when a distance is zero.
    eps = 1e-10
    with tf.name_scope(scope):
        # Per-sample Euclidean distances to the positive and negative matches.
        d_pos = tf.sqrt(tf.reduce_sum(tf.square(anchor - positive), 1) + eps)
        d_neg = tf.sqrt(tf.reduce_sum(tf.square(anchor - negative), 1) + eps)
        # The two distances become a pair of logits; label 1 selects d_neg as
        # the "correct class", so the cross-entropy pushes d_neg above d_pos.
        logits = tf.stack([d_pos, d_neg], axis=1)
        ones = tf.ones_like(tf.squeeze(d_pos), dtype="int32")
        loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=ones))
        if extra:
            # Batch-averaged distances, exposed for monitoring/summaries.
            pos_dist = tf.reduce_mean(d_pos, name='pos-dist')
            neg_dist = tf.reduce_mean(d_neg, name='neg-dist')
            return loss, pos_dist, neg_dist
        else:
            return loss
def get(self):
"""
Return the HTTP code status.
:return: The matched and formatted status code.
:rtype: str|int|None
"""
if PyFunceble.HTTP_CODE["active"]:
# The http status code extraction is activated.
# We get the http status code.
http_code = self._access()
# We initiate a variable which will save the list of allowed
# http status code.
list_of_valid_http_code = []
for codes in [
PyFunceble.HTTP_CODE["list"]["up"],
PyFunceble.HTTP_CODE["list"]["potentially_down"],
PyFunceble.HTTP_CODE["list"]["potentially_up"],
]:
# We loop throught the list of http status code.
# We extend the list of valid with the currently read
# codes.
list_of_valid_http_code.extend(codes)
if http_code not in list_of_valid_http_code or http_code is None:
# * The extracted http code is not in the list of valid http code.
# or
# * The extracted http code is equal to `None`.
# We return 3 star in order to mention that we were not eable to extract
# the http status code.
return "*" * 3
# * The extracted http code is in the list of valid http code.
# or
# * The extracted http code is not equal to `None`.
# We return the extracted http status code.
return http_code
# The http status code extraction is activated.
# We return None.
return None | Return the HTTP code status.
:return: The matched and formatted status code.
:rtype: str|int|None | Below is the the instruction that describes the task:
### Input:
Return the HTTP code status.
:return: The matched and formatted status code.
:rtype: str|int|None
### Response:
def get(self):
    """
    Return the HTTP code status.
    :return: The matched and formatted status code.
    :rtype: str|int|None
    """
    if PyFunceble.HTTP_CODE["active"]:
        # The http status code extraction is activated.
        # We get the http status code.
        http_code = self._access()
        # We initiate a variable which will save the list of allowed
        # http status codes.
        list_of_valid_http_code = []
        for codes in [
            PyFunceble.HTTP_CODE["list"]["up"],
            PyFunceble.HTTP_CODE["list"]["potentially_down"],
            PyFunceble.HTTP_CODE["list"]["potentially_up"],
        ]:
            # We loop through the lists of http status codes and
            # extend the list of valid codes with the currently read
            # ones.
            list_of_valid_http_code.extend(codes)
        if http_code not in list_of_valid_http_code or http_code is None:
            # * The extracted http code is not in the list of valid http codes.
            # or
            # * The extracted http code is equal to `None`.
            # We return 3 stars in order to mention that we were not able
            # to extract the http status code.
            return "*" * 3
        # * The extracted http code is in the list of valid http codes.
        # and
        # * The extracted http code is not equal to `None`.
        # We return the extracted http status code.
        return http_code
    # The http status code extraction is deactivated.
    # We return None.
    return None
def set_linked_probs(self, linked_statements):
"""Sets the belief probabilities for a list of linked INDRA Statements.
The list of LinkedStatement objects is assumed to come from the
MechanismLinker. The belief probability of the inferred Statement is
assigned the joint probability of its source Statements.
Parameters
----------
linked_statements : list[indra.mechlinker.LinkedStatement]
A list of INDRA LinkedStatements whose belief scores are to
be calculated. The belief attribute of the inferred Statement in
the LinkedStatement object is updated by this function.
"""
for st in linked_statements:
source_probs = [s.belief for s in st.source_stmts]
st.inferred_stmt.belief = numpy.prod(source_probs) | Sets the belief probabilities for a list of linked INDRA Statements.
The list of LinkedStatement objects is assumed to come from the
MechanismLinker. The belief probability of the inferred Statement is
assigned the joint probability of its source Statements.
Parameters
----------
linked_statements : list[indra.mechlinker.LinkedStatement]
A list of INDRA LinkedStatements whose belief scores are to
be calculated. The belief attribute of the inferred Statement in
the LinkedStatement object is updated by this function. | Below is the the instruction that describes the task:
### Input:
Sets the belief probabilities for a list of linked INDRA Statements.
The list of LinkedStatement objects is assumed to come from the
MechanismLinker. The belief probability of the inferred Statement is
assigned the joint probability of its source Statements.
Parameters
----------
linked_statements : list[indra.mechlinker.LinkedStatement]
A list of INDRA LinkedStatements whose belief scores are to
be calculated. The belief attribute of the inferred Statement in
the LinkedStatement object is updated by this function.
### Response:
def set_linked_probs(self, linked_statements):
    """Sets the belief probabilities for a list of linked INDRA Statements.
    The list of LinkedStatement objects is assumed to come from the
    MechanismLinker.  Each inferred Statement is assigned the joint (product)
    probability of its source Statements' beliefs.
    Parameters
    ----------
    linked_statements : list[indra.mechlinker.LinkedStatement]
        A list of INDRA LinkedStatements whose belief scores are to
        be calculated.  The belief attribute of the inferred Statement in
        each LinkedStatement object is updated in place.
    """
    for linked in linked_statements:
        beliefs = [stmt.belief for stmt in linked.source_stmts]
        linked.inferred_stmt.belief = numpy.prod(beliefs)
def default(self, obj):
"""Default object encoder function
Args:
obj (:obj:`Any`): Object to be serialized
Returns:
JSON string
"""
if isinstance(obj, datetime):
return obj.isoformat()
if issubclass(obj.__class__, Enum.__class__):
return obj.value
to_json = getattr(obj, 'to_json', None)
if to_json:
out = obj.to_json()
if issubclass(obj.__class__, Model):
out.update({'__type': obj.__class__.__name__})
return out
return JSONEncoder.default(self, obj) | Default object encoder function
Args:
obj (:obj:`Any`): Object to be serialized
Returns:
JSON string | Below is the the instruction that describes the task:
### Input:
Default object encoder function
Args:
obj (:obj:`Any`): Object to be serialized
Returns:
JSON string
### Response:
def default(self, obj):
    """Default object encoder function.

    Args:
        obj (:obj:`Any`): Object to be serialized.

    Returns:
        A JSON-serializable representation of ``obj``.

    Raises:
        TypeError: If ``obj`` has no known serialization (from the
            ``JSONEncoder.default`` fallback).
    """
    if isinstance(obj, datetime):
        # Timestamps serialize as ISO-8601 strings.
        return obj.isoformat()
    if isinstance(obj, Enum):
        # Bug fix: the previous check, issubclass(obj.__class__,
        # Enum.__class__), compared against the Enum *metaclass* and never
        # matched enum members, so members fell through to the fallback.
        # Enum members serialize via their value.
        return obj.value
    to_json = getattr(obj, 'to_json', None)
    if to_json:
        out = obj.to_json()
        if issubclass(obj.__class__, Model):
            # Tag model payloads with their concrete type name so they can
            # be reconstructed on deserialization.
            out.update({'__type': obj.__class__.__name__})
        return out
    return JSONEncoder.default(self, obj)
def __execute_cmd(name, cmd):
'''
Execute Riak commands
'''
return __salt__['cmd.run_all'](
'{0} {1}'.format(salt.utils.path.which(name), cmd)
) | Execute Riak commands | Below is the the instruction that describes the task:
### Input:
Execute Riak commands
### Response:
def __execute_cmd(name, cmd):
    '''
    Execute Riak commands
    '''
    # Resolve the executable's full path before handing off to cmd.run_all.
    executable = salt.utils.path.which(name)
    return __salt__['cmd.run_all']('{0} {1}'.format(executable, cmd))
def execute(command, detach=False):
"""
Runs a command in background. No output is retrieved. Useful for running GUI
applications that would block click events.
:param command: A string or a list of strings containing the name and
arguments of the program.
:param detach: If set to `True` the program will be executed using the
`i3-msg` command. As a result the program is executed independent of
i3pystatus as a child of i3 process. Because of how i3-msg parses its
arguments the type of `command` is limited to string in this mode.
"""
if detach:
if not isinstance(command, str):
msg = "Detached mode expects a string as command, not {}".format(
command)
logging.getLogger("i3pystatus.core.command").error(msg)
raise AttributeError(msg)
command = ["i3-msg", "exec", command]
else:
if isinstance(command, str):
command = shlex.split(command)
try:
subprocess.Popen(command, stdin=subprocess.DEVNULL,
stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
except OSError:
logging.getLogger("i3pystatus.core.command").exception("")
except subprocess.CalledProcessError:
logging.getLogger("i3pystatus.core.command").exception("") | Runs a command in background. No output is retrieved. Useful for running GUI
applications that would block click events.
:param command: A string or a list of strings containing the name and
arguments of the program.
:param detach: If set to `True` the program will be executed using the
`i3-msg` command. As a result the program is executed independent of
i3pystatus as a child of i3 process. Because of how i3-msg parses its
arguments the type of `command` is limited to string in this mode. | Below is the the instruction that describes the task:
### Input:
Runs a command in background. No output is retrieved. Useful for running GUI
applications that would block click events.
:param command: A string or a list of strings containing the name and
arguments of the program.
:param detach: If set to `True` the program will be executed using the
`i3-msg` command. As a result the program is executed independent of
i3pystatus as a child of i3 process. Because of how i3-msg parses its
arguments the type of `command` is limited to string in this mode.
### Response:
def execute(command, detach=False):
    """
    Runs a command in background. No output is retrieved. Useful for running GUI
    applications that would block click events.
    :param command: A string or a list of strings containing the name and
        arguments of the program.
    :param detach: If set to `True` the program will be executed using the
        `i3-msg` command. As a result the program is executed independent of
        i3pystatus as a child of i3 process. Because of how i3-msg parses its
        arguments the type of `command` is limited to string in this mode.
    """
    log = logging.getLogger("i3pystatus.core.command")
    if detach:
        # i3-msg only takes a single command string; reject anything else.
        if not isinstance(command, str):
            msg = "Detached mode expects a string as command, not {}".format(
                command)
            log.error(msg)
            raise AttributeError(msg)
        command = ["i3-msg", "exec", command]
    elif isinstance(command, str):
        command = shlex.split(command)
    try:
        # Detach all standard streams so the child never blocks on us.
        subprocess.Popen(command, stdin=subprocess.DEVNULL,
                         stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
    except (OSError, subprocess.CalledProcessError):
        # Best-effort launch: log the failure but never propagate it.
        log.exception("")
def maybe_curry(maybe_fn, first_arg) -> 'Function | Any':
"""
If maybe_fn is a function, curries it and passes in first_arg. Otherwise
returns maybe_fn.
"""
if not callable(maybe_fn):
return maybe_fn
return tz.curry(maybe_fn)(first_arg) | If maybe_fn is a function, curries it and passes in first_arg. Otherwise
returns maybe_fn. | Below is the the instruction that describes the task:
### Input:
If maybe_fn is a function, curries it and passes in first_arg. Otherwise
returns maybe_fn.
### Response:
def maybe_curry(maybe_fn, first_arg) -> 'Function | Any':
    """
    If maybe_fn is callable, return a curried version of it with first_arg
    already applied; otherwise return maybe_fn unchanged.
    """
    if callable(maybe_fn):
        return tz.curry(maybe_fn)(first_arg)
    return maybe_fn
def Cube(x, a, b, c, d):
"""Third order polynomial
Inputs:
-------
``x``: independent variable
``a``: coefficient of the third-order term
``b``: coefficient of the second-order term
``c``: coefficient of the first-order term
``d``: additive constant
Formula:
--------
``a*x^3 + b*x^2 + c*x + d``
"""
return a * x ** 3 + b * x ** 2 + c * x + d | Third order polynomial
Inputs:
-------
``x``: independent variable
``a``: coefficient of the third-order term
``b``: coefficient of the second-order term
``c``: coefficient of the first-order term
``d``: additive constant
Formula:
--------
``a*x^3 + b*x^2 + c*x + d`` | Below is the the instruction that describes the task:
### Input:
Third order polynomial
Inputs:
-------
``x``: independent variable
``a``: coefficient of the third-order term
``b``: coefficient of the second-order term
``c``: coefficient of the first-order term
``d``: additive constant
Formula:
--------
``a*x^3 + b*x^2 + c*x + d``
### Response:
def Cube(x, a, b, c, d):
    """Third order polynomial
    Inputs:
    -------
        ``x``: independent variable
        ``a``: coefficient of the third-order term
        ``b``: coefficient of the second-order term
        ``c``: coefficient of the first-order term
        ``d``: additive constant
    Formula:
    --------
        ``a*x^3 + b*x^2 + c*x + d``
    """
    # Terms are summed in the same left-to-right order as the original
    # expression so floating-point results are bit-identical.
    cubic = a * x ** 3
    quadratic = b * x ** 2
    linear = c * x
    return cubic + quadratic + linear + d
def start(self, s):
# type: (Optional[Type[Nonterminal]]) -> None
"""
Set start symbol of the grammar.
:param s: Start symbol to set.
:raise NonterminalDoesNotExistsException: If the start symbol is not in nonterminals.
"""
if s is not None and s not in self.nonterminals:
raise NonterminalDoesNotExistsException(None, s, self)
self._start_symbol = s | Set start symbol of the grammar.
:param s: Start symbol to set.
:raise NonterminalDoesNotExistsException: If the start symbol is not in nonterminals. | Below is the the instruction that describes the task:
### Input:
Set start symbol of the grammar.
:param s: Start symbol to set.
:raise NonterminalDoesNotExistsException: If the start symbol is not in nonterminals.
### Response:
def start(self, s):
    # type: (Optional[Type[Nonterminal]]) -> None
    """
    Set the start symbol of the grammar.
    :param s: Start symbol to set, or None to unset it.
    :raise NonterminalDoesNotExistsException: If the start symbol is not in nonterminals.
    """
    if s is None:
        # None always clears the start symbol; no membership check needed.
        self._start_symbol = None
        return
    if s not in self.nonterminals:
        raise NonterminalDoesNotExistsException(None, s, self)
    self._start_symbol = s
def encryption_key(self, alg, **kwargs):
"""
Return an encryption key as per
http://openid.net/specs/openid-connect-core-1_0.html#Encryption
:param alg: encryption algorithm
:param kwargs:
:return: encryption key as byte string
"""
if not self.key:
self.deserialize()
try:
tsize = ALG2KEYLEN[alg]
except KeyError:
raise UnsupportedAlgorithm(alg)
if tsize <= 32:
# SHA256
_enc_key = sha256_digest(self.key)[:tsize]
elif tsize <= 48:
# SHA384
_enc_key = sha384_digest(self.key)[:tsize]
elif tsize <= 64:
# SHA512
_enc_key = sha512_digest(self.key)[:tsize]
else:
raise JWKException("No support for symmetric keys > 512 bits")
logger.debug('Symmetric encryption key: {}'.format(
as_unicode(b64e(_enc_key))))
return _enc_key | Return an encryption key as per
http://openid.net/specs/openid-connect-core-1_0.html#Encryption
:param alg: encryption algorithm
:param kwargs:
:return: encryption key as byte string | Below is the the instruction that describes the task:
### Input:
Return an encryption key as per
http://openid.net/specs/openid-connect-core-1_0.html#Encryption
:param alg: encryption algorithm
:param kwargs:
:return: encryption key as byte string
### Response:
def encryption_key(self, alg, **kwargs):
    """
    Return an encryption key as per
    http://openid.net/specs/openid-connect-core-1_0.html#Encryption
    :param alg: encryption algorithm
    :param kwargs: unused; accepted for interface compatibility
    :return: encryption key as byte string
    :raises UnsupportedAlgorithm: if `alg` has no registered key length
    :raises JWKException: if the required key length exceeds 512 bits
    """
    if not self.key:
        # Lazily materialize the raw key bytes on first use.
        self.deserialize()
    try:
        # Target key length (in bytes) required by the algorithm.
        tsize = ALG2KEYLEN[alg]
    except KeyError:
        raise UnsupportedAlgorithm(alg)
    # Derive the key by hashing the symmetric secret and truncating to the
    # target size; the digest family is picked by the required length.
    if tsize <= 32:
        # SHA256
        _enc_key = sha256_digest(self.key)[:tsize]
    elif tsize <= 48:
        # SHA384
        _enc_key = sha384_digest(self.key)[:tsize]
    elif tsize <= 64:
        # SHA512
        _enc_key = sha512_digest(self.key)[:tsize]
    else:
        raise JWKException("No support for symmetric keys > 512 bits")
    # NOTE(review): this logs derived key material at DEBUG level -- confirm
    # that is acceptable for the deployment's logging configuration.
    logger.debug('Symmetric encryption key: {}'.format(
        as_unicode(b64e(_enc_key))))
    return _enc_key
def getVC(self):
"""
Variance componenrs
"""
_Cr = decompose_GxE(self.full['Cr'])
RV = {}
for key in list(_Cr.keys()):
RV['var_%s' % key] = sp.array([var_CoXX(_Cr[key], self.Xr)])
RV['var_c'] = self.full['var_c']
RV['var_n'] = self.full['var_n']
return RV | Variance components | Below is the instruction that describes the task:
### Input:
Variance components
### Response:
def getVC(self):
    """
    Variance components
    """
    # Break the GxE covariance into its named components and measure the
    # variance each one contributes through Xr.
    components = decompose_GxE(self.full['Cr'])
    out = {'var_%s' % name: sp.array([var_CoXX(mat, self.Xr)])
           for name, mat in components.items()}
    out['var_c'] = self.full['var_c']
    out['var_n'] = self.full['var_n']
    return out
def options(self, parser, env=None):
"""
Adds command-line options for this plugin.
"""
if env is None:
env = os.environ
env_opt_name = 'NOSE_%s' % self.__dest_opt_name.upper()
parser.add_option("--%s" % self.__opt_name,
dest=self.__dest_opt_name,
type="string",
default=env.get(env_opt_name),
help=".ini file providing the environment for the "
"test web application.") | Adds command-line options for this plugin. | Below is the the instruction that describes the task:
### Input:
Adds command-line options for this plugin.
### Response:
def options(self, parser, env=None):
    """
    Adds command-line options for this plugin.
    """
    env = os.environ if env is None else env
    # The option's default can be pre-seeded from a NOSE_* environment
    # variable matching the destination name.
    env_opt_name = 'NOSE_%s' % self.__dest_opt_name.upper()
    parser.add_option(
        "--%s" % self.__opt_name,
        dest=self.__dest_opt_name,
        type="string",
        default=env.get(env_opt_name),
        help=".ini file providing the environment for the "
             "test web application.")
def block_sep1(self, Y):
r"""Separate variable into component corresponding to
:math:`\mathbf{y}_1` in :math:`\mathbf{y}\;\;`.
"""
# This method is overridden because we have to change the
# mechanism for combining the Y0 and Y1 blocks into a single
# array (see comment in the __init__ method).
shp = Y.shape[0:self.cri.axisC] + self.y1shp[self.cri.axisC:]
return Y[(slice(None),)*self.cri.axisC +
(slice(self.y0I, None),)].reshape(shp) | r"""Separate variable into component corresponding to
:math:`\mathbf{y}_1` in :math:`\mathbf{y}\;\;`. | Below is the the instruction that describes the task:
### Input:
r"""Separate variable into component corresponding to
:math:`\mathbf{y}_1` in :math:`\mathbf{y}\;\;`.
### Response:
def block_sep1(self, Y):
    r"""Separate variable into component corresponding to
    :math:`\mathbf{y}_1` in :math:`\mathbf{y}\;\;`.
    """
    # This method is overridden because we have to change the
    # mechanism for combining the Y0 and Y1 blocks into a single
    # array (see comment in the __init__ method).
    # Target shape: leading axes of Y up to the channel axis, then the
    # trailing axes of the expected y1 shape.
    shp = Y.shape[0:self.cri.axisC] + self.y1shp[self.cri.axisC:]
    # Slice everything from index y0I onward along the channel axis (the
    # y1 block) and reshape it to its own shape.
    return Y[(slice(None),)*self.cri.axisC +
             (slice(self.y0I, None),)].reshape(shp)
def alignment(job, ids, input_args, sample):
    """
    Runs BWA and then Bamsort on the supplied fastqs for this sample
    Input1: Toil Job instance
    Input2: jobstore id dictionary
    Input3: Input arguments dictionary
    Input4: Sample tuple -- contains uuid and urls for the sample
    """
    uuid, urls = sample
    # ids['bam'] = job.fileStore.getEmptyFileStoreID()
    work_dir = job.fileStore.getLocalTempDir()
    output_dir = input_args['output_dir']
    key_path = input_args['ssec']
    cores = multiprocessing.cpu_count()
    # I/O
    # Stage the reference genome and all of its BWA index files into the
    # job's local work dir.
    return_input_paths(job, work_dir, ids, 'ref.fa', 'ref.fa.amb', 'ref.fa.ann',
                       'ref.fa.bwt', 'ref.fa.pac', 'ref.fa.sa', 'ref.fa.fai')
    # Get fastqs associated with this sample
    for url in urls:
        download_encrypted_file(work_dir, url, key_path, os.path.basename(url))
    # Parameters for BWA and Bamsort
    # Both tools run inside Docker with the work dir mounted at /data.
    docker_cmd = ['docker', 'run', '--rm', '-v', '{}:/data'.format(work_dir)]
    # Read-group line embeds the sample uuid as both ID and SM.
    bwa_command = ["jvivian/bwa",
                   "mem",
                   "-R", "@RG\tID:{0}\tPL:Illumina\tSM:{0}\tLB:KapaHyper".format(uuid),
                   "-T", str(0),
                   "-t", str(cores),
                   "/data/ref.fa"] + [os.path.join('/data/', os.path.basename(x)) for x in urls]
    bamsort_command = ["jeltje/biobambam",
                       "/usr/local/bin/bamsort",
                       "inputformat=sam",
                       "level=1",
                       "inputthreads={}".format(cores),
                       "outputthreads={}".format(cores),
                       "calmdnm=1",
                       "calmdnmrecompindetonly=1",
                       "calmdnmreference=/data/ref.fa",
                       "I=/data/{}".format(uuid + '.sam')]
    # Piping the output to a file handle
    with open(os.path.join(work_dir, uuid + '.sam'), 'w') as f_out:
        subprocess.check_call(docker_cmd + bwa_command, stdout=f_out)
    with open(os.path.join(work_dir, uuid + '.bam'), 'w') as f_out:
        subprocess.check_call(docker_cmd + bamsort_command, stdout=f_out)
    # Save in JobStore
    # job.fileStore.updateGlobalFile(ids['bam'], os.path.join(work_dir, uuid + '.bam'))
    ids['bam'] = job.fileStore.writeGlobalFile(os.path.join(work_dir, uuid + '.bam'))
    # Copy file to S3
    # NOTE(review): resource requests (cores=32, '20 G', '30 G') are
    # hard-coded here rather than derived from input_args -- confirm intended.
    if input_args['s3_dir']:
        job.addChildJobFn(upload_bam_to_s3, ids, input_args, sample, cores=32, memory='20 G', disk='30 G')
    # Move file in output_dir
    if input_args['output_dir']:
        # NOTE(review): uuid=None is passed while the file name embeds the
        # sample uuid -- confirm move_to_output_dir does not need it.
        move_to_output_dir(work_dir, output_dir, uuid=None, files=[uuid + '.bam'])
Input1: Toil Job instance
Input2: jobstore id dictionary
Input3: Input arguments dictionary
Input4: Sample tuple -- contains uuid and urls for the sample | Below is the the instruction that describes the task:
### Input:
Runs BWA and then Bamsort on the supplied fastqs for this sample
Input1: Toil Job instance
Input2: jobstore id dictionary
Input3: Input arguments dictionary
Input4: Sample tuple -- contains uuid and urls for the sample
### Response:
def alignment(job, ids, input_args, sample):
    """
    Runs BWA and then Bamsort on the supplied fastqs for this sample
    Input1: Toil Job instance
    Input2: jobstore id dictionary
    Input3: Input arguments dictionary
    Input4: Sample tuple -- contains uuid and urls for the sample
    """
    uuid, urls = sample
    # ids['bam'] = job.fileStore.getEmptyFileStoreID()
    work_dir = job.fileStore.getLocalTempDir()
    output_dir = input_args['output_dir']
    key_path = input_args['ssec']
    # Use every available core for both BWA and bamsort.
    cores = multiprocessing.cpu_count()
    # I/O
    # Stage the reference FASTA and all of its BWA/samtools index files.
    return_input_paths(job, work_dir, ids, 'ref.fa', 'ref.fa.amb', 'ref.fa.ann',
                       'ref.fa.bwt', 'ref.fa.pac', 'ref.fa.sa', 'ref.fa.fai')
    # Get fastqs associated with this sample
    for url in urls:
        download_encrypted_file(work_dir, url, key_path, os.path.basename(url))
    # Parameters for BWA and Bamsort
    # Both tools run inside docker with work_dir mounted at /data.
    docker_cmd = ['docker', 'run', '--rm', '-v', '{}:/data'.format(work_dir)]
    bwa_command = ["jvivian/bwa",
                   "mem",
                   "-R", "@RG\tID:{0}\tPL:Illumina\tSM:{0}\tLB:KapaHyper".format(uuid),
                   "-T", str(0),
                   "-t", str(cores),
                   "/data/ref.fa"] + [os.path.join('/data/', os.path.basename(x)) for x in urls]
    bamsort_command = ["jeltje/biobambam",
                       "/usr/local/bin/bamsort",
                       "inputformat=sam",
                       "level=1",
                       "inputthreads={}".format(cores),
                       "outputthreads={}".format(cores),
                       "calmdnm=1",
                       "calmdnmrecompindetonly=1",
                       "calmdnmreference=/data/ref.fa",
                       "I=/data/{}".format(uuid + '.sam')]
    # Piping the output to a file handle
    with open(os.path.join(work_dir, uuid + '.sam'), 'w') as f_out:
        subprocess.check_call(docker_cmd + bwa_command, stdout=f_out)
    with open(os.path.join(work_dir, uuid + '.bam'), 'w') as f_out:
        subprocess.check_call(docker_cmd + bamsort_command, stdout=f_out)
    # Save in JobStore
    # job.fileStore.updateGlobalFile(ids['bam'], os.path.join(work_dir, uuid + '.bam'))
    ids['bam'] = job.fileStore.writeGlobalFile(os.path.join(work_dir, uuid + '.bam'))
    # Copy file to S3
    if input_args['s3_dir']:
        job.addChildJobFn(upload_bam_to_s3, ids, input_args, sample, cores=32, memory='20 G', disk='30 G')
    # Move file in output_dir
    if input_args['output_dir']:
        # NOTE(review): uuid=None is passed here although a local `uuid` exists
        # -- confirm this is intended by move_to_output_dir's contract.
        move_to_output_dir(work_dir, output_dir, uuid=None, files=[uuid + '.bam'])
def getTotalDiscountedBulkPrice(self):
    """Compute total discounted corporate bulk price
    """
    # Base price and VAT percentage come from the instance; either may be
    # falsy (None/0/''), in which case it is treated as zero below.
    price = self.getDiscountedCorporatePrice()
    vat = self.getVAT()
    # `x and x or 0` is the old-style equivalent of `x or 0`.
    price = price and price or 0
    vat = vat and vat or 0
    # VAT is a percentage of the price, e.g. vat=10 -> price * 1.10
    return float(price) + (float(price) * float(vat)) / 100 | Compute total discounted corporate bulk price | Below is the the instruction that describes the task:
### Input:
Compute total discounted corporate bulk price
### Response:
def getTotalDiscountedBulkPrice(self):
    """Return the discounted corporate bulk price with VAT added.

    A missing price or VAT value is treated as zero; VAT is interpreted
    as a percentage of the price.
    """
    price = self.getDiscountedCorporatePrice() or 0
    vat = self.getVAT() or 0
    return float(price) + (float(price) * float(vat)) / 100
def _wrap_chunks(self, chunks):
    """_wrap_chunks(chunks : [string]) -> [string]
    Wrap a sequence of text chunks and return a list of lines of
    length 'self.width' or less.  (If 'break_long_words' is false,
    some lines may be longer than this.)  Chunks correspond roughly
    to words and the whitespace between them: each chunk is
    indivisible (modulo 'break_long_words'), but a line break can
    come between any two chunks.  Chunks should not have internal
    whitespace; ie. a chunk is either all whitespace or a "word".
    Whitespace chunks will be removed from the beginning and end of
    lines, but apart from that whitespace is preserved.
    """
    lines = []
    if self.width <= 0:
        raise ValueError("invalid width %r (must be > 0)" % self.width)
    # Arrange in reverse order so items can be efficiently popped
    # from a stack of chucks.
    chunks.reverse()
    while chunks:
        # Start the list of chunks that will make up the current line.
        # cur_len is just the length of all the chunks in cur_line.
        cur_line = []
        cur_len = 0
        # Figure out which static string will prefix this line.
        if lines:
            indent = self.subsequent_indent
        else:
            indent = self.initial_indent
        # Maximum width for this line.
        width = self.width - len(indent)
        # First chunk on line is whitespace -- drop it, unless this
        # is the very beginning of the text (ie. no lines started yet).
        if self.drop_whitespace and chunks[-1].strip() == '' and lines:
            # del chunks[-1]
            chunks.pop()
        while chunks:
            l = len(chunks[-1])
            # Can at least squeeze this chunk onto the current line.
            if cur_len + l <= width:
                cur_line.append(chunks.pop())
                cur_len += l
            # Nope, this line is full.
            else:
                break
        # The current line is full, and the next chunk is too big to
        # fit on *any* line (not just this one).
        if chunks and len(chunks[-1]) > width:
            self._handle_long_word(chunks, cur_line, cur_len, width)
        # If the last chunk on this line is all whitespace, drop it.
        if self.drop_whitespace and cur_line and cur_line[-1].strip() == '':
            # del cur_line[-1]
            cur_line.pop()
        # Convert current line back to a string and store it in list
        # of all lines (return value).
        if cur_line:
            lines.append(indent + ''.join(cur_line))
    return lines | _wrap_chunks(chunks : [string]) -> [string]
Wrap a sequence of text chunks and return a list of lines of
length 'self.width' or less. (If 'break_long_words' is false,
some lines may be longer than this.) Chunks correspond roughly
to words and the whitespace between them: each chunk is
indivisible (modulo 'break_long_words'), but a line break can
come between any two chunks. Chunks should not have internal
whitespace; ie. a chunk is either all whitespace or a "word".
Whitespace chunks will be removed from the beginning and end of
lines, but apart from that whitespace is preserved. | Below is the instruction that describes the task:
### Input:
_wrap_chunks(chunks : [string]) -> [string]
Wrap a sequence of text chunks and return a list of lines of
length 'self.width' or less. (If 'break_long_words' is false,
some lines may be longer than this.) Chunks correspond roughly
to words and the whitespace between them: each chunk is
indivisible (modulo 'break_long_words'), but a line break can
come between any two chunks. Chunks should not have internal
whitespace; ie. a chunk is either all whitespace or a "word".
Whitespace chunks will be removed from the beginning and end of
lines, but apart from that whitespace is preserved.
### Response:
def _wrap_chunks(self, chunks):
"""_wrap_chunks(chunks : [string]) -> [string]
Wrap a sequence of text chunks and return a list of lines of
length 'self.width' or less. (If 'break_long_words' is false,
some lines may be longer than this.) Chunks correspond roughly
to words and the whitespace between them: each chunk is
indivisible (modulo 'break_long_words'), but a line break can
come between any two chunks. Chunks should not have internal
whitespace; ie. a chunk is either all whitespace or a "word".
Whitespace chunks will be removed from the beginning and end of
lines, but apart from that whitespace is preserved.
"""
lines = []
if self.width <= 0:
raise ValueError("invalid width %r (must be > 0)" % self.width)
# Arrange in reverse order so items can be efficiently popped
# from a stack of chucks.
chunks.reverse()
while chunks:
# Start the list of chunks that will make up the current line.
# cur_len is just the length of all the chunks in cur_line.
cur_line = []
cur_len = 0
# Figure out which static string will prefix this line.
if lines:
indent = self.subsequent_indent
else:
indent = self.initial_indent
# Maximum width for this line.
width = self.width - len(indent)
# First chunk on line is whitespace -- drop it, unless this
# is the very beginning of the text (ie. no lines started yet).
if self.drop_whitespace and chunks[-1].strip() == '' and lines:
# del chunks[-1]
chunks.pop()
while chunks:
l = len(chunks[-1])
# Can at least squeeze this chunk onto the current line.
if cur_len + l <= width:
cur_line.append(chunks.pop())
cur_len += l
# Nope, this line is full.
else:
break
# The current line is full, and the next chunk is too big to
# fit on *any* line (not just this one).
if chunks and len(chunks[-1]) > width:
self._handle_long_word(chunks, cur_line, cur_len, width)
# If the last chunk on this line is all whitespace, drop it.
if self.drop_whitespace and cur_line and cur_line[-1].strip() == '':
# del cur_line[-1]
cur_line.pop()
# Convert current line back to a string and store it in list
# of all lines (return value).
if cur_line:
lines.append(indent + ''.join(cur_line))
return lines |
def validate_current_versions(self):  # type: () -> bool
    """
    Can a version be found? Are all versions currently the same? Are they valid sem ver?
    :return:
    """
    versions = self.all_current_versions()
    # Any unparseable version makes comparison meaningless -- bail out.
    for _, version in versions.items():
        if "Invalid Semantic Version" in version:
            logger.error(
                "Invalid versions, can't compare them, can't determine if in sync"
            )
            return False
    if not versions:
        logger.warning("Found no versions, will use default 0.1.0")
        return True
    if not self.all_versions_equal(versions):
        if self.almost_the_same_version([x for x in versions.values()]):
            # TODO: disable with strict option
            logger.warning("Version very by a patch level, will use greater.")
            return True
        logger.error("Found various versions, how can we rationally pick?")
        # NOTE(review): `unicode` is Python 2 only -- confirm this module
        # still targets py2, otherwise this raises NameError on py3.
        logger.error(unicode(versions))
        return False
    # `versions` is guaranteed non-empty here, so this loop always
    # returns True on its first iteration.
    for _ in versions:
        return True
    return False | Can a version be found? Are all versions currently the same? Are they valid sem ver?
:return: | Below is the instruction that describes the task:
### Input:
Can a version be found? Are all versions currently the same? Are they valid sem ver?
:return:
### Response:
def validate_current_versions(self):  # type: () -> bool
    """Check that a version exists and that all discovered versions agree.

    Returns False when any discovered version string is unparseable or
    when the versions diverge by more than a patch level; True otherwise
    (including when no versions were found at all).
    """
    versions = self.all_current_versions()
    # An unparseable version makes any comparison meaningless.
    for _, found in versions.items():
        if "Invalid Semantic Version" in found:
            logger.error(
                "Invalid versions, can't compare them, can't determine if in sync"
            )
            return False
    if not versions:
        logger.warning("Found no versions, will use default 0.1.0")
        return True
    if self.all_versions_equal(versions):
        return True
    # Versions differ: tolerate a patch-level spread, otherwise give up.
    if self.almost_the_same_version(list(versions.values())):
        # TODO: disable with strict option
        logger.warning("Version very by a patch level, will use greater.")
        return True
    logger.error("Found various versions, how can we rationally pick?")
    logger.error(unicode(versions))
    return False
def start_ppp_link(self):
    '''startup the link'''
    cmd = ['pppd']
    cmd.extend(self.command)
    # Fork attached to a fresh pseudo-terminal; pppd will talk over the pty.
    (self.pid, self.ppp_fd) = pty.fork()
    if self.pid == 0:
        # Child: replace this process with pppd. execvp only returns on failure.
        os.execvp("pppd", cmd)
        raise RuntimeError("pppd exited")
    if self.ppp_fd == -1:
        print("Failed to create link fd")
        return
    # ensure fd is non-blocking
    fcntl.fcntl(self.ppp_fd, fcntl.F_SETFL, fcntl.fcntl(self.ppp_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
    self.byte_count = 0
    self.packet_count = 0
    # ask mavproxy to add us to the select loop
    self.mpself.select_extra[self.ppp_fd] = (self.ppp_read, self.ppp_fd) | startup the link | Below is the the instruction that describes the task:
### Input:
startup the link
### Response:
def start_ppp_link(self):
    '''startup the link'''
    cmd = ['pppd']
    cmd.extend(self.command)
    # Fork attached to a fresh pseudo-terminal; pppd will talk over the pty.
    (self.pid, self.ppp_fd) = pty.fork()
    if self.pid == 0:
        # Child: replace this process with pppd. execvp only returns on failure.
        os.execvp("pppd", cmd)
        raise RuntimeError("pppd exited")
    if self.ppp_fd == -1:
        print("Failed to create link fd")
        return
    # ensure fd is non-blocking
    fcntl.fcntl(self.ppp_fd, fcntl.F_SETFL, fcntl.fcntl(self.ppp_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
    self.byte_count = 0
    self.packet_count = 0
    # ask mavproxy to add us to the select loop
    self.mpself.select_extra[self.ppp_fd] = (self.ppp_read, self.ppp_fd)
def parseCommandLineArguments():
    """
    Set up command line parsing.
    """
    parser = argparse.ArgumentParser(description="Calculate parallax error for given G and (V-I)")
    # Both positionals are required and converted to float by argparse.
    parser.add_argument("gmag", help="G-band magnitude of source", type=float)
    parser.add_argument("vmini", help="(V-I) colour of source", type=float)
    # vars() turns the Namespace into a plain dict keyed by argument name.
    args=vars(parser.parse_args())
    return args | Set up command line parsing. | Below is the the instruction that describes the task:
### Input:
Set up command line parsing.
### Response:
def parseCommandLineArguments():
    """Build the command-line parser and return the parsed arguments.

    :return: dict with keys 'gmag' (float) and 'vmini' (float)
    """
    parser = argparse.ArgumentParser(
        description="Calculate parallax error for given G and (V-I)")
    parser.add_argument("gmag", type=float, help="G-band magnitude of source")
    parser.add_argument("vmini", type=float, help="(V-I) colour of source")
    return vars(parser.parse_args())
def get_by_index(self, index):
    """Returns a Volume or Disk by its index."""
    try:
        # Fast path: direct container lookup.
        return self[index]
    except KeyError:
        # Fall back to matching the stringified index against each
        # volume's `index` attribute.
        for v in self.get_volumes():
            if v.index == str(index):
                return v
        raise KeyError(index) | Returns a Volume or Disk by its index. | Below is the the instruction that describes the task:
### Input:
Returns a Volume or Disk by its index.
### Response:
def get_by_index(self, index):
    """Return the Volume or Disk stored under *index*.

    Falls back to comparing ``str(index)`` against the ``index``
    attribute of each volume; raises KeyError when nothing matches.
    """
    try:
        return self[index]
    except KeyError:
        wanted = str(index)
        for volume in self.get_volumes():
            if volume.index == wanted:
                return volume
        raise KeyError(index)
def _getW(self):
    """
    Gets a value of `w` for use in generating a pattern.
    """
    w = self._w
    if type(w) is list:
        # A list means "pick one at random" using the instance RNG.
        return w[self._random.getUInt32(len(w))]
    else:
        # Scalar `w` is returned as-is.
        return w | Gets a value of `w` for use in generating a pattern. | Below is the the instruction that describes the task:
### Input:
Gets a value of `w` for use in generating a pattern.
### Response:
def _getW(self):
"""
Gets a value of `w` for use in generating a pattern.
"""
w = self._w
if type(w) is list:
return w[self._random.getUInt32(len(w))]
else:
return w |
def _start_new_cdx_file(self):
    '''Create and set current CDX file.'''
    self._cdx_filename = '{0}.cdx'.format(self._prefix_filename)
    # Unless appending, truncate (or create) the file and write a fresh
    # header; when appending, only write the header if the file is new.
    if not self._params.appending:
        wpull.util.truncate_file(self._cdx_filename)
        self._write_cdx_header()
    elif not os.path.exists(self._cdx_filename):
        self._write_cdx_header() | Create and set current CDX file. | Below is the the instruction that describes the task:
### Input:
Create and set current CDX file.
### Response:
def _start_new_cdx_file(self):
    '''Create and set current CDX file.'''
    self._cdx_filename = '{0}.cdx'.format(self._prefix_filename)
    # Unless appending, truncate (or create) the file and write a fresh
    # header; when appending, only write the header if the file is new.
    if not self._params.appending:
        wpull.util.truncate_file(self._cdx_filename)
        self._write_cdx_header()
    elif not os.path.exists(self._cdx_filename):
        self._write_cdx_header()
def process_deltadir(delta_dir, org_lengths, logger=None):
    """Returns a tuple of ANIm results for .deltas in passed directory.
    - delta_dir - path to the directory containing .delta files
    - org_lengths - dictionary of total sequence lengths, keyed by sequence
    Returns the following pandas dataframes in an ANIResults object;
    query sequences are rows, subject sequences are columns:
    - alignment_lengths - symmetrical: total length of alignment
    - percentage_identity - symmetrical: percentage identity of alignment
    - alignment_coverage - non-symmetrical: coverage of query and subject
    - similarity_errors - symmetrical: count of similarity errors
    May throw a ZeroDivisionError if one or more NUCmer runs failed, or a
    very distant sequence was included in the analysis.
    """
    # Process directory to identify input files - as of v0.2.4 we use the
    # .filter files that result from delta-filter (1:1 alignments)
    deltafiles = pyani_files.get_input_files(delta_dir, ".filter")
    # Hold data in ANIResults object
    results = ANIResults(list(org_lengths.keys()), "ANIm")
    # Fill diagonal NA values for alignment_length with org_lengths
    for org, length in list(org_lengths.items()):
        results.alignment_lengths[org][org] = length
    # Process .delta files assuming that the filename format holds:
    # org1_vs_org2.delta
    for deltafile in deltafiles:
        qname, sname = os.path.splitext(os.path.split(deltafile)[-1])[0].split("_vs_")
        # We may have .delta files from other analyses in the same directory
        # If this occurs, we raise a warning, and skip the .delta file
        if qname not in list(org_lengths.keys()):
            if logger:
                logger.warning(
                    "Query name %s not in input " % qname
                    + "sequence list, skipping %s" % deltafile
                )
            continue
        if sname not in list(org_lengths.keys()):
            if logger:
                logger.warning(
                    "Subject name %s not in input " % sname
                    + "sequence list, skipping %s" % deltafile
                )
            continue
        tot_length, tot_sim_error = parse_delta(deltafile)
        # NOTE(review): `logger is not None` already guarantees the inner
        # `if logger:` below -- the nested check is redundant.
        if tot_length == 0 and logger is not None:
            if logger:
                logger.warning(
                    "Total alignment length reported in " + "%s is zero!" % deltafile
                )
        query_cover = float(tot_length) / org_lengths[qname]
        sbjct_cover = float(tot_length) / org_lengths[sname]
        # Calculate percentage ID of aligned length. This may fail if
        # total length is zero.
        # The ZeroDivisionError that would arise should be handled
        # Common causes are that a NUCmer run failed, or that a very
        # distant sequence was included in the analysis.
        try:
            perc_id = 1 - float(tot_sim_error) / tot_length
        except ZeroDivisionError:
            perc_id = 0  # set arbitrary value of zero identity
            results.zero_error = True
        # Populate dataframes: when assigning data from symmetrical MUMmer
        # output, both upper and lower triangles will be populated
        results.add_tot_length(qname, sname, tot_length)
        results.add_sim_errors(qname, sname, tot_sim_error)
        results.add_pid(qname, sname, perc_id)
        results.add_coverage(qname, sname, query_cover, sbjct_cover)
    return results | Returns a tuple of ANIm results for .deltas in passed directory.
- delta_dir - path to the directory containing .delta files
- org_lengths - dictionary of total sequence lengths, keyed by sequence
Returns the following pandas dataframes in an ANIResults object;
query sequences are rows, subject sequences are columns:
- alignment_lengths - symmetrical: total length of alignment
- percentage_identity - symmetrical: percentage identity of alignment
- alignment_coverage - non-symmetrical: coverage of query and subject
- similarity_errors - symmetrical: count of similarity errors
May throw a ZeroDivisionError if one or more NUCmer runs failed, or a
very distant sequence was included in the analysis. | Below is the instruction that describes the task:
### Input:
Returns a tuple of ANIm results for .deltas in passed directory.
- delta_dir - path to the directory containing .delta files
- org_lengths - dictionary of total sequence lengths, keyed by sequence
Returns the following pandas dataframes in an ANIResults object;
query sequences are rows, subject sequences are columns:
- alignment_lengths - symmetrical: total length of alignment
- percentage_identity - symmetrical: percentage identity of alignment
- alignment_coverage - non-symmetrical: coverage of query and subject
- similarity_errors - symmetrical: count of similarity errors
May throw a ZeroDivisionError if one or more NUCmer runs failed, or a
very distant sequence was included in the analysis.
### Response:
def process_deltadir(delta_dir, org_lengths, logger=None):
    """Returns ANIm results for the .delta/.filter files in a directory.
    - delta_dir - path to the directory containing .delta files
    - org_lengths - dictionary of total sequence lengths, keyed by sequence
    Returns the following pandas dataframes in an ANIResults object;
    query sequences are rows, subject sequences are columns:
    - alignment_lengths - symmetrical: total length of alignment
    - percentage_identity - symmetrical: percentage identity of alignment
    - alignment_coverage - non-symmetrical: coverage of query and subject
    - similarity_errors - symmetrical: count of similarity errors
    May throw a ZeroDivisionError if one or more NUCmer runs failed, or a
    very distant sequence was included in the analysis.
    """
    # Process directory to identify input files - as of v0.2.4 we use the
    # .filter files that result from delta-filter (1:1 alignments)
    deltafiles = pyani_files.get_input_files(delta_dir, ".filter")
    # Hold data in ANIResults object
    results = ANIResults(list(org_lengths.keys()), "ANIm")
    # Fill diagonal NA values for alignment_length with org_lengths
    for org, length in list(org_lengths.items()):
        results.alignment_lengths[org][org] = length
    # Process .delta files assuming that the filename format holds:
    # org1_vs_org2.delta
    for deltafile in deltafiles:
        qname, sname = os.path.splitext(os.path.split(deltafile)[-1])[0].split("_vs_")
        # We may have .delta files from other analyses in the same directory
        # If this occurs, we raise a warning, and skip the .delta file
        if qname not in list(org_lengths.keys()):
            if logger:
                logger.warning(
                    "Query name %s not in input " % qname
                    + "sequence list, skipping %s" % deltafile
                )
            continue
        if sname not in list(org_lengths.keys()):
            if logger:
                logger.warning(
                    "Subject name %s not in input " % sname
                    + "sequence list, skipping %s" % deltafile
                )
            continue
        tot_length, tot_sim_error = parse_delta(deltafile)
        # NOTE(review): `logger is not None` already guarantees the inner
        # `if logger:` below -- the nested check is redundant.
        if tot_length == 0 and logger is not None:
            if logger:
                logger.warning(
                    "Total alignment length reported in " + "%s is zero!" % deltafile
                )
        query_cover = float(tot_length) / org_lengths[qname]
        sbjct_cover = float(tot_length) / org_lengths[sname]
        # Calculate percentage ID of aligned length. This may fail if
        # total length is zero.
        # The ZeroDivisionError that would arise should be handled
        # Common causes are that a NUCmer run failed, or that a very
        # distant sequence was included in the analysis.
        try:
            perc_id = 1 - float(tot_sim_error) / tot_length
        except ZeroDivisionError:
            perc_id = 0  # set arbitrary value of zero identity
            results.zero_error = True
        # Populate dataframes: when assigning data from symmetrical MUMmer
        # output, both upper and lower triangles will be populated
        results.add_tot_length(qname, sname, tot_length)
        results.add_sim_errors(qname, sname, tot_sim_error)
        results.add_pid(qname, sname, perc_id)
        results.add_coverage(qname, sname, query_cover, sbjct_cover)
    return results
def text_log_errors(self, request, project, pk=None):
    """
    Return the text log errors associated with this job.

    Responds 404 when no job with id ``pk`` exists in ``project``.
    """
    try:
        job = Job.objects.get(repository__name=project,
                              id=pk)
    except Job.DoesNotExist:
        return Response("No job with id: {0}".format(pk),
                        status=HTTP_404_NOT_FOUND)
    # select_related/prefetch_related avoid per-row queries when the
    # serializer walks metadata, failure lines and classifications.
    textlog_errors = (TextLogError.objects
                      .filter(step__job=job)
                      .select_related("_metadata",
                                      "_metadata__failure_line")
                      .prefetch_related("classified_failures", "matches")
                      .order_by('id'))
    return Response(serializers.TextLogErrorSerializer(textlog_errors,
                                                       many=True,
                                                       read_only=True).data) | Gets a list of steps associated with this job | Below is the the instruction that describes the task:
### Input:
Gets a list of steps associated with this job
### Response:
def text_log_errors(self, request, project, pk=None):
    """
    Return the text log errors associated with this job.

    Responds 404 when no job with id ``pk`` exists in ``project``.
    """
    try:
        job = Job.objects.get(repository__name=project,
                              id=pk)
    except Job.DoesNotExist:
        return Response("No job with id: {0}".format(pk),
                        status=HTTP_404_NOT_FOUND)
    # select_related/prefetch_related avoid per-row queries when the
    # serializer walks metadata, failure lines and classifications.
    textlog_errors = (TextLogError.objects
                      .filter(step__job=job)
                      .select_related("_metadata",
                                      "_metadata__failure_line")
                      .prefetch_related("classified_failures", "matches")
                      .order_by('id'))
    return Response(serializers.TextLogErrorSerializer(textlog_errors,
                                                       many=True,
                                                       read_only=True).data)
def internal_classpath(cls, targets, classpath_products, confs=('default',)):
    """Return the list of internal classpath entries for a classpath covering all `targets`.
    Any classpath entries contributed by external dependencies will be omitted.
    :param targets: Targets to build an aggregated classpath for.
    :param ClasspathProducts classpath_products: Product containing classpath elements.
    :param confs: The list of confs for use by this classpath.
    :returns: The classpath as a list of path elements.
    :rtype: list of string
    """
    # NOTE(review): first parameter is `cls` -- presumably decorated with
    # @classmethod at the definition site; confirm.
    classpath_tuples = classpath_products.get_internal_classpath_entries_for_targets(targets)
    filtered_tuples_iter = cls._filtered_classpath_by_confs_iter(classpath_tuples, confs)
    return [entry.path for entry in cls._entries_iter(filtered_tuples_iter)] | Return the list of internal classpath entries for a classpath covering all `targets`.
Any classpath entries contributed by external dependencies will be omitted.
:param targets: Targets to build an aggregated classpath for.
:param ClasspathProducts classpath_products: Product containing classpath elements.
:param confs: The list of confs for use by this classpath.
:returns: The classpath as a list of path elements.
:rtype: list of string | Below is the instruction that describes the task:
### Input:
Return the list of internal classpath entries for a classpath covering all `targets`.
Any classpath entries contributed by external dependencies will be omitted.
:param targets: Targets to build an aggregated classpath for.
:param ClasspathProducts classpath_products: Product containing classpath elements.
:param confs: The list of confs for use by this classpath.
:returns: The classpath as a list of path elements.
:rtype: list of string
### Response:
def internal_classpath(cls, targets, classpath_products, confs=('default',)):
    """Return the aggregated internal classpath for ``targets``.

    Classpath entries contributed by external dependencies are omitted.

    :param targets: Targets to build an aggregated classpath for.
    :param ClasspathProducts classpath_products: Product containing classpath elements.
    :param confs: The list of confs for use by this classpath.
    :returns: The classpath as a list of path elements.
    :rtype: list of string
    """
    all_entries = classpath_products.get_internal_classpath_entries_for_targets(targets)
    wanted = cls._filtered_classpath_by_confs_iter(all_entries, confs)
    return [entry.path for entry in cls._entries_iter(wanted)]
def crud_permission_name(model, action, convert=True):
    """Returns permission name using Django naming convention: app_label.action_object.
    If `convert` is True, `create` and `update` actions would be renamed
    to `add` and `change`.
    """
    app_label = model._meta.app_label
    model_lower = model.__name__.lower()
    if convert:
        # Unknown actions pass through unchanged.
        action = MAP_PERMISSION_ACTIONS.get(action, action)
    return '%s.%s_%s' % (
        app_label,
        action,
        model_lower
    ) | Returns permission name using Django naming convention: app_label.action_object.
If `convert` is True, `create` and `update` actions would be renamed
to `add` and `change`. | Below is the instruction that describes the task:
### Input:
Returns permission name using Django naming convention: app_label.action_object.
If `convert` is True, `create` and `update` actions would be renamed
to `add` and `change`.
### Response:
def crud_permission_name(model, action, convert=True):
    """Return the permission name in Django convention: app_label.action_object.

    When ``convert`` is true, actions such as ``create``/``update`` are
    remapped through MAP_PERMISSION_ACTIONS (to ``add``/``change``);
    unknown actions pass through unchanged.
    """
    if convert:
        action = MAP_PERMISSION_ACTIONS.get(action, action)
    return '%s.%s_%s' % (model._meta.app_label, action, model.__name__.lower())
def _gc_prop(s, length):
    """Get proportion of GC from a string
    Parameters
    ----------
    s : str
        Arbitrary string
    Returns
    -------
    x : float
        GC proportion.
    """
    # NOTE(review): only lowercase 'c'/'g' are counted -- presumably the
    # caller lowercases the sequence first; confirm.
    gc = sum(map(s.count, ["c", "g"]))
    return gc / length | Get proportion of GC from a string
Parameters
----------
s : str
Arbitrary string
Returns
-------
x : float
GC proportion. | Below is the instruction that describes the task:
### Input:
Get proportion of GC from a string
Parameters
----------
s : str
Arbitrary string
Returns
-------
x : float
GC proportion.
### Response:
def _gc_prop(s, length):
"""Get proportion of GC from a string
Parameters
----------
s : str
Arbitrary string
Returns
-------
x : float
GC proportion.
"""
gc = sum(map(s.count, ["c", "g"]))
return gc / length |
def main():
    """
    A CLI application for performing factory calibration of an Opentrons robot
    Instructions:
    - Robot must be set up with two 300ul or 50ul single-channel pipettes
    installed on the right-hand and left-hand mount.
    - Put a GEB 300ul tip onto the pipette.
    - Use the arrow keys to jog the robot over slot 5 in an open space that
    is not an engraving or a hole.
    - Use the 'q' and 'a' keys to jog the pipette up and down respectively
    until the tip is just touching the deck surface, then press 'z'. This
    will save the 'Z' height.
    - Press '1' to automatically go to the expected location of the first
    calibration point. Jog the robot until the tip is actually at
    the point, then press 'enter'.
    - Repeat with '2' and '3'.
    - After calibrating all three points, press the space bar to save the
    configuration.
    - Optionally, press 4,5,6 or 7 to validate the new configuration.
    - Press 'p' to perform tip probe. Press the space bar to save again.
    - Press 'm' to perform mount calibration.
    Press enter and then space bar to save again.
    - Press 'esc' to exit the program.
    """
    # Destructive operation: require explicit confirmation before wiping
    # any prior calibration data.
    prompt = input(
        ">>> Warning! Running this tool backup and clear any previous "
        "calibration data. Proceed (y/[n])? ")
    if prompt not in ['y', 'Y', 'yes']:
        print('Exiting--prior configuration data not changed')
        sys.exit()
    # Notes:
    # - 200ul tip is 51.7mm long when attached to a pipette
    # - For xyz coordinates, (0, 0, 0) is the lower-left corner of the robot
    cli = CLITool(
        point_set=get_calibration_points(),
        tip_length=51.7)
    hardware = cli.hardware
    backup_configuration_and_reload(hardware)
    # Rail-light handling differs between the v1 and v2 hardware APIs.
    if not feature_flags.use_protocol_api_v2():
        hardware.connect()
        hardware.turn_on_rail_lights()
        atexit.register(hardware.turn_off_rail_lights)
    else:
        hardware.set_lights(rails=True)
    cli.home()
    # lights help the script user to see the points on the deck
    cli.ui_loop.run()
    if feature_flags.use_protocol_api_v2():
        hardware.set_lights(rails=False)
    print('Robot config: \n', cli._config) | A CLI application for performing factory calibration of an Opentrons robot
Instructions:
- Robot must be set up with two 300ul or 50ul single-channel pipettes
installed on the right-hand and left-hand mount.
- Put a GEB 300ul tip onto the pipette.
- Use the arrow keys to jog the robot over slot 5 in an open space that
is not an engraving or a hole.
- Use the 'q' and 'a' keys to jog the pipette up and down respectively
until the tip is just touching the deck surface, then press 'z'. This
will save the 'Z' height.
- Press '1' to automatically go to the expected location of the first
calibration point. Jog the robot until the tip is actually at
the point, then press 'enter'.
- Repeat with '2' and '3'.
- After calibrating all three points, press the space bar to save the
configuration.
- Optionally, press 4,5,6 or 7 to validate the new configuration.
- Press 'p' to perform tip probe. Press the space bar to save again.
- Press 'm' to perform mount calibration.
Press enter and then space bar to save again.
- Press 'esc' to exit the program. | Below is the instruction that describes the task:
### Input:
A CLI application for performing factory calibration of an Opentrons robot
Instructions:
- Robot must be set up with two 300ul or 50ul single-channel pipettes
installed on the right-hand and left-hand mount.
- Put a GEB 300ul tip onto the pipette.
- Use the arrow keys to jog the robot over slot 5 in an open space that
is not an engraving or a hole.
- Use the 'q' and 'a' keys to jog the pipette up and down respectively
until the tip is just touching the deck surface, then press 'z'. This
will save the 'Z' height.
- Press '1' to automatically go to the expected location of the first
calibration point. Jog the robot until the tip is actually at
the point, then press 'enter'.
- Repeat with '2' and '3'.
- After calibrating all three points, press the space bar to save the
configuration.
- Optionally, press 4,5,6 or 7 to validate the new configuration.
- Press 'p' to perform tip probe. Press the space bar to save again.
- Press 'm' to perform mount calibration.
Press enter and then space bar to save again.
- Press 'esc' to exit the program.
### Response:
def main():
    """
    A CLI application for performing factory calibration of an Opentrons robot
    Instructions:
    - Robot must be set up with two 300ul or 50ul single-channel pipettes
    installed on the right-hand and left-hand mount.
    - Put a GEB 300ul tip onto the pipette.
    - Use the arrow keys to jog the robot over slot 5 in an open space that
    is not an engraving or a hole.
    - Use the 'q' and 'a' keys to jog the pipette up and down respectively
    until the tip is just touching the deck surface, then press 'z'. This
    will save the 'Z' height.
    - Press '1' to automatically go to the expected location of the first
    calibration point. Jog the robot until the tip is actually at
    the point, then press 'enter'.
    - Repeat with '2' and '3'.
    - After calibrating all three points, press the space bar to save the
    configuration.
    - Optionally, press 4,5,6 or 7 to validate the new configuration.
    - Press 'p' to perform tip probe. Press the space bar to save again.
    - Press 'm' to perform mount calibration.
    Press enter and then space bar to save again.
    - Press 'esc' to exit the program.
    """
    # Confirmation gate: the tool overwrites stored calibration, so bail out
    # unless the user explicitly answers yes.
    prompt = input(
        ">>> Warning! Running this tool backup and clear any previous "
        "calibration data. Proceed (y/[n])? ")
    if prompt not in ['y', 'Y', 'yes']:
        print('Exiting--prior configuration data not changed')
        sys.exit()
    # Notes:
    # - 200ul tip is 51.7mm long when attached to a pipette
    # - For xyz coordinates, (0, 0, 0) is the lower-left corner of the robot
    cli = CLITool(
        point_set=get_calibration_points(),
        tip_length=51.7)
    hardware = cli.hardware
    # Snapshot the existing config before anything is modified.
    backup_configuration_and_reload(hardware)
    if not feature_flags.use_protocol_api_v2():
        # Legacy (v1) API needs an explicit connect; register the light
        # shutdown so the rails are turned off even on abnormal exit.
        hardware.connect()
        hardware.turn_on_rail_lights()
        atexit.register(hardware.turn_off_rail_lights)
    else:
        hardware.set_lights(rails=True)
    cli.home()
    # lights help the script user to see the points on the deck
    # Interactive loop; returns when the user presses 'esc' (see docstring).
    cli.ui_loop.run()
    if feature_flags.use_protocol_api_v2():
        hardware.set_lights(rails=False)
    print('Robot config: \n', cli._config)
def get_internal_project(
self,
timeout: float = 1
) -> typing.Union['projects.Project', None]:
"""
Attempts to return the internally loaded project. This function
prevents race condition issues where projects are loaded via threads
because the internal loop will try to continuously load the internal
project until it is available or until the timeout is reached.
:param timeout:
Maximum number of seconds to wait before giving up and returning
None.
"""
count = int(timeout / 0.1)
for _ in range(count):
project = self.internal_project
if project:
return project
time.sleep(0.1)
return self.internal_project | Attempts to return the internally loaded project. This function
prevents race condition issues where projects are loaded via threads
because the internal loop will try to continuously load the internal
project until it is available or until the timeout is reached.
:param timeout:
Maximum number of seconds to wait before giving up and returning
        None. | Below is the instruction that describes the task:
### Input:
Attempts to return the internally loaded project. This function
prevents race condition issues where projects are loaded via threads
because the internal loop will try to continuously load the internal
project until it is available or until the timeout is reached.
:param timeout:
Maximum number of seconds to wait before giving up and returning
None.
### Response:
def get_internal_project(
        self,
        timeout: float = 1
) -> typing.Union['projects.Project', None]:
    """
    Return the internally loaded project, polling until it appears.

    Because projects may be loaded on another thread, this repeatedly
    re-reads ``self.internal_project`` (every 0.1s) until it is truthy or
    the timeout budget is exhausted, avoiding a race with the loader.

    :param timeout:
        Maximum number of seconds to wait before giving up; on timeout the
        current (possibly ``None``) value is returned.
    """
    attempts = int(timeout / 0.1)
    while attempts > 0:
        found = self.internal_project
        if found:
            return found
        time.sleep(0.1)
        attempts -= 1
    return self.internal_project
async def on_message(message):
"""The on_message event handler for this module
Args:
message (discord.Message): Input message
"""
# Simplify message info
server = message.server
author = message.author
channel = message.channel
content = message.content
data = datatools.get_data()
if not data["discord"]["servers"][server.id][_data.modulename]["activated"]:
return
# Only reply to server messages and don't reply to myself
if server is not None and author != channel.server.me:
# Do a flip check
flipchecked = api_flipcheck.flipcheck(content)
if flipchecked:
await client.send_typing(channel)
await client.send_message(channel, flipchecked) | The on_message event handler for this module
Args:
        message (discord.Message): Input message | Below is the instruction that describes the task:
### Input:
The on_message event handler for this module
Args:
message (discord.Message): Input message
### Response:
async def on_message(message):
    """The on_message event handler for this module

    Args:
        message (discord.Message): Input message
    """
    # Simplify message info
    server = message.server
    author = message.author
    channel = message.channel
    content = message.content

    # Guard first: direct messages have no server, so the activation lookup
    # below (which reads server.id) would raise AttributeError. Also never
    # respond to our own messages. (Original code did the activation check
    # before this guard and crashed on DMs.)
    if server is None or author == channel.server.me:
        return

    data = datatools.get_data()
    if not data["discord"]["servers"][server.id][_data.modulename]["activated"]:
        return

    # Do a flip check
    flipchecked = api_flipcheck.flipcheck(content)
    if flipchecked:
        await client.send_typing(channel)
        await client.send_message(channel, flipchecked)
def forwarder(frontend, backend):
"""Simple pub/sub forwarder
:param int frontend: fontend zeromq port
:param int backend: backend zeromq port
"""
try:
context = zmq.Context()
front_sub = context.socket(zmq.SUB)
front_sub.bind("tcp://*:%d" % frontend)
front_sub.setsockopt_string(zmq.SUBSCRIBE, "")
back_pub = context.socket(zmq.PUB)
back_pub.bind("tcp://*:%d" % backend)
print("forwarder started, backend on port : %d\tfrontend on port: %d" % (backend, frontend))
zmq.proxy(front_sub, back_pub)
except Exception as e:
print(e)
finally:
front_sub.close()
back_pub.close()
context.term() | Simple pub/sub forwarder
:param int frontend: fontend zeromq port
    :param int backend: backend zeromq port | Below is the instruction that describes the task:
### Input:
Simple pub/sub forwarder
:param int frontend: fontend zeromq port
:param int backend: backend zeromq port
### Response:
def forwarder(frontend, backend):
    """Simple pub/sub forwarder

    :param int frontend: frontend zeromq port
    :param int backend: backend zeromq port
    """
    # Pre-bind to None so the finally block can tell what was actually
    # created. In the original, a failure in Context()/socket() raised a
    # NameError from `finally`, masking the real exception.
    context = None
    front_sub = None
    back_pub = None
    try:
        context = zmq.Context()
        front_sub = context.socket(zmq.SUB)
        front_sub.bind("tcp://*:%d" % frontend)
        # Empty subscription prefix: receive everything.
        front_sub.setsockopt_string(zmq.SUBSCRIBE, "")
        back_pub = context.socket(zmq.PUB)
        back_pub.bind("tcp://*:%d" % backend)
        print("forwarder started, backend on port : %d\tfrontend on port: %d" % (backend, frontend))
        # Blocks forever, relaying messages from the SUB to the PUB side.
        zmq.proxy(front_sub, back_pub)
    except Exception as e:
        print(e)
    finally:
        # Close only what exists, in socket-then-context order.
        if front_sub is not None:
            front_sub.close()
        if back_pub is not None:
            back_pub.close()
        if context is not None:
            context.term()
def execute_lines(self, lines):
"""
Execute a set of lines as multiple command
lines: multiple lines of text to be executed as single commands
"""
for line in lines.splitlines():
stripped_line = line.strip()
if stripped_line.startswith('#'):
continue
self.write(line+os.linesep, flush=True)
self.execute_command(line+"\n")
self.flush() | Execute a set of lines as multiple command
    lines: multiple lines of text to be executed as single commands | Below is the instruction that describes the task:
### Input:
Execute a set of lines as multiple command
lines: multiple lines of text to be executed as single commands
### Response:
def execute_lines(self, lines):
    """
    Execute a multi-line block of text as a sequence of single commands.

    lines: text whose individual lines are each run as one command;
           lines that contain only a comment (leading '#') are skipped.
    """
    for raw_line in lines.splitlines():
        # Ignore comment-only lines entirely.
        if raw_line.strip().startswith('#'):
            continue
        # Echo the command to the output, then actually run it.
        self.write(raw_line + os.linesep, flush=True)
        self.execute_command(raw_line + "\n")
    self.flush()
def _encode(self, value, path_from_root):
"""Normalize, compress, and encode sub-objects for backend storage.
value: Object to encode.
path_from_root: `tuple` of key strings from the top-level summary to the
current `value`.
Returns:
A new tree of dict's with large objects replaced with dictionaries
with "_type" entries that say which type the original data was.
"""
# Constructs a new `dict` tree in `json_value` that discards and/or
# encodes objects that aren't JSON serializable.
if isinstance(value, dict):
json_value = {}
for key, value in six.iteritems(value):
json_value[key] = self._encode(value, path_from_root + (key,))
return json_value
else:
path = ".".join(path_from_root)
if util.is_pandas_data_frame(value):
return util.encode_data_frame(path, value, self._run)
else:
friendly_value, converted = util.json_friendly(data_types.val_to_json(path, value))
json_value, compressed = util.maybe_compress_summary(friendly_value, util.get_h5_typename(value))
if compressed:
self.write_h5(path_from_root, friendly_value)
return json_value
"""
if isinstance(value, dict):
json_child[key], converted = util.json_friendly(
self._encode(value, path_from_root + [key]))
else:
""" | Normalize, compress, and encode sub-objects for backend storage.
value: Object to encode.
path_from_root: `tuple` of key strings from the top-level summary to the
current `value`.
Returns:
A new tree of dict's with large objects replaced with dictionaries
    with "_type" entries that say which type the original data was. | Below is the instruction that describes the task:
### Input:
Normalize, compress, and encode sub-objects for backend storage.
value: Object to encode.
path_from_root: `tuple` of key strings from the top-level summary to the
current `value`.
Returns:
A new tree of dict's with large objects replaced with dictionaries
with "_type" entries that say which type the original data was.
### Response:
def _encode(self, value, path_from_root):
    """Normalize, compress, and encode sub-objects for backend storage.
    value: Object to encode.
    path_from_root: `tuple` of key strings from the top-level summary to the
    current `value`.
    Returns:
    A new tree of dict's with large objects replaced with dictionaries
    with "_type" entries that say which type the original data was.
    """
    # Constructs a new `dict` tree in `json_value` that discards and/or
    # encodes objects that aren't JSON serializable.
    if isinstance(value, dict):
        # Recurse into mapping values, extending the key path as we go.
        # NOTE(review): the loop variable shadows the `value` parameter;
        # this works because six.iteritems() captured the dict first,
        # but it is fragile — verify before refactoring.
        json_value = {}
        for key, value in six.iteritems(value):
            json_value[key] = self._encode(value, path_from_root + (key,))
        return json_value
    else:
        # Dotted path identifies this leaf within the summary tree.
        path = ".".join(path_from_root)
        if util.is_pandas_data_frame(value):
            # DataFrames are encoded by a dedicated helper.
            return util.encode_data_frame(path, value, self._run)
        else:
            # Convert to a JSON-friendly form, then optionally compress;
            # compressed values are additionally persisted to h5 storage.
            # (`converted` is unused — presumably kept for symmetry; confirm.)
            friendly_value, converted = util.json_friendly(data_types.val_to_json(path, value))
            json_value, compressed = util.maybe_compress_summary(friendly_value, util.get_h5_typename(value))
            if compressed:
                self.write_h5(path_from_root, friendly_value)
            return json_value
    # Unreachable string literal left over from an earlier revision
    # (both branches above return); harmless dead code.
    """
    if isinstance(value, dict):
        json_child[key], converted = util.json_friendly(
            self._encode(value, path_from_root + [key]))
    else:
    """
def customize_grammar_rules(self, tokens, customize):
"""The base grammar we start out for a Python version even with the
subclassing is, well, is pretty base. And we want it that way: lean and
mean so that parsing will go faster.
Here, we add additional grammar rules based on specific instructions
that are in the instruction/token stream. In classes that
inherit from from here and other versions, grammar rules may
also be removed.
For example if we see a pretty rare DELETE_DEREF instruction we'll
add the grammar for that.
More importantly, here we add grammar rules for instructions
that may access a variable number of stack items. CALL_FUNCTION,
BUILD_LIST and so on are like this.
Without custom rules, there can be an super-exponential number of
derivations. See the deparsing paper for an elaboration of
this.
"""
is_pypy = False
# For a rough break out on the first word. This may
# include instructions that don't need customization,
# but we'll do a finer check after the rough breakout.
customize_instruction_basenames = frozenset(
('BUILD', 'CALL', 'CONTINUE', 'DELETE', 'GET',
'JUMP', 'LOAD', 'LOOKUP', 'MAKE',
'RETURN', 'RAISE', 'SETUP',
'UNPACK'))
# Opcode names in the custom_ops_processed set have rules that get added
# unconditionally and the rules are constant. So they need to be done
# only once and if we see the opcode a second we don't have to consider
# adding more rules.
#
# Note: BUILD_TUPLE_UNPACK_WITH_CALL gets considered by
# default because it starts with BUILD. So we'll set to ignore it from
# the start.
custom_ops_processed = set(('BUILD_TUPLE_UNPACK_WITH_CALL',))
# A set of instruction operation names that exist in the token stream.
# We use this customize the grammar that we create.
# 2.6-compatible set comprehensions
self.seen_ops = frozenset([t.kind for t in tokens])
self.seen_op_basenames = frozenset([opname[:opname.rfind('_')] for opname in self.seen_ops])
# Loop over instructions adding custom grammar rules based on
# a specific instruction seen.
if 'PyPy' in customize:
is_pypy = True
self.addRule("""
stmt ::= assign3_pypy
stmt ::= assign2_pypy
assign3_pypy ::= expr expr expr store store store
assign2_pypy ::= expr expr store store
stmt ::= conditional_lambda
stmt ::= conditional_not_lambda
conditional_lambda ::= expr jmp_false expr return_if_lambda
return_lambda LAMBDA_MARKER
conditional_not_lambda
::= expr jmp_true expr return_if_lambda
return_lambda LAMBDA_MARKER
""", nop_func)
n = len(tokens)
# Determine if we have an iteration CALL_FUNCTION_1.
has_get_iter_call_function1 = False
max_branches = 0
for i, token in enumerate(tokens):
if token == 'GET_ITER' and i < n-2 and self.call_fn_name(tokens[i+1]) == 'CALL_FUNCTION_1':
has_get_iter_call_function1 = True
max_branches += 1
elif (token == 'GET_AWAITABLE' and i < n-3
and tokens[i+1] == 'LOAD_CONST' and tokens[i+2] == 'YIELD_FROM'):
max_branches += 1
if max_branches > 2:
break
for i, token in enumerate(tokens):
opname = token.kind
# Do a quick breakout before testing potentially
# each of the dozen or so instruction in if elif.
if (opname[:opname.find('_')] not in customize_instruction_basenames
or opname in custom_ops_processed):
continue
opname_base = opname[:opname.rfind('_')]
# The order of opname listed is roughly sorted below
if opname_base == 'BUILD_CONST_KEY_MAP':
# This is in 3.6+
kvlist_n = 'expr ' * (token.attr)
rule = "dict ::= %sLOAD_CONST %s" % (kvlist_n, opname)
self.addRule(rule, nop_func)
elif opname.startswith('BUILD_LIST_UNPACK'):
v = token.attr
rule = 'build_list_unpack ::= %s%s' % ('expr ' * v, opname)
self.addRule(rule, nop_func)
rule = 'expr ::= build_list_unpack'
self.addRule(rule, nop_func)
elif opname_base in ('BUILD_MAP', 'BUILD_MAP_UNPACK'):
kvlist_n = "kvlist_%s" % token.attr
if opname == 'BUILD_MAP_n':
# PyPy sometimes has no count. Sigh.
rule = ('dict_comp_func ::= BUILD_MAP_n LOAD_FAST FOR_ITER store '
'comp_iter JUMP_BACK RETURN_VALUE RETURN_LAST')
self.add_unique_rule(rule, 'dict_comp_func', 1, customize)
kvlist_n = 'kvlist_n'
rule = 'kvlist_n ::= kvlist_n kv3'
self.add_unique_rule(rule, 'kvlist_n', 0, customize)
rule = 'kvlist_n ::='
self.add_unique_rule(rule, 'kvlist_n', 1, customize)
rule = "dict ::= BUILD_MAP_n kvlist_n"
elif self.version >= 3.5:
if not opname.startswith('BUILD_MAP_WITH_CALL'):
# FIXME: Use the attr
# so this doesn't run into exponential parsing time.
if opname.startswith('BUILD_MAP_UNPACK'):
self.add_unique_rule(rule, opname, token.attr, customize)
rule = 'dict_entry ::= ' + 'expr ' * (token.attr*2)
self.add_unique_rule(rule, opname, token.attr, customize)
# FIXME: start here. The LHS should be unmap_dict, not dict.
# FIXME: really we need a combination of dict_entry-like things.
# It just so happens the most common case is not to mix
# dictionary comphensions with dictionary, elements
if 'LOAD_DICTCOMP' in self.seen_ops:
rule = 'dict ::= %s%s' % ('dict_comp ' * token.attr, opname)
self.addRule(rule, nop_func)
rule = """
expr ::= unmap_dict
unmap_dict ::= %s%s
""" % ('expr ' * token.attr, opname)
else:
rule = "%s ::= %s %s" % (kvlist_n, 'expr ' * (token.attr*2), opname)
self.add_unique_rule(rule, opname, token.attr, customize)
rule = "dict ::= %s" % kvlist_n
else:
rule = kvlist_n + ' ::= ' + 'expr expr STORE_MAP ' * token.attr
self.add_unique_rule(rule, opname, token.attr, customize)
rule = "dict ::= %s %s" % (opname, kvlist_n)
self.add_unique_rule(rule, opname, token.attr, customize)
elif opname.startswith('BUILD_MAP_UNPACK_WITH_CALL'):
v = token.attr
rule = 'build_map_unpack_with_call ::= %s%s' % ('expr ' * v, opname)
self.addRule(rule, nop_func)
elif opname.startswith('BUILD_TUPLE_UNPACK_WITH_CALL'):
v = token.attr
rule = ('starred ::= %s %s' % ('expr ' * v, opname))
self.addRule(rule, nop_func)
elif opname_base in ('BUILD_LIST', 'BUILD_SET', 'BUILD_TUPLE',
'BUILD_TUPLE_UNPACK'):
v = token.attr
is_LOAD_CLOSURE = False
if opname_base == 'BUILD_TUPLE':
# If is part of a "load_closure", then it is not part of a
# "list".
is_LOAD_CLOSURE = True
for j in range(v):
if tokens[i-j-1].kind != 'LOAD_CLOSURE':
is_LOAD_CLOSURE = False
break
if is_LOAD_CLOSURE:
rule = ('load_closure ::= %s%s' % (('LOAD_CLOSURE ' * v), opname))
self.add_unique_rule(rule, opname, token.attr, customize)
if not is_LOAD_CLOSURE or v == 0:
# We do this complicated test to speed up parsing of
# pathelogically long literals, especially those over 1024.
build_count = token.attr
thousands = (build_count//1024)
thirty32s = ((build_count//32) % 32)
if thirty32s > 0:
rule = "expr32 ::=%s" % (' expr' * 32)
self.add_unique_rule(rule, opname_base, build_count, customize)
pass
if thousands > 0:
self.add_unique_rule("expr1024 ::=%s" % (' expr32' * 32),
opname_base, build_count, customize)
pass
collection = opname_base[opname_base.find('_')+1:].lower()
rule = (('%s ::= ' % collection) + 'expr1024 '*thousands +
'expr32 '*thirty32s + 'expr '*(build_count % 32) + opname)
self.add_unique_rules([
"expr ::= %s" % collection,
rule], customize)
continue
continue
elif opname_base == 'BUILD_SLICE':
if token.attr == 2:
self.add_unique_rules([
'expr ::= build_slice2',
'build_slice2 ::= expr expr BUILD_SLICE_2'
], customize)
else:
assert token.attr == 3, "BUILD_SLICE value must be 2 or 3; is %s" % v
self.add_unique_rules([
'expr ::= build_slice3',
'build_slice3 ::= expr expr expr BUILD_SLICE_3',
], customize)
elif (opname in frozenset(('CALL_FUNCTION',
'CALL_FUNCTION_EX',
'CALL_FUNCTION_EX_KW',
'CALL_FUNCTION_VAR',
'CALL_FUNCTION_VAR_KW'))
or opname.startswith('CALL_FUNCTION_KW')):
if opname == 'CALL_FUNCTION' and token.attr == 1:
rule = """
dict_comp ::= LOAD_DICTCOMP LOAD_CONST MAKE_FUNCTION_0 expr
GET_ITER CALL_FUNCTION_1
classdefdeco1 ::= expr classdefdeco2 CALL_FUNCTION_1
"""
if self.version < 3.5:
rule += """
classdefdeco1 ::= expr classdefdeco1 CALL_FUNCTION_1
"""
self.addRule(rule, nop_func)
self.custom_classfunc_rule(opname, token, customize, tokens[i+1])
# Note: don't add to custom_ops_processed.
elif opname_base == 'CALL_METHOD':
# PyPy only - DRY with parse2
args_pos, args_kw = self.get_pos_kw(token)
# number of apply equiv arguments:
nak = ( len(opname_base)-len('CALL_METHOD') ) // 3
rule = ('call ::= expr ' +
('pos_arg ' * args_pos) +
('kwarg ' * args_kw) +
'expr ' * nak + opname)
self.add_unique_rule(rule, opname, token.attr, customize)
elif opname == 'CONTINUE':
self.addRule('continue ::= CONTINUE', nop_func)
custom_ops_processed.add(opname)
elif opname == 'CONTINUE_LOOP':
self.addRule('continue ::= CONTINUE_LOOP', nop_func)
custom_ops_processed.add(opname)
elif opname == 'DELETE_ATTR':
self.addRule('del_stmt ::= expr DELETE_ATTR', nop_func)
custom_ops_processed.add(opname)
elif opname == 'DELETE_DEREF':
self.addRule("""
stmt ::= del_deref_stmt
del_deref_stmt ::= DELETE_DEREF
""", nop_func)
custom_ops_processed.add(opname)
elif opname == 'DELETE_SUBSCR':
self.addRule("""
del_stmt ::= delete_subscr
delete_subscr ::= expr expr DELETE_SUBSCR
""", nop_func)
custom_ops_processed.add(opname)
elif opname == 'GET_ITER':
self.addRule("""
expr ::= get_iter
attribute ::= expr GET_ITER
""", nop_func)
custom_ops_processed.add(opname)
elif opname == 'JUMP_IF_NOT_DEBUG':
v = token.attr
self.addRule("""
stmt ::= assert_pypy
stmt ::= assert2_pypy", nop_func)
assert_pypy ::= JUMP_IF_NOT_DEBUG assert_expr jmp_true
LOAD_ASSERT RAISE_VARARGS_1 COME_FROM
assert2_pypy ::= JUMP_IF_NOT_DEBUG assert_expr jmp_true
LOAD_ASSERT expr CALL_FUNCTION_1
RAISE_VARARGS_1 COME_FROM
assert2_pypy ::= JUMP_IF_NOT_DEBUG assert_expr jmp_true
LOAD_ASSERT expr CALL_FUNCTION_1
RAISE_VARARGS_1 COME_FROM,
""", nop_func)
custom_ops_processed.add(opname)
elif opname == 'LOAD_BUILD_CLASS':
self.custom_build_class_rule(opname, i, token, tokens, customize)
# Note: don't add to custom_ops_processed.
elif opname == 'LOAD_CLASSDEREF':
# Python 3.4+
self.addRule("expr ::= LOAD_CLASSDEREF", nop_func)
custom_ops_processed.add(opname)
elif opname == 'LOAD_CLASSNAME':
self.addRule("expr ::= LOAD_CLASSNAME", nop_func)
custom_ops_processed.add(opname)
elif opname == 'LOAD_DICTCOMP':
if has_get_iter_call_function1:
rule_pat = ("dict_comp ::= LOAD_DICTCOMP %sMAKE_FUNCTION_0 expr "
"GET_ITER CALL_FUNCTION_1")
self.add_make_function_rule(rule_pat, opname, token.attr, customize)
pass
custom_ops_processed.add(opname)
elif opname == 'LOAD_ATTR':
self.addRule("""
expr ::= attribute
attribute ::= expr LOAD_ATTR
""", nop_func)
custom_ops_processed.add(opname)
elif opname == 'LOAD_LISTCOMP':
self.add_unique_rule("expr ::= listcomp", opname, token.attr, customize)
custom_ops_processed.add(opname)
elif opname == 'LOAD_SETCOMP':
# Should this be generalized and put under MAKE_FUNCTION?
if has_get_iter_call_function1:
self.addRule("expr ::= set_comp", nop_func)
rule_pat = ("set_comp ::= LOAD_SETCOMP %sMAKE_FUNCTION_0 expr "
"GET_ITER CALL_FUNCTION_1")
self.add_make_function_rule(rule_pat, opname, token.attr, customize)
pass
custom_ops_processed.add(opname)
elif opname == 'LOOKUP_METHOD':
# A PyPy speciality - DRY with parse3
self.addRule("""
expr ::= attribute
attribute ::= expr LOOKUP_METHOD
""",
nop_func)
custom_ops_processed.add(opname)
elif opname.startswith('MAKE_CLOSURE'):
# DRY with MAKE_FUNCTION
# Note: this probably doesn't handle kwargs proprerly
if opname == 'MAKE_CLOSURE_0' and 'LOAD_DICTCOMP' in self.seen_ops:
# Is there something general going on here?
# Note that 3.6+ doesn't do this, but we'll remove
# this rule in parse36.py
rule = """
dict_comp ::= load_closure LOAD_DICTCOMP LOAD_CONST
MAKE_CLOSURE_0 expr
GET_ITER CALL_FUNCTION_1
"""
self.addRule(rule, nop_func)
args_pos, args_kw, annotate_args = token.attr
# FIXME: Fold test into add_make_function_rule
if self.version < 3.3:
j = 1
else:
j = 2
if is_pypy or (i >= j and tokens[i-j] == 'LOAD_LAMBDA'):
rule_pat = ('mklambda ::= %sload_closure LOAD_LAMBDA %%s%s' %
('pos_arg ' * args_pos, opname))
self.add_make_function_rule(rule_pat, opname, token.attr, customize)
if has_get_iter_call_function1:
rule_pat = ("generator_exp ::= %sload_closure load_genexpr %%s%s expr "
"GET_ITER CALL_FUNCTION_1" % ('pos_arg ' * args_pos, opname))
self.add_make_function_rule(rule_pat, opname, token.attr, customize)
if has_get_iter_call_function1:
if (is_pypy or (i >= j and tokens[i-j] == 'LOAD_LISTCOMP')):
# In the tokens we saw:
# LOAD_LISTCOMP LOAD_CONST MAKE_FUNCTION (>= 3.3) or
# LOAD_LISTCOMP MAKE_FUNCTION (< 3.3) or
# and have GET_ITER CALL_FUNCTION_1
# Todo: For Pypy we need to modify this slightly
rule_pat = ('listcomp ::= %sload_closure LOAD_LISTCOMP %%s%s expr '
'GET_ITER CALL_FUNCTION_1' % ('pos_arg ' * args_pos, opname))
self.add_make_function_rule(rule_pat, opname, token.attr, customize)
if (is_pypy or (i >= j and tokens[i-j] == 'LOAD_SETCOMP')):
rule_pat = ('set_comp ::= %sload_closure LOAD_SETCOMP %%s%s expr '
'GET_ITER CALL_FUNCTION_1' % ('pos_arg ' * args_pos, opname))
self.add_make_function_rule(rule_pat, opname, token.attr, customize)
if (is_pypy or (i >= j and tokens[i-j] == 'LOAD_DICTCOMP')):
self.add_unique_rule('dict_comp ::= %sload_closure LOAD_DICTCOMP %s '
'expr GET_ITER CALL_FUNCTION_1' %
('pos_arg ' * args_pos, opname),
opname, token.attr, customize)
if args_kw > 0:
kwargs_str = 'kwargs '
else:
kwargs_str = ''
# Note order of kwargs and pos args changed between 3.3-3.4
if self.version <= 3.2:
rule = ('mkfunc ::= %s%sload_closure LOAD_CONST %s'
% (kwargs_str, 'expr ' * args_pos, opname))
elif self.version == 3.3:
rule = ('mkfunc ::= %s%sload_closure LOAD_CONST LOAD_CONST %s'
% (kwargs_str, 'expr ' * args_pos, opname))
elif self.version >= 3.4:
rule = ('mkfunc ::= %s%s load_closure LOAD_CONST LOAD_CONST %s'
% ('expr ' * args_pos, kwargs_str, opname))
self.add_unique_rule(rule, opname, token.attr, customize)
if args_kw == 0:
rule = ('mkfunc ::= %sload_closure load_genexpr %s'
% ('pos_arg ' * args_pos, opname))
self.add_unique_rule(rule, opname, token.attr, customize)
if self.version < 3.4:
rule = ('mkfunc ::= %sload_closure LOAD_CONST %s'
% ('expr ' * args_pos, opname))
self.add_unique_rule(rule, opname, token.attr, customize)
pass
elif opname_base.startswith('MAKE_FUNCTION'):
# DRY with MAKE_CLOSURE
if self.version >= 3.6:
# The semantics of MAKE_FUNCTION in 3.6 are totally different from
# before.
args_pos, args_kw, annotate_args, closure = token.attr
stack_count = args_pos + args_kw + annotate_args
if closure:
if args_pos:
rule = ('mklambda ::= %s%s%s%s' %
('expr ' * stack_count,
'load_closure ' * closure,
'BUILD_TUPLE_1 LOAD_LAMBDA LOAD_CONST ',
opname))
else:
rule = ('mklambda ::= %s%s%s' %
('load_closure ' * closure,
'LOAD_LAMBDA LOAD_CONST ',
opname))
self.add_unique_rule(rule, opname, token.attr, customize)
else:
rule = ('mklambda ::= %sLOAD_LAMBDA LOAD_CONST %s' %
(('expr ' * stack_count), opname))
self.add_unique_rule(rule, opname, token.attr, customize)
rule = ('mkfunc ::= %s%s%s%s' %
('expr ' * stack_count,
'load_closure ' * closure,
'LOAD_CONST ' * 2,
opname))
self.add_unique_rule(rule, opname, token.attr, customize)
if has_get_iter_call_function1:
rule_pat = ("generator_exp ::= %sload_genexpr %%s%s expr "
"GET_ITER CALL_FUNCTION_1" % ('pos_arg ' * args_pos, opname))
self.add_make_function_rule(rule_pat, opname, token.attr, customize)
rule_pat = ("generator_exp ::= %sload_closure load_genexpr %%s%s expr "
"GET_ITER CALL_FUNCTION_1" % ('pos_arg ' * args_pos, opname))
self.add_make_function_rule(rule_pat, opname, token.attr, customize)
if is_pypy or (i >= 2 and tokens[i-2] == 'LOAD_LISTCOMP'):
if self.version >= 3.6:
# 3.6+ sometimes bundles all of the
# 'exprs' in the rule above into a
# tuple.
rule_pat = ("listcomp ::= load_closure LOAD_LISTCOMP %%s%s "
"expr GET_ITER CALL_FUNCTION_1" % (opname,))
self.add_make_function_rule(rule_pat, opname, token.attr, customize)
rule_pat = ("listcomp ::= %sLOAD_LISTCOMP %%s%s expr "
"GET_ITER CALL_FUNCTION_1" % ('expr ' * args_pos, opname))
self.add_make_function_rule(rule_pat, opname, token.attr, customize)
if is_pypy or (i >= 2 and tokens[i-2] == 'LOAD_LAMBDA'):
rule_pat = ('mklambda ::= %s%sLOAD_LAMBDA %%s%s' %
(('pos_arg ' * args_pos),
('kwarg ' * args_kw),
opname))
self.add_make_function_rule(rule_pat, opname, token.attr, customize)
continue
if self.version < 3.6:
args_pos, args_kw, annotate_args = token.attr
else:
args_pos, args_kw, annotate_args, closure = token.attr
if self.version < 3.3:
j = 1
else:
j = 2
if has_get_iter_call_function1:
rule_pat = ("generator_exp ::= %sload_genexpr %%s%s expr "
"GET_ITER CALL_FUNCTION_1" % ('pos_arg ' * args_pos, opname))
self.add_make_function_rule(rule_pat, opname, token.attr, customize)
if is_pypy or (i >= j and tokens[i-j] == 'LOAD_LISTCOMP'):
# In the tokens we saw:
# LOAD_LISTCOMP LOAD_CONST MAKE_FUNCTION (>= 3.3) or
# LOAD_LISTCOMP MAKE_FUNCTION (< 3.3) or
# and have GET_ITER CALL_FUNCTION_1
# Todo: For Pypy we need to modify this slightly
rule_pat = ("listcomp ::= %sLOAD_LISTCOMP %%s%s expr "
"GET_ITER CALL_FUNCTION_1" % ('expr ' * args_pos, opname))
self.add_make_function_rule(rule_pat, opname, token.attr, customize)
# FIXME: Fold test into add_make_function_rule
if is_pypy or (i >= j and tokens[i-j] == 'LOAD_LAMBDA'):
rule_pat = ('mklambda ::= %s%sLOAD_LAMBDA %%s%s' %
(('pos_arg ' * args_pos),
('kwarg ' * args_kw),
opname))
self.add_make_function_rule(rule_pat, opname, token.attr, customize)
if args_kw == 0:
kwargs = 'no_kwargs'
self.add_unique_rule("no_kwargs ::=", opname, token.attr, customize)
else:
kwargs = 'kwargs'
if self.version < 3.3:
# positional args after keyword args
rule = ('mkfunc ::= %s %s%s%s' %
(kwargs, 'pos_arg ' * args_pos, 'LOAD_CONST ',
opname))
self.add_unique_rule(rule, opname, token.attr, customize)
rule = ('mkfunc ::= %s%s%s' %
('pos_arg ' * args_pos, 'LOAD_CONST ',
opname))
elif self.version == 3.3:
# positional args after keyword args
rule = ('mkfunc ::= %s %s%s%s' %
(kwargs, 'pos_arg ' * args_pos, 'LOAD_CONST '*2,
opname))
elif self.version > 3.5:
# positional args before keyword args
rule = ('mkfunc ::= %s%s %s%s' %
('pos_arg ' * args_pos, kwargs, 'LOAD_CONST '*2,
opname))
elif self.version > 3.3:
# positional args before keyword args
rule = ('mkfunc ::= %s%s %s%s' %
('pos_arg ' * args_pos, kwargs, 'LOAD_CONST '*2,
opname))
else:
rule = ('mkfunc ::= %s%sexpr %s' %
(kwargs, 'pos_arg ' * args_pos, opname))
self.add_unique_rule(rule, opname, token.attr, customize)
if opname.startswith('MAKE_FUNCTION_A'):
if self.version >= 3.6:
rule = ('mkfunc_annotate ::= %s%sannotate_tuple LOAD_CONST LOAD_CONST %s' %
(('pos_arg ' * (args_pos)),
('call ' * (annotate_args-1)), opname))
self.add_unique_rule(rule, opname, token.attr, customize)
rule = ('mkfunc_annotate ::= %s%sannotate_tuple LOAD_CONST LOAD_CONST %s' %
(('pos_arg ' * (args_pos)),
('annotate_arg ' * (annotate_args-1)), opname))
if self.version >= 3.3:
# Normally we remove EXTENDED_ARG from the opcodes, but in the case of
# annotated functions can use the EXTENDED_ARG tuple to signal we have an annotated function.
# Yes this is a little hacky
rule = ('mkfunc_annotate ::= %s%sannotate_tuple LOAD_CONST LOAD_CONST EXTENDED_ARG %s' %
(('pos_arg ' * (args_pos)),
('call ' * (annotate_args-1)), opname))
self.add_unique_rule(rule, opname, token.attr, customize)
rule = ('mkfunc_annotate ::= %s%sannotate_tuple LOAD_CONST LOAD_CONST EXTENDED_ARG %s' %
(('pos_arg ' * (args_pos)),
('annotate_arg ' * (annotate_args-1)), opname))
else:
# See above comment about use of EXTENDED_ARG
rule = ('mkfunc_annotate ::= %s%sannotate_tuple LOAD_CONST EXTENDED_ARG %s' %
(('pos_arg ' * (args_pos)),
('annotate_arg ' * (annotate_args-1)), opname))
self.add_unique_rule(rule, opname, token.attr, customize)
rule = ('mkfunc_annotate ::= %s%sannotate_tuple LOAD_CONST EXTENDED_ARG %s' %
(('pos_arg ' * (args_pos)),
('call ' * (annotate_args-1)), opname))
self.addRule(rule, nop_func)
elif opname == 'RETURN_VALUE_LAMBDA':
self.addRule("""
return_lambda ::= ret_expr RETURN_VALUE_LAMBDA
""", nop_func)
custom_ops_processed.add(opname)
elif opname == 'RAISE_VARARGS_0':
self.addRule("""
stmt ::= raise_stmt0
raise_stmt0 ::= RAISE_VARARGS_0
""", nop_func)
custom_ops_processed.add(opname)
elif opname == 'RAISE_VARARGS_1':
self.addRule("""
stmt ::= raise_stmt1
raise_stmt1 ::= expr RAISE_VARARGS_1
""", nop_func)
custom_ops_processed.add(opname)
elif opname == 'RAISE_VARARGS_2':
self.addRule("""
stmt ::= raise_stmt2
raise_stmt2 ::= expr expr RAISE_VARARGS_2
""", nop_func)
custom_ops_processed.add(opname)
elif opname == 'SETUP_EXCEPT':
self.addRule("""
try_except ::= SETUP_EXCEPT suite_stmts_opt POP_BLOCK
except_handler opt_come_from_except
tryelsestmt ::= SETUP_EXCEPT suite_stmts_opt POP_BLOCK
except_handler else_suite come_from_except_clauses
tryelsestmt ::= SETUP_EXCEPT suite_stmts_opt POP_BLOCK
except_handler else_suite come_froms
tryelsestmtl ::= SETUP_EXCEPT suite_stmts_opt POP_BLOCK
except_handler else_suitel come_from_except_clauses
stmt ::= tryelsestmtl3
tryelsestmtl3 ::= SETUP_EXCEPT suite_stmts_opt POP_BLOCK
except_handler COME_FROM else_suitel
opt_come_from_except
""", nop_func)
custom_ops_processed.add(opname)
elif opname_base in ('UNPACK_EX',):
before_count, after_count = token.attr
rule = 'unpack ::= ' + opname + ' store' * (before_count + after_count + 1)
self.addRule(rule, nop_func)
elif opname_base in ('UNPACK_TUPLE', 'UNPACK_SEQUENCE'):
rule = 'unpack ::= ' + opname + ' store' * token.attr
self.addRule(rule, nop_func)
elif opname_base == 'UNPACK_LIST':
rule = 'unpack_list ::= ' + opname + ' store' * token.attr
self.addRule(rule, nop_func)
custom_ops_processed.add(opname)
pass
pass
self.check_reduce['aug_assign1'] = 'AST'
self.check_reduce['aug_assign2'] = 'AST'
self.check_reduce['while1stmt'] = 'noAST'
self.check_reduce['while1elsestmt'] = 'noAST'
self.check_reduce['ifelsestmt'] = 'AST'
self.check_reduce['annotate_tuple'] = 'noAST'
self.check_reduce['kwarg'] = 'noAST'
if self.version < 3.6:
# 3.6+ can remove a JUMP_FORWARD which messes up our testing here
self.check_reduce['try_except'] = 'AST'
# FIXME: remove parser errors caused by the below
# self.check_reduce['while1elsestmt'] = 'noAST'
return | The base grammar we start out for a Python version even with the
subclassing is, well, is pretty base. And we want it that way: lean and
mean so that parsing will go faster.
Here, we add additional grammar rules based on specific instructions
that are in the instruction/token stream. In classes that
inherit from from here and other versions, grammar rules may
also be removed.
For example if we see a pretty rare DELETE_DEREF instruction we'll
add the grammar for that.
More importantly, here we add grammar rules for instructions
that may access a variable number of stack items. CALL_FUNCTION,
BUILD_LIST and so on are like this.
Without custom rules, there can be an super-exponential number of
derivations. See the deparsing paper for an elaboration of
    this. | Below is the instruction that describes the task:
### Input:
The base grammar we start out for a Python version even with the
subclassing is, well, is pretty base. And we want it that way: lean and
mean so that parsing will go faster.
Here, we add additional grammar rules based on specific instructions
that are in the instruction/token stream. In classes that
inherit from from here and other versions, grammar rules may
also be removed.
For example if we see a pretty rare DELETE_DEREF instruction we'll
add the grammar for that.
More importantly, here we add grammar rules for instructions
that may access a variable number of stack items. CALL_FUNCTION,
BUILD_LIST and so on are like this.
Without custom rules, there can be an super-exponential number of
derivations. See the deparsing paper for an elaboration of
this.
### Response:
def customize_grammar_rules(self, tokens, customize):
"""The base grammar we start out for a Python version even with the
subclassing is, well, is pretty base. And we want it that way: lean and
mean so that parsing will go faster.
Here, we add additional grammar rules based on specific instructions
that are in the instruction/token stream. In classes that
inherit from from here and other versions, grammar rules may
also be removed.
For example if we see a pretty rare DELETE_DEREF instruction we'll
add the grammar for that.
More importantly, here we add grammar rules for instructions
that may access a variable number of stack items. CALL_FUNCTION,
BUILD_LIST and so on are like this.
Without custom rules, there can be an super-exponential number of
derivations. See the deparsing paper for an elaboration of
this.
"""
is_pypy = False
# For a rough break out on the first word. This may
# include instructions that don't need customization,
# but we'll do a finer check after the rough breakout.
customize_instruction_basenames = frozenset(
('BUILD', 'CALL', 'CONTINUE', 'DELETE', 'GET',
'JUMP', 'LOAD', 'LOOKUP', 'MAKE',
'RETURN', 'RAISE', 'SETUP',
'UNPACK'))
# Opcode names in the custom_ops_processed set have rules that get added
# unconditionally and the rules are constant. So they need to be done
# only once and if we see the opcode a second we don't have to consider
# adding more rules.
#
# Note: BUILD_TUPLE_UNPACK_WITH_CALL gets considered by
# default because it starts with BUILD. So we'll set to ignore it from
# the start.
custom_ops_processed = set(('BUILD_TUPLE_UNPACK_WITH_CALL',))
# A set of instruction operation names that exist in the token stream.
# We use this customize the grammar that we create.
# 2.6-compatible set comprehensions
self.seen_ops = frozenset([t.kind for t in tokens])
self.seen_op_basenames = frozenset([opname[:opname.rfind('_')] for opname in self.seen_ops])
# Loop over instructions adding custom grammar rules based on
# a specific instruction seen.
if 'PyPy' in customize:
is_pypy = True
self.addRule("""
stmt ::= assign3_pypy
stmt ::= assign2_pypy
assign3_pypy ::= expr expr expr store store store
assign2_pypy ::= expr expr store store
stmt ::= conditional_lambda
stmt ::= conditional_not_lambda
conditional_lambda ::= expr jmp_false expr return_if_lambda
return_lambda LAMBDA_MARKER
conditional_not_lambda
::= expr jmp_true expr return_if_lambda
return_lambda LAMBDA_MARKER
""", nop_func)
n = len(tokens)
# Determine if we have an iteration CALL_FUNCTION_1.
has_get_iter_call_function1 = False
max_branches = 0
for i, token in enumerate(tokens):
if token == 'GET_ITER' and i < n-2 and self.call_fn_name(tokens[i+1]) == 'CALL_FUNCTION_1':
has_get_iter_call_function1 = True
max_branches += 1
elif (token == 'GET_AWAITABLE' and i < n-3
and tokens[i+1] == 'LOAD_CONST' and tokens[i+2] == 'YIELD_FROM'):
max_branches += 1
if max_branches > 2:
break
for i, token in enumerate(tokens):
opname = token.kind
# Do a quick breakout before testing potentially
# each of the dozen or so instruction in if elif.
if (opname[:opname.find('_')] not in customize_instruction_basenames
or opname in custom_ops_processed):
continue
opname_base = opname[:opname.rfind('_')]
# The order of opname listed is roughly sorted below
if opname_base == 'BUILD_CONST_KEY_MAP':
# This is in 3.6+
kvlist_n = 'expr ' * (token.attr)
rule = "dict ::= %sLOAD_CONST %s" % (kvlist_n, opname)
self.addRule(rule, nop_func)
elif opname.startswith('BUILD_LIST_UNPACK'):
v = token.attr
rule = 'build_list_unpack ::= %s%s' % ('expr ' * v, opname)
self.addRule(rule, nop_func)
rule = 'expr ::= build_list_unpack'
self.addRule(rule, nop_func)
elif opname_base in ('BUILD_MAP', 'BUILD_MAP_UNPACK'):
kvlist_n = "kvlist_%s" % token.attr
if opname == 'BUILD_MAP_n':
# PyPy sometimes has no count. Sigh.
rule = ('dict_comp_func ::= BUILD_MAP_n LOAD_FAST FOR_ITER store '
'comp_iter JUMP_BACK RETURN_VALUE RETURN_LAST')
self.add_unique_rule(rule, 'dict_comp_func', 1, customize)
kvlist_n = 'kvlist_n'
rule = 'kvlist_n ::= kvlist_n kv3'
self.add_unique_rule(rule, 'kvlist_n', 0, customize)
rule = 'kvlist_n ::='
self.add_unique_rule(rule, 'kvlist_n', 1, customize)
rule = "dict ::= BUILD_MAP_n kvlist_n"
elif self.version >= 3.5:
if not opname.startswith('BUILD_MAP_WITH_CALL'):
# FIXME: Use the attr
# so this doesn't run into exponential parsing time.
if opname.startswith('BUILD_MAP_UNPACK'):
self.add_unique_rule(rule, opname, token.attr, customize)
rule = 'dict_entry ::= ' + 'expr ' * (token.attr*2)
self.add_unique_rule(rule, opname, token.attr, customize)
# FIXME: start here. The LHS should be unmap_dict, not dict.
# FIXME: really we need a combination of dict_entry-like things.
# It just so happens the most common case is not to mix
# dictionary comphensions with dictionary, elements
if 'LOAD_DICTCOMP' in self.seen_ops:
rule = 'dict ::= %s%s' % ('dict_comp ' * token.attr, opname)
self.addRule(rule, nop_func)
rule = """
expr ::= unmap_dict
unmap_dict ::= %s%s
""" % ('expr ' * token.attr, opname)
else:
rule = "%s ::= %s %s" % (kvlist_n, 'expr ' * (token.attr*2), opname)
self.add_unique_rule(rule, opname, token.attr, customize)
rule = "dict ::= %s" % kvlist_n
else:
rule = kvlist_n + ' ::= ' + 'expr expr STORE_MAP ' * token.attr
self.add_unique_rule(rule, opname, token.attr, customize)
rule = "dict ::= %s %s" % (opname, kvlist_n)
self.add_unique_rule(rule, opname, token.attr, customize)
elif opname.startswith('BUILD_MAP_UNPACK_WITH_CALL'):
v = token.attr
rule = 'build_map_unpack_with_call ::= %s%s' % ('expr ' * v, opname)
self.addRule(rule, nop_func)
elif opname.startswith('BUILD_TUPLE_UNPACK_WITH_CALL'):
v = token.attr
rule = ('starred ::= %s %s' % ('expr ' * v, opname))
self.addRule(rule, nop_func)
elif opname_base in ('BUILD_LIST', 'BUILD_SET', 'BUILD_TUPLE',
'BUILD_TUPLE_UNPACK'):
v = token.attr
is_LOAD_CLOSURE = False
if opname_base == 'BUILD_TUPLE':
# If is part of a "load_closure", then it is not part of a
# "list".
is_LOAD_CLOSURE = True
for j in range(v):
if tokens[i-j-1].kind != 'LOAD_CLOSURE':
is_LOAD_CLOSURE = False
break
if is_LOAD_CLOSURE:
rule = ('load_closure ::= %s%s' % (('LOAD_CLOSURE ' * v), opname))
self.add_unique_rule(rule, opname, token.attr, customize)
if not is_LOAD_CLOSURE or v == 0:
# We do this complicated test to speed up parsing of
# pathelogically long literals, especially those over 1024.
build_count = token.attr
thousands = (build_count//1024)
thirty32s = ((build_count//32) % 32)
if thirty32s > 0:
rule = "expr32 ::=%s" % (' expr' * 32)
self.add_unique_rule(rule, opname_base, build_count, customize)
pass
if thousands > 0:
self.add_unique_rule("expr1024 ::=%s" % (' expr32' * 32),
opname_base, build_count, customize)
pass
collection = opname_base[opname_base.find('_')+1:].lower()
rule = (('%s ::= ' % collection) + 'expr1024 '*thousands +
'expr32 '*thirty32s + 'expr '*(build_count % 32) + opname)
self.add_unique_rules([
"expr ::= %s" % collection,
rule], customize)
continue
continue
elif opname_base == 'BUILD_SLICE':
if token.attr == 2:
self.add_unique_rules([
'expr ::= build_slice2',
'build_slice2 ::= expr expr BUILD_SLICE_2'
], customize)
else:
assert token.attr == 3, "BUILD_SLICE value must be 2 or 3; is %s" % v
self.add_unique_rules([
'expr ::= build_slice3',
'build_slice3 ::= expr expr expr BUILD_SLICE_3',
], customize)
elif (opname in frozenset(('CALL_FUNCTION',
'CALL_FUNCTION_EX',
'CALL_FUNCTION_EX_KW',
'CALL_FUNCTION_VAR',
'CALL_FUNCTION_VAR_KW'))
or opname.startswith('CALL_FUNCTION_KW')):
if opname == 'CALL_FUNCTION' and token.attr == 1:
rule = """
dict_comp ::= LOAD_DICTCOMP LOAD_CONST MAKE_FUNCTION_0 expr
GET_ITER CALL_FUNCTION_1
classdefdeco1 ::= expr classdefdeco2 CALL_FUNCTION_1
"""
if self.version < 3.5:
rule += """
classdefdeco1 ::= expr classdefdeco1 CALL_FUNCTION_1
"""
self.addRule(rule, nop_func)
self.custom_classfunc_rule(opname, token, customize, tokens[i+1])
# Note: don't add to custom_ops_processed.
elif opname_base == 'CALL_METHOD':
# PyPy only - DRY with parse2
args_pos, args_kw = self.get_pos_kw(token)
# number of apply equiv arguments:
nak = ( len(opname_base)-len('CALL_METHOD') ) // 3
rule = ('call ::= expr ' +
('pos_arg ' * args_pos) +
('kwarg ' * args_kw) +
'expr ' * nak + opname)
self.add_unique_rule(rule, opname, token.attr, customize)
elif opname == 'CONTINUE':
self.addRule('continue ::= CONTINUE', nop_func)
custom_ops_processed.add(opname)
elif opname == 'CONTINUE_LOOP':
self.addRule('continue ::= CONTINUE_LOOP', nop_func)
custom_ops_processed.add(opname)
elif opname == 'DELETE_ATTR':
self.addRule('del_stmt ::= expr DELETE_ATTR', nop_func)
custom_ops_processed.add(opname)
elif opname == 'DELETE_DEREF':
self.addRule("""
stmt ::= del_deref_stmt
del_deref_stmt ::= DELETE_DEREF
""", nop_func)
custom_ops_processed.add(opname)
elif opname == 'DELETE_SUBSCR':
self.addRule("""
del_stmt ::= delete_subscr
delete_subscr ::= expr expr DELETE_SUBSCR
""", nop_func)
custom_ops_processed.add(opname)
elif opname == 'GET_ITER':
self.addRule("""
expr ::= get_iter
attribute ::= expr GET_ITER
""", nop_func)
custom_ops_processed.add(opname)
elif opname == 'JUMP_IF_NOT_DEBUG':
v = token.attr
self.addRule("""
stmt ::= assert_pypy
stmt ::= assert2_pypy", nop_func)
assert_pypy ::= JUMP_IF_NOT_DEBUG assert_expr jmp_true
LOAD_ASSERT RAISE_VARARGS_1 COME_FROM
assert2_pypy ::= JUMP_IF_NOT_DEBUG assert_expr jmp_true
LOAD_ASSERT expr CALL_FUNCTION_1
RAISE_VARARGS_1 COME_FROM
assert2_pypy ::= JUMP_IF_NOT_DEBUG assert_expr jmp_true
LOAD_ASSERT expr CALL_FUNCTION_1
RAISE_VARARGS_1 COME_FROM,
""", nop_func)
custom_ops_processed.add(opname)
elif opname == 'LOAD_BUILD_CLASS':
self.custom_build_class_rule(opname, i, token, tokens, customize)
# Note: don't add to custom_ops_processed.
elif opname == 'LOAD_CLASSDEREF':
# Python 3.4+
self.addRule("expr ::= LOAD_CLASSDEREF", nop_func)
custom_ops_processed.add(opname)
elif opname == 'LOAD_CLASSNAME':
self.addRule("expr ::= LOAD_CLASSNAME", nop_func)
custom_ops_processed.add(opname)
elif opname == 'LOAD_DICTCOMP':
if has_get_iter_call_function1:
rule_pat = ("dict_comp ::= LOAD_DICTCOMP %sMAKE_FUNCTION_0 expr "
"GET_ITER CALL_FUNCTION_1")
self.add_make_function_rule(rule_pat, opname, token.attr, customize)
pass
custom_ops_processed.add(opname)
elif opname == 'LOAD_ATTR':
self.addRule("""
expr ::= attribute
attribute ::= expr LOAD_ATTR
""", nop_func)
custom_ops_processed.add(opname)
elif opname == 'LOAD_LISTCOMP':
self.add_unique_rule("expr ::= listcomp", opname, token.attr, customize)
custom_ops_processed.add(opname)
elif opname == 'LOAD_SETCOMP':
# Should this be generalized and put under MAKE_FUNCTION?
if has_get_iter_call_function1:
self.addRule("expr ::= set_comp", nop_func)
rule_pat = ("set_comp ::= LOAD_SETCOMP %sMAKE_FUNCTION_0 expr "
"GET_ITER CALL_FUNCTION_1")
self.add_make_function_rule(rule_pat, opname, token.attr, customize)
pass
custom_ops_processed.add(opname)
elif opname == 'LOOKUP_METHOD':
# A PyPy speciality - DRY with parse3
self.addRule("""
expr ::= attribute
attribute ::= expr LOOKUP_METHOD
""",
nop_func)
custom_ops_processed.add(opname)
elif opname.startswith('MAKE_CLOSURE'):
# DRY with MAKE_FUNCTION
# Note: this probably doesn't handle kwargs proprerly
if opname == 'MAKE_CLOSURE_0' and 'LOAD_DICTCOMP' in self.seen_ops:
# Is there something general going on here?
# Note that 3.6+ doesn't do this, but we'll remove
# this rule in parse36.py
rule = """
dict_comp ::= load_closure LOAD_DICTCOMP LOAD_CONST
MAKE_CLOSURE_0 expr
GET_ITER CALL_FUNCTION_1
"""
self.addRule(rule, nop_func)
args_pos, args_kw, annotate_args = token.attr
# FIXME: Fold test into add_make_function_rule
if self.version < 3.3:
j = 1
else:
j = 2
if is_pypy or (i >= j and tokens[i-j] == 'LOAD_LAMBDA'):
rule_pat = ('mklambda ::= %sload_closure LOAD_LAMBDA %%s%s' %
('pos_arg ' * args_pos, opname))
self.add_make_function_rule(rule_pat, opname, token.attr, customize)
if has_get_iter_call_function1:
rule_pat = ("generator_exp ::= %sload_closure load_genexpr %%s%s expr "
"GET_ITER CALL_FUNCTION_1" % ('pos_arg ' * args_pos, opname))
self.add_make_function_rule(rule_pat, opname, token.attr, customize)
if has_get_iter_call_function1:
if (is_pypy or (i >= j and tokens[i-j] == 'LOAD_LISTCOMP')):
# In the tokens we saw:
# LOAD_LISTCOMP LOAD_CONST MAKE_FUNCTION (>= 3.3) or
# LOAD_LISTCOMP MAKE_FUNCTION (< 3.3) or
# and have GET_ITER CALL_FUNCTION_1
# Todo: For Pypy we need to modify this slightly
rule_pat = ('listcomp ::= %sload_closure LOAD_LISTCOMP %%s%s expr '
'GET_ITER CALL_FUNCTION_1' % ('pos_arg ' * args_pos, opname))
self.add_make_function_rule(rule_pat, opname, token.attr, customize)
if (is_pypy or (i >= j and tokens[i-j] == 'LOAD_SETCOMP')):
rule_pat = ('set_comp ::= %sload_closure LOAD_SETCOMP %%s%s expr '
'GET_ITER CALL_FUNCTION_1' % ('pos_arg ' * args_pos, opname))
self.add_make_function_rule(rule_pat, opname, token.attr, customize)
if (is_pypy or (i >= j and tokens[i-j] == 'LOAD_DICTCOMP')):
self.add_unique_rule('dict_comp ::= %sload_closure LOAD_DICTCOMP %s '
'expr GET_ITER CALL_FUNCTION_1' %
('pos_arg ' * args_pos, opname),
opname, token.attr, customize)
if args_kw > 0:
kwargs_str = 'kwargs '
else:
kwargs_str = ''
# Note order of kwargs and pos args changed between 3.3-3.4
if self.version <= 3.2:
rule = ('mkfunc ::= %s%sload_closure LOAD_CONST %s'
% (kwargs_str, 'expr ' * args_pos, opname))
elif self.version == 3.3:
rule = ('mkfunc ::= %s%sload_closure LOAD_CONST LOAD_CONST %s'
% (kwargs_str, 'expr ' * args_pos, opname))
elif self.version >= 3.4:
rule = ('mkfunc ::= %s%s load_closure LOAD_CONST LOAD_CONST %s'
% ('expr ' * args_pos, kwargs_str, opname))
self.add_unique_rule(rule, opname, token.attr, customize)
if args_kw == 0:
rule = ('mkfunc ::= %sload_closure load_genexpr %s'
% ('pos_arg ' * args_pos, opname))
self.add_unique_rule(rule, opname, token.attr, customize)
if self.version < 3.4:
rule = ('mkfunc ::= %sload_closure LOAD_CONST %s'
% ('expr ' * args_pos, opname))
self.add_unique_rule(rule, opname, token.attr, customize)
pass
elif opname_base.startswith('MAKE_FUNCTION'):
# DRY with MAKE_CLOSURE
if self.version >= 3.6:
# The semantics of MAKE_FUNCTION in 3.6 are totally different from
# before.
args_pos, args_kw, annotate_args, closure = token.attr
stack_count = args_pos + args_kw + annotate_args
if closure:
if args_pos:
rule = ('mklambda ::= %s%s%s%s' %
('expr ' * stack_count,
'load_closure ' * closure,
'BUILD_TUPLE_1 LOAD_LAMBDA LOAD_CONST ',
opname))
else:
rule = ('mklambda ::= %s%s%s' %
('load_closure ' * closure,
'LOAD_LAMBDA LOAD_CONST ',
opname))
self.add_unique_rule(rule, opname, token.attr, customize)
else:
rule = ('mklambda ::= %sLOAD_LAMBDA LOAD_CONST %s' %
(('expr ' * stack_count), opname))
self.add_unique_rule(rule, opname, token.attr, customize)
rule = ('mkfunc ::= %s%s%s%s' %
('expr ' * stack_count,
'load_closure ' * closure,
'LOAD_CONST ' * 2,
opname))
self.add_unique_rule(rule, opname, token.attr, customize)
if has_get_iter_call_function1:
rule_pat = ("generator_exp ::= %sload_genexpr %%s%s expr "
"GET_ITER CALL_FUNCTION_1" % ('pos_arg ' * args_pos, opname))
self.add_make_function_rule(rule_pat, opname, token.attr, customize)
rule_pat = ("generator_exp ::= %sload_closure load_genexpr %%s%s expr "
"GET_ITER CALL_FUNCTION_1" % ('pos_arg ' * args_pos, opname))
self.add_make_function_rule(rule_pat, opname, token.attr, customize)
if is_pypy or (i >= 2 and tokens[i-2] == 'LOAD_LISTCOMP'):
if self.version >= 3.6:
# 3.6+ sometimes bundles all of the
# 'exprs' in the rule above into a
# tuple.
rule_pat = ("listcomp ::= load_closure LOAD_LISTCOMP %%s%s "
"expr GET_ITER CALL_FUNCTION_1" % (opname,))
self.add_make_function_rule(rule_pat, opname, token.attr, customize)
rule_pat = ("listcomp ::= %sLOAD_LISTCOMP %%s%s expr "
"GET_ITER CALL_FUNCTION_1" % ('expr ' * args_pos, opname))
self.add_make_function_rule(rule_pat, opname, token.attr, customize)
if is_pypy or (i >= 2 and tokens[i-2] == 'LOAD_LAMBDA'):
rule_pat = ('mklambda ::= %s%sLOAD_LAMBDA %%s%s' %
(('pos_arg ' * args_pos),
('kwarg ' * args_kw),
opname))
self.add_make_function_rule(rule_pat, opname, token.attr, customize)
continue
if self.version < 3.6:
args_pos, args_kw, annotate_args = token.attr
else:
args_pos, args_kw, annotate_args, closure = token.attr
if self.version < 3.3:
j = 1
else:
j = 2
if has_get_iter_call_function1:
rule_pat = ("generator_exp ::= %sload_genexpr %%s%s expr "
"GET_ITER CALL_FUNCTION_1" % ('pos_arg ' * args_pos, opname))
self.add_make_function_rule(rule_pat, opname, token.attr, customize)
if is_pypy or (i >= j and tokens[i-j] == 'LOAD_LISTCOMP'):
# In the tokens we saw:
# LOAD_LISTCOMP LOAD_CONST MAKE_FUNCTION (>= 3.3) or
# LOAD_LISTCOMP MAKE_FUNCTION (< 3.3) or
# and have GET_ITER CALL_FUNCTION_1
# Todo: For Pypy we need to modify this slightly
rule_pat = ("listcomp ::= %sLOAD_LISTCOMP %%s%s expr "
"GET_ITER CALL_FUNCTION_1" % ('expr ' * args_pos, opname))
self.add_make_function_rule(rule_pat, opname, token.attr, customize)
# FIXME: Fold test into add_make_function_rule
if is_pypy or (i >= j and tokens[i-j] == 'LOAD_LAMBDA'):
rule_pat = ('mklambda ::= %s%sLOAD_LAMBDA %%s%s' %
(('pos_arg ' * args_pos),
('kwarg ' * args_kw),
opname))
self.add_make_function_rule(rule_pat, opname, token.attr, customize)
if args_kw == 0:
kwargs = 'no_kwargs'
self.add_unique_rule("no_kwargs ::=", opname, token.attr, customize)
else:
kwargs = 'kwargs'
if self.version < 3.3:
# positional args after keyword args
rule = ('mkfunc ::= %s %s%s%s' %
(kwargs, 'pos_arg ' * args_pos, 'LOAD_CONST ',
opname))
self.add_unique_rule(rule, opname, token.attr, customize)
rule = ('mkfunc ::= %s%s%s' %
('pos_arg ' * args_pos, 'LOAD_CONST ',
opname))
elif self.version == 3.3:
# positional args after keyword args
rule = ('mkfunc ::= %s %s%s%s' %
(kwargs, 'pos_arg ' * args_pos, 'LOAD_CONST '*2,
opname))
elif self.version > 3.5:
# positional args before keyword args
rule = ('mkfunc ::= %s%s %s%s' %
('pos_arg ' * args_pos, kwargs, 'LOAD_CONST '*2,
opname))
elif self.version > 3.3:
# positional args before keyword args
rule = ('mkfunc ::= %s%s %s%s' %
('pos_arg ' * args_pos, kwargs, 'LOAD_CONST '*2,
opname))
else:
rule = ('mkfunc ::= %s%sexpr %s' %
(kwargs, 'pos_arg ' * args_pos, opname))
self.add_unique_rule(rule, opname, token.attr, customize)
if opname.startswith('MAKE_FUNCTION_A'):
if self.version >= 3.6:
rule = ('mkfunc_annotate ::= %s%sannotate_tuple LOAD_CONST LOAD_CONST %s' %
(('pos_arg ' * (args_pos)),
('call ' * (annotate_args-1)), opname))
self.add_unique_rule(rule, opname, token.attr, customize)
rule = ('mkfunc_annotate ::= %s%sannotate_tuple LOAD_CONST LOAD_CONST %s' %
(('pos_arg ' * (args_pos)),
('annotate_arg ' * (annotate_args-1)), opname))
if self.version >= 3.3:
# Normally we remove EXTENDED_ARG from the opcodes, but in the case of
# annotated functions can use the EXTENDED_ARG tuple to signal we have an annotated function.
# Yes this is a little hacky
rule = ('mkfunc_annotate ::= %s%sannotate_tuple LOAD_CONST LOAD_CONST EXTENDED_ARG %s' %
(('pos_arg ' * (args_pos)),
('call ' * (annotate_args-1)), opname))
self.add_unique_rule(rule, opname, token.attr, customize)
rule = ('mkfunc_annotate ::= %s%sannotate_tuple LOAD_CONST LOAD_CONST EXTENDED_ARG %s' %
(('pos_arg ' * (args_pos)),
('annotate_arg ' * (annotate_args-1)), opname))
else:
# See above comment about use of EXTENDED_ARG
rule = ('mkfunc_annotate ::= %s%sannotate_tuple LOAD_CONST EXTENDED_ARG %s' %
(('pos_arg ' * (args_pos)),
('annotate_arg ' * (annotate_args-1)), opname))
self.add_unique_rule(rule, opname, token.attr, customize)
rule = ('mkfunc_annotate ::= %s%sannotate_tuple LOAD_CONST EXTENDED_ARG %s' %
(('pos_arg ' * (args_pos)),
('call ' * (annotate_args-1)), opname))
self.addRule(rule, nop_func)
elif opname == 'RETURN_VALUE_LAMBDA':
self.addRule("""
return_lambda ::= ret_expr RETURN_VALUE_LAMBDA
""", nop_func)
custom_ops_processed.add(opname)
elif opname == 'RAISE_VARARGS_0':
self.addRule("""
stmt ::= raise_stmt0
raise_stmt0 ::= RAISE_VARARGS_0
""", nop_func)
custom_ops_processed.add(opname)
elif opname == 'RAISE_VARARGS_1':
self.addRule("""
stmt ::= raise_stmt1
raise_stmt1 ::= expr RAISE_VARARGS_1
""", nop_func)
custom_ops_processed.add(opname)
elif opname == 'RAISE_VARARGS_2':
self.addRule("""
stmt ::= raise_stmt2
raise_stmt2 ::= expr expr RAISE_VARARGS_2
""", nop_func)
custom_ops_processed.add(opname)
elif opname == 'SETUP_EXCEPT':
self.addRule("""
try_except ::= SETUP_EXCEPT suite_stmts_opt POP_BLOCK
except_handler opt_come_from_except
tryelsestmt ::= SETUP_EXCEPT suite_stmts_opt POP_BLOCK
except_handler else_suite come_from_except_clauses
tryelsestmt ::= SETUP_EXCEPT suite_stmts_opt POP_BLOCK
except_handler else_suite come_froms
tryelsestmtl ::= SETUP_EXCEPT suite_stmts_opt POP_BLOCK
except_handler else_suitel come_from_except_clauses
stmt ::= tryelsestmtl3
tryelsestmtl3 ::= SETUP_EXCEPT suite_stmts_opt POP_BLOCK
except_handler COME_FROM else_suitel
opt_come_from_except
""", nop_func)
custom_ops_processed.add(opname)
elif opname_base in ('UNPACK_EX',):
before_count, after_count = token.attr
rule = 'unpack ::= ' + opname + ' store' * (before_count + after_count + 1)
self.addRule(rule, nop_func)
elif opname_base in ('UNPACK_TUPLE', 'UNPACK_SEQUENCE'):
rule = 'unpack ::= ' + opname + ' store' * token.attr
self.addRule(rule, nop_func)
elif opname_base == 'UNPACK_LIST':
rule = 'unpack_list ::= ' + opname + ' store' * token.attr
self.addRule(rule, nop_func)
custom_ops_processed.add(opname)
pass
pass
self.check_reduce['aug_assign1'] = 'AST'
self.check_reduce['aug_assign2'] = 'AST'
self.check_reduce['while1stmt'] = 'noAST'
self.check_reduce['while1elsestmt'] = 'noAST'
self.check_reduce['ifelsestmt'] = 'AST'
self.check_reduce['annotate_tuple'] = 'noAST'
self.check_reduce['kwarg'] = 'noAST'
if self.version < 3.6:
# 3.6+ can remove a JUMP_FORWARD which messes up our testing here
self.check_reduce['try_except'] = 'AST'
# FIXME: remove parser errors caused by the below
# self.check_reduce['while1elsestmt'] = 'noAST'
return |
def get_absolute_path(some_path):
"""
This function will return an appropriate absolute path for the path it is
given. If the input is absolute, it will return unmodified; if the input is
relative, it will be rendered as relative to the current working directory.
"""
if os.path.isabs(some_path):
return some_path
else:
return evaluate_relative_path(os.getcwd(), some_path) | This function will return an appropriate absolute path for the path it is
given. If the input is absolute, it will return unmodified; if the input is
relative, it will be rendered as relative to the current working directory. | Below is the the instruction that describes the task:
### Input:
This function will return an appropriate absolute path for the path it is
given. If the input is absolute, it will return unmodified; if the input is
relative, it will be rendered as relative to the current working directory.
### Response:
def get_absolute_path(some_path):
"""
This function will return an appropriate absolute path for the path it is
given. If the input is absolute, it will return unmodified; if the input is
relative, it will be rendered as relative to the current working directory.
"""
if os.path.isabs(some_path):
return some_path
else:
return evaluate_relative_path(os.getcwd(), some_path) |
def create_protocol(name, **kwargs):
"""
Returns an instance of the protocol with the given name.
:type name: str
:param name: The name of the protocol.
:rtype: Protocol
:return: An instance of the protocol.
"""
cls = protocol_map.get(name)
if not cls:
raise ValueError('Unsupported protocol "%s".' % name)
return cls(**kwargs) | Returns an instance of the protocol with the given name.
:type name: str
:param name: The name of the protocol.
:rtype: Protocol
:return: An instance of the protocol. | Below is the the instruction that describes the task:
### Input:
Returns an instance of the protocol with the given name.
:type name: str
:param name: The name of the protocol.
:rtype: Protocol
:return: An instance of the protocol.
### Response:
def create_protocol(name, **kwargs):
"""
Returns an instance of the protocol with the given name.
:type name: str
:param name: The name of the protocol.
:rtype: Protocol
:return: An instance of the protocol.
"""
cls = protocol_map.get(name)
if not cls:
raise ValueError('Unsupported protocol "%s".' % name)
return cls(**kwargs) |
def from_json(cls, data):
"""Decode a JSON string and inflate a node instance."""
# Decode JSON string
assert isinstance(data, str)
data = json.loads(data)
assert isinstance(data, dict)
return cls.from_dict(data) | Decode a JSON string and inflate a node instance. | Below is the the instruction that describes the task:
### Input:
Decode a JSON string and inflate a node instance.
### Response:
def from_json(cls, data):
"""Decode a JSON string and inflate a node instance."""
# Decode JSON string
assert isinstance(data, str)
data = json.loads(data)
assert isinstance(data, dict)
return cls.from_dict(data) |
def _fill(self, values):
"""Add extra values to fill the line"""
if not self._previous_line:
self._previous_line = values
return super(StackedLine, self)._fill(values)
new_values = values + list(reversed(self._previous_line))
self._previous_line = values
return new_values | Add extra values to fill the line | Below is the the instruction that describes the task:
### Input:
Add extra values to fill the line
### Response:
def _fill(self, values):
"""Add extra values to fill the line"""
if not self._previous_line:
self._previous_line = values
return super(StackedLine, self)._fill(values)
new_values = values + list(reversed(self._previous_line))
self._previous_line = values
return new_values |
def parse_selfsm(self, f):
""" Go through selfSM file and create a dictionary with the sample name as a key, """
#create a dictionary to populate from this sample's file
parsed_data = dict()
# set a empty variable which denotes if the headers have been read
headers = None
# for each line in the file
for l in f['f'].splitlines():
# split the line on tab
s = l.split("\t")
# if we haven't already read the header line
if headers is None:
# assign this list to headers variable
headers = s
# for all rows after the first
else:
# clean the sample name (first column) and assign to s_name
s_name = self.clean_s_name(s[0], f['root'])
# create a dictionary entry with the first column as a key (sample name) and empty dictionary as a value
parsed_data[s_name] = {}
# for each item in list of items in the row
for i, v in enumerate(s):
# if it's not the first element (if it's not the name)
if i != 0:
# see if CHIP is in the column header and the value is not NA
if "CHIP" in [headers[i]] and v != "NA":
# set hide_chip_columns = False so they are not hidden
self.hide_chip_columns=False
# try and convert the value into a float
try:
# and add to the dictionary the key as the corrsponding item from the header and the value from the list
parsed_data[s_name][headers[i]] = float(v)
#if can't convert to float...
except ValueError:
# add to the dictionary the key as the corrsponding item from the header and the value from the list
parsed_data[s_name][headers[i]] = v
# else return the dictionary
return parsed_data | Go through selfSM file and create a dictionary with the sample name as a key, | Below is the the instruction that describes the task:
### Input:
Go through selfSM file and create a dictionary with the sample name as a key,
### Response:
def parse_selfsm(self, f):
""" Go through selfSM file and create a dictionary with the sample name as a key, """
#create a dictionary to populate from this sample's file
parsed_data = dict()
# set a empty variable which denotes if the headers have been read
headers = None
# for each line in the file
for l in f['f'].splitlines():
# split the line on tab
s = l.split("\t")
# if we haven't already read the header line
if headers is None:
# assign this list to headers variable
headers = s
# for all rows after the first
else:
# clean the sample name (first column) and assign to s_name
s_name = self.clean_s_name(s[0], f['root'])
# create a dictionary entry with the first column as a key (sample name) and empty dictionary as a value
parsed_data[s_name] = {}
# for each item in list of items in the row
for i, v in enumerate(s):
# if it's not the first element (if it's not the name)
if i != 0:
# see if CHIP is in the column header and the value is not NA
if "CHIP" in [headers[i]] and v != "NA":
# set hide_chip_columns = False so they are not hidden
self.hide_chip_columns=False
# try and convert the value into a float
try:
# and add to the dictionary the key as the corrsponding item from the header and the value from the list
parsed_data[s_name][headers[i]] = float(v)
#if can't convert to float...
except ValueError:
# add to the dictionary the key as the corrsponding item from the header and the value from the list
parsed_data[s_name][headers[i]] = v
# else return the dictionary
return parsed_data |
def reset_handler(self, cmd):
"""Process a ResetCommand."""
self.cmd_counts[cmd.name] += 1
if cmd.ref.startswith('refs/tags/'):
self.lightweight_tags += 1
else:
if cmd.from_ is not None:
self.reftracker.track_heads_for_ref(
cmd.ref, cmd.from_) | Process a ResetCommand. | Below is the the instruction that describes the task:
### Input:
Process a ResetCommand.
### Response:
def reset_handler(self, cmd):
"""Process a ResetCommand."""
self.cmd_counts[cmd.name] += 1
if cmd.ref.startswith('refs/tags/'):
self.lightweight_tags += 1
else:
if cmd.from_ is not None:
self.reftracker.track_heads_for_ref(
cmd.ref, cmd.from_) |
def map(self, absolute_address, region_id, related_function_address=None):
"""
Add a mapping between an absolute address and a region ID. If this is a stack region map, all stack regions
beyond (lower than) this newly added regions will be discarded.
:param absolute_address: An absolute memory address.
:param region_id: ID of the memory region.
:param related_function_address: A related function address, mostly used for stack regions.
"""
if self.is_stack:
# Sanity check
if not region_id.startswith('stack_'):
raise SimRegionMapError('Received a non-stack memory ID "%d" in a stack region map' % region_id)
# Remove all stack regions that are lower than the one to add
while True:
try:
addr = next(self._address_to_region_id.irange(maximum=absolute_address, reverse=True))
descriptor = self._address_to_region_id[addr]
# Remove this mapping
del self._address_to_region_id[addr]
# Remove this region ID from the other mapping
del self._region_id_to_address[descriptor.region_id]
except StopIteration:
break
else:
if absolute_address in self._address_to_region_id:
descriptor = self._address_to_region_id[absolute_address]
# Remove this mapping
del self._address_to_region_id[absolute_address]
del self._region_id_to_address[descriptor.region_id]
# Add this new region mapping
desc = RegionDescriptor(
region_id,
absolute_address,
related_function_address=related_function_address
)
self._address_to_region_id[absolute_address] = desc
self._region_id_to_address[region_id] = desc | Add a mapping between an absolute address and a region ID. If this is a stack region map, all stack regions
beyond (lower than) this newly added regions will be discarded.
:param absolute_address: An absolute memory address.
:param region_id: ID of the memory region.
:param related_function_address: A related function address, mostly used for stack regions. | Below is the the instruction that describes the task:
### Input:
Add a mapping between an absolute address and a region ID. If this is a stack region map, all stack regions
beyond (lower than) this newly added regions will be discarded.
:param absolute_address: An absolute memory address.
:param region_id: ID of the memory region.
:param related_function_address: A related function address, mostly used for stack regions.
### Response:
def map(self, absolute_address, region_id, related_function_address=None):
"""
Add a mapping between an absolute address and a region ID. If this is a stack region map, all stack regions
beyond (lower than) this newly added regions will be discarded.
:param absolute_address: An absolute memory address.
:param region_id: ID of the memory region.
:param related_function_address: A related function address, mostly used for stack regions.
"""
if self.is_stack:
# Sanity check
if not region_id.startswith('stack_'):
raise SimRegionMapError('Received a non-stack memory ID "%d" in a stack region map' % region_id)
# Remove all stack regions that are lower than the one to add
while True:
try:
addr = next(self._address_to_region_id.irange(maximum=absolute_address, reverse=True))
descriptor = self._address_to_region_id[addr]
# Remove this mapping
del self._address_to_region_id[addr]
# Remove this region ID from the other mapping
del self._region_id_to_address[descriptor.region_id]
except StopIteration:
break
else:
if absolute_address in self._address_to_region_id:
descriptor = self._address_to_region_id[absolute_address]
# Remove this mapping
del self._address_to_region_id[absolute_address]
del self._region_id_to_address[descriptor.region_id]
# Add this new region mapping
desc = RegionDescriptor(
region_id,
absolute_address,
related_function_address=related_function_address
)
self._address_to_region_id[absolute_address] = desc
self._region_id_to_address[region_id] = desc |
def get_file_diff(tree, files_to_diff):
""" get_file_diff: Download files from nodes
Args:
tree (ChannelManager): manager to handle communication to Kolibri Studio
Returns: list of files that are not on Kolibri Studio
"""
# Determine which files have not yet been uploaded to the CC server
config.LOGGER.info("\nChecking if files exist on Kolibri Studio...")
file_diff = tree.get_file_diff(files_to_diff)
return file_diff | get_file_diff: Download files from nodes
Args:
tree (ChannelManager): manager to handle communication to Kolibri Studio
Returns: list of files that are not on Kolibri Studio | Below is the the instruction that describes the task:
### Input:
get_file_diff: Download files from nodes
Args:
tree (ChannelManager): manager to handle communication to Kolibri Studio
Returns: list of files that are not on Kolibri Studio
### Response:
def get_file_diff(tree, files_to_diff):
""" get_file_diff: Download files from nodes
Args:
tree (ChannelManager): manager to handle communication to Kolibri Studio
Returns: list of files that are not on Kolibri Studio
"""
# Determine which files have not yet been uploaded to the CC server
config.LOGGER.info("\nChecking if files exist on Kolibri Studio...")
file_diff = tree.get_file_diff(files_to_diff)
return file_diff |
def encodeIntoArray(self, input, output, learn=True):
""" See method description in base.py """
if input is not None and not isinstance(input, numbers.Number):
raise TypeError(
"Expected a scalar input but got input of type %s" % type(input))
if type(input) is float and math.isnan(input):
input = SENTINEL_VALUE_FOR_MISSING_DATA
# Get the bucket index to use
bucketIdx = self._getFirstOnBit(input)[0]
if bucketIdx is None:
# None is returned for missing value
output[0:self.n] = 0 #TODO: should all 1s, or random SDR be returned instead?
else:
# The bucket index is the index of the first bit to set in the output
output[:self.n] = 0
minbin = bucketIdx
maxbin = minbin + 2*self.halfwidth
if self.periodic:
# Handle the edges by computing wrap-around
if maxbin >= self.n:
bottombins = maxbin - self.n + 1
output[:bottombins] = 1
maxbin = self.n - 1
if minbin < 0:
topbins = -minbin
output[self.n - topbins:self.n] = 1
minbin = 0
assert minbin >= 0
assert maxbin < self.n
# set the output (except for periodic wraparound)
output[minbin:maxbin + 1] = 1
# Debug the decode() method
if self.verbosity >= 2:
print
print "input:", input
print "range:", self.minval, "-", self.maxval
print "n:", self.n, "w:", self.w, "resolution:", self.resolution, \
"radius", self.radius, "periodic:", self.periodic
print "output:",
self.pprint(output)
print "input desc:", self.decodedToStr(self.decode(output)) | See method description in base.py | Below is the the instruction that describes the task:
### Input:
See method description in base.py
### Response:
def encodeIntoArray(self, input, output, learn=True):
""" See method description in base.py """
if input is not None and not isinstance(input, numbers.Number):
raise TypeError(
"Expected a scalar input but got input of type %s" % type(input))
if type(input) is float and math.isnan(input):
input = SENTINEL_VALUE_FOR_MISSING_DATA
# Get the bucket index to use
bucketIdx = self._getFirstOnBit(input)[0]
if bucketIdx is None:
# None is returned for missing value
output[0:self.n] = 0 #TODO: should all 1s, or random SDR be returned instead?
else:
# The bucket index is the index of the first bit to set in the output
output[:self.n] = 0
minbin = bucketIdx
maxbin = minbin + 2*self.halfwidth
if self.periodic:
# Handle the edges by computing wrap-around
if maxbin >= self.n:
bottombins = maxbin - self.n + 1
output[:bottombins] = 1
maxbin = self.n - 1
if minbin < 0:
topbins = -minbin
output[self.n - topbins:self.n] = 1
minbin = 0
assert minbin >= 0
assert maxbin < self.n
# set the output (except for periodic wraparound)
output[minbin:maxbin + 1] = 1
# Debug the decode() method
if self.verbosity >= 2:
print
print "input:", input
print "range:", self.minval, "-", self.maxval
print "n:", self.n, "w:", self.w, "resolution:", self.resolution, \
"radius", self.radius, "periodic:", self.periodic
print "output:",
self.pprint(output)
print "input desc:", self.decodedToStr(self.decode(output)) |
def _InnerAppendData(self, prev_col_values, data, col_index):
"""Inner function to assist LoadData."""
# We first check that col_index has not exceeded the columns size
if col_index >= len(self.__columns):
raise DataTableException("The data does not match description, too deep")
# Dealing with the scalar case, the data is the last value.
if self.__columns[col_index]["container"] == "scalar":
prev_col_values[0][self.__columns[col_index]["id"]] = data
self.__data.append(prev_col_values)
return
if self.__columns[col_index]["container"] == "iter":
if not hasattr(data, "__iter__") or isinstance(data, dict):
raise DataTableException("Expected iterable object, got %s" %
type(data))
# We only need to insert the rest of the columns
# If there are less items than expected, we only add what there is.
for value in data:
if col_index >= len(self.__columns):
raise DataTableException("Too many elements given in data")
prev_col_values[0][self.__columns[col_index]["id"]] = value
col_index += 1
self.__data.append(prev_col_values)
return
# We know the current level is a dictionary, we verify the type.
if not isinstance(data, dict):
raise DataTableException("Expected dictionary at current level, got %s" %
type(data))
# We check if this is the last level
if self.__columns[col_index]["depth"] == self.__columns[-1]["depth"]:
# We need to add the keys in the dictionary as they are
for col in self.__columns[col_index:]:
if col["id"] in data:
prev_col_values[0][col["id"]] = data[col["id"]]
self.__data.append(prev_col_values)
return
# We have a dictionary in an inner depth level.
if not data.keys():
# In case this is an empty dictionary, we add a record with the columns
# filled only until this point.
self.__data.append(prev_col_values)
else:
for key in sorted(data):
col_values = dict(prev_col_values[0])
col_values[self.__columns[col_index]["id"]] = key
self._InnerAppendData((col_values, prev_col_values[1]),
data[key], col_index + 1) | Inner function to assist LoadData. | Below is the the instruction that describes the task:
### Input:
Inner function to assist LoadData.
### Response:
def _InnerAppendData(self, prev_col_values, data, col_index):
"""Inner function to assist LoadData."""
# We first check that col_index has not exceeded the columns size
if col_index >= len(self.__columns):
raise DataTableException("The data does not match description, too deep")
# Dealing with the scalar case, the data is the last value.
if self.__columns[col_index]["container"] == "scalar":
prev_col_values[0][self.__columns[col_index]["id"]] = data
self.__data.append(prev_col_values)
return
if self.__columns[col_index]["container"] == "iter":
if not hasattr(data, "__iter__") or isinstance(data, dict):
raise DataTableException("Expected iterable object, got %s" %
type(data))
# We only need to insert the rest of the columns
# If there are less items than expected, we only add what there is.
for value in data:
if col_index >= len(self.__columns):
raise DataTableException("Too many elements given in data")
prev_col_values[0][self.__columns[col_index]["id"]] = value
col_index += 1
self.__data.append(prev_col_values)
return
# We know the current level is a dictionary, we verify the type.
if not isinstance(data, dict):
raise DataTableException("Expected dictionary at current level, got %s" %
type(data))
# We check if this is the last level
if self.__columns[col_index]["depth"] == self.__columns[-1]["depth"]:
# We need to add the keys in the dictionary as they are
for col in self.__columns[col_index:]:
if col["id"] in data:
prev_col_values[0][col["id"]] = data[col["id"]]
self.__data.append(prev_col_values)
return
# We have a dictionary in an inner depth level.
if not data.keys():
# In case this is an empty dictionary, we add a record with the columns
# filled only until this point.
self.__data.append(prev_col_values)
else:
for key in sorted(data):
col_values = dict(prev_col_values[0])
col_values[self.__columns[col_index]["id"]] = key
self._InnerAppendData((col_values, prev_col_values[1]),
data[key], col_index + 1) |
def read_rle(file_obj, header, bit_width, debug_logging):
"""Read a run-length encoded run from the given fo with the given header and bit_width.
The count is determined from the header and the width is used to grab the
value that's repeated. Yields the value repeated count times.
"""
count = header >> 1
zero_data = b"\x00\x00\x00\x00"
width = (bit_width + 7) // 8
data = file_obj.read(width)
data = data + zero_data[len(data):]
value = struct.unpack(b"<i", data)[0]
if debug_logging:
logger.debug("Read RLE group with value %s of byte-width %s and count %s",
value, width, count)
for _ in range(count):
yield value | Read a run-length encoded run from the given fo with the given header and bit_width.
The count is determined from the header and the width is used to grab the
value that's repeated. Yields the value repeated count times. | Below is the the instruction that describes the task:
### Input:
Read a run-length encoded run from the given fo with the given header and bit_width.
The count is determined from the header and the width is used to grab the
value that's repeated. Yields the value repeated count times.
### Response:
def read_rle(file_obj, header, bit_width, debug_logging):
"""Read a run-length encoded run from the given fo with the given header and bit_width.
The count is determined from the header and the width is used to grab the
value that's repeated. Yields the value repeated count times.
"""
count = header >> 1
zero_data = b"\x00\x00\x00\x00"
width = (bit_width + 7) // 8
data = file_obj.read(width)
data = data + zero_data[len(data):]
value = struct.unpack(b"<i", data)[0]
if debug_logging:
logger.debug("Read RLE group with value %s of byte-width %s and count %s",
value, width, count)
for _ in range(count):
yield value |
def handleMethodCallMessage(self, msg):
"""
Handles DBus MethodCall messages on behalf of the DBus Connection and
dispatches them to the appropriate exported object
"""
if (
msg.interface == 'org.freedesktop.DBus.Peer'
and msg.member == 'Ping'
):
r = message.MethodReturnMessage(
msg.serial,
destination=msg.sender,
)
self.conn.sendMessage(r)
return
if (
msg.interface == 'org.freedesktop.DBus.Introspectable'
and msg.member == 'Introspect'
):
xml = introspection.generateIntrospectionXML(
msg.path,
self.exports,
)
if xml is not None:
r = message.MethodReturnMessage(
msg.serial,
body=[xml],
destination=msg.sender,
signature='s',
)
self.conn.sendMessage(r)
return
# Try to get object from complete object path
o = self.exports.get(msg.path, None)
if o is None:
self._send_err(
msg,
'org.freedesktop.DBus.Error.UnknownObject',
'%s is not an object provided by this process.' % (msg.path),
)
return
if (
msg.interface == 'org.freedesktop.DBus.ObjectManager'
and msg.member == 'GetManagedObjects'
):
i_and_p = self.getManagedObjects(o.getObjectPath())
r = message.MethodReturnMessage(
msg.serial,
body=[i_and_p],
destination=msg.sender,
signature='a{oa{sa{sv}}}',
)
self.conn.sendMessage(r)
return
i = None
for x in o.getInterfaces():
if msg.interface:
if x.name == msg.interface:
i = x
break
else:
if msg.member in x.methods:
i = x
break
m = None
if i:
m = i.methods.get(msg.member, None)
if m is None:
self._send_err(
msg,
'org.freedesktop.DBus.Error.UnknownMethod',
(
'Method "%s" with signature "%s" on interface "%s" '
'doesn\'t exist'
) % (
msg.member, msg.signature or '',
msg.interface or '(null)',
),
)
return
msig = msg.signature if msg.signature is not None else ''
esig = m.sigIn if m.sigIn is not None else ''
if esig != msig:
self._send_err(
msg,
'org.freedesktop.DBus.Error.InvalidArgs',
'Call to %s has wrong args (%s, expected %s)' %
(msg.member, msg.signature or '', m.sigIn or '')
)
return
d = defer.maybeDeferred(
o.executeMethod,
i,
msg.member,
msg.body,
msg.sender,
)
if msg.expectReply:
def send_reply(return_values):
if isinstance(return_values, (list, tuple)):
if m.nret == 1:
return_values = [return_values]
else:
return_values = [return_values]
r = message.MethodReturnMessage(
msg.serial,
body=return_values,
destination=msg.sender,
signature=m.sigOut,
)
self.conn.sendMessage(r)
def send_error(err):
e = err.value
errMsg = err.getErrorMessage()
name = None
if hasattr(e, 'dbusErrorName'):
name = e.dbusErrorName
if name is None:
name = 'org.txdbus.PythonException.' + e.__class__.__name__
try:
marshal.validateErrorName(name)
except error.MarshallingError:
errMsg = ('!!(Invalid error name "%s")!! ' % name) + errMsg
name = 'org.txdbus.InvalidErrorName'
r = message.ErrorMessage(name, msg.serial,
body=[errMsg],
signature='s',
destination=msg.sender)
self.conn.sendMessage(r)
d.addCallback(send_reply)
d.addErrback(send_error) | Handles DBus MethodCall messages on behalf of the DBus Connection and
dispatches them to the appropriate exported object | Below is the the instruction that describes the task:
### Input:
Handles DBus MethodCall messages on behalf of the DBus Connection and
dispatches them to the appropriate exported object
### Response:
def handleMethodCallMessage(self, msg):
"""
Handles DBus MethodCall messages on behalf of the DBus Connection and
dispatches them to the appropriate exported object
"""
if (
msg.interface == 'org.freedesktop.DBus.Peer'
and msg.member == 'Ping'
):
r = message.MethodReturnMessage(
msg.serial,
destination=msg.sender,
)
self.conn.sendMessage(r)
return
if (
msg.interface == 'org.freedesktop.DBus.Introspectable'
and msg.member == 'Introspect'
):
xml = introspection.generateIntrospectionXML(
msg.path,
self.exports,
)
if xml is not None:
r = message.MethodReturnMessage(
msg.serial,
body=[xml],
destination=msg.sender,
signature='s',
)
self.conn.sendMessage(r)
return
# Try to get object from complete object path
o = self.exports.get(msg.path, None)
if o is None:
self._send_err(
msg,
'org.freedesktop.DBus.Error.UnknownObject',
'%s is not an object provided by this process.' % (msg.path),
)
return
if (
msg.interface == 'org.freedesktop.DBus.ObjectManager'
and msg.member == 'GetManagedObjects'
):
i_and_p = self.getManagedObjects(o.getObjectPath())
r = message.MethodReturnMessage(
msg.serial,
body=[i_and_p],
destination=msg.sender,
signature='a{oa{sa{sv}}}',
)
self.conn.sendMessage(r)
return
i = None
for x in o.getInterfaces():
if msg.interface:
if x.name == msg.interface:
i = x
break
else:
if msg.member in x.methods:
i = x
break
m = None
if i:
m = i.methods.get(msg.member, None)
if m is None:
self._send_err(
msg,
'org.freedesktop.DBus.Error.UnknownMethod',
(
'Method "%s" with signature "%s" on interface "%s" '
'doesn\'t exist'
) % (
msg.member, msg.signature or '',
msg.interface or '(null)',
),
)
return
msig = msg.signature if msg.signature is not None else ''
esig = m.sigIn if m.sigIn is not None else ''
if esig != msig:
self._send_err(
msg,
'org.freedesktop.DBus.Error.InvalidArgs',
'Call to %s has wrong args (%s, expected %s)' %
(msg.member, msg.signature or '', m.sigIn or '')
)
return
d = defer.maybeDeferred(
o.executeMethod,
i,
msg.member,
msg.body,
msg.sender,
)
if msg.expectReply:
def send_reply(return_values):
if isinstance(return_values, (list, tuple)):
if m.nret == 1:
return_values = [return_values]
else:
return_values = [return_values]
r = message.MethodReturnMessage(
msg.serial,
body=return_values,
destination=msg.sender,
signature=m.sigOut,
)
self.conn.sendMessage(r)
def send_error(err):
e = err.value
errMsg = err.getErrorMessage()
name = None
if hasattr(e, 'dbusErrorName'):
name = e.dbusErrorName
if name is None:
name = 'org.txdbus.PythonException.' + e.__class__.__name__
try:
marshal.validateErrorName(name)
except error.MarshallingError:
errMsg = ('!!(Invalid error name "%s")!! ' % name) + errMsg
name = 'org.txdbus.InvalidErrorName'
r = message.ErrorMessage(name, msg.serial,
body=[errMsg],
signature='s',
destination=msg.sender)
self.conn.sendMessage(r)
d.addCallback(send_reply)
d.addErrback(send_error) |
def getEditorBinary(self, cmdVersion=False):
"""
Determines the location of the UE4Editor binary
"""
return os.path.join(self.getEngineRoot(), 'Engine', 'Binaries', self.getPlatformIdentifier(), 'UE4Editor' + self._editorPathSuffix(cmdVersion)) | Determines the location of the UE4Editor binary | Below is the the instruction that describes the task:
### Input:
Determines the location of the UE4Editor binary
### Response:
def getEditorBinary(self, cmdVersion=False):
"""
Determines the location of the UE4Editor binary
"""
return os.path.join(self.getEngineRoot(), 'Engine', 'Binaries', self.getPlatformIdentifier(), 'UE4Editor' + self._editorPathSuffix(cmdVersion)) |
def cardinality(self):
'''
Obtain the cardinality string.
Example: '1C' for a conditional link with a single instance [0..1]
'MC' for a link with any number of instances [0..*]
'M' for a more than one instance [1..*]
'M' for a link with exactly one instance [1]
'''
if self.many:
s = 'M'
else:
s = '1'
if self.conditional:
s += 'C'
return s | Obtain the cardinality string.
Example: '1C' for a conditional link with a single instance [0..1]
'MC' for a link with any number of instances [0..*]
'M' for a more than one instance [1..*]
'M' for a link with exactly one instance [1] | Below is the the instruction that describes the task:
### Input:
Obtain the cardinality string.
Example: '1C' for a conditional link with a single instance [0..1]
'MC' for a link with any number of instances [0..*]
'M' for a more than one instance [1..*]
'M' for a link with exactly one instance [1]
### Response:
def cardinality(self):
'''
Obtain the cardinality string.
Example: '1C' for a conditional link with a single instance [0..1]
'MC' for a link with any number of instances [0..*]
'M' for a more than one instance [1..*]
'M' for a link with exactly one instance [1]
'''
if self.many:
s = 'M'
else:
s = '1'
if self.conditional:
s += 'C'
return s |
def dataframe_from_excel(path, sheetname=0, header=0, skiprows=None): # , parse_dates=False):
"""Thin wrapper for pandas.io.excel.read_excel() that accepts a file path and sheet index/name
Arguments:
path (str): file or folder to retrieve CSV files and `pandas.DataFrame`s from
ext (str): file name extension (to filter files by)
date_parser (function): if the MultiIndex can be interpretted as a datetime, this parser will be used
Returns:
dict of DataFrame: { file_path: flattened_data_frame }
"""
sheetname = sheetname or 0
if isinstance(sheetname, (basestring, float)):
try:
sheetname = int(sheetname)
except (TypeError, ValueError, OverflowError):
sheetname = str(sheetname)
wb = xlrd.open_workbook(path)
# if isinstance(sheetname, int):
# sheet = wb.sheet_by_index(sheetname)
# else:
# sheet = wb.sheet_by_name(sheetname)
# assert(not parse_dates, "`parse_dates` argument and function not yet implemented!")
# table = [sheet.row_values(i) for i in range(sheet.nrows)]
return pd.io.excel.read_excel(wb, sheetname=sheetname, header=header, skiprows=skiprows, engine='xlrd') | Thin wrapper for pandas.io.excel.read_excel() that accepts a file path and sheet index/name
Arguments:
path (str): file or folder to retrieve CSV files and `pandas.DataFrame`s from
ext (str): file name extension (to filter files by)
date_parser (function): if the MultiIndex can be interpretted as a datetime, this parser will be used
Returns:
dict of DataFrame: { file_path: flattened_data_frame } | Below is the the instruction that describes the task:
### Input:
Thin wrapper for pandas.io.excel.read_excel() that accepts a file path and sheet index/name
Arguments:
path (str): file or folder to retrieve CSV files and `pandas.DataFrame`s from
ext (str): file name extension (to filter files by)
date_parser (function): if the MultiIndex can be interpretted as a datetime, this parser will be used
Returns:
dict of DataFrame: { file_path: flattened_data_frame }
### Response:
def dataframe_from_excel(path, sheetname=0, header=0, skiprows=None): # , parse_dates=False):
"""Thin wrapper for pandas.io.excel.read_excel() that accepts a file path and sheet index/name
Arguments:
path (str): file or folder to retrieve CSV files and `pandas.DataFrame`s from
ext (str): file name extension (to filter files by)
date_parser (function): if the MultiIndex can be interpretted as a datetime, this parser will be used
Returns:
dict of DataFrame: { file_path: flattened_data_frame }
"""
sheetname = sheetname or 0
if isinstance(sheetname, (basestring, float)):
try:
sheetname = int(sheetname)
except (TypeError, ValueError, OverflowError):
sheetname = str(sheetname)
wb = xlrd.open_workbook(path)
# if isinstance(sheetname, int):
# sheet = wb.sheet_by_index(sheetname)
# else:
# sheet = wb.sheet_by_name(sheetname)
# assert(not parse_dates, "`parse_dates` argument and function not yet implemented!")
# table = [sheet.row_values(i) for i in range(sheet.nrows)]
return pd.io.excel.read_excel(wb, sheetname=sheetname, header=header, skiprows=skiprows, engine='xlrd') |
def main():
"""Function to add or substract the temperature effect to data in a tomodir
"""
options = handle_options()
# read in temperature and resistivity data
tempdata = readin_temp(options.temp_file)
magdata = readin_rho(options.filename,
options.rhofile,
aniso=options.aniso)
# calculate corrected data
mag_corr = calc_correction(temp=tempdata,
mag=magdata,
add=options.add,
T_std=options.T_std,
m=options.m,)
# save data
save_mag_to_file(mag_corr,
options.output,
options.rhofile) | Function to add or substract the temperature effect to data in a tomodir | Below is the the instruction that describes the task:
### Input:
Function to add or substract the temperature effect to data in a tomodir
### Response:
def main():
"""Function to add or substract the temperature effect to data in a tomodir
"""
options = handle_options()
# read in temperature and resistivity data
tempdata = readin_temp(options.temp_file)
magdata = readin_rho(options.filename,
options.rhofile,
aniso=options.aniso)
# calculate corrected data
mag_corr = calc_correction(temp=tempdata,
mag=magdata,
add=options.add,
T_std=options.T_std,
m=options.m,)
# save data
save_mag_to_file(mag_corr,
options.output,
options.rhofile) |
def _CheckIsLink(self, file_entry):
"""Checks the is_link find specification.
Args:
file_entry (FileEntry): file entry.
Returns:
bool: True if the file entry matches the find specification, False if not.
"""
if definitions.FILE_ENTRY_TYPE_LINK not in self._file_entry_types:
return False
return file_entry.IsLink() | Checks the is_link find specification.
Args:
file_entry (FileEntry): file entry.
Returns:
bool: True if the file entry matches the find specification, False if not. | Below is the the instruction that describes the task:
### Input:
Checks the is_link find specification.
Args:
file_entry (FileEntry): file entry.
Returns:
bool: True if the file entry matches the find specification, False if not.
### Response:
def _CheckIsLink(self, file_entry):
"""Checks the is_link find specification.
Args:
file_entry (FileEntry): file entry.
Returns:
bool: True if the file entry matches the find specification, False if not.
"""
if definitions.FILE_ENTRY_TYPE_LINK not in self._file_entry_types:
return False
return file_entry.IsLink() |
def remove(self):
""" Remove the directory. """
lib.gp_camera_folder_remove_dir(
self._cam._cam, self.parent.path.encode(), self.name.encode(),
self._cam._ctx) | Remove the directory. | Below is the the instruction that describes the task:
### Input:
Remove the directory.
### Response:
def remove(self):
""" Remove the directory. """
lib.gp_camera_folder_remove_dir(
self._cam._cam, self.parent.path.encode(), self.name.encode(),
self._cam._ctx) |
def eventloop(self, *args, **kwargs):
"""
Hand crafted event loop, with only one event possible : exit
More events ( and signals ) can be added later, after converting to asyncio.
"""
# Setting status
status = None
# Starting the clock
start = time.time()
first_loop = True
# loop running target, maybe more than once
while not self.exit.is_set():
if first_loop:
first_loop = False
# signalling startup only the first time, just after having check for exit request.
# We need to return control before starting, but after entering context...
self.started.set()
# TODO : check if better outside of loop maybe ??
# It will change semantics, but might be more intuitive...
# time is ticking
# TODO : move this out of here. this class should require only generic interface to any method.
now = time.time()
timedelta = now - start
start = now
# replacing the original Process.run() call, passing arguments to our target
if self._target:
# bwcompat
kwargs['timedelta'] = timedelta
# TODO : use return code to determine when/how we need to run this the next time...
# Also we need to keep the exit status to be able to call external process as an update...
logging.debug(
"[{self.name}] calling {self._target.__name__} with args {args} and kwargs {kwargs}...".format(
**locals()))
status = self._target(*args, **kwargs)
if status is not None:
break
if self.started.is_set() and status is None and self.exit.is_set():
# in the not so special case where we started, we didnt get exit code and we exited,
# this is expected as a normal result and we set an exitcode here of 0
# As 0 is the conventional success for unix process successful run
status = 0
return status | Hand crafted event loop, with only one event possible : exit
More events ( and signals ) can be added later, after converting to asyncio. | Below is the the instruction that describes the task:
### Input:
Hand crafted event loop, with only one event possible : exit
More events ( and signals ) can be added later, after converting to asyncio.
### Response:
def eventloop(self, *args, **kwargs):
"""
Hand crafted event loop, with only one event possible : exit
More events ( and signals ) can be added later, after converting to asyncio.
"""
# Setting status
status = None
# Starting the clock
start = time.time()
first_loop = True
# loop running target, maybe more than once
while not self.exit.is_set():
if first_loop:
first_loop = False
# signalling startup only the first time, just after having check for exit request.
# We need to return control before starting, but after entering context...
self.started.set()
# TODO : check if better outside of loop maybe ??
# It will change semantics, but might be more intuitive...
# time is ticking
# TODO : move this out of here. this class should require only generic interface to any method.
now = time.time()
timedelta = now - start
start = now
# replacing the original Process.run() call, passing arguments to our target
if self._target:
# bwcompat
kwargs['timedelta'] = timedelta
# TODO : use return code to determine when/how we need to run this the next time...
# Also we need to keep the exit status to be able to call external process as an update...
logging.debug(
"[{self.name}] calling {self._target.__name__} with args {args} and kwargs {kwargs}...".format(
**locals()))
status = self._target(*args, **kwargs)
if status is not None:
break
if self.started.is_set() and status is None and self.exit.is_set():
# in the not so special case where we started, we didnt get exit code and we exited,
# this is expected as a normal result and we set an exitcode here of 0
# As 0 is the conventional success for unix process successful run
status = 0
return status |
def axes(self):
"""
Returns all the axes that have been defined for this chart.
:return [<projexui.widgets.xchart.XChartAxis>, ..]
"""
out = self._axes[:]
if self._horizontalAxis:
out.append(self._horizontalAxis)
if self._verticalAxis:
out.append(self._verticalAxis)
return out | Returns all the axes that have been defined for this chart.
:return [<projexui.widgets.xchart.XChartAxis>, ..] | Below is the the instruction that describes the task:
### Input:
Returns all the axes that have been defined for this chart.
:return [<projexui.widgets.xchart.XChartAxis>, ..]
### Response:
def axes(self):
"""
Returns all the axes that have been defined for this chart.
:return [<projexui.widgets.xchart.XChartAxis>, ..]
"""
out = self._axes[:]
if self._horizontalAxis:
out.append(self._horizontalAxis)
if self._verticalAxis:
out.append(self._verticalAxis)
return out |
def make_prior(num_topics, initial_value):
"""Create the prior distribution.
Args:
num_topics: Number of topics.
initial_value: The starting value for the prior parameters.
Returns:
prior: A `callable` that returns a `tf.distribution.Distribution`
instance, the prior distribution.
prior_variables: A `list` of `Variable` objects, the trainable parameters
of the prior.
"""
def _softplus_inverse(x):
return np.log(np.expm1(x))
logit_concentration = tf.compat.v1.get_variable(
"logit_concentration",
shape=[1, num_topics],
initializer=tf.compat.v1.initializers.constant(
_softplus_inverse(initial_value)))
concentration = _clip_dirichlet_parameters(
tf.nn.softplus(logit_concentration))
def prior():
return tfd.Dirichlet(concentration=concentration,
name="topics_prior")
prior_variables = [logit_concentration]
return prior, prior_variables | Create the prior distribution.
Args:
num_topics: Number of topics.
initial_value: The starting value for the prior parameters.
Returns:
prior: A `callable` that returns a `tf.distribution.Distribution`
instance, the prior distribution.
prior_variables: A `list` of `Variable` objects, the trainable parameters
of the prior. | Below is the the instruction that describes the task:
### Input:
Create the prior distribution.
Args:
num_topics: Number of topics.
initial_value: The starting value for the prior parameters.
Returns:
prior: A `callable` that returns a `tf.distribution.Distribution`
instance, the prior distribution.
prior_variables: A `list` of `Variable` objects, the trainable parameters
of the prior.
### Response:
def make_prior(num_topics, initial_value):
"""Create the prior distribution.
Args:
num_topics: Number of topics.
initial_value: The starting value for the prior parameters.
Returns:
prior: A `callable` that returns a `tf.distribution.Distribution`
instance, the prior distribution.
prior_variables: A `list` of `Variable` objects, the trainable parameters
of the prior.
"""
def _softplus_inverse(x):
return np.log(np.expm1(x))
logit_concentration = tf.compat.v1.get_variable(
"logit_concentration",
shape=[1, num_topics],
initializer=tf.compat.v1.initializers.constant(
_softplus_inverse(initial_value)))
concentration = _clip_dirichlet_parameters(
tf.nn.softplus(logit_concentration))
def prior():
return tfd.Dirichlet(concentration=concentration,
name="topics_prior")
prior_variables = [logit_concentration]
return prior, prior_variables |
def install_from_exchange():
''' Install from experiment exchange. '''
parser = argparse.ArgumentParser(
description='Download experiment from the psiturk.org experiment\
exchange (http://psiturk.org/ee).'
)
parser.add_argument(
'exp_id', metavar='exp_id', type=str, help='the id number of the\
experiment in the exchange'
)
args = parser.parse_args()
exp_exch = ExperimentExchangeServices()
exp_exch.download_experiment(args.exp_id) | Install from experiment exchange. | Below is the instruction that describes the task:
### Input:
Install from experiment exchange.
### Response:
def install_from_exchange():
''' Install from experiment exchange. '''
parser = argparse.ArgumentParser(
description='Download experiment from the psiturk.org experiment\
exchange (http://psiturk.org/ee).'
)
parser.add_argument(
'exp_id', metavar='exp_id', type=str, help='the id number of the\
experiment in the exchange'
)
args = parser.parse_args()
exp_exch = ExperimentExchangeServices()
exp_exch.download_experiment(args.exp_id) |
def get_term(self, ontology, iri):
"""Gets the data for a given term
:param str ontology: The name of the ontology
:param str iri: The IRI of a term
:rtype: dict
"""
url = self.ontology_term_fmt.format(ontology, iri)
response = requests.get(url)
return response.json() | Gets the data for a given term
:param str ontology: The name of the ontology
:param str iri: The IRI of a term
:rtype: dict | Below is the instruction that describes the task:
### Input:
Gets the data for a given term
:param str ontology: The name of the ontology
:param str iri: The IRI of a term
:rtype: dict
### Response:
def get_term(self, ontology, iri):
"""Gets the data for a given term
:param str ontology: The name of the ontology
:param str iri: The IRI of a term
:rtype: dict
"""
url = self.ontology_term_fmt.format(ontology, iri)
response = requests.get(url)
return response.json() |
def _sorted_keys_items(dobj):
"""Return dictionary items sorted by key."""
keys = sorted(dobj.keys())
for key in keys:
yield key, dobj[key] | Return dictionary items sorted by key. | Below is the instruction that describes the task:
### Input:
Return dictionary items sorted by key.
### Response:
def _sorted_keys_items(dobj):
"""Return dictionary items sorted by key."""
keys = sorted(dobj.keys())
for key in keys:
yield key, dobj[key] |
def airwires(board, showgui=0):
'search for airwires in eagle board'
board = Path(board).expand().abspath()
file_out = tempfile.NamedTemporaryFile(suffix='.txt', delete=0)
file_out.close()
ulp = ulp_templ.replace('FILE_NAME', file_out.name)
file_ulp = tempfile.NamedTemporaryFile(suffix='.ulp', delete=0)
file_ulp.write(ulp.encode('utf-8'))
file_ulp.close()
commands = [
'run ' + file_ulp.name,
'quit',
]
command_eagle(board, commands=commands, showgui=showgui)
n = int(Path(file_out.name).text())
Path(file_out.name).remove()
Path(file_ulp.name).remove()
return n | search for airwires in eagle board | Below is the instruction that describes the task:
### Input:
search for airwires in eagle board
### Response:
def airwires(board, showgui=0):
'search for airwires in eagle board'
board = Path(board).expand().abspath()
file_out = tempfile.NamedTemporaryFile(suffix='.txt', delete=0)
file_out.close()
ulp = ulp_templ.replace('FILE_NAME', file_out.name)
file_ulp = tempfile.NamedTemporaryFile(suffix='.ulp', delete=0)
file_ulp.write(ulp.encode('utf-8'))
file_ulp.close()
commands = [
'run ' + file_ulp.name,
'quit',
]
command_eagle(board, commands=commands, showgui=showgui)
n = int(Path(file_out.name).text())
Path(file_out.name).remove()
Path(file_ulp.name).remove()
return n |
def to_latex(self, buf=None, upper_triangle=True, **kwargs):
"""Render a DataFrame to a tabular environment table.
You can splice this into a LaTeX document.
Requires ``\\usepackage{booktabs}``.
Wrapper around the :meth:`pandas.DataFrame.to_latex` method.
"""
out = self._sympy_formatter()
out = out._abs_ref_formatter(format_as='latex')
if not upper_triangle:
out = out._remove_upper_triangle()
return out._frame.to_latex(buf=buf, **kwargs) | Render a DataFrame to a tabular environment table.
You can splice this into a LaTeX document.
Requires ``\\usepackage{booktabs}``.
Wrapper around the :meth:`pandas.DataFrame.to_latex` method. | Below is the instruction that describes the task:
### Input:
Render a DataFrame to a tabular environment table.
You can splice this into a LaTeX document.
Requires ``\\usepackage{booktabs}``.
Wrapper around the :meth:`pandas.DataFrame.to_latex` method.
### Response:
def to_latex(self, buf=None, upper_triangle=True, **kwargs):
"""Render a DataFrame to a tabular environment table.
You can splice this into a LaTeX document.
Requires ``\\usepackage{booktabs}``.
Wrapper around the :meth:`pandas.DataFrame.to_latex` method.
"""
out = self._sympy_formatter()
out = out._abs_ref_formatter(format_as='latex')
if not upper_triangle:
out = out._remove_upper_triangle()
return out._frame.to_latex(buf=buf, **kwargs) |
def get_client_identity_from_certificate(certificate):
"""
Given an X.509 certificate, extract and return the client identity.
"""
client_ids = get_common_names_from_certificate(certificate)
if len(client_ids) > 0:
if len(client_ids) > 1:
raise exceptions.PermissionDenied(
"Multiple client identities found."
)
return client_ids[0]
else:
raise exceptions.PermissionDenied(
"The certificate does not define any subject common names. "
"Client identity unavailable."
) | Given an X.509 certificate, extract and return the client identity. | Below is the instruction that describes the task:
### Input:
Given an X.509 certificate, extract and return the client identity.
### Response:
def get_client_identity_from_certificate(certificate):
"""
Given an X.509 certificate, extract and return the client identity.
"""
client_ids = get_common_names_from_certificate(certificate)
if len(client_ids) > 0:
if len(client_ids) > 1:
raise exceptions.PermissionDenied(
"Multiple client identities found."
)
return client_ids[0]
else:
raise exceptions.PermissionDenied(
"The certificate does not define any subject common names. "
"Client identity unavailable."
) |
def conditionally_inline_policies(role_name, sr_entry):
"""
If 'policies' key lists the filename prefixes of policies to bind to the role,
load them from the expected path and inline them onto the role
Args:
role_name: name of the role to attach the policies to
sr_entry: service registry entry
"""
service_type = sr_entry['type']
if not (service_type in SERVICE_TYPE_ROLE and "policies" in sr_entry):
print_if_verbose("not eligible for policies; service_type: {} is not valid for policies "
"or no 'policies' key in service registry for this role".format(service_type))
return
for policy_name in sr_entry['policies']:
print_if_verbose("loading policy: {} for role: {}".format(policy_name, role_name))
try:
policy_document = resolve_policy_document(policy_name)
except:
fail("Exception loading policy: {} for role: {}".format(policy_name, role_name), sys.exc_info())
# inline the policy onto the role
if CONTEXT.commit:
try:
CLIENTS["iam"].put_role_policy(RoleName=role_name, PolicyName=policy_name, PolicyDocument=policy_document)
except:
fail("Exception putting policy: {} onto role: {}".format(policy_name, role_name), sys.exc_info()) | If 'policies' key lists the filename prefixes of policies to bind to the role,
load them from the expected path and inline them onto the role
Args:
role_name: name of the role to attach the policies to
sr_entry: service registry entry | Below is the instruction that describes the task:
### Input:
If 'policies' key lists the filename prefixes of policies to bind to the role,
load them from the expected path and inline them onto the role
Args:
role_name: name of the role to attach the policies to
sr_entry: service registry entry
### Response:
def conditionally_inline_policies(role_name, sr_entry):
"""
If 'policies' key lists the filename prefixes of policies to bind to the role,
load them from the expected path and inline them onto the role
Args:
role_name: name of the role to attach the policies to
sr_entry: service registry entry
"""
service_type = sr_entry['type']
if not (service_type in SERVICE_TYPE_ROLE and "policies" in sr_entry):
print_if_verbose("not eligible for policies; service_type: {} is not valid for policies "
"or no 'policies' key in service registry for this role".format(service_type))
return
for policy_name in sr_entry['policies']:
print_if_verbose("loading policy: {} for role: {}".format(policy_name, role_name))
try:
policy_document = resolve_policy_document(policy_name)
except:
fail("Exception loading policy: {} for role: {}".format(policy_name, role_name), sys.exc_info())
# inline the policy onto the role
if CONTEXT.commit:
try:
CLIENTS["iam"].put_role_policy(RoleName=role_name, PolicyName=policy_name, PolicyDocument=policy_document)
except:
fail("Exception putting policy: {} onto role: {}".format(policy_name, role_name), sys.exc_info()) |
def difference_update(self, values):
'''Remove an iterable of *values* from the set.'''
d = self.value_pickler.dumps
return self.cache.remove(tuple((d(v) for v in values))) | Remove an iterable of *values* from the set. | Below is the instruction that describes the task:
### Input:
Remove an iterable of *values* from the set.
### Response:
def difference_update(self, values):
'''Remove an iterable of *values* from the set.'''
d = self.value_pickler.dumps
return self.cache.remove(tuple((d(v) for v in values))) |
def json_safe(string, content_type='application/octet-stream'):
"""Returns JSON-safe version of `string`.
If `string` is a Unicode string or a valid UTF-8, it is returned unmodified,
as it can safely be encoded to JSON string.
If `string` contains raw/binary data, it is Base64-encoded, formatted and
returned according to "data" URL scheme (RFC2397). Since JSON is not
suitable for binary data, some additional encoding was necessary; "data"
URL scheme was chosen for its simplicity.
"""
try:
string = string.decode('utf-8')
json.dumps(string)
return string
except (ValueError, TypeError):
return b''.join([
b'data:',
content_type.encode('utf-8'),
b';base64,',
base64.b64encode(string)
]).decode('utf-8') | Returns JSON-safe version of `string`.
If `string` is a Unicode string or a valid UTF-8, it is returned unmodified,
as it can safely be encoded to JSON string.
If `string` contains raw/binary data, it is Base64-encoded, formatted and
returned according to "data" URL scheme (RFC2397). Since JSON is not
suitable for binary data, some additional encoding was necessary; "data"
URL scheme was chosen for its simplicity. | Below is the instruction that describes the task:
### Input:
Returns JSON-safe version of `string`.
If `string` is a Unicode string or a valid UTF-8, it is returned unmodified,
as it can safely be encoded to JSON string.
If `string` contains raw/binary data, it is Base64-encoded, formatted and
returned according to "data" URL scheme (RFC2397). Since JSON is not
suitable for binary data, some additional encoding was necessary; "data"
URL scheme was chosen for its simplicity.
### Response:
def json_safe(string, content_type='application/octet-stream'):
"""Returns JSON-safe version of `string`.
If `string` is a Unicode string or a valid UTF-8, it is returned unmodified,
as it can safely be encoded to JSON string.
If `string` contains raw/binary data, it is Base64-encoded, formatted and
returned according to "data" URL scheme (RFC2397). Since JSON is not
suitable for binary data, some additional encoding was necessary; "data"
URL scheme was chosen for its simplicity.
"""
try:
string = string.decode('utf-8')
json.dumps(string)
return string
except (ValueError, TypeError):
return b''.join([
b'data:',
content_type.encode('utf-8'),
b';base64,',
base64.b64encode(string)
]).decode('utf-8') |
def size_to_content(self, get_font_metrics_fn):
""" Size the canvas item to the proper width, the maximum of any label. """
new_sizing = self.copy_sizing()
new_sizing.minimum_width = 0
new_sizing.maximum_width = 0
axes = self.__axes
if axes and axes.is_valid:
# calculate the width based on the label lengths
font = "{0:d}px".format(self.font_size)
max_width = 0
y_range = axes.calibrated_data_max - axes.calibrated_data_min
label = axes.y_ticker.value_label(axes.calibrated_data_max + y_range * 5)
max_width = max(max_width, get_font_metrics_fn(font, label).width)
label = axes.y_ticker.value_label(axes.calibrated_data_min - y_range * 5)
max_width = max(max_width, get_font_metrics_fn(font, label).width)
new_sizing.minimum_width = max_width
new_sizing.maximum_width = max_width
self.update_sizing(new_sizing) | Size the canvas item to the proper width, the maximum of any label. | Below is the instruction that describes the task:
### Input:
Size the canvas item to the proper width, the maximum of any label.
### Response:
def size_to_content(self, get_font_metrics_fn):
""" Size the canvas item to the proper width, the maximum of any label. """
new_sizing = self.copy_sizing()
new_sizing.minimum_width = 0
new_sizing.maximum_width = 0
axes = self.__axes
if axes and axes.is_valid:
# calculate the width based on the label lengths
font = "{0:d}px".format(self.font_size)
max_width = 0
y_range = axes.calibrated_data_max - axes.calibrated_data_min
label = axes.y_ticker.value_label(axes.calibrated_data_max + y_range * 5)
max_width = max(max_width, get_font_metrics_fn(font, label).width)
label = axes.y_ticker.value_label(axes.calibrated_data_min - y_range * 5)
max_width = max(max_width, get_font_metrics_fn(font, label).width)
new_sizing.minimum_width = max_width
new_sizing.maximum_width = max_width
self.update_sizing(new_sizing) |
def process_raw_data(cls, raw_data):
"""Create a new model using raw API response."""
properties = raw_data.get("properties", {})
bgp_peers = []
for raw_content in properties.get("bgpPeers", []):
raw_content["parentResourceID"] = raw_data["resourceId"]
raw_content["grandParentResourceID"] = raw_data["parentResourceID"]
bgp_peers.append(BGPPeers.from_raw_data(raw_content))
properties["bgpPeers"] = bgp_peers
return super(BGPRouters, cls).process_raw_data(raw_data) | Create a new model using raw API response. | Below is the instruction that describes the task:
### Input:
Create a new model using raw API response.
### Response:
def process_raw_data(cls, raw_data):
"""Create a new model using raw API response."""
properties = raw_data.get("properties", {})
bgp_peers = []
for raw_content in properties.get("bgpPeers", []):
raw_content["parentResourceID"] = raw_data["resourceId"]
raw_content["grandParentResourceID"] = raw_data["parentResourceID"]
bgp_peers.append(BGPPeers.from_raw_data(raw_content))
properties["bgpPeers"] = bgp_peers
return super(BGPRouters, cls).process_raw_data(raw_data) |
def create_milestones(self, project_id, milestones):
"""
With this function you can create multiple milestones in a single
request. See the "create" function for a description of the individual
fields in the milestone.
"""
path = '/projects/%u/milestones/create' % project_id
req = ET.Element('request')
for milestone in milestones:
req.append(self._create_milestone_elem(*milestone))
return self._request(path, req) | With this function you can create multiple milestones in a single
request. See the "create" function for a description of the individual
fields in the milestone. | Below is the instruction that describes the task:
### Input:
With this function you can create multiple milestones in a single
request. See the "create" function for a description of the individual
fields in the milestone.
### Response:
def create_milestones(self, project_id, milestones):
"""
With this function you can create multiple milestones in a single
request. See the "create" function for a description of the individual
fields in the milestone.
"""
path = '/projects/%u/milestones/create' % project_id
req = ET.Element('request')
for milestone in milestones:
req.append(self._create_milestone_elem(*milestone))
return self._request(path, req) |
def parse_known2func(self, args=None, func=None):
"""Parse the command line arguments to the setup function
This method parses the given command line arguments to the function
used in the :meth:`setup_args` method to setup up this parser
Parameters
----------
args: list
The list of command line arguments
func: function or str
An alternative function to use. If None, the last function or the
one specified through the `setup_as` parameter in the
:meth:`setup_args` is used.
Returns
-------
object
What ever is returned by the called function
list
The remaining command line arguments that could not be interpreted
Note
----
This method does not cover subparsers!"""
ns, remainder = self.parse_known_args(args)
kws = vars(ns)
if func is None:
if self._setup_as:
func = kws.pop(self._setup_as)
else:
func = self._used_functions[-1]
return func(**kws), remainder | Parse the command line arguments to the setup function
This method parses the given command line arguments to the function
used in the :meth:`setup_args` method to setup up this parser
Parameters
----------
args: list
The list of command line arguments
func: function or str
An alternative function to use. If None, the last function or the
one specified through the `setup_as` parameter in the
:meth:`setup_args` is used.
Returns
-------
object
What ever is returned by the called function
list
The remaining command line arguments that could not be interpreted
Note
----
This method does not cover subparsers! | Below is the instruction that describes the task:
### Input:
Parse the command line arguments to the setup function
This method parses the given command line arguments to the function
used in the :meth:`setup_args` method to setup up this parser
Parameters
----------
args: list
The list of command line arguments
func: function or str
An alternative function to use. If None, the last function or the
one specified through the `setup_as` parameter in the
:meth:`setup_args` is used.
Returns
-------
object
What ever is returned by the called function
list
The remaining command line arguments that could not be interpreted
Note
----
This method does not cover subparsers!
### Response:
def parse_known2func(self, args=None, func=None):
"""Parse the command line arguments to the setup function
This method parses the given command line arguments to the function
used in the :meth:`setup_args` method to setup up this parser
Parameters
----------
args: list
The list of command line arguments
func: function or str
An alternative function to use. If None, the last function or the
one specified through the `setup_as` parameter in the
:meth:`setup_args` is used.
Returns
-------
object
What ever is returned by the called function
list
The remaining command line arguments that could not be interpreted
Note
----
This method does not cover subparsers!"""
ns, remainder = self.parse_known_args(args)
kws = vars(ns)
if func is None:
if self._setup_as:
func = kws.pop(self._setup_as)
else:
func = self._used_functions[-1]
return func(**kws), remainder |
def get_sample_size(self, key=None):
""" Returns the number of samples in the input data
@ In, key, an optional 2-tuple specifying a min-max id pair
used for determining which partition size should be
returned. If not specified then the size of the entire data
set will be returned.
@ Out, an integer specifying the number of samples.
"""
if key is None:
return len(self.Y)
else:
return len(self.get_partitions(self.persistence)[key]) | Returns the number of samples in the input data
@ In, key, an optional 2-tuple specifying a min-max id pair
used for determining which partition size should be
returned. If not specified then the size of the entire data
set will be returned.
@ Out, an integer specifying the number of samples. | Below is the instruction that describes the task:
### Input:
Returns the number of samples in the input data
@ In, key, an optional 2-tuple specifying a min-max id pair
used for determining which partition size should be
returned. If not specified then the size of the entire data
set will be returned.
@ Out, an integer specifying the number of samples.
### Response:
def get_sample_size(self, key=None):
""" Returns the number of samples in the input data
@ In, key, an optional 2-tuple specifying a min-max id pair
used for determining which partition size should be
returned. If not specified then the size of the entire data
set will be returned.
@ Out, an integer specifying the number of samples.
"""
if key is None:
return len(self.Y)
else:
return len(self.get_partitions(self.persistence)[key]) |
def Create(self, body, path, type, id, initial_headers, options=None):
"""Creates a Azure Cosmos resource and returns it.
:param dict body:
:param str path:
:param str type:
:param str id:
:param dict initial_headers:
:param dict options:
The request options for the request.
:return:
The created Azure Cosmos resource.
:rtype:
dict
"""
if options is None:
options = {}
initial_headers = initial_headers or self.default_headers
headers = base.GetHeaders(self,
initial_headers,
'post',
path,
id,
type,
options)
# Create will use WriteEndpoint since it uses POST operation
request = request_object._RequestObject(type, documents._OperationType.Create)
result, self.last_response_headers = self.__Post(path,
request,
body,
headers)
# update session for write request
self._UpdateSessionIfRequired(headers, result, self.last_response_headers)
return result | Creates a Azure Cosmos resource and returns it.
:param dict body:
:param str path:
:param str type:
:param str id:
:param dict initial_headers:
:param dict options:
The request options for the request.
:return:
The created Azure Cosmos resource.
:rtype:
dict | Below is the instruction that describes the task:
### Input:
Creates a Azure Cosmos resource and returns it.
:param dict body:
:param str path:
:param str type:
:param str id:
:param dict initial_headers:
:param dict options:
The request options for the request.
:return:
The created Azure Cosmos resource.
:rtype:
dict
### Response:
def Create(self, body, path, type, id, initial_headers, options=None):
"""Creates a Azure Cosmos resource and returns it.
:param dict body:
:param str path:
:param str type:
:param str id:
:param dict initial_headers:
:param dict options:
The request options for the request.
:return:
The created Azure Cosmos resource.
:rtype:
dict
"""
if options is None:
options = {}
initial_headers = initial_headers or self.default_headers
headers = base.GetHeaders(self,
initial_headers,
'post',
path,
id,
type,
options)
# Create will use WriteEndpoint since it uses POST operation
request = request_object._RequestObject(type, documents._OperationType.Create)
result, self.last_response_headers = self.__Post(path,
request,
body,
headers)
# update session for write request
self._UpdateSessionIfRequired(headers, result, self.last_response_headers)
return result |
def change_password(self, auth_secret, old_password, new_password):
"""Change the user password.
Parameters
----------
auth_secret: str
The authentication secret which will be used for user authentication.
old_password: str
The old password before the change.
new_password: str
The new password after the change.
Returns
-------
bool
True if the password is successfully changed, False otherwise.
result
A dict containing the new authentication secret with the key AUTH_KEY
if the password is successfully changed, a dict containing the error
string with the key ERROR_KEY otherwise.
Note
----
Possible error strings are listed as below:
- ERROR_NEW_PASSWORD_NO_CHANGE
- ERROR_NOT_LOGGED_IN
- ERROR_INCORRECT_OLD_PASSWORD
- ERROR_WEAK_PASSWORD
"""
result = {pytwis_constants.ERROR_KEY: None}
if old_password == new_password:
result[pytwis_constants.ERROR_KEY] = \
pytwis_constants.ERROR_NEW_PASSWORD_NO_CHANGE
return (False, result)
# Check if the user is logged in.
loggedin, userid = self._is_loggedin(auth_secret)
if not loggedin:
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
return (False, result)
# Check if the old password matches.
userid_profile_key = pytwis_constants.USER_PROFILE_KEY_FORMAT.format(userid)
stored_password_hash = self._rc.hget(userid_profile_key,
pytwis_constants.PASSWORD_HASH_KEY)
if not check_password_hash(stored_password_hash, old_password):
result[pytwis_constants.ERROR_KEY] = \
pytwis_constants.ERROR_INCORRECT_OLD_PASSWORD
return (False, result)
# Check the password.
if not Pytwis._check_password(new_password):
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_WEAK_PASSWORD
return (False, result)
# Generate the new authentication secret.
new_auth_secret = secrets.token_hex()
# Generate the new password hash.
# The format of the new password hash looks like "method$salt$hash".
new_password_hash = generate_password_hash(new_password,
method=\
pytwis_constants.PASSWORD_HASH_METHOD)
# Replace the old password hash by the new one and the old authentication secret
# by the new one.
with self._rc.pipeline() as pipe:
pipe.multi()
pipe.hset(userid_profile_key,
pytwis_constants.PASSWORD_HASH_KEY,
new_password_hash)
pipe.hset(userid_profile_key, pytwis_constants.AUTH_KEY, new_auth_secret)
pipe.hset(pytwis_constants.AUTHS_KEY, new_auth_secret, userid)
pipe.hdel(pytwis_constants.AUTHS_KEY, auth_secret)
pipe.execute()
result[pytwis_constants.AUTH_KEY] = new_auth_secret
return (True, result) | Change the user password.
Parameters
----------
auth_secret: str
The authentication secret which will be used for user authentication.
old_password: str
The old password before the change.
new_password: str
The new password after the change.
Returns
-------
bool
True if the password is successfully changed, False otherwise.
result
A dict containing the new authentication secret with the key AUTH_KEY
if the password is successfully changed, a dict containing the error
string with the key ERROR_KEY otherwise.
Note
----
Possible error strings are listed as below:
- ERROR_NEW_PASSWORD_NO_CHANGE
- ERROR_NOT_LOGGED_IN
- ERROR_INCORRECT_OLD_PASSWORD
- ERROR_WEAK_PASSWORD | Below is the instruction that describes the task:
### Input:
Change the user password.
Parameters
----------
auth_secret: str
The authentication secret which will be used for user authentication.
old_password: str
The old password before the change.
new_password: str
The new password after the change.
Returns
-------
bool
True if the password is successfully changed, False otherwise.
result
A dict containing the new authentication secret with the key AUTH_KEY
if the password is successfully changed, a dict containing the error
string with the key ERROR_KEY otherwise.
Note
----
Possible error strings are listed as below:
- ERROR_NEW_PASSWORD_NO_CHANGE
- ERROR_NOT_LOGGED_IN
- ERROR_INCORRECT_OLD_PASSWORD
- ERROR_WEAK_PASSWORD
### Response:
def change_password(self, auth_secret, old_password, new_password):
"""Change the user password.
Parameters
----------
auth_secret: str
The authentication secret which will be used for user authentication.
old_password: str
The old password before the change.
new_password: str
The new password after the change.
Returns
-------
bool
True if the password is successfully changed, False otherwise.
result
A dict containing the new authentication secret with the key AUTH_KEY
if the password is successfully changed, a dict containing the error
string with the key ERROR_KEY otherwise.
Note
----
Possible error strings are listed as below:
- ERROR_NEW_PASSWORD_NO_CHANGE
- ERROR_NOT_LOGGED_IN
- ERROR_INCORRECT_OLD_PASSWORD
- ERROR_WEAK_PASSWORD
"""
result = {pytwis_constants.ERROR_KEY: None}
if old_password == new_password:
result[pytwis_constants.ERROR_KEY] = \
pytwis_constants.ERROR_NEW_PASSWORD_NO_CHANGE
return (False, result)
# Check if the user is logged in.
loggedin, userid = self._is_loggedin(auth_secret)
if not loggedin:
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
return (False, result)
# Check if the old password matches.
userid_profile_key = pytwis_constants.USER_PROFILE_KEY_FORMAT.format(userid)
stored_password_hash = self._rc.hget(userid_profile_key,
pytwis_constants.PASSWORD_HASH_KEY)
if not check_password_hash(stored_password_hash, old_password):
result[pytwis_constants.ERROR_KEY] = \
pytwis_constants.ERROR_INCORRECT_OLD_PASSWORD
return (False, result)
# Check the password.
if not Pytwis._check_password(new_password):
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_WEAK_PASSWORD
return (False, result)
# Generate the new authentication secret.
new_auth_secret = secrets.token_hex()
# Generate the new password hash.
# The format of the new password hash looks like "method$salt$hash".
new_password_hash = generate_password_hash(new_password,
method=\
pytwis_constants.PASSWORD_HASH_METHOD)
# Replace the old password hash by the new one and the old authentication secret
# by the new one.
with self._rc.pipeline() as pipe:
pipe.multi()
pipe.hset(userid_profile_key,
pytwis_constants.PASSWORD_HASH_KEY,
new_password_hash)
pipe.hset(userid_profile_key, pytwis_constants.AUTH_KEY, new_auth_secret)
pipe.hset(pytwis_constants.AUTHS_KEY, new_auth_secret, userid)
pipe.hdel(pytwis_constants.AUTHS_KEY, auth_secret)
pipe.execute()
result[pytwis_constants.AUTH_KEY] = new_auth_secret
return (True, result) |
def get_audio_duration(self):
"""
Return item audio duration
"""
decoder = timeside.core.get_processor('file_decoder')(
uri=self.get_uri())
return decoder.uri_total_duration | Return item audio duration | Below is the instruction that describes the task:
### Input:
Return item audio duration
### Response:
def get_audio_duration(self):
"""
Return item audio duration
"""
decoder = timeside.core.get_processor('file_decoder')(
uri=self.get_uri())
return decoder.uri_total_duration |
def stop(self, nowait=False):
"""Stop the listener.
This asks the thread to terminate, and then waits for it to do so.
Note that if you don't call this before your application exits, there
may be some records still left on the queue, which won't be processed.
If nowait is False then thread will handle remaining items in queue and
stop.
If nowait is True then thread will be stopped even if the queue still
contains items.
"""
self._stop.set()
if nowait:
self._stop_nowait.set()
self.queue.put_nowait(self._sentinel_item)
if (self._thread.isAlive() and
self._thread is not threading.currentThread()):
self._thread.join()
self._thread = None | Stop the listener.
This asks the thread to terminate, and then waits for it to do so.
Note that if you don't call this before your application exits, there
may be some records still left on the queue, which won't be processed.
If nowait is False then thread will handle remaining items in queue and
stop.
If nowait is True then thread will be stopped even if the queue still
contains items. | Below is the instruction that describes the task:
### Input:
Stop the listener.
This asks the thread to terminate, and then waits for it to do so.
Note that if you don't call this before your application exits, there
may be some records still left on the queue, which won't be processed.
If nowait is False then thread will handle remaining items in queue and
stop.
If nowait is True then thread will be stopped even if the queue still
contains items.
### Response:
def stop(self, nowait=False):
"""Stop the listener.
This asks the thread to terminate, and then waits for it to do so.
Note that if you don't call this before your application exits, there
may be some records still left on the queue, which won't be processed.
If nowait is False then thread will handle remaining items in queue and
stop.
If nowait is True then thread will be stopped even if the queue still
contains items.
"""
self._stop.set()
if nowait:
self._stop_nowait.set()
self.queue.put_nowait(self._sentinel_item)
if (self._thread.isAlive() and
self._thread is not threading.currentThread()):
self._thread.join()
self._thread = None |
def is_single_class():
"""Returns True if only a single class is being run or some tests within a single class"""
ret = False
counts = get_counts()
if counts["classes"] < 1 and counts["modules"] < 1:
ret = counts["tests"] > 0
else:
ret = counts["classes"] <= 1 and counts["modules"] <= 1
return ret | Returns True if only a single class is being run or some tests within a single class | Below is the instruction that describes the task:
### Input:
Returns True if only a single class is being run or some tests within a single class
### Response:
def is_single_class():
    """Returns True if only a single class is being run or some tests within a single class"""
    counts = get_counts()
    no_class_or_module_selected = counts["classes"] < 1 and counts["modules"] < 1
    if no_class_or_module_selected:
        # Nothing selected at class/module level: single-class only when
        # individual tests were picked.
        return counts["tests"] > 0
    return counts["classes"] <= 1 and counts["modules"] <= 1
def distance_matrix(self, leaf_labels=False):
'''Return a distance matrix (2D dictionary) of the leaves of this ``Tree``
Args:
``leaf_labels`` (``bool``): ``True`` to have keys be labels of leaf ``Node`` objects, otherwise ``False`` to have keys be ``Node`` objects
Returns:
``dict``: Distance matrix (2D dictionary) of the leaves of this ``Tree``, where keys are labels of leaves; ``M[u][v]`` = distance from ``u`` to ``v``
'''
M = dict(); leaf_dists = dict()
for node in self.traverse_postorder():
if node.is_leaf():
leaf_dists[node] = [[node,0]]
else:
for c in node.children:
if c.edge_length is not None:
for i in range(len(leaf_dists[c])):
leaf_dists[c][i][1] += c.edge_length
for c1 in range(0,len(node.children)-1):
leaves_c1 = leaf_dists[node.children[c1]]
for c2 in range(c1+1,len(node.children)):
leaves_c2 = leaf_dists[node.children[c2]]
for i in range(len(leaves_c1)):
for j in range(len(leaves_c2)):
u,ud = leaves_c1[i]; v,vd = leaves_c2[j]; d = ud+vd
if leaf_labels:
u_key = u.label; v_key = v.label
else:
u_key = u; v_key = v
if u_key not in M:
M[u_key] = dict()
M[u_key][v_key] = d
if v_key not in M:
M[v_key] = dict()
M[v_key][u_key] = d
leaf_dists[node] = leaf_dists[node.children[0]]; del leaf_dists[node.children[0]]
for i in range(1,len(node.children)):
leaf_dists[node] += leaf_dists[node.children[i]]; del leaf_dists[node.children[i]]
return M | Return a distance matrix (2D dictionary) of the leaves of this ``Tree``
Args:
``leaf_labels`` (``bool``): ``True`` to have keys be labels of leaf ``Node`` objects, otherwise ``False`` to have keys be ``Node`` objects
Returns:
``dict``: Distance matrix (2D dictionary) of the leaves of this ``Tree``, where keys are labels of leaves; ``M[u][v]`` = distance from ``u`` to ``v`` | Below is the the instruction that describes the task:
### Input:
Return a distance matrix (2D dictionary) of the leaves of this ``Tree``
Args:
``leaf_labels`` (``bool``): ``True`` to have keys be labels of leaf ``Node`` objects, otherwise ``False`` to have keys be ``Node`` objects
Returns:
``dict``: Distance matrix (2D dictionary) of the leaves of this ``Tree``, where keys are labels of leaves; ``M[u][v]`` = distance from ``u`` to ``v``
### Response:
def distance_matrix(self, leaf_labels=False):
    '''Return a distance matrix (2D dictionary) of the leaves of this ``Tree``

    Args:
        ``leaf_labels`` (``bool``): ``True`` to have keys be labels of leaf ``Node`` objects, otherwise ``False`` to have keys be ``Node`` objects

    Returns:
        ``dict``: Distance matrix (2D dictionary) of the leaves of this ``Tree``, where keys are labels of leaves; ``M[u][v]`` = distance from ``u`` to ``v``
    '''
    # M is the output matrix; leaf_dists maps each visited node to a list of
    # [leaf, distance-from-this-node] pairs for every leaf below that node.
    M = dict(); leaf_dists = dict()
    for node in self.traverse_postorder():
        if node.is_leaf():
            # A leaf is at distance 0 from itself.
            leaf_dists[node] = [[node,0]]
        else:
            # Push each child's leaf distances up by that child's edge length
            # (mutates the pair lists in place).
            for c in node.children:
                if c.edge_length is not None:
                    for i in range(len(leaf_dists[c])):
                        leaf_dists[c][i][1] += c.edge_length
            # Every pair of leaves under *different* children meets at this
            # node, so their distance is the sum of their distances to it.
            for c1 in range(0,len(node.children)-1):
                leaves_c1 = leaf_dists[node.children[c1]]
                for c2 in range(c1+1,len(node.children)):
                    leaves_c2 = leaf_dists[node.children[c2]]
                    for i in range(len(leaves_c1)):
                        for j in range(len(leaves_c2)):
                            u,ud = leaves_c1[i]; v,vd = leaves_c2[j]; d = ud+vd
                            if leaf_labels:
                                u_key = u.label; v_key = v.label
                            else:
                                u_key = u; v_key = v
                            # Store the distance symmetrically.
                            if u_key not in M:
                                M[u_key] = dict()
                            M[u_key][v_key] = d
                            if v_key not in M:
                                M[v_key] = dict()
                            M[v_key][u_key] = d
            # Merge the children's leaf lists into this node's entry and
            # delete the child entries to keep memory bounded.
            leaf_dists[node] = leaf_dists[node.children[0]]; del leaf_dists[node.children[0]]
            for i in range(1,len(node.children)):
                leaf_dists[node] += leaf_dists[node.children[i]]; del leaf_dists[node.children[i]]
    return M
def string_stats(strs, valid_chars='012346789', left_pad='0', right_pad='', strip=True):
""" Count the occurrence of a category of valid characters within an iterable of serial/model no, etc """
if left_pad is None:
left_pad = ''.join(c for c in rex.ASCII_CHARACTERS if c not in valid_chars)
if right_pad is None:
right_pad = ''.join(c for c in rex.ASCII_CHARACTERS if c not in valid_chars)
def normalize(s):
if strip:
s = s.strip()
s = s.lstrip(left_pad)
s = s.rstrip(right_pad)
return s
# should probably check to make sure memory not exceeded
strs = [normalize(s) for s in strs]
lengths = Counter(len(s) for s in strs)
counts = {}
max_length = max(lengths.keys())
for i in range(max_length):
# print i
for s in strs:
if i < len(s):
counts[i] = counts.get(i, 0) + int(s[i] in valid_chars)
counts[-i - 1] = counts.get(-i - 1, 0) + int(s[-i - 1] in valid_chars)
long_enough_strings = float(sum(c for n, c in list(lengths.items()) if n >= i))
counts[i] = counts[i] / long_enough_strings
counts[-i - 1] = counts[-i - 1] / long_enough_strings
return counts | Count the occurrence of a category of valid characters within an iterable of serial/model no, etc | Below is the instruction that describes the task:
### Input:
Count the occurrence of a category of valid characters within an iterable of serial/model no, etc
### Response:
def string_stats(strs, valid_chars='0123456789', left_pad='0', right_pad='', strip=True):
    """ Count the occurrence of a category of valid characters within an iterable of serial/model no, etc

    For each character position -- positive keys index from the left, negative
    keys from the right -- returns the fraction of (normalized) strings, long
    enough to have that position, whose character there is in ``valid_chars``.

    Note: the historical default ``'012346789'`` omitted the digit 5, almost
    certainly a typo; the default is now the full digit set.

    Returns an empty dict for an empty input instead of raising.
    """
    if left_pad is None:
        left_pad = ''.join(c for c in rex.ASCII_CHARACTERS if c not in valid_chars)
    if right_pad is None:
        right_pad = ''.join(c for c in rex.ASCII_CHARACTERS if c not in valid_chars)

    def normalize(s):
        # Strip surrounding whitespace, then the configured pad characters.
        if strip:
            s = s.strip()
        s = s.lstrip(left_pad)
        s = s.rstrip(right_pad)
        return s

    # should probably check to make sure memory not exceeded
    strs = [normalize(s) for s in strs]
    if not strs:
        return {}
    lengths = Counter(len(s) for s in strs)
    counts = {}
    max_length = max(lengths.keys())
    for i in range(max_length):
        for s in strs:
            if i < len(s):
                counts[i] = counts.get(i, 0) + int(s[i] in valid_chars)
                counts[-i - 1] = counts.get(-i - 1, 0) + int(s[-i - 1] in valid_chars)
        # Normalize by the number of strings long enough to contribute.
        # NOTE(review): the ``n >= i`` condition also counts strings of length
        # exactly i, which lack position i -- confirm whether ``n > i`` was
        # intended before changing.
        long_enough_strings = float(sum(c for n, c in list(lengths.items()) if n >= i))
        counts[i] = counts[i] / long_enough_strings
        counts[-i - 1] = counts[-i - 1] / long_enough_strings
    return counts
def disable_bool_icon(
fieldname: str,
model) -> Callable[[Any], bool]:
"""
Disable boolean icons for a Django ModelAdmin field.
The '_meta' attribute is present on Django model classes and instances.
model_class: ``Union[Model, Type[Model]]``
... only the type checker in Py3.5 is broken; see ``files.py``
"""
# noinspection PyUnusedLocal
def func(self, obj):
return getattr(obj, fieldname)
func.boolean = False
func.admin_order_field = fieldname
# func.short_description = \
# model._meta.get_field_by_name(fieldname)[0].verbose_name
# get_field_by_name() deprecated in Django 1.9 and will go in 1.10
# https://docs.djangoproject.com/en/1.8/ref/models/meta/
# noinspection PyProtectedMember, PyUnresolvedReferences
func.short_description = \
model._meta.get_field(fieldname).verbose_name
return func | Disable boolean icons for a Django ModelAdmin field.
The '_meta' attribute is present on Django model classes and instances.
model_class: ``Union[Model, Type[Model]]``
... only the type checker in Py3.5 is broken; see ``files.py`` | Below is the the instruction that describes the task:
### Input:
Disable boolean icons for a Django ModelAdmin field.
The '_meta' attribute is present on Django model classes and instances.
model_class: ``Union[Model, Type[Model]]``
... only the type checker in Py3.5 is broken; see ``files.py``
### Response:
def disable_bool_icon(
        fieldname: str,
        model) -> Callable[[Any], bool]:
    """
    Disable boolean icons for a Django ModelAdmin field.
    The '_meta' attribute is present on Django model classes and instances.
    model_class: ``Union[Model, Type[Model]]``
    ... only the type checker in Py3.5 is broken; see ``files.py``
    """
    # noinspection PyUnusedLocal
    def accessor(self, obj):
        # Plain attribute access; no icon rendering.
        return getattr(obj, fieldname)

    accessor.boolean = False
    accessor.admin_order_field = fieldname
    # get_field() replaced get_field_by_name(), which was deprecated in
    # Django 1.9 and removed in 1.10.
    # https://docs.djangoproject.com/en/1.8/ref/models/meta/
    # noinspection PyProtectedMember, PyUnresolvedReferences
    accessor.short_description = model._meta.get_field(fieldname).verbose_name
    return accessor
def get_tetrahedra_relative_grid_address(microzone_lattice):
"""Returns relative (differences of) grid addresses from the central
Parameter
---------
microzone_lattice : ndarray or list of list
column vectors of parallel piped microzone lattice, i.e.,
microzone_lattice = np.linalg.inv(cell.get_cell()) / mesh
"""
relative_grid_address = np.zeros((24, 4, 3), dtype='intc')
phonoc.tetrahedra_relative_grid_address(
relative_grid_address,
np.array(microzone_lattice, dtype='double', order='C'))
return relative_grid_address | Returns relative (differences of) grid addresses from the central
Parameter
---------
microzone_lattice : ndarray or list of list
column vectors of parallel piped microzone lattice, i.e.,
microzone_lattice = np.linalg.inv(cell.get_cell()) / mesh | Below is the the instruction that describes the task:
### Input:
Returns relative (differences of) grid addresses from the central
Parameter
---------
microzone_lattice : ndarray or list of list
column vectors of parallel piped microzone lattice, i.e.,
microzone_lattice = np.linalg.inv(cell.get_cell()) / mesh
### Response:
def get_tetrahedra_relative_grid_address(microzone_lattice):
    """Returns relative (differences of) grid addresses from the central

    Parameter
    ---------
    microzone_lattice : ndarray or list of list
        column vectors of parallel piped microzone lattice, i.e.,
        microzone_lattice = np.linalg.inv(cell.get_cell()) / mesh

    Returns
    -------
    ndarray
        shape (24, 4, 3), dtype 'intc': 24 tetrahedra x 4 vertices x 3
        integer grid-address components.
    """
    # Output buffer handed to the C extension; presumably filled in place
    # (the array is returned unchanged otherwise) -- confirm against phonoc.
    relative_grid_address = np.zeros((24, 4, 3), dtype='intc')
    # The lattice is copied into a C-contiguous double array, the layout the
    # C routine expects.
    phonoc.tetrahedra_relative_grid_address(
        relative_grid_address,
        np.array(microzone_lattice, dtype='double', order='C'))
    return relative_grid_address
def stop(self):
"""Stop internal color pattern playing
"""
if ( self.dev == None ): return ''
buf = [REPORT_ID, ord('p'), 0, 0, 0, 0, 0, 0, 0]
return self.write(buf); | Stop internal color pattern playing | Below is the the instruction that describes the task:
### Input:
Stop internal color pattern playing
### Response:
def stop(self):
    """Stop internal color pattern playing.

    Returns the result of the feature-report write, or '' when no
    device is attached.
    """
    # Identity comparison is the correct test for None (PEP 8).
    if self.dev is None:
        return ''
    # Feature report: command 'p' with all-zero parameters stops playback.
    buf = [REPORT_ID, ord('p'), 0, 0, 0, 0, 0, 0, 0]
    return self.write(buf)
def _replace_tables(self, soup, v_separator=' | ', h_separator='-'):
"""Replaces <table> elements with its ASCII equivalent.
"""
tables = self._parse_tables(soup)
v_sep_len = len(v_separator)
v_left_sep = v_separator.lstrip()
for t in tables:
html = ''
trs = t['trs']
h_length = 1 + (v_sep_len * len(t['col_width'])) + t['width']
head_foot = (h_separator * h_length) + "\n"
html += head_foot
for tr in trs:
html += v_left_sep
for i, td in enumerate(tr):
text = td['text']
col_width = t['col_width'][i] + v_sep_len
if td['colspan'] > 1:
for j in range(td['colspan']-1):
j = j + 1
if (i+j) < len(t['col_width']):
col_width += t['col_width'][i+j] + v_sep_len
html += ('%' + str(col_width) + 's') % (text + v_separator)
html += "\n"
html += head_foot
new_table = soup.new_tag('div')
new_table.string = html
t['table'].replace_with(new_table)
return soup | Replaces <table> elements with its ASCII equivalent. | Below is the the instruction that describes the task:
### Input:
Replaces <table> elements with its ASCII equivalent.
### Response:
def _replace_tables(self, soup, v_separator=' | ', h_separator='-'):
    """Replaces <table> elements with its ASCII equivalent.

    :param soup: BeautifulSoup document to rewrite in place
    :param v_separator: string placed between (and after) columns
    :param h_separator: single character repeated to draw the top/bottom rule
    :returns: the same ``soup`` object with each <table> swapped for a <div>
        containing an ASCII rendering
    """
    # _parse_tables yields dicts with 'table' (the tag), 'trs' (rows of cell
    # dicts with 'text'/'colspan'), 'col_width' and total 'width'.
    tables = self._parse_tables(soup)
    v_sep_len = len(v_separator)
    v_left_sep = v_separator.lstrip()
    for t in tables:
        html = ''
        trs = t['trs']
        # Rule width: 1 + one separator per column + sum of column widths.
        h_length = 1 + (v_sep_len * len(t['col_width'])) + t['width']
        head_foot = (h_separator * h_length) + "\n"
        html += head_foot
        for tr in trs:
            html += v_left_sep
            for i, td in enumerate(tr):
                text = td['text']
                col_width = t['col_width'][i] + v_sep_len
                if td['colspan'] > 1:
                    # A spanning cell absorbs the widths (and separators) of
                    # the extra columns it covers.
                    for j in range(td['colspan']-1):
                        j = j + 1
                        if (i+j) < len(t['col_width']):
                            col_width += t['col_width'][i+j] + v_sep_len
                # Right-align the cell text plus trailing separator within
                # the computed column width.
                html += ('%' + str(col_width) + 's') % (text + v_separator)
            html += "\n"
        html += head_foot
        # Replace the original <table> with a <div> holding the rendering.
        new_table = soup.new_tag('div')
        new_table.string = html
        t['table'].replace_with(new_table)
    return soup
def reverse_file(infile, outfile):
'''Reverse the content of infile, write to outfile.
Both infile and outfile are filenames or filepaths.
'''
with open(infile, 'rb') as inf:
with open(outfile, 'wb') as outf:
reverse_fd(inf, outf) | Reverse the content of infile, write to outfile.
Both infile and outfile are filenames or filepaths. | Below is the the instruction that describes the task:
### Input:
Reverse the content of infile, write to outfile.
Both infile and outfile are filenames or filepaths.
### Response:
def reverse_file(infile, outfile):
    '''Reverse the content of infile, write to outfile.

    Both infile and outfile are filenames or filepaths.
    '''
    # Open both ends in binary mode and delegate the actual work to
    # reverse_fd, which operates on file objects.
    with open(infile, 'rb') as source, open(outfile, 'wb') as sink:
        reverse_fd(source, sink)
def submission_to_json(self, task, data, debug, reloading=False, replace=False, tags={}):
""" Converts a submission to json (keeps only needed fields) """
if "ssh_host" in data:
return json.dumps({'status': "waiting", 'text': "<b>SSH server active</b>",
'ssh_host': data["ssh_host"], 'ssh_port': data["ssh_port"],
'ssh_password': data["ssh_password"]})
# Here we are waiting. Let's send some useful information.
waiting_data = self.submission_manager.get_job_queue_info(data["jobid"]) if "jobid" in data else None
if waiting_data is not None and not reloading:
nb_tasks_before, approx_wait_time = waiting_data
wait_time = round(approx_wait_time)
if nb_tasks_before == -1 and wait_time <= 0:
text = _("<b>INGInious is currently grading your answers.<b/> (almost done)")
elif nb_tasks_before == -1:
text = _("<b>INGInious is currently grading your answers.<b/> (Approx. wait time: {} seconds)").format(
wait_time)
elif nb_tasks_before == 0:
text = _("<b>You are next in the waiting queue!</b>")
elif nb_tasks_before == 1:
text = _("<b>There is one task in front of you in the waiting queue.</b>")
else:
text = _("<b>There are {} tasks in front of you in the waiting queue.</b>").format(nb_tasks_before)
return json.dumps({'status': "waiting", 'text': text})
tojson = {
'status': data['status'],
'result': data.get('result', 'crash'),
'id': str(data["_id"]),
'submitted_on': str(data['submitted_on']),
'grade': str(data.get("grade", 0.0)),
'replace': replace and not reloading # Replace the evaluated submission
}
if "text" in data:
tojson["text"] = data["text"]
if "problems" in data:
tojson["problems"] = data["problems"]
if debug:
tojson["debug"] = data
if tojson['status'] == 'waiting':
tojson["text"] = _("<b>Your submission has been sent...</b>")
elif tojson["result"] == "failed":
tojson["text"] = _("There are some errors in your answer. Your score is {score}%.").format(score=data["grade"])
elif tojson["result"] == "success":
tojson["text"] = _("Your answer passed the tests! Your score is {score}%.").format(score=data["grade"])
elif tojson["result"] == "timeout":
tojson["text"] = _("Your submission timed out. Your score is {score}%.").format(score=data["grade"])
elif tojson["result"] == "overflow":
tojson["text"] = _("Your submission made an overflow. Your score is {score}%.").format(score=data["grade"])
elif tojson["result"] == "killed":
tojson["text"] = _("Your submission was killed.")
else:
tojson["text"] = _("An internal error occurred. Please retry later. "
"If the error persists, send an email to the course administrator.")
tojson["text"] = "<b>" + tojson["text"] + " " + _("[Submission #{submissionid}]").format(submissionid=data["_id"]) + "</b>" + data.get("text", "")
tojson["text"] = self.plugin_manager.call_hook_recursive("feedback_text", task=task, submission=data, text=tojson["text"])["text"]
if reloading:
# Set status='ok' because we are reloading an old submission.
tojson["status"] = 'ok'
# And also include input
tojson["input"] = data.get('input', {})
if "tests" in data:
tojson["tests"] = {}
if tags:
for tag in tags[0]+tags[1]: # Tags only visible for admins should not appear in the json for students.
if (tag.is_visible_for_student() or debug) and tag.get_id() in data["tests"]:
tojson["tests"][tag.get_id()] = data["tests"][tag.get_id()]
if debug: #We add also auto tags when we are admin
for tag in data["tests"]:
if tag.startswith("*auto-tag-"):
tojson["tests"][tag] = data["tests"][tag]
# allow plugins to insert javascript to be run in the browser after the submission is loaded
tojson["feedback_script"] = "".join(self.plugin_manager.call_hook("feedback_script", task=task, submission=data))
return json.dumps(tojson, default=str) | Converts a submission to json (keeps only needed fields) | Below is the the instruction that describes the task:
### Input:
Converts a submission to json (keeps only needed fields)
### Response:
def submission_to_json(self, task, data, debug, reloading=False, replace=False, tags={}):
    """ Converts a submission to json (keeps only needed fields)

    :param task: task the submission belongs to (forwarded to plugin hooks)
    :param data: the submission document (dict)
    :param debug: if True, include the raw submission and auto tags
    :param reloading: True when re-displaying an old submission; forces
        status 'ok' and includes the original input
    :param replace: whether this submission replaces the evaluated one
    :param tags: tag containers; despite the ``{}`` default the code below
        does ``tags[0]+tags[1]``
    :returns: a JSON string with only the fields the frontend needs
    """
    # NOTE(review): mutable default ``tags={}`` is never mutated here, and the
    # indexing suggests callers pass a 2-tuple of tag lists -- confirm.
    if "ssh_host" in data:
        # A debug SSH session is active: only connection info is relevant.
        return json.dumps({'status': "waiting", 'text': "<b>SSH server active</b>",
                           'ssh_host': data["ssh_host"], 'ssh_port': data["ssh_port"],
                           'ssh_password': data["ssh_password"]})

    # Here we are waiting. Let's send some useful information.
    waiting_data = self.submission_manager.get_job_queue_info(data["jobid"]) if "jobid" in data else None
    if waiting_data is not None and not reloading:
        # nb_tasks_before == -1 means the job is already being graded.
        nb_tasks_before, approx_wait_time = waiting_data
        wait_time = round(approx_wait_time)
        if nb_tasks_before == -1 and wait_time <= 0:
            text = _("<b>INGInious is currently grading your answers.<b/> (almost done)")
        elif nb_tasks_before == -1:
            text = _("<b>INGInious is currently grading your answers.<b/> (Approx. wait time: {} seconds)").format(
                wait_time)
        elif nb_tasks_before == 0:
            text = _("<b>You are next in the waiting queue!</b>")
        elif nb_tasks_before == 1:
            text = _("<b>There is one task in front of you in the waiting queue.</b>")
        else:
            text = _("<b>There are {} tasks in front of you in the waiting queue.</b>").format(nb_tasks_before)
        return json.dumps({'status': "waiting", 'text': text})

    # Base payload: only the fields the frontend needs.
    tojson = {
        'status': data['status'],
        'result': data.get('result', 'crash'),
        'id': str(data["_id"]),
        'submitted_on': str(data['submitted_on']),
        'grade': str(data.get("grade", 0.0)),
        'replace': replace and not reloading  # Replace the evaluated submission
    }
    if "text" in data:
        tojson["text"] = data["text"]
    if "problems" in data:
        tojson["problems"] = data["problems"]
    if debug:
        # Expose the full raw submission for admins.
        tojson["debug"] = data
    # Human-readable summary depending on the grading result.
    if tojson['status'] == 'waiting':
        tojson["text"] = _("<b>Your submission has been sent...</b>")
    elif tojson["result"] == "failed":
        tojson["text"] = _("There are some errors in your answer. Your score is {score}%.").format(score=data["grade"])
    elif tojson["result"] == "success":
        tojson["text"] = _("Your answer passed the tests! Your score is {score}%.").format(score=data["grade"])
    elif tojson["result"] == "timeout":
        tojson["text"] = _("Your submission timed out. Your score is {score}%.").format(score=data["grade"])
    elif tojson["result"] == "overflow":
        tojson["text"] = _("Your submission made an overflow. Your score is {score}%.").format(score=data["grade"])
    elif tojson["result"] == "killed":
        tojson["text"] = _("Your submission was killed.")
    else:
        tojson["text"] = _("An internal error occurred. Please retry later. "
                           "If the error persists, send an email to the course administrator.")
    # Prefix with the submission id and append any grader-provided text,
    # then let plugins rewrite the whole feedback text.
    tojson["text"] = "<b>" + tojson["text"] + " " + _("[Submission #{submissionid}]").format(submissionid=data["_id"]) + "</b>" + data.get("text", "")
    tojson["text"] = self.plugin_manager.call_hook_recursive("feedback_text", task=task, submission=data, text=tojson["text"])["text"]

    if reloading:
        # Set status='ok' because we are reloading an old submission.
        tojson["status"] = 'ok'
        # And also include input
        tojson["input"] = data.get('input', {})

    if "tests" in data:
        tojson["tests"] = {}
        if tags:
            for tag in tags[0]+tags[1]:  # Tags only visible for admins should not appear in the json for students.
                if (tag.is_visible_for_student() or debug) and tag.get_id() in data["tests"]:
                    tojson["tests"][tag.get_id()] = data["tests"][tag.get_id()]
        if debug:  # We add also auto tags when we are admin
            for tag in data["tests"]:
                if tag.startswith("*auto-tag-"):
                    tojson["tests"][tag] = data["tests"][tag]

    # allow plugins to insert javascript to be run in the browser after the submission is loaded
    tojson["feedback_script"] = "".join(self.plugin_manager.call_hook("feedback_script", task=task, submission=data))

    return json.dumps(tojson, default=str)
def install_from_arguments(self, arguments, **kw):
"""
Download, unpack, build and install the specified requirements.
This function is a simple wrapper for :func:`get_requirements()`,
:func:`install_requirements()` and :func:`cleanup_temporary_directories()`
that implements the default behavior of the pip accelerator. If you're
extending or embedding pip-accel you may want to call the underlying
methods instead.
If the requirement set includes wheels and ``setuptools >= 0.8`` is not
yet installed, it will be added to the requirement set and installed
together with the other requirement(s) in order to enable the usage of
distributions installed from wheels (their metadata is different).
:param arguments: The command line arguments to ``pip install ..`` (a
list of strings).
:param kw: Any keyword arguments are passed on to
:func:`install_requirements()`.
:returns: The result of :func:`install_requirements()`.
"""
try:
requirements = self.get_requirements(arguments, use_wheels=self.arguments_allow_wheels(arguments))
have_wheels = any(req.is_wheel for req in requirements)
if have_wheels and not self.setuptools_supports_wheels():
logger.info("Preparing to upgrade to setuptools >= 0.8 to enable wheel support ..")
requirements.extend(self.get_requirements(['setuptools >= 0.8']))
if requirements:
if '--user' in arguments:
from site import USER_BASE
kw.setdefault('prefix', USER_BASE)
return self.install_requirements(requirements, **kw)
else:
logger.info("Nothing to do! (requirements already installed)")
return 0
finally:
self.cleanup_temporary_directories() | Download, unpack, build and install the specified requirements.
This function is a simple wrapper for :func:`get_requirements()`,
:func:`install_requirements()` and :func:`cleanup_temporary_directories()`
that implements the default behavior of the pip accelerator. If you're
extending or embedding pip-accel you may want to call the underlying
methods instead.
If the requirement set includes wheels and ``setuptools >= 0.8`` is not
yet installed, it will be added to the requirement set and installed
together with the other requirement(s) in order to enable the usage of
distributions installed from wheels (their metadata is different).
:param arguments: The command line arguments to ``pip install ..`` (a
list of strings).
:param kw: Any keyword arguments are passed on to
:func:`install_requirements()`.
:returns: The result of :func:`install_requirements()`. | Below is the the instruction that describes the task:
### Input:
Download, unpack, build and install the specified requirements.
This function is a simple wrapper for :func:`get_requirements()`,
:func:`install_requirements()` and :func:`cleanup_temporary_directories()`
that implements the default behavior of the pip accelerator. If you're
extending or embedding pip-accel you may want to call the underlying
methods instead.
If the requirement set includes wheels and ``setuptools >= 0.8`` is not
yet installed, it will be added to the requirement set and installed
together with the other requirement(s) in order to enable the usage of
distributions installed from wheels (their metadata is different).
:param arguments: The command line arguments to ``pip install ..`` (a
list of strings).
:param kw: Any keyword arguments are passed on to
:func:`install_requirements()`.
:returns: The result of :func:`install_requirements()`.
### Response:
def install_from_arguments(self, arguments, **kw):
    """
    Download, unpack, build and install the specified requirements.

    This is a convenience wrapper around :func:`get_requirements()`,
    :func:`install_requirements()` and :func:`cleanup_temporary_directories()`
    implementing the default pip-accel behavior; call those methods directly
    when extending or embedding pip-accel.

    If the requirement set includes wheels and ``setuptools >= 0.8`` is not
    yet installed, it is appended to the requirement set and installed
    alongside the other requirement(s), because distributions installed from
    wheels carry different metadata.

    :param arguments: The command line arguments to ``pip install ..`` (a
                      list of strings).
    :param kw: Any keyword arguments are passed on to
               :func:`install_requirements()`.
    :returns: The result of :func:`install_requirements()`.
    """
    try:
        use_wheels = self.arguments_allow_wheels(arguments)
        requirements = self.get_requirements(arguments, use_wheels=use_wheels)
        needs_newer_setuptools = (
            any(requirement.is_wheel for requirement in requirements)
            and not self.setuptools_supports_wheels())
        if needs_newer_setuptools:
            logger.info("Preparing to upgrade to setuptools >= 0.8 to enable wheel support ..")
            requirements.extend(self.get_requirements(['setuptools >= 0.8']))
        if not requirements:
            logger.info("Nothing to do! (requirements already installed)")
            return 0
        if '--user' in arguments:
            from site import USER_BASE
            kw.setdefault('prefix', USER_BASE)
        return self.install_requirements(requirements, **kw)
    finally:
        # Always clean up, whether installation succeeded or raised.
        self.cleanup_temporary_directories()
def _symlink_to_workdir(data, key):
"""For CWL support, symlink files into a working directory if in read-only imports.
"""
orig_file = tz.get_in(key, data)
if orig_file and not orig_file.startswith(dd.get_work_dir(data)):
variantcaller = genotype.get_variantcaller(data, require_bam=False)
if not variantcaller:
variantcaller = "precalled"
out_file = os.path.join(dd.get_work_dir(data), variantcaller, os.path.basename(orig_file))
utils.safe_makedir(os.path.dirname(out_file))
utils.symlink_plus(orig_file, out_file)
data = tz.update_in(data, key, lambda x: out_file)
return data | For CWL support, symlink files into a working directory if in read-only imports. | Below is the the instruction that describes the task:
### Input:
For CWL support, symlink files into a working directory if in read-only imports.
### Response:
def _symlink_to_workdir(data, key):
    """For CWL support, symlink files into a working directory if in read-only imports.

    :param data: sample data dictionary
    :param key: path (sequence of keys) into ``data`` locating the file
    :returns: ``data`` with the path rewritten to the symlink when one was made
    """
    orig_file = tz.get_in(key, data)
    # Only relocate when the file exists and lives outside the work dir.
    if orig_file and not orig_file.startswith(dd.get_work_dir(data)):
        variantcaller = genotype.get_variantcaller(data, require_bam=False)
        if not variantcaller:
            # No caller configured: group pre-called inputs together.
            variantcaller = "precalled"
        out_file = os.path.join(dd.get_work_dir(data), variantcaller, os.path.basename(orig_file))
        utils.safe_makedir(os.path.dirname(out_file))
        # symlink_plus presumably also links companion files (e.g. indexes)
        # -- confirm in bcbio utils.
        utils.symlink_plus(orig_file, out_file)
        data = tz.update_in(data, key, lambda x: out_file)
    return data
def start(endpoint='data.logentries.com',
port=10000,
token=None,
tag='salt/engines/logentries'):
'''
Listen to salt events and forward them to Logentries
'''
if __opts__.get('id').endswith('_master'):
event_bus = salt.utils.event.get_master_event(
__opts__,
__opts__['sock_dir'],
listen=True)
else:
event_bus = salt.utils.event.get_event(
'minion',
transport=__opts__['transport'],
opts=__opts__,
sock_dir=__opts__['sock_dir'],
listen=True)
log.debug('Logentries engine started')
try:
val = uuid.UUID(token)
except ValueError:
log.warning('Not a valid logentries token')
appender = SocketAppender(verbose=False, LE_API=endpoint, LE_PORT=port)
appender.reopen_connection()
while True:
event = event_bus.get_event()
if event:
# future lint: disable=blacklisted-function
msg = str(' ').join((
salt.utils.stringutils.to_str(token),
salt.utils.stringutils.to_str(tag),
salt.utils.json.dumps(event)
))
# future lint: enable=blacklisted-function
appender.put(msg)
appender.close_connection() | Listen to salt events and forward them to Logentries | Below is the the instruction that describes the task:
### Input:
Listen to salt events and forward them to Logentries
### Response:
def start(endpoint='data.logentries.com',
          port=10000,
          token=None,
          tag='salt/engines/logentries'):
    '''
    Listen to salt events and forward them to Logentries
    '''
    # Pick the master or minion event bus based on the configured id.
    if __opts__.get('id').endswith('_master'):
        event_bus = salt.utils.event.get_master_event(
            __opts__,
            __opts__['sock_dir'],
            listen=True)
    else:
        event_bus = salt.utils.event.get_event(
            'minion',
            transport=__opts__['transport'],
            opts=__opts__,
            sock_dir=__opts__['sock_dir'],
            listen=True)
    log.debug('Logentries engine started')

    # Validate the token format; an invalid token is logged but not fatal,
    # the engine keeps running. (The previous unused ``val = ...`` binding
    # has been dropped.)
    try:
        uuid.UUID(token)
    except ValueError:
        log.warning('Not a valid logentries token')

    appender = SocketAppender(verbose=False, LE_API=endpoint, LE_PORT=port)
    appender.reopen_connection()

    # The event loop runs until the process is terminated; there is no clean
    # shutdown path, so the unreachable close_connection() call that used to
    # follow the loop has been removed.
    while True:
        event = event_bus.get_event()
        if event:
            # future lint: disable=blacklisted-function
            msg = str(' ').join((
                salt.utils.stringutils.to_str(token),
                salt.utils.stringutils.to_str(tag),
                salt.utils.json.dumps(event)
            ))
            # future lint: enable=blacklisted-function
            appender.put(msg)
def switch_domain(self, domain):
"""
Switch from one domain to another. You can call session.login() with
a domain key value to log directly into the domain of choice or alternatively
switch from domain to domain. The user must have permissions to the domain or
unauthorized will be returned. In addition, when switching domains, you will
be logged out of the current domain to close the connection pool associated
with the previous session. This prevents potentially excessive open
connections to SMC
::
session.login() # Log in to 'Shared Domain'
...
session.switch_domain('MyDomain')
:raises SMCConnectionError: Error logging in to specified domain.
This typically means the domain either doesn't exist or the
user does not have privileges to that domain.
"""
if self.domain != domain:
if self in self.manager: # Exit current domain
self.logout()
logger.info('Switching to domain: %r and creating new session', domain)
params = self.copy()
params.update(domain=domain)
self.login(**params) | Switch from one domain to another. You can call session.login() with
a domain key value to log directly into the domain of choice or alternatively
switch from domain to domain. The user must have permissions to the domain or
unauthorized will be returned. In addition, when switching domains, you will
be logged out of the current domain to close the connection pool associated
with the previous session. This prevents potentially excessive open
connections to SMC
::
session.login() # Log in to 'Shared Domain'
...
session.switch_domain('MyDomain')
:raises SMCConnectionError: Error logging in to specified domain.
This typically means the domain either doesn't exist or the
user does not have privileges to that domain. | Below is the the instruction that describes the task:
### Input:
Switch from one domain to another. You can call session.login() with
a domain key value to log directly into the domain of choice or alternatively
switch from domain to domain. The user must have permissions to the domain or
unauthorized will be returned. In addition, when switching domains, you will
be logged out of the current domain to close the connection pool associated
with the previous session. This prevents potentially excessive open
connections to SMC
::
session.login() # Log in to 'Shared Domain'
...
session.switch_domain('MyDomain')
:raises SMCConnectionError: Error logging in to specified domain.
This typically means the domain either doesn't exist or the
user does not have privileges to that domain.
### Response:
def switch_domain(self, domain):
    """
    Switch the active session to a different SMC domain.

    You can call session.login() with a domain key value to log directly
    into the domain of choice, or use this method to move between domains.
    The user must have permissions on the target domain or unauthorized
    will be returned. Switching logs out of the current domain first so
    the previous session's connection pool is closed, which prevents
    potentially excessive open connections to SMC.
    ::

        session.login() # Log in to 'Shared Domain'
        ...
        session.switch_domain('MyDomain')

    :raises SMCConnectionError: Error logging in to specified domain.
        This typically means the domain either doesn't exist or the
        user does not have privileges to that domain.
    """
    # Already in the requested domain; nothing to do
    if self.domain == domain:
        return
    # Close the current domain's connection pool before re-authenticating
    if self in self.manager:
        self.logout()
    logger.info('Switching to domain: %r and creating new session', domain)
    credentials = self.copy()
    credentials.update(domain=domain)
    self.login(**credentials)
def restart(self, timeout=300, config_callback=None):
"""Restart each member of the replica set."""
for member_id in self.server_map:
host = self.server_map[member_id]
server_id = self._servers.host_to_server_id(host)
server = self._servers._storage[server_id]
server.restart(timeout, config_callback)
self.waiting_member_state() | Restart each member of the replica set. | Below is the instruction that describes the task:
### Input:
Restart each member of the replica set.
### Response:
def restart(self, timeout=300, config_callback=None):
    """Restart every server backing a member of the replica set,
    then wait for the members to reach a stable state."""
    for host in self.server_map.values():
        server_id = self._servers.host_to_server_id(host)
        self._servers._storage[server_id].restart(timeout, config_callback)
    self.waiting_member_state()
def email_address(user=None):
"""Return random e-mail address in a hopefully imaginary domain.
If `user` is ``None`` :py:func:`~user_name()` will be used. Otherwise it
will be lowercased and will have spaces replaced with ``_``.
Domain name is created using :py:func:`~domain_name()`.
"""
if not user:
user = user_name()
else:
user = user.strip().replace(' ', '_').lower()
return user + '@' + domain_name() | Return random e-mail address in a hopefully imaginary domain.
If `user` is ``None`` :py:func:`~user_name()` will be used. Otherwise it
will be lowercased and will have spaces replaced with ``_``.
Domain name is created using :py:func:`~domain_name()`. | Below is the instruction that describes the task:
### Input:
Return random e-mail address in a hopefully imaginary domain.
If `user` is ``None`` :py:func:`~user_name()` will be used. Otherwise it
will be lowercased and will have spaces replaced with ``_``.
Domain name is created using :py:func:`~domain_name()`.
### Response:
def email_address(user=None):
    """Return random e-mail address in a hopefully imaginary domain.

    If `user` is ``None``, :py:func:`~user_name()` supplies the local
    part; otherwise the given value is stripped, lowercased, and has
    spaces replaced with ``_``. The domain is produced by
    :py:func:`~domain_name()`.
    """
    local_part = (user.strip().replace(' ', '_').lower()
                  if user else user_name())
    return local_part + '@' + domain_name()
def _remove_persistent_module(mod, comment):
'''
Remove module from configuration file. If comment is true only comment line
where module is.
'''
conf = _get_modules_conf()
mod_name = _strip_module_name(mod)
if not mod_name or mod_name not in mod_list(True):
return set()
escape_mod = re.escape(mod)
if comment:
__salt__['file.comment'](conf, '^[\t ]*{0}[\t ]?'.format(escape_mod))
else:
__salt__['file.sed'](conf, '^[\t ]*{0}[\t ]?'.format(escape_mod), '')
return set([mod_name]) | Remove module from configuration file. If comment is true only comment line
where module is. | Below is the instruction that describes the task:
### Input:
Remove module from configuration file. If comment is true only comment line
where module is.
### Response:
def _remove_persistent_module(mod, comment):
    '''
    Remove module from configuration file. If comment is true only comment line
    where module is.
    '''
    conf = _get_modules_conf()
    mod_name = _strip_module_name(mod)
    # Nothing to do if the name is empty or the module is not persistent
    if not mod_name or mod_name not in mod_list(True):
        return set()
    # Anchor at line start, allow leading whitespace, match the raw name
    pattern = '^[\t ]*{0}[\t ]?'.format(re.escape(mod))
    if comment:
        __salt__['file.comment'](conf, pattern)
    else:
        __salt__['file.sed'](conf, pattern, '')
    return {mod_name}
def run_state_estimation(data, clusters, dist='Poiss', reps=1, **kwargs):
"""
Runs state estimation for multiple initializations, returning the result with the highest log-likelihood. All the arguments are passed to the underlying state estimation functions (poisson_estimate_state, nb_estimate_state, zip_estimate_state).
Args:
data (array): genes x cells
clusters (int): number of mixture components. If this is set to 0, this is automatically estimated using gap score.
dist (str, optional): Distribution used in state estimation. Options: 'Poiss', 'NB', 'ZIP', 'LogNorm', 'Gaussian'. Default: 'Poiss'
reps (int, optional): number of times to run the state estimation, taking the result with the highest log-likelihood.
**kwargs: arguments to pass to the underlying state estimation function.
Returns:
M (array): genes x clusters - state means
W (array): clusters x cells - state mixing components for each cell
ll (float): final log-likelihood
"""
clusters = int(clusters)
func = poisson_estimate_state
dist = dist.lower()
if dist=='poiss' or dist=='poisson':
pass
elif dist=='nb':
func = nb_estimate_state
elif dist=='zip':
func = zip_estimate_state
elif dist=='lognorm' or dist=='log-normal' or dist=='lognormal':
func = log_norm_nmf
elif dist=='gaussian' or dist=='norm' or dist=='normal':
func = norm_nmf
else:
print('dist should be one of Poiss, NB, ZIP, LogNorm, or Gaussian. Using Poiss.')
# TODO: estimate number of clusters
if clusters == 0:
from .gap_score import run_gap_k_selection, preproc_data
data_tsvd = preproc_data(data, gene_subset=False)
max_k, gap_vals, sk_vals = run_gap_k_selection(data_tsvd,
k_min=1, k_max=50, skip=5, B=6)
clusters = min(max_k, data.shape[0] - 1, data.shape[1] - 1)
best_ll = np.inf
best_M = None
best_W = None
for i in range(reps):
results = func(data, clusters, **kwargs)
M = results[0]
W = results[1]
if dist=='NB':
ll = results[3]
else:
ll = results[2]
if ll < best_ll:
best_ll = ll
best_M = M
best_W = W
return best_M, best_W, best_ll | Runs state estimation for multiple initializations, returning the result with the highest log-likelihood. All the arguments are passed to the underlying state estimation functions (poisson_estimate_state, nb_estimate_state, zip_estimate_state).
Args:
data (array): genes x cells
clusters (int): number of mixture components. If this is set to 0, this is automatically estimated using gap score.
dist (str, optional): Distribution used in state estimation. Options: 'Poiss', 'NB', 'ZIP', 'LogNorm', 'Gaussian'. Default: 'Poiss'
reps (int, optional): number of times to run the state estimation, taking the result with the highest log-likelihood.
**kwargs: arguments to pass to the underlying state estimation function.
Returns:
M (array): genes x clusters - state means
W (array): clusters x cells - state mixing components for each cell
ll (float): final log-likelihood | Below is the instruction that describes the task:
### Input:
Runs state estimation for multiple initializations, returning the result with the highest log-likelihood. All the arguments are passed to the underlying state estimation functions (poisson_estimate_state, nb_estimate_state, zip_estimate_state).
Args:
data (array): genes x cells
clusters (int): number of mixture components. If this is set to 0, this is automatically estimated using gap score.
dist (str, optional): Distribution used in state estimation. Options: 'Poiss', 'NB', 'ZIP', 'LogNorm', 'Gaussian'. Default: 'Poiss'
reps (int, optional): number of times to run the state estimation, taking the result with the highest log-likelihood.
**kwargs: arguments to pass to the underlying state estimation function.
Returns:
M (array): genes x clusters - state means
W (array): clusters x cells - state mixing components for each cell
ll (float): final log-likelihood
### Response:
def run_state_estimation(data, clusters, dist='Poiss', reps=1, **kwargs):
    """
    Runs state estimation for multiple initializations, returning the result with the best log-likelihood. All the arguments are passed to the underlying state estimation functions (poisson_estimate_state, nb_estimate_state, zip_estimate_state, log_norm_nmf, norm_nmf).
    Args:
        data (array): genes x cells
        clusters (int): number of mixture components. If this is set to 0, this is automatically estimated using gap score.
        dist (str, optional): Distribution used in state estimation. Options: 'Poiss', 'NB', 'ZIP', 'LogNorm', 'Gaussian'. Default: 'Poiss'
        reps (int, optional): number of times to run the state estimation, keeping the result with the best log-likelihood.
        **kwargs: arguments to pass to the underlying state estimation function.
    Returns:
        M (array): genes x clusters - state means
        W (array): clusters x cells - state mixing components for each cell
        ll (float): final log-likelihood
    """
    clusters = int(clusters)
    func = poisson_estimate_state
    dist = dist.lower()
    if dist == 'poiss' or dist == 'poisson':
        pass
    elif dist == 'nb':
        func = nb_estimate_state
    elif dist == 'zip':
        func = zip_estimate_state
    elif dist == 'lognorm' or dist == 'log-normal' or dist == 'lognormal':
        func = log_norm_nmf
    elif dist == 'gaussian' or dist == 'norm' or dist == 'normal':
        func = norm_nmf
    else:
        print('dist should be one of Poiss, NB, ZIP, LogNorm, or Gaussian. Using Poiss.')
    if clusters == 0:
        # Estimate the number of clusters using the gap statistic,
        # capped so it stays below both data dimensions.
        from .gap_score import run_gap_k_selection, preproc_data
        data_tsvd = preproc_data(data, gene_subset=False)
        max_k, gap_vals, sk_vals = run_gap_k_selection(data_tsvd,
                k_min=1, k_max=50, skip=5, B=6)
        clusters = min(max_k, data.shape[0] - 1, data.shape[1] - 1)
    best_ll = np.inf
    best_M = None
    best_W = None
    for i in range(reps):
        results = func(data, clusters, **kwargs)
        M = results[0]
        W = results[1]
        # BUG FIX: dist was lowercased above, so the original comparison
        # against 'NB' could never match and nb_estimate_state's
        # log-likelihood at index 3 was never selected.
        if dist == 'nb':
            ll = results[3]
        else:
            ll = results[2]
        # NOTE(review): this keeps the *smallest* ll (best_ll starts at
        # np.inf) while the docstring speaks of the best log-likelihood --
        # presumably the estimators return a negative log-likelihood or
        # cost; confirm against their implementations.
        if ll < best_ll:
            best_ll = ll
            best_M = M
            best_W = W
    return best_M, best_W, best_ll
def _push_configs_to_nvram(self):
"""
Push the startup-config and private-config content to the NVRAM.
"""
startup_config_content = self.startup_config_content
if startup_config_content:
nvram_file = self._nvram_file()
try:
if not os.path.exists(nvram_file):
open(nvram_file, "a").close()
nvram_content = None
else:
with open(nvram_file, "rb") as file:
nvram_content = file.read()
except OSError as e:
raise IOUError("Cannot read nvram file {}: {}".format(nvram_file, e))
startup_config_content = startup_config_content.encode("utf-8")
private_config_content = self.private_config_content
if private_config_content is not None:
private_config_content = private_config_content.encode("utf-8")
try:
nvram_content = nvram_import(nvram_content, startup_config_content, private_config_content, self.nvram)
except ValueError as e:
raise IOUError("Cannot push configs to nvram {}: {}".format(nvram_file, e))
try:
with open(nvram_file, "wb") as file:
file.write(nvram_content)
except OSError as e:
raise IOUError("Cannot write nvram file {}: {}".format(nvram_file, e)) | Push the startup-config and private-config content to the NVRAM. | Below is the the instruction that describes the task:
### Input:
Push the startup-config and private-config content to the NVRAM.
### Response:
def _push_configs_to_nvram(self):
    """
    Push the startup-config and private-config content to the NVRAM.

    Reads the current NVRAM file (creating an empty one if it does not
    exist yet), merges the startup-config and optional private-config
    into it via nvram_import, and writes the result back.

    :raises IOUError: if the NVRAM file cannot be read or written, or if
        the configs cannot be imported into the NVRAM content.
    """
    startup_config_content = self.startup_config_content
    # A falsy startup-config means there is nothing to push; the NVRAM
    # file is left untouched in that case.
    if startup_config_content:
        nvram_file = self._nvram_file()
        try:
            if not os.path.exists(nvram_file):
                # Create an empty NVRAM file so the later write succeeds
                open(nvram_file, "a").close()
                nvram_content = None
            else:
                with open(nvram_file, "rb") as file:
                    nvram_content = file.read()
        except OSError as e:
            raise IOUError("Cannot read nvram file {}: {}".format(nvram_file, e))
        # Configs are stored in the NVRAM as UTF-8 bytes
        startup_config_content = startup_config_content.encode("utf-8")
        private_config_content = self.private_config_content
        if private_config_content is not None:
            private_config_content = private_config_content.encode("utf-8")
        try:
            # NOTE(review): nvram_content is None when the file was just
            # created -- presumably nvram_import builds fresh NVRAM
            # content in that case; confirm against its implementation.
            nvram_content = nvram_import(nvram_content, startup_config_content, private_config_content, self.nvram)
        except ValueError as e:
            raise IOUError("Cannot push configs to nvram {}: {}".format(nvram_file, e))
        try:
            with open(nvram_file, "wb") as file:
                file.write(nvram_content)
        except OSError as e:
            raise IOUError("Cannot write nvram file {}: {}".format(nvram_file, e))
def _initTables(self, cursor, deleteOldVersions, recreate):
  """ Initialize tables, if needed

  Creates the database (if necessary), the 'jobs' and 'models' tables
  (if missing), and caches the table field names plus the derived
  public/DB name mappings and namedtuple classes on self._jobs and
  self._models.

  Parameters:
  ----------------------------------------------------------------
  cursor:            SQL cursor
  deleteOldVersions: if true, delete any old versions of the DB left
                     on the server
  recreate:          if true, recreate the database from scratch even
                     if it already exists.
  """
  # NOTE(review): database/table names are interpolated directly into the
  # SQL below; this is only safe as long as self.dbName and the table
  # names are internally generated and never user-supplied -- confirm.

  # Delete old versions if they exist
  if deleteOldVersions:
    self._logger.info(
        "Dropping old versions of client_jobs DB; called from: %r",
        traceback.format_stack())
    # Drop every versioned DB name below the current _DB_VERSION
    for i in range(self._DB_VERSION):
      cursor.execute('DROP DATABASE IF EXISTS %s' %
                     (self.__getDBNameForVersion(i),))

  # Create the database if necessary
  if recreate:
    self._logger.info(
        "Dropping client_jobs DB %r; called from: %r",
        self.dbName, traceback.format_stack())
    cursor.execute('DROP DATABASE IF EXISTS %s' % (self.dbName))

  cursor.execute('CREATE DATABASE IF NOT EXISTS %s' % (self.dbName))

  # Get the list of tables
  cursor.execute('SHOW TABLES IN %s' % (self.dbName))
  output = cursor.fetchall()
  tableNames = [x[0] for x in output]

  # ------------------------------------------------------------------------
  # Create the jobs table if it doesn't exist
  # Fields that start with '_eng' are intended for private use by the engine
  # and should not be used by the UI
  if 'jobs' not in tableNames:
    self._logger.info("Creating table %r", self.jobsTableName)
    fields = [
      'job_id INT UNSIGNED NOT NULL AUTO_INCREMENT',
        # unique jobID
      'client CHAR(%d)' % (self.CLIENT_MAX_LEN),
        # name of client (UI, StrmMgr, etc.)
      'client_info LONGTEXT',
        # Arbitrary data defined by the client
      'client_key varchar(255)',
        # Foreign key as defined by the client.
      'cmd_line LONGTEXT',
        # command line to use to launch each worker process
      'params LONGTEXT',
        # JSON encoded params for the job, for use by the worker processes
      'job_hash BINARY(%d) DEFAULT NULL' % (self.HASH_MAX_LEN),
        # unique hash of the job, provided by the client. Used for detecting
        # identical job requests from the same client when they use the
        # jobInsertUnique() method.
      'status VARCHAR(16) DEFAULT "notStarted"',
        # One of the STATUS_XXX enumerated value strings
      'completion_reason VARCHAR(16)',
        # One of the CMPL_REASON_XXX enumerated value strings.
        # NOTE: This is the job completion reason according to the hadoop
        #  job-tracker. A success here does not necessarily mean the
        #  workers were "happy" with the job. To see if the workers
        #  failed, check the worker_completion_reason
      'completion_msg LONGTEXT',
        # Why this job completed, according to job-tracker
      'worker_completion_reason VARCHAR(16) DEFAULT "%s"' % \
          self.CMPL_REASON_SUCCESS,
        # One of the CMPL_REASON_XXX enumerated value strings. This is
        # may be changed to CMPL_REASON_ERROR if any workers encounter
        # an error while running the job.
      'worker_completion_msg LONGTEXT',
        # Why this job completed, according to workers. If
        # worker_completion_reason is set to CMPL_REASON_ERROR, this will
        # contain the error information.
      'cancel BOOLEAN DEFAULT FALSE',
        # set by UI, polled by engine
      'start_time DATETIME DEFAULT NULL',
        # When job started
      'end_time DATETIME DEFAULT NULL',
        # When job ended
      'results LONGTEXT',
        # JSON dict with general information about the results of the job,
        # including the ID and value of the best model
        # TODO: different semantics for results field of ProductionJob
      '_eng_job_type VARCHAR(32)',
        # String used to specify the type of job that this is. Current
        # choices are hypersearch, production worker, or stream worker
      'minimum_workers INT UNSIGNED DEFAULT 0',
        # min number of desired workers at a time. If 0, no workers will be
        # allocated in a crunch
      'maximum_workers INT UNSIGNED DEFAULT 0',
        # max number of desired workers at a time. If 0, then use as many
        # as practical given load on the cluster.
      'priority INT DEFAULT %d' % self.DEFAULT_JOB_PRIORITY,
        # job scheduling priority; 0 is the default priority (
        # ClientJobsDAO.DEFAULT_JOB_PRIORITY); positive values are higher
        # priority (up to ClientJobsDAO.MAX_JOB_PRIORITY), and negative
        # values are lower priority (down to ClientJobsDAO.MIN_JOB_PRIORITY)
      '_eng_allocate_new_workers BOOLEAN DEFAULT TRUE',
        # Should the scheduling algorithm allocate new workers to this job?
        # If a specialized worker willingly gives up control, we set this
        # field to FALSE to avoid allocating new workers.
      '_eng_untended_dead_workers BOOLEAN DEFAULT FALSE',
        # If a specialized worker fails or is killed by the scheduler, we
        # set this field to TRUE to indicate that the worker is dead
      'num_failed_workers INT UNSIGNED DEFAULT 0',
        # The number of failed specialized workers for this job. If the
        # number of failures is >= max.failed.attempts, we mark the job
        # as failed
      'last_failed_worker_error_msg LONGTEXT',
        # Error message of the most recent specialized failed worker
      '_eng_cleaning_status VARCHAR(16) DEFAULT "%s"' % \
          self.CLEAN_NOT_DONE,
        # Has the job been garbage collected, this includes removing
        # unneeded # model output caches, s3 checkpoints.
      'gen_base_description LONGTEXT',
        # The contents of the generated description.py file from hypersearch
        # requests. This is generated by the Hypersearch workers and stored
        # here for reference, debugging, and development purposes.
      'gen_permutations LONGTEXT',
        # The contents of the generated permutations.py file from
        # hypersearch requests. This is generated by the Hypersearch workers
        # and stored here for reference, debugging, and development
        # purposes.
      '_eng_last_update_time DATETIME DEFAULT NULL',
        # time stamp of last update, used for detecting stalled jobs
      '_eng_cjm_conn_id INT UNSIGNED',
        # ID of the CJM starting up this job
      '_eng_worker_state LONGTEXT',
        # JSON encoded state of the hypersearch in progress, for private
        # use by the Hypersearch workers
      '_eng_status LONGTEXT',
        # String used for status messages sent from the engine for
        # informative purposes only. Usually printed periodically by
        # clients watching a job progress.
      '_eng_model_milestones LONGTEXT',
        # JSON encoded object with information about global model milestone
        # results
      'PRIMARY KEY (job_id)',
      'UNIQUE INDEX (client, job_hash)',
      'INDEX (status)',
      'INDEX (client_key)'
      ]
    options = [
      'AUTO_INCREMENT=1000',
      ]
    query = 'CREATE TABLE IF NOT EXISTS %s (%s) %s' % \
              (self.jobsTableName, ','.join(fields), ','.join(options))
    cursor.execute(query)

  # ------------------------------------------------------------------------
  # Create the models table if it doesn't exist
  # Fields that start with '_eng' are intended for private use by the engine
  # and should not be used by the UI
  if 'models' not in tableNames:
    self._logger.info("Creating table %r", self.modelsTableName)
    fields = [
      'model_id BIGINT UNSIGNED NOT NULL AUTO_INCREMENT',
        # globally unique model ID
      'job_id INT UNSIGNED NOT NULL',
        # jobID
      'params LONGTEXT NOT NULL',
        # JSON encoded params for the model
      'status VARCHAR(16) DEFAULT "notStarted"',
        # One of the STATUS_XXX enumerated value strings
      'completion_reason VARCHAR(16)',
        # One of the CMPL_REASON_XXX enumerated value strings
      'completion_msg LONGTEXT',
        # Why this job completed
      'results LONGTEXT DEFAULT NULL',
        # JSON encoded structure containing metrics produced by the model
      'optimized_metric FLOAT ',
        # Value of the particular metric we are optimizing in hypersearch
      'update_counter INT UNSIGNED DEFAULT 0',
        # incremented by engine every time the results is updated
      'num_records INT UNSIGNED DEFAULT 0',
        # number of records processed so far
      'start_time DATETIME DEFAULT NULL',
        # When this model started being evaluated
      'end_time DATETIME DEFAULT NULL',
        # When this model completed
      'cpu_time FLOAT DEFAULT 0',
        # How much actual CPU time was spent on this model, in seconds. This
        # excludes time the process spent sleeping, or otherwise not
        # actually executing code.
      'model_checkpoint_id LONGTEXT',
        # Checkpoint identifier for this model (after it has been saved)
      'gen_description LONGTEXT',
        # The contents of the generated description.py file from hypersearch
        # requests. This is generated by the Hypersearch workers and stored
        # here for reference, debugging, and development purposes.
      '_eng_params_hash BINARY(%d) DEFAULT NULL' % (self.HASH_MAX_LEN),
        # MD5 hash of the params
      '_eng_particle_hash BINARY(%d) DEFAULT NULL' % (self.HASH_MAX_LEN),
        # MD5 hash of the particle info for PSO algorithm
      '_eng_last_update_time DATETIME DEFAULT NULL',
        # time stamp of last update, used for detecting stalled workers
      '_eng_task_tracker_id TINYBLOB',
        # Hadoop Task Tracker ID
      '_eng_worker_id TINYBLOB',
        # Hadoop Map Task ID
      '_eng_attempt_id TINYBLOB',
        # Hadoop Map task attempt ID
      '_eng_worker_conn_id INT DEFAULT 0',
        # database client connection ID of the worker that is running this
        # model
      '_eng_milestones LONGTEXT',
        # A JSON encoded list of metric values for the model at each
        # milestone point
      '_eng_stop VARCHAR(16) DEFAULT NULL',
        # One of the STOP_REASON_XXX enumerated value strings. Set either by
        # the swarm terminator of either the current, or another
        # Hypersearch worker.
      '_eng_matured BOOLEAN DEFAULT FALSE',
        # Set by the model maturity-checker when it decides that this model
        # has "matured". This means that it has reached the point of
        # not getting better results with more data.
      'PRIMARY KEY (model_id)',
      'UNIQUE INDEX (job_id, _eng_params_hash)',
      'UNIQUE INDEX (job_id, _eng_particle_hash)',
      ]
    options = [
      'AUTO_INCREMENT=1000',
      ]
    query = 'CREATE TABLE IF NOT EXISTS %s (%s) %s' % \
              (self.modelsTableName, ','.join(fields), ','.join(options))
    cursor.execute(query)

  # ---------------------------------------------------------------------
  # Get the field names for each table
  cursor.execute('DESCRIBE %s' % (self.jobsTableName))
  fields = cursor.fetchall()
  self._jobs.dbFieldNames = [str(field[0]) for field in fields]

  cursor.execute('DESCRIBE %s' % (self.modelsTableName))
  fields = cursor.fetchall()
  self._models.dbFieldNames = [str(field[0]) for field in fields]

  # ---------------------------------------------------------------------
  # Generate the public names
  self._jobs.publicFieldNames = [self._columnNameDBToPublic(x)
                                 for x in self._jobs.dbFieldNames]
  self._models.publicFieldNames = [self._columnNameDBToPublic(x)
                                   for x in self._models.dbFieldNames]

  # ---------------------------------------------------------------------
  # Generate the name conversion dicts
  self._jobs.pubToDBNameDict = dict(
    zip(self._jobs.publicFieldNames, self._jobs.dbFieldNames))
  self._jobs.dbToPubNameDict = dict(
    zip(self._jobs.dbFieldNames, self._jobs.publicFieldNames))
  self._models.pubToDBNameDict = dict(
    zip(self._models.publicFieldNames, self._models.dbFieldNames))
  self._models.dbToPubNameDict = dict(
    zip(self._models.dbFieldNames, self._models.publicFieldNames))

  # ---------------------------------------------------------------------
  # Generate the dynamic namedtuple classes we use
  self._models.modelInfoNamedTuple = collections.namedtuple(
    '_modelInfoNamedTuple', self._models.publicFieldNames)

  self._jobs.jobInfoNamedTuple = collections.namedtuple(
    '_jobInfoNamedTuple', self._jobs.publicFieldNames)
return | Initialize tables, if needed
Parameters:
----------------------------------------------------------------
cursor: SQL cursor
deleteOldVersions: if true, delete any old versions of the DB left
on the server
recreate: if true, recreate the database from scratch even
if it already exists. | Below is the instruction that describes the task:
### Input:
Initialize tables, if needed
Parameters:
----------------------------------------------------------------
cursor: SQL cursor
deleteOldVersions: if true, delete any old versions of the DB left
on the server
recreate: if true, recreate the database from scratch even
if it already exists.
### Response:
def _initTables(self, cursor, deleteOldVersions, recreate):
""" Initialize tables, if needed
Parameters:
----------------------------------------------------------------
cursor: SQL cursor
deleteOldVersions: if true, delete any old versions of the DB left
on the server
recreate: if true, recreate the database from scratch even
if it already exists.
"""
# Delete old versions if they exist
if deleteOldVersions:
self._logger.info(
"Dropping old versions of client_jobs DB; called from: %r",
traceback.format_stack())
for i in range(self._DB_VERSION):
cursor.execute('DROP DATABASE IF EXISTS %s' %
(self.__getDBNameForVersion(i),))
# Create the database if necessary
if recreate:
self._logger.info(
"Dropping client_jobs DB %r; called from: %r",
self.dbName, traceback.format_stack())
cursor.execute('DROP DATABASE IF EXISTS %s' % (self.dbName))
cursor.execute('CREATE DATABASE IF NOT EXISTS %s' % (self.dbName))
# Get the list of tables
cursor.execute('SHOW TABLES IN %s' % (self.dbName))
output = cursor.fetchall()
tableNames = [x[0] for x in output]
# ------------------------------------------------------------------------
# Create the jobs table if it doesn't exist
# Fields that start with '_eng' are intended for private use by the engine
# and should not be used by the UI
if 'jobs' not in tableNames:
self._logger.info("Creating table %r", self.jobsTableName)
fields = [
'job_id INT UNSIGNED NOT NULL AUTO_INCREMENT',
# unique jobID
'client CHAR(%d)' % (self.CLIENT_MAX_LEN),
# name of client (UI, StrmMgr, etc.)
'client_info LONGTEXT',
# Arbitrary data defined by the client
'client_key varchar(255)',
# Foreign key as defined by the client.
'cmd_line LONGTEXT',
# command line to use to launch each worker process
'params LONGTEXT',
# JSON encoded params for the job, for use by the worker processes
'job_hash BINARY(%d) DEFAULT NULL' % (self.HASH_MAX_LEN),
# unique hash of the job, provided by the client. Used for detecting
# identical job requests from the same client when they use the
# jobInsertUnique() method.
'status VARCHAR(16) DEFAULT "notStarted"',
# One of the STATUS_XXX enumerated value strings
'completion_reason VARCHAR(16)',
# One of the CMPL_REASON_XXX enumerated value strings.
# NOTE: This is the job completion reason according to the hadoop
# job-tracker. A success here does not necessarily mean the
# workers were "happy" with the job. To see if the workers
# failed, check the worker_completion_reason
'completion_msg LONGTEXT',
# Why this job completed, according to job-tracker
'worker_completion_reason VARCHAR(16) DEFAULT "%s"' % \
self.CMPL_REASON_SUCCESS,
# One of the CMPL_REASON_XXX enumerated value strings. This is
# may be changed to CMPL_REASON_ERROR if any workers encounter
# an error while running the job.
'worker_completion_msg LONGTEXT',
# Why this job completed, according to workers. If
# worker_completion_reason is set to CMPL_REASON_ERROR, this will
# contain the error information.
'cancel BOOLEAN DEFAULT FALSE',
# set by UI, polled by engine
'start_time DATETIME DEFAULT NULL',
# When job started
'end_time DATETIME DEFAULT NULL',
# When job ended
'results LONGTEXT',
# JSON dict with general information about the results of the job,
# including the ID and value of the best model
# TODO: different semantics for results field of ProductionJob
'_eng_job_type VARCHAR(32)',
# String used to specify the type of job that this is. Current
# choices are hypersearch, production worker, or stream worker
'minimum_workers INT UNSIGNED DEFAULT 0',
# min number of desired workers at a time. If 0, no workers will be
# allocated in a crunch
'maximum_workers INT UNSIGNED DEFAULT 0',
# max number of desired workers at a time. If 0, then use as many
# as practical given load on the cluster.
'priority INT DEFAULT %d' % self.DEFAULT_JOB_PRIORITY,
# job scheduling priority; 0 is the default priority (
# ClientJobsDAO.DEFAULT_JOB_PRIORITY); positive values are higher
# priority (up to ClientJobsDAO.MAX_JOB_PRIORITY), and negative
# values are lower priority (down to ClientJobsDAO.MIN_JOB_PRIORITY)
'_eng_allocate_new_workers BOOLEAN DEFAULT TRUE',
# Should the scheduling algorithm allocate new workers to this job?
# If a specialized worker willingly gives up control, we set this
# field to FALSE to avoid allocating new workers.
'_eng_untended_dead_workers BOOLEAN DEFAULT FALSE',
# If a specialized worker fails or is killed by the scheduler, we
# set this feild to TRUE to indicate that the worker is dead
'num_failed_workers INT UNSIGNED DEFAULT 0',
# The number of failed specialized workers for this job. If the
# number of failures is >= max.failed.attempts, we mark the job
# as failed
'last_failed_worker_error_msg LONGTEXT',
# Error message of the most recent specialized failed worker
'_eng_cleaning_status VARCHAR(16) DEFAULT "%s"' % \
self.CLEAN_NOT_DONE,
# Has the job been garbage collected, this includes removing
# unneeded # model output caches, s3 checkpoints.
'gen_base_description LONGTEXT',
# The contents of the generated description.py file from hypersearch
# requests. This is generated by the Hypersearch workers and stored
# here for reference, debugging, and development purposes.
'gen_permutations LONGTEXT',
# The contents of the generated permutations.py file from
# hypersearch requests. This is generated by the Hypersearch workers
# and stored here for reference, debugging, and development
# purposes.
'_eng_last_update_time DATETIME DEFAULT NULL',
# time stamp of last update, used for detecting stalled jobs
'_eng_cjm_conn_id INT UNSIGNED',
# ID of the CJM starting up this job
'_eng_worker_state LONGTEXT',
# JSON encoded state of the hypersearch in progress, for private
# use by the Hypersearch workers
'_eng_status LONGTEXT',
# String used for status messages sent from the engine for
# informative purposes only. Usually printed periodically by
# clients watching a job progress.
'_eng_model_milestones LONGTEXT',
# JSon encoded object with information about global model milestone
# results
'PRIMARY KEY (job_id)',
'UNIQUE INDEX (client, job_hash)',
'INDEX (status)',
'INDEX (client_key)'
]
options = [
'AUTO_INCREMENT=1000',
]
query = 'CREATE TABLE IF NOT EXISTS %s (%s) %s' % \
(self.jobsTableName, ','.join(fields), ','.join(options))
cursor.execute(query)
# ------------------------------------------------------------------------
# Create the models table if it doesn't exist
# Fields that start with '_eng' are intended for private use by the engine
# and should not be used by the UI
if 'models' not in tableNames:
self._logger.info("Creating table %r", self.modelsTableName)
fields = [
'model_id BIGINT UNSIGNED NOT NULL AUTO_INCREMENT',
# globally unique model ID
'job_id INT UNSIGNED NOT NULL',
# jobID
'params LONGTEXT NOT NULL',
# JSON encoded params for the model
'status VARCHAR(16) DEFAULT "notStarted"',
# One of the STATUS_XXX enumerated value strings
'completion_reason VARCHAR(16)',
# One of the CMPL_REASON_XXX enumerated value strings
'completion_msg LONGTEXT',
# Why this job completed
'results LONGTEXT DEFAULT NULL',
# JSON encoded structure containing metrics produced by the model
'optimized_metric FLOAT ',
#Value of the particular metric we are optimizing in hypersearch
'update_counter INT UNSIGNED DEFAULT 0',
# incremented by engine every time the results is updated
'num_records INT UNSIGNED DEFAULT 0',
# number of records processed so far
'start_time DATETIME DEFAULT NULL',
# When this model started being evaluated
'end_time DATETIME DEFAULT NULL',
# When this model completed
'cpu_time FLOAT DEFAULT 0',
# How much actual CPU time was spent on this model, in seconds. This
# excludes time the process spent sleeping, or otherwise not
# actually executing code.
'model_checkpoint_id LONGTEXT',
# Checkpoint identifier for this model (after it has been saved)
'gen_description LONGTEXT',
# The contents of the generated description.py file from hypersearch
# requests. This is generated by the Hypersearch workers and stored
# here for reference, debugging, and development purposes.
'_eng_params_hash BINARY(%d) DEFAULT NULL' % (self.HASH_MAX_LEN),
# MD5 hash of the params
'_eng_particle_hash BINARY(%d) DEFAULT NULL' % (self.HASH_MAX_LEN),
# MD5 hash of the particle info for PSO algorithm
'_eng_last_update_time DATETIME DEFAULT NULL',
# time stamp of last update, used for detecting stalled workers
'_eng_task_tracker_id TINYBLOB',
# Hadoop Task Tracker ID
'_eng_worker_id TINYBLOB',
# Hadoop Map Task ID
'_eng_attempt_id TINYBLOB',
# Hadoop Map task attempt ID
'_eng_worker_conn_id INT DEFAULT 0',
# database client connection ID of the worker that is running this
# model
'_eng_milestones LONGTEXT',
# A JSON encoded list of metric values for the model at each
# milestone point
'_eng_stop VARCHAR(16) DEFAULT NULL',
# One of the STOP_REASON_XXX enumerated value strings. Set either by
# the swarm terminator of either the current, or another
# Hypersearch worker.
'_eng_matured BOOLEAN DEFAULT FALSE',
# Set by the model maturity-checker when it decides that this model
# has "matured". This means that it has reached the point of
# not getting better results with more data.
'PRIMARY KEY (model_id)',
'UNIQUE INDEX (job_id, _eng_params_hash)',
'UNIQUE INDEX (job_id, _eng_particle_hash)',
]
options = [
'AUTO_INCREMENT=1000',
]
query = 'CREATE TABLE IF NOT EXISTS %s (%s) %s' % \
(self.modelsTableName, ','.join(fields), ','.join(options))
cursor.execute(query)
# ---------------------------------------------------------------------
# Get the field names for each table
cursor.execute('DESCRIBE %s' % (self.jobsTableName))
fields = cursor.fetchall()
self._jobs.dbFieldNames = [str(field[0]) for field in fields]
cursor.execute('DESCRIBE %s' % (self.modelsTableName))
fields = cursor.fetchall()
self._models.dbFieldNames = [str(field[0]) for field in fields]
# ---------------------------------------------------------------------
# Generate the public names
self._jobs.publicFieldNames = [self._columnNameDBToPublic(x)
for x in self._jobs.dbFieldNames]
self._models.publicFieldNames = [self._columnNameDBToPublic(x)
for x in self._models.dbFieldNames]
# ---------------------------------------------------------------------
# Generate the name conversion dicts
self._jobs.pubToDBNameDict = dict(
zip(self._jobs.publicFieldNames, self._jobs.dbFieldNames))
self._jobs.dbToPubNameDict = dict(
zip(self._jobs.dbFieldNames, self._jobs.publicFieldNames))
self._models.pubToDBNameDict = dict(
zip(self._models.publicFieldNames, self._models.dbFieldNames))
self._models.dbToPubNameDict = dict(
zip(self._models.dbFieldNames, self._models.publicFieldNames))
# ---------------------------------------------------------------------
# Generate the dynamic namedtuple classes we use
self._models.modelInfoNamedTuple = collections.namedtuple(
'_modelInfoNamedTuple', self._models.publicFieldNames)
self._jobs.jobInfoNamedTuple = collections.namedtuple(
'_jobInfoNamedTuple', self._jobs.publicFieldNames)
return |
def set_lineno(self, lineno, override=False):
    """Assign *lineno* to this node and all of its descendants.

    Only nodes that declare ``lineno`` among their attributes are touched,
    and a node's existing line number is preserved unless *override* is
    true. Traversal is breadth-first. Returns ``self`` for chaining.
    """
    pending = deque([self])
    while pending:
        current = pending.popleft()
        # Skip nodes that do not carry line-number information at all.
        if 'lineno' in current.attributes and (current.lineno is None or override):
            current.lineno = lineno
        pending.extend(current.iter_child_nodes())
    return self
### Input:
Set the line numbers of the node and children.
### Response:
def set_lineno(self, lineno, override=False):
"""Set the line numbers of the node and children."""
todo = deque([self])
while todo:
node = todo.popleft()
if 'lineno' in node.attributes:
if node.lineno is None or override:
node.lineno = lineno
todo.extend(node.iter_child_nodes())
return self |
def age(self):
    """Return the age of the PID file as a tuple of ints.

    The tuple is ``(seconds, minutes, hours, days)``. Each coarser unit
    reports 0 until at least one full unit has elapsed (e.g. hours is 0
    for anything under 3600 seconds).
    """
    # Seconds elapsed since the PID file was created.
    elapsed = time() - self.created()
    # Each unit is clamped to 0 below its threshold, mirroring the
    # "0 until a full unit has passed" convention.
    minutes = (elapsed / 60) if elapsed >= 60 else 0
    hours = (minutes / 60) if elapsed >= 3600 else 0
    days = (hours / 24) if elapsed >= 86400 else 0
    return (int(elapsed), int(minutes), int(hours), int(days))
### Input:
Get the age of the PID file.
### Response:
def age(self):
"""
Get the age of the PID file.
"""
# Created timestamp
created = self.created()
# Age in seconds / minutes / hours / days
age_secs = time() - created
age_mins = 0 if (age_secs < 60) else (age_secs / 60)
age_hours = 0 if (age_secs < 3600) else (age_mins / 60)
age_days = 0 if (age_secs < 86400) else (age_hours / 24)
# Return the age tuple
return (
int(age_secs),
int(age_mins),
int(age_hours),
int(age_days)
) |
def nearest_keys(self, key):
    """Find the nearest keys (L2 distance) via a cKDTree query.

    Parameters
    ----------
    key : tuple or scalar
        Query point. A bare scalar is wrapped into a 1-tuple, since
        ``cKDTree.query`` expects a point, not a coordinate.

    Returns
    -------
    list or object
        The first element of each matching ``self.__keys`` entry whose
        index is valid (out-of-bound indices signal "fewer than
        k_neighbors matches within distance_upper_bound"); when
        ``k_neighbors == 1`` the raw keys entry is returned instead,
        because the query then yields scalars rather than arrays.
    """
    # BUG FIX: the original only assigned _key in the non-tuple branch,
    # so passing a tuple key raised NameError at the query call below.
    _key = key if isinstance(key, tuple) else (key,)
    if self.__stale:
        # Rebuild the tree lazily after any mutation.
        self.generate_tree()
    _distances, idx = self.__tree.query(
        _key, self.k_neighbors, distance_upper_bound=self.distance_upper_bound)
    try:
        return [self.__keys[i][0] for i in idx if i < len(self.__keys)]
    except TypeError:
        # if k_neighbors == 1 the query is not returning arrays
        return self.__keys[idx]
### Input:
Find the nearest_keys (l2 distance) thanks to a cKDTree query
### Response:
def nearest_keys(self, key):
"""Find the nearest_keys (l2 distance) thanks to a cKDTree query"""
if not isinstance(key, tuple):
_key = (key,)
if self.__stale:
self.generate_tree()
d, idx = self.__tree.query(
_key, self.k_neighbors, distance_upper_bound=self.distance_upper_bound)
try:
return [self.__keys[id][0] for id in idx if id < len(self.__keys)]
except TypeError:
            # if k_neighbors == 1 the query is not returning arrays
return self.__keys[idx] |
def tag_instances_on_cluster(cluster_name, project='cwc'):
    """Adds project tag to untagged instances in a given cluster.
    Parameters
    ----------
    cluster_name : str
        The name of the AWS ECS cluster in which running instances
        should be tagged.
    project : str
        The name of the project to tag instances with.
    """
    ecs_client = boto3.client('ecs')
    # Enumerate every task currently running on the cluster; nothing
    # running means nothing to tag.
    arns = ecs_client.list_tasks(cluster=cluster_name)['taskArns']
    if not arns:
        return
    task_descriptions = ecs_client.describe_tasks(
        cluster=cluster_name, tasks=arns)['tasks']
    # Map each task to the container instance hosting it, then resolve
    # those container instances to their backing EC2 instance IDs.
    instance_arns = [task['containerInstanceArn'] for task in task_descriptions]
    described = ecs_client.describe_container_instances(
        cluster=cluster_name,
        containerInstances=instance_arns
    )['containerInstances']
    # Apply the project tag to each backing EC2 instance.
    for container_instance in described:
        tag_instance(container_instance['ec2InstanceId'], project=project)
    return
Parameters
----------
cluster_name : str
The name of the AWS ECS cluster in which running instances
should be tagged.
project : str
The name of the project to tag instances with. | Below is the the instruction that describes the task:
### Input:
Adds project tag to untagged instances in a given cluster.
Parameters
----------
cluster_name : str
The name of the AWS ECS cluster in which running instances
should be tagged.
project : str
The name of the project to tag instances with.
### Response:
def tag_instances_on_cluster(cluster_name, project='cwc'):
"""Adds project tag to untagged instances in a given cluster.
Parameters
----------
cluster_name : str
The name of the AWS ECS cluster in which running instances
should be tagged.
project : str
The name of the project to tag instances with.
"""
# Get the relevant instance ids from the ecs cluster
ecs = boto3.client('ecs')
task_arns = ecs.list_tasks(cluster=cluster_name)['taskArns']
if not task_arns:
return
tasks = ecs.describe_tasks(cluster=cluster_name, tasks=task_arns)['tasks']
container_instances = ecs.describe_container_instances(
cluster=cluster_name,
containerInstances=[task['containerInstanceArn'] for task in tasks]
)['containerInstances']
ec2_instance_ids = [ci['ec2InstanceId'] for ci in container_instances]
# Instantiate each instance to tag as a resource and create project tag
for instance_id in ec2_instance_ids:
tag_instance(instance_id, project=project)
return |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.